xref: /linux/net/bluetooth/hci_sync.c (revision 96b82af3)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * BlueZ - Bluetooth protocol stack for Linux
4  *
5  * Copyright (C) 2021 Intel Corporation
6  * Copyright 2023 NXP
7  */
8 
9 #include <linux/property.h>
10 
11 #include <net/bluetooth/bluetooth.h>
12 #include <net/bluetooth/hci_core.h>
13 #include <net/bluetooth/mgmt.h>
14 
15 #include "hci_codec.h"
16 #include "hci_debugfs.h"
17 #include "smp.h"
18 #include "eir.h"
19 #include "msft.h"
20 #include "aosp.h"
21 #include "leds.h"
22 
23 static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
24 				  struct sk_buff *skb)
25 {
26 	bt_dev_dbg(hdev, "result 0x%2.2x", result);
27 
28 	if (hdev->req_status != HCI_REQ_PEND)
29 		return;
30 
31 	hdev->req_result = result;
32 	hdev->req_status = HCI_REQ_DONE;
33 
34 	/* Free the request command so it is not used as response */
35 	kfree_skb(hdev->req_skb);
36 	hdev->req_skb = NULL;
37 
38 	if (skb) {
39 		struct sock *sk = hci_skb_sk(skb);
40 
41 		/* Drop sk reference if set */
42 		if (sk)
43 			sock_put(sk);
44 
45 		hdev->req_rsp = skb_get(skb);
46 	}
47 
48 	wake_up_interruptible(&hdev->req_wait_q);
49 }
50 
51 struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen,
52 				   const void *param, struct sock *sk)
53 {
54 	int len = HCI_COMMAND_HDR_SIZE + plen;
55 	struct hci_command_hdr *hdr;
56 	struct sk_buff *skb;
57 
58 	skb = bt_skb_alloc(len, GFP_ATOMIC);
59 	if (!skb)
60 		return NULL;
61 
62 	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
63 	hdr->opcode = cpu_to_le16(opcode);
64 	hdr->plen   = plen;
65 
66 	if (plen)
67 		skb_put_data(skb, param, plen);
68 
69 	bt_dev_dbg(hdev, "skb len %d", skb->len);
70 
71 	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
72 	hci_skb_opcode(skb) = opcode;
73 
74 	/* Grab a reference if command needs to be associated with a sock (e.g.
75 	 * likely mgmt socket that initiated the command).
76 	 */
77 	if (sk) {
78 		hci_skb_sk(skb) = sk;
79 		sock_hold(sk);
80 	}
81 
82 	return skb;
83 }
84 
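/* Editor's note (not part of the original file): the skb built above is the
 * raw HCI command packet -- a 3-byte header (16-bit little-endian opcode,
 * 8-bit parameter length) followed by plen parameter bytes.
 */
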
85 static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen,
86 			     const void *param, u8 event, struct sock *sk)
87 {
88 	struct hci_dev *hdev = req->hdev;
89 	struct sk_buff *skb;
90 
91 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
92 
93 	/* If an error occurred during request building, there is no point in
94 	 * queueing the HCI command. We can simply return.
95 	 */
96 	if (req->err)
97 		return;
98 
99 	skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk);
100 	if (!skb) {
101 		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
102 			   opcode);
103 		req->err = -ENOMEM;
104 		return;
105 	}
106 
107 	if (skb_queue_empty(&req->cmd_q))
108 		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
109 
110 	hci_skb_event(skb) = event;
111 
112 	skb_queue_tail(&req->cmd_q, skb);
113 }
114 
115 static int hci_cmd_sync_run(struct hci_request *req)
116 {
117 	struct hci_dev *hdev = req->hdev;
118 	struct sk_buff *skb;
119 	unsigned long flags;
120 
121 	bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q));
122 
123 	/* If an error occurred during request building, remove all HCI
124 	 * commands queued on the HCI request queue.
125 	 */
126 	if (req->err) {
127 		skb_queue_purge(&req->cmd_q);
128 		return req->err;
129 	}
130 
131 	/* Do not allow empty requests */
132 	if (skb_queue_empty(&req->cmd_q))
133 		return -ENODATA;
134 
135 	skb = skb_peek_tail(&req->cmd_q);
136 	bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete;
137 	bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
138 
139 	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
140 	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
141 	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
142 
143 	queue_work(hdev->workqueue, &hdev->cmd_work);
144 
145 	return 0;
146 }
147 
148 static void hci_request_init(struct hci_request *req, struct hci_dev *hdev)
149 {
150 	skb_queue_head_init(&req->cmd_q);
151 	req->hdev = hdev;
152 	req->err = 0;
153 }
154 
155 /* This function requires the caller holds hdev->req_lock. */
156 struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
157 				  const void *param, u8 event, u32 timeout,
158 				  struct sock *sk)
159 {
160 	struct hci_request req;
161 	struct sk_buff *skb;
162 	int err = 0;
163 
164 	bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode);
165 
166 	hci_request_init(&req, hdev);
167 
168 	hci_cmd_sync_add(&req, opcode, plen, param, event, sk);
169 
170 	hdev->req_status = HCI_REQ_PEND;
171 
172 	err = hci_cmd_sync_run(&req);
173 	if (err < 0)
174 		return ERR_PTR(err);
175 
176 	err = wait_event_interruptible_timeout(hdev->req_wait_q,
177 					       hdev->req_status != HCI_REQ_PEND,
178 					       timeout);
179 
180 	if (err == -ERESTARTSYS)
181 		return ERR_PTR(-EINTR);
182 
183 	switch (hdev->req_status) {
184 	case HCI_REQ_DONE:
185 		err = -bt_to_errno(hdev->req_result);
186 		break;
187 
188 	case HCI_REQ_CANCELED:
189 		err = -hdev->req_result;
190 		break;
191 
192 	default:
193 		err = -ETIMEDOUT;
194 		break;
195 	}
196 
197 	hdev->req_status = 0;
198 	hdev->req_result = 0;
199 	skb = hdev->req_rsp;
200 	hdev->req_rsp = NULL;
201 
202 	bt_dev_dbg(hdev, "end: err %d", err);
203 
204 	if (err < 0) {
205 		kfree_skb(skb);
206 		return ERR_PTR(err);
207 	}
208 
209 	return skb;
210 }
211 EXPORT_SYMBOL(__hci_cmd_sync_sk);
212 
213 /* This function requires the caller holds hdev->req_lock. */
214 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
215 			       const void *param, u32 timeout)
216 {
217 	return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL);
218 }
219 EXPORT_SYMBOL(__hci_cmd_sync);
220 
221 /* Send HCI command and wait for command complete event */
222 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
223 			     const void *param, u32 timeout)
224 {
225 	struct sk_buff *skb;
226 
227 	if (!test_bit(HCI_UP, &hdev->flags))
228 		return ERR_PTR(-ENETDOWN);
229 
230 	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
231 
232 	hci_req_sync_lock(hdev);
233 	skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout);
234 	hci_req_sync_unlock(hdev);
235 
236 	return skb;
237 }
238 EXPORT_SYMBOL(hci_cmd_sync);
239 
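/* Editor's example (a hypothetical sketch, not part of the original file):
 * a typical caller issues a command with hci_cmd_sync() and parses the
 * returned event parameters. HCI_OP_READ_LOCAL_VERSION and struct
 * hci_rp_read_local_version are the real definitions from hci.h; the
 * function name is made up.
 */
#if 0	/* illustrative only */
static int example_read_local_version(struct hci_dev *hdev)
{
	struct hci_rp_read_local_version *rp;
	struct sk_buff *skb;

	skb = hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			   HCI_CMD_TIMEOUT);
	if (IS_ERR_OR_NULL(skb))
		return skb ? PTR_ERR(skb) : -ENODATA;

	if (skb->len < sizeof(*rp)) {
		kfree_skb(skb);
		return -EPROTO;
	}

	rp = (void *)skb->data;
	bt_dev_dbg(hdev, "hci ver %u rev %u", rp->hci_ver,
		   le16_to_cpu(rp->hci_rev));

	kfree_skb(skb);
	return 0;
}
#endif
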
240 /* This function requires the caller holds hdev->req_lock. */
241 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
242 				  const void *param, u8 event, u32 timeout)
243 {
244 	return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout,
245 				 NULL);
246 }
247 EXPORT_SYMBOL(__hci_cmd_sync_ev);
248 
249 /* This function requires the caller holds hdev->req_lock. */
250 int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
251 			     const void *param, u8 event, u32 timeout,
252 			     struct sock *sk)
253 {
254 	struct sk_buff *skb;
255 	u8 status;
256 
257 	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
258 	if (IS_ERR(skb)) {
259 		if (!event)
260 			bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
261 				   PTR_ERR(skb));
262 		return PTR_ERR(skb);
263 	}
264 
265 	/* If the command returns a status event, skb will be set to NULL as
266 	 * there are no parameters; in case of failure, IS_ERR(skb) would have
267 	 * been true and the actual error would be found with PTR_ERR(skb).
268 	 */
269 	if (!skb)
270 		return 0;
271 
272 	status = skb->data[0];
273 
274 	kfree_skb(skb);
275 
276 	return status;
277 }
278 EXPORT_SYMBOL(__hci_cmd_sync_status_sk);
279 
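/* Editor's note on the convention above: the *_status() helpers return the
 * negative errno from __hci_cmd_sync_sk() when it failed, 0 when the
 * response carried no parameters, and otherwise the first parameter byte
 * of the response, i.e. the positive HCI status code.
 */
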
280 int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
281 			  const void *param, u32 timeout)
282 {
283 	return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
284 					NULL);
285 }
286 EXPORT_SYMBOL(__hci_cmd_sync_status);
287 
288 int hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
289 			const void *param, u32 timeout)
290 {
291 	int err;
292 
293 	hci_req_sync_lock(hdev);
294 	err = __hci_cmd_sync_status(hdev, opcode, plen, param, timeout);
295 	hci_req_sync_unlock(hdev);
296 
297 	return err;
298 }
299 EXPORT_SYMBOL(hci_cmd_sync_status);
300 
301 static void hci_cmd_sync_work(struct work_struct *work)
302 {
303 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
304 
305 	bt_dev_dbg(hdev, "");
306 
307 	/* Dequeue all entries and run them */
308 	while (1) {
309 		struct hci_cmd_sync_work_entry *entry;
310 
311 		mutex_lock(&hdev->cmd_sync_work_lock);
312 		entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
313 						 struct hci_cmd_sync_work_entry,
314 						 list);
315 		if (entry)
316 			list_del(&entry->list);
317 		mutex_unlock(&hdev->cmd_sync_work_lock);
318 
319 		if (!entry)
320 			break;
321 
322 		bt_dev_dbg(hdev, "entry %p", entry);
323 
324 		if (entry->func) {
325 			int err;
326 
327 			hci_req_sync_lock(hdev);
328 			err = entry->func(hdev, entry->data);
329 			if (entry->destroy)
330 				entry->destroy(hdev, entry->data, err);
331 			hci_req_sync_unlock(hdev);
332 		}
333 
334 		kfree(entry);
335 	}
336 }
337 
338 static void hci_cmd_sync_cancel_work(struct work_struct *work)
339 {
340 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);
341 
342 	cancel_delayed_work_sync(&hdev->cmd_timer);
343 	cancel_delayed_work_sync(&hdev->ncmd_timer);
344 	atomic_set(&hdev->cmd_cnt, 1);
345 
346 	wake_up_interruptible(&hdev->req_wait_q);
347 }
348 
349 static int hci_scan_disable_sync(struct hci_dev *hdev);
350 static int scan_disable_sync(struct hci_dev *hdev, void *data)
351 {
352 	return hci_scan_disable_sync(hdev);
353 }
354 
355 static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
356 {
357 	return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN, 0);
358 }
359 
360 static void le_scan_disable(struct work_struct *work)
361 {
362 	struct hci_dev *hdev = container_of(work, struct hci_dev,
363 					    le_scan_disable.work);
364 	int status;
365 
366 	bt_dev_dbg(hdev, "");
367 	hci_dev_lock(hdev);
368 
369 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
370 		goto _return;
371 
372 	status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL);
373 	if (status) {
374 		bt_dev_err(hdev, "failed to disable LE scan: %d", status);
375 		goto _return;
376 	}
377 
378 	/* If we were running LE only scan, change discovery state. If
379 	 * we were running both LE and BR/EDR inquiry simultaneously,
380 	 * and BR/EDR inquiry is already finished, stop discovery,
381 	 * otherwise BR/EDR inquiry will stop discovery when finished.
382 	 * If we are about to resolve a remote device name, do not change
383 	 * the discovery state.
384 	 */
385 
386 	if (hdev->discovery.type == DISCOV_TYPE_LE)
387 		goto discov_stopped;
388 
389 	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
390 		goto _return;
391 
392 	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
393 		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
394 		    hdev->discovery.state != DISCOVERY_RESOLVING)
395 			goto discov_stopped;
396 
397 		goto _return;
398 	}
399 
400 	status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL);
401 	if (status) {
402 		bt_dev_err(hdev, "inquiry failed: status %d", status);
403 		goto discov_stopped;
404 	}
405 
406 	goto _return;
407 
408 discov_stopped:
409 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
410 
411 _return:
412 	hci_dev_unlock(hdev);
413 }
414 
415 static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
416 				       u8 filter_dup);
417 
418 static int reenable_adv_sync(struct hci_dev *hdev, void *data)
419 {
420 	bt_dev_dbg(hdev, "");
421 
422 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
423 	    list_empty(&hdev->adv_instances))
424 		return 0;
425 
426 	if (hdev->cur_adv_instance) {
427 		return hci_schedule_adv_instance_sync(hdev,
428 						      hdev->cur_adv_instance,
429 						      true);
430 	} else {
431 		if (ext_adv_capable(hdev)) {
432 			hci_start_ext_adv_sync(hdev, 0x00);
433 		} else {
434 			hci_update_adv_data_sync(hdev, 0x00);
435 			hci_update_scan_rsp_data_sync(hdev, 0x00);
436 			hci_enable_advertising_sync(hdev);
437 		}
438 	}
439 
440 	return 0;
441 }
442 
443 static void reenable_adv(struct work_struct *work)
444 {
445 	struct hci_dev *hdev = container_of(work, struct hci_dev,
446 					    reenable_adv_work);
447 	int status;
448 
449 	bt_dev_dbg(hdev, "");
450 
451 	hci_dev_lock(hdev);
452 
453 	status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL);
454 	if (status)
455 		bt_dev_err(hdev, "failed to reenable ADV: %d", status);
456 
457 	hci_dev_unlock(hdev);
458 }
459 
460 static void cancel_adv_timeout(struct hci_dev *hdev)
461 {
462 	if (hdev->adv_instance_timeout) {
463 		hdev->adv_instance_timeout = 0;
464 		cancel_delayed_work(&hdev->adv_instance_expire);
465 	}
466 }
467 
468 /* For a single instance:
469  * - force == true: The instance will be removed even when its remaining
470  *   lifetime is not zero.
471  * - force == false: the instance will be deactivated but kept stored unless
472  *   the remaining lifetime is zero.
473  *
474  * For instance == 0x00:
475  * - force == true: All instances will be removed regardless of their timeout
476  *   setting.
477  * - force == false: Only instances that have a timeout will be removed.
478  */
479 int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk,
480 				u8 instance, bool force)
481 {
482 	struct adv_info *adv_instance, *n, *next_instance = NULL;
483 	int err;
484 	u8 rem_inst;
485 
486 	/* Cancel any timeout concerning the removed instance(s). */
487 	if (!instance || hdev->cur_adv_instance == instance)
488 		cancel_adv_timeout(hdev);
489 
490 	/* Get the next instance to advertise BEFORE we remove
491 	 * the current one. This can be the same instance again
492 	 * if there is only one instance.
493 	 */
494 	if (instance && hdev->cur_adv_instance == instance)
495 		next_instance = hci_get_next_instance(hdev, instance);
496 
497 	if (instance == 0x00) {
498 		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
499 					 list) {
500 			if (!(force || adv_instance->timeout))
501 				continue;
502 
503 			rem_inst = adv_instance->instance;
504 			err = hci_remove_adv_instance(hdev, rem_inst);
505 			if (!err)
506 				mgmt_advertising_removed(sk, hdev, rem_inst);
507 		}
508 	} else {
509 		adv_instance = hci_find_adv_instance(hdev, instance);
510 
511 		if (force || (adv_instance && adv_instance->timeout &&
512 			      !adv_instance->remaining_time)) {
513 			/* Don't advertise a removed instance. */
514 			if (next_instance &&
515 			    next_instance->instance == instance)
516 				next_instance = NULL;
517 
518 			err = hci_remove_adv_instance(hdev, instance);
519 			if (!err)
520 				mgmt_advertising_removed(sk, hdev, instance);
521 		}
522 	}
523 
524 	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
525 		return 0;
526 
527 	if (next_instance && !ext_adv_capable(hdev))
528 		return hci_schedule_adv_instance_sync(hdev,
529 						      next_instance->instance,
530 						      false);
531 
532 	return 0;
533 }
534 
535 static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data)
536 {
537 	u8 instance = *(u8 *)data;
538 
539 	kfree(data);
540 
541 	hci_clear_adv_instance_sync(hdev, NULL, instance, false);
542 
543 	if (list_empty(&hdev->adv_instances))
544 		return hci_disable_advertising_sync(hdev);
545 
546 	return 0;
547 }
548 
549 static void adv_timeout_expire(struct work_struct *work)
550 {
551 	u8 *inst_ptr;
552 	struct hci_dev *hdev = container_of(work, struct hci_dev,
553 					    adv_instance_expire.work);
554 
555 	bt_dev_dbg(hdev, "");
556 
557 	hci_dev_lock(hdev);
558 
559 	hdev->adv_instance_timeout = 0;
560 
561 	if (hdev->cur_adv_instance == 0x00)
562 		goto unlock;
563 
564 	inst_ptr = kmalloc(1, GFP_KERNEL);
565 	if (!inst_ptr)
566 		goto unlock;
567 
568 	*inst_ptr = hdev->cur_adv_instance;
569 	hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL);
570 
571 unlock:
572 	hci_dev_unlock(hdev);
573 }
574 
575 static bool is_interleave_scanning(struct hci_dev *hdev)
576 {
577 	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
578 }
579 
580 static int hci_passive_scan_sync(struct hci_dev *hdev);
581 
582 static void interleave_scan_work(struct work_struct *work)
583 {
584 	struct hci_dev *hdev = container_of(work, struct hci_dev,
585 					    interleave_scan.work);
586 	unsigned long timeout;
587 
588 	if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) {
589 		timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration);
590 	} else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) {
591 		timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration);
592 	} else {
593 		bt_dev_err(hdev, "unexpected error");
594 		return;
595 	}
596 
597 	hci_passive_scan_sync(hdev);
598 
599 	hci_dev_lock(hdev);
600 
601 	switch (hdev->interleave_scan_state) {
602 	case INTERLEAVE_SCAN_ALLOWLIST:
603 		bt_dev_dbg(hdev, "next state: allowlist");
604 		hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
605 		break;
606 	case INTERLEAVE_SCAN_NO_FILTER:
607 		bt_dev_dbg(hdev, "next state: no filter");
608 		hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST;
609 		break;
610 	case INTERLEAVE_SCAN_NONE:
611 		bt_dev_err(hdev, "unexpected error");
612 	}
613 
614 	hci_dev_unlock(hdev);
615 
616 	/* Don't continue interleaving if it was canceled */
617 	if (is_interleave_scanning(hdev))
618 		queue_delayed_work(hdev->req_workqueue,
619 				   &hdev->interleave_scan, timeout);
620 }
621 
622 void hci_cmd_sync_init(struct hci_dev *hdev)
623 {
624 	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work);
625 	INIT_LIST_HEAD(&hdev->cmd_sync_work_list);
626 	mutex_init(&hdev->cmd_sync_work_lock);
627 	mutex_init(&hdev->unregister_lock);
628 
629 	INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work);
630 	INIT_WORK(&hdev->reenable_adv_work, reenable_adv);
631 	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable);
632 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
633 	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
634 }
635 
636 static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
637 				       struct hci_cmd_sync_work_entry *entry,
638 				       int err)
639 {
640 	if (entry->destroy)
641 		entry->destroy(hdev, entry->data, err);
642 
643 	list_del(&entry->list);
644 	kfree(entry);
645 }
646 
647 void hci_cmd_sync_clear(struct hci_dev *hdev)
648 {
649 	struct hci_cmd_sync_work_entry *entry, *tmp;
650 
651 	cancel_work_sync(&hdev->cmd_sync_work);
652 	cancel_work_sync(&hdev->reenable_adv_work);
653 
654 	mutex_lock(&hdev->cmd_sync_work_lock);
655 	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
656 		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
657 	mutex_unlock(&hdev->cmd_sync_work_lock);
658 }
659 
660 void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
661 {
662 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
663 
664 	if (hdev->req_status == HCI_REQ_PEND) {
665 		hdev->req_result = err;
666 		hdev->req_status = HCI_REQ_CANCELED;
667 
668 		queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
669 	}
670 }
671 EXPORT_SYMBOL(hci_cmd_sync_cancel);
672 
673 /* Cancel ongoing command request synchronously:
674  *
675  * - Set result and mark status to HCI_REQ_CANCELED
676  * - Wake up the command sync thread
677  */
678 void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
679 {
680 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
681 
682 	if (hdev->req_status == HCI_REQ_PEND) {
683 		/* req_result is __u32 so error must be positive to be properly
684 		 * propagated.
685 		 */
686 		hdev->req_result = err < 0 ? -err : err;
687 		hdev->req_status = HCI_REQ_CANCELED;
688 
689 		wake_up_interruptible(&hdev->req_wait_q);
690 	}
691 }
692 EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);
693 
694 /* Submit HCI command to be run from cmd_sync_work:
695  *
696  * - hdev must _not_ be unregistered
697  */
698 int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
699 			void *data, hci_cmd_sync_work_destroy_t destroy)
700 {
701 	struct hci_cmd_sync_work_entry *entry;
702 	int err = 0;
703 
704 	mutex_lock(&hdev->unregister_lock);
705 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
706 		err = -ENODEV;
707 		goto unlock;
708 	}
709 
710 	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
711 	if (!entry) {
712 		err = -ENOMEM;
713 		goto unlock;
714 	}
715 	entry->func = func;
716 	entry->data = data;
717 	entry->destroy = destroy;
718 
719 	mutex_lock(&hdev->cmd_sync_work_lock);
720 	list_add_tail(&entry->list, &hdev->cmd_sync_work_list);
721 	mutex_unlock(&hdev->cmd_sync_work_lock);
722 
723 	queue_work(hdev->req_workqueue, &hdev->cmd_sync_work);
724 
725 unlock:
726 	mutex_unlock(&hdev->unregister_lock);
727 	return err;
728 }
729 EXPORT_SYMBOL(hci_cmd_sync_submit);
730 
731 /* Queue HCI command:
732  *
733  * - hdev must be running
734  */
735 int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
736 		       void *data, hci_cmd_sync_work_destroy_t destroy)
737 {
738 	/* Only queue the command if hdev is running, which means it has been
739 	 * opened and is either in the init phase or already up.
740 	 */
741 	if (!test_bit(HCI_RUNNING, &hdev->flags))
742 		return -ENETDOWN;
743 
744 	return hci_cmd_sync_submit(hdev, func, data, destroy);
745 }
746 EXPORT_SYMBOL(hci_cmd_sync_queue);
747 
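/* Editor's example (hypothetical sketch modelled on the pattern used by
 * adv_timeout_expire() above): defer work to cmd_sync_work by queueing a
 * function together with an allocated context and a destroy callback that
 * releases the context whether or not the function ran. All example_*
 * names are made up; hci_update_adv_data_sync() is the real helper defined
 * later in this file.
 */
#if 0	/* illustrative only */
static int example_sync(struct hci_dev *hdev, void *data)
{
	u8 instance = *(u8 *)data;

	return hci_update_adv_data_sync(hdev, instance);
}

static void example_destroy(struct hci_dev *hdev, void *data, int err)
{
	kfree(data);
}

static int example_queue(struct hci_dev *hdev, u8 instance)
{
	u8 *data = kmalloc(sizeof(*data), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	*data = instance;

	return hci_cmd_sync_queue(hdev, example_sync, data, example_destroy);
}
#endif
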
748 static struct hci_cmd_sync_work_entry *
749 _hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
750 			   void *data, hci_cmd_sync_work_destroy_t destroy)
751 {
752 	struct hci_cmd_sync_work_entry *entry, *tmp;
753 
754 	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
755 		if (func && entry->func != func)
756 			continue;
757 
758 		if (data && entry->data != data)
759 			continue;
760 
761 		if (destroy && entry->destroy != destroy)
762 			continue;
763 
764 		return entry;
765 	}
766 
767 	return NULL;
768 }
769 
770 /* Queue HCI command entry once:
771  *
772  * - Look up whether an entry already exists and, only if it doesn't, create
773  *   a new entry and queue it.
774  */
775 int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
776 			    void *data, hci_cmd_sync_work_destroy_t destroy)
777 {
778 	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
779 		return 0;
780 
781 	return hci_cmd_sync_queue(hdev, func, data, destroy);
782 }
783 EXPORT_SYMBOL(hci_cmd_sync_queue_once);
784 
785 /* Lookup HCI command entry:
786  *
787  * - Return first entry that matches by function callback or data or
788  *   destroy callback.
789  */
790 struct hci_cmd_sync_work_entry *
791 hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
792 			  void *data, hci_cmd_sync_work_destroy_t destroy)
793 {
794 	struct hci_cmd_sync_work_entry *entry;
795 
796 	mutex_lock(&hdev->cmd_sync_work_lock);
797 	entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
798 	mutex_unlock(&hdev->cmd_sync_work_lock);
799 
800 	return entry;
801 }
802 EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);
803 
804 /* Cancel HCI command entry */
805 void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
806 			       struct hci_cmd_sync_work_entry *entry)
807 {
808 	mutex_lock(&hdev->cmd_sync_work_lock);
809 	_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
810 	mutex_unlock(&hdev->cmd_sync_work_lock);
811 }
812 EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);
813 
814 /* Dequeue one HCI command entry:
815  *
816  * - Lookup and cancel first entry that matches.
817  */
818 bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
819 			       hci_cmd_sync_work_func_t func,
820 			       void *data, hci_cmd_sync_work_destroy_t destroy)
821 {
822 	struct hci_cmd_sync_work_entry *entry;
823 
824 	entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
825 	if (!entry)
826 		return false;
827 
828 	hci_cmd_sync_cancel_entry(hdev, entry);
829 
830 	return true;
831 }
832 EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);
833 
834 /* Dequeue HCI command entry:
835  *
836  * - Lookup and cancel any entry that matches by function callback or data or
837  *   destroy callback.
838  */
839 bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
840 			  void *data, hci_cmd_sync_work_destroy_t destroy)
841 {
842 	struct hci_cmd_sync_work_entry *entry;
843 	bool ret = false;
844 
845 	mutex_lock(&hdev->cmd_sync_work_lock);
846 	while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
847 						   destroy))) {
848 		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
849 		ret = true;
850 	}
851 	mutex_unlock(&hdev->cmd_sync_work_lock);
852 
853 	return ret;
854 }
855 EXPORT_SYMBOL(hci_cmd_sync_dequeue);
856 
857 int hci_update_eir_sync(struct hci_dev *hdev)
858 {
859 	struct hci_cp_write_eir cp;
860 
861 	bt_dev_dbg(hdev, "");
862 
863 	if (!hdev_is_powered(hdev))
864 		return 0;
865 
866 	if (!lmp_ext_inq_capable(hdev))
867 		return 0;
868 
869 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
870 		return 0;
871 
872 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
873 		return 0;
874 
875 	memset(&cp, 0, sizeof(cp));
876 
877 	eir_create(hdev, cp.data);
878 
879 	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
880 		return 0;
881 
882 	memcpy(hdev->eir, cp.data, sizeof(cp.data));
883 
884 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
885 				     HCI_CMD_TIMEOUT);
886 }
887 
888 static u8 get_service_classes(struct hci_dev *hdev)
889 {
890 	struct bt_uuid *uuid;
891 	u8 val = 0;
892 
893 	list_for_each_entry(uuid, &hdev->uuids, list)
894 		val |= uuid->svc_hint;
895 
896 	return val;
897 }
898 
899 int hci_update_class_sync(struct hci_dev *hdev)
900 {
901 	u8 cod[3];
902 
903 	bt_dev_dbg(hdev, "");
904 
905 	if (!hdev_is_powered(hdev))
906 		return 0;
907 
908 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
909 		return 0;
910 
911 	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
912 		return 0;
913 
914 	cod[0] = hdev->minor_class;
915 	cod[1] = hdev->major_class;
916 	cod[2] = get_service_classes(hdev);
917 
918 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
919 		cod[1] |= 0x20;
920 
921 	if (memcmp(cod, hdev->dev_class, 3) == 0)
922 		return 0;
923 
924 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV,
925 				     sizeof(cod), cod, HCI_CMD_TIMEOUT);
926 }
927 
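/* Editor's note: the Class of Device is transmitted little-endian, so
 * cod[0] is the minor class octet, cod[1] the major class octet and cod[2]
 * the service class octet; "Limited Discoverable Mode" is CoD bit 13, i.e.
 * bit 5 of cod[1], hence the 0x20 above.
 */
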
928 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
929 {
930 	/* If there is no connection we are OK to advertise. */
931 	if (hci_conn_num(hdev, LE_LINK) == 0)
932 		return true;
933 
934 	/* Check le_states if there is any connection in peripheral role. */
935 	if (hdev->conn_hash.le_num_peripheral > 0) {
936 		/* Peripheral connection state and non connectable mode
937 		 * bit 20.
938 		 */
939 		if (!connectable && !(hdev->le_states[2] & 0x10))
940 			return false;
941 
942 		/* Peripheral connection state and connectable mode bit 38
943 		 * and scannable bit 21.
944 		 */
945 		if (connectable && (!(hdev->le_states[4] & 0x40) ||
946 				    !(hdev->le_states[2] & 0x20)))
947 			return false;
948 	}
949 
950 	/* Check le_states if there is any connection in central role. */
951 	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) {
952 		/* Central connection state and non connectable mode bit 18. */
953 		if (!connectable && !(hdev->le_states[2] & 0x02))
954 			return false;
955 
956 		/* Central connection state and connectable mode bit 35 and
957 		 * scannable 19.
958 		 */
959 		if (connectable && (!(hdev->le_states[4] & 0x08) ||
960 				    !(hdev->le_states[2] & 0x08)))
961 			return false;
962 	}
963 
964 	return true;
965 }
966 
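/* Editor's note: the le_states masks above index the LE supported states
 * bit field as le_states[N / 8] & (1 << (N % 8)) for bit N; e.g. bit 21
 * -> byte 2, bit 5 -> 0x20, and bit 38 -> byte 4, bit 6 -> 0x40.
 */
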
967 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
968 {
969 	/* If privacy is not enabled don't use RPA */
970 	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
971 		return false;
972 
973 	/* If basic privacy mode is enabled use RPA */
974 	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
975 		return true;
976 
977 	/* If limited privacy mode is enabled don't use RPA if we're
978 	 * both discoverable and bondable.
979 	 */
980 	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
981 	    hci_dev_test_flag(hdev, HCI_BONDABLE))
982 		return false;
983 
984 	/* We're neither bondable nor discoverable in the limited
985 	 * privacy mode, therefore use RPA.
986 	 */
987 	return true;
988 }
989 
990 static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
991 {
992 	/* If we're advertising or initiating an LE connection we can't
993 	 * go ahead and change the random address at this time. This is
994 	 * because the eventual initiator address used for the
995 	 * subsequently created connection will be undefined (some
996 	 * controllers use the new address and others the one we had
997 	 * when the operation started).
998 	 *
999 	 * In this kind of scenario skip the update and let the random
1000 	 * address be updated at the next cycle.
1001 	 */
1002 	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
1003 	    hci_lookup_le_connect(hdev)) {
1004 		bt_dev_dbg(hdev, "Deferring random address update");
1005 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
1006 		return 0;
1007 	}
1008 
1009 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
1010 				     6, rpa, HCI_CMD_TIMEOUT);
1011 }
1012 
1013 int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
1014 				   bool rpa, u8 *own_addr_type)
1015 {
1016 	int err;
1017 
1018 	/* If privacy is enabled use a resolvable private address. If
1019 	 * current RPA has expired or there is something else than
1020 	 * the current RPA has expired or there is something other than
1021 	 */
1022 	if (rpa) {
1023 		/* If the controller supports LL Privacy, use own address
1024 		 * type 0x03.
1025 		 */
1026 		if (use_ll_privacy(hdev))
1027 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
1028 		else
1029 			*own_addr_type = ADDR_LE_DEV_RANDOM;
1030 
1031 		/* Check if RPA is valid */
1032 		if (rpa_valid(hdev))
1033 			return 0;
1034 
1035 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1036 		if (err < 0) {
1037 			bt_dev_err(hdev, "failed to generate new RPA");
1038 			return err;
1039 		}
1040 
1041 		err = hci_set_random_addr_sync(hdev, &hdev->rpa);
1042 		if (err)
1043 			return err;
1044 
1045 		return 0;
1046 	}
1047 
1048 	/* In case of required privacy without resolvable private address,
1049 	 * use a non-resolvable private address. This is useful for active
1050 	 * scanning and non-connectable advertising.
1051 	 */
1052 	if (require_privacy) {
1053 		bdaddr_t nrpa;
1054 
1055 		while (true) {
1056 			/* The non-resolvable private address is generated
1057 			 * from random six bytes with the two most significant
1058 			 * bits cleared.
1059 			 */
1060 			get_random_bytes(&nrpa, 6);
1061 			nrpa.b[5] &= 0x3f;
1062 
1063 			/* The non-resolvable private address shall not be
1064 			 * equal to the public address.
1065 			 */
1066 			if (bacmp(&hdev->bdaddr, &nrpa))
1067 				break;
1068 		}
1069 
1070 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1071 
1072 		return hci_set_random_addr_sync(hdev, &nrpa);
1073 	}
1074 
1075 	/* If forcing static address is in use or there is no public
1076 	 * address use the static address as random address (but skip
1077 	 * the HCI command if the current random address is already the
1078 	 * static one).
1079 	 *
1080 	 * In case BR/EDR has been disabled on a dual-mode controller
1081 	 * and a static address has been configured, then use that
1082 	 * address instead of the public BR/EDR address.
1083 	 */
1084 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
1085 	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
1086 	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
1087 	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
1088 		*own_addr_type = ADDR_LE_DEV_RANDOM;
1089 		if (bacmp(&hdev->static_addr, &hdev->random_addr))
1090 			return hci_set_random_addr_sync(hdev,
1091 							&hdev->static_addr);
1092 		return 0;
1093 	}
1094 
1095 	/* Neither privacy nor static address is being used so use a
1096 	 * public address.
1097 	 */
1098 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
1099 
1100 	return 0;
1101 }
1102 
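/* Editor's summary of the decision above (a sketch derived from the code;
 * the numeric values are the ADDR_LE_DEV_* defines):
 *
 *   rpa && use_ll_privacy()    -> ADDR_LE_DEV_RANDOM_RESOLVED (0x03), RPA
 *   rpa                        -> ADDR_LE_DEV_RANDOM (0x01), RPA
 *   require_privacy            -> ADDR_LE_DEV_RANDOM (0x01), NRPA
 *   static address configured  -> ADDR_LE_DEV_RANDOM (0x01), static address
 *   otherwise                  -> ADDR_LE_DEV_PUBLIC (0x00)
 *
 * The NRPA is six random bytes with the two most significant bits cleared;
 * nrpa.b[5] holds the most significant byte since bdaddr_t is stored
 * little-endian.
 */
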
1103 static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
1104 {
1105 	struct hci_cp_le_set_ext_adv_enable *cp;
1106 	struct hci_cp_ext_adv_set *set;
1107 	u8 data[sizeof(*cp) + sizeof(*set) * 1];
1108 	u8 size;
1109 	struct adv_info *adv = NULL;
1110 
1111 	/* If request specifies an instance that doesn't exist, fail */
1112 	if (instance > 0) {
1113 		adv = hci_find_adv_instance(hdev, instance);
1114 		if (!adv)
1115 			return -EINVAL;
1116 
1117 		/* If not enabled there is nothing to do */
1118 		if (!adv->enabled)
1119 			return 0;
1120 	}
1121 
1122 	memset(data, 0, sizeof(data));
1123 
1124 	cp = (void *)data;
1125 	set = (void *)cp->data;
1126 
1127 	/* Instance 0x00 indicates all advertising instances will be disabled */
1128 	cp->num_of_sets = !!instance;
1129 	cp->enable = 0x00;
1130 
1131 	set->handle = adv ? adv->handle : instance;
1132 
1133 	size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;
1134 
1135 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1136 				     size, data, HCI_CMD_TIMEOUT);
1137 }
1138 
1139 static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
1140 					    bdaddr_t *random_addr)
1141 {
1142 	struct hci_cp_le_set_adv_set_rand_addr cp;
1143 	int err;
1144 
1145 	if (!instance) {
1146 		/* Instance 0x00 doesn't have an adv_info, instead it uses
1147 		 * hdev->random_addr to track its address so whenever it needs
1148 		 * to be updated this also sets the random address, since
1149 		 * hdev->random_addr is shared with the scan state machine.
1150 		 */
1151 		err = hci_set_random_addr_sync(hdev, random_addr);
1152 		if (err)
1153 			return err;
1154 	}
1155 
1156 	memset(&cp, 0, sizeof(cp));
1157 
1158 	cp.handle = instance;
1159 	bacpy(&cp.bdaddr, random_addr);
1160 
1161 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
1162 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1163 }
1164 
1165 int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
1166 {
1167 	struct hci_cp_le_set_ext_adv_params cp;
1168 	bool connectable;
1169 	u32 flags;
1170 	bdaddr_t random_addr;
1171 	u8 own_addr_type;
1172 	int err;
1173 	struct adv_info *adv;
1174 	bool secondary_adv;
1175 
1176 	if (instance > 0) {
1177 		adv = hci_find_adv_instance(hdev, instance);
1178 		if (!adv)
1179 			return -EINVAL;
1180 	} else {
1181 		adv = NULL;
1182 	}
1183 
1184 	/* Updating parameters of an active instance will return a
1185 	 * Command Disallowed error, so we must first disable the
1186 	 * instance if it is active.
1187 	 */
1188 	if (adv && !adv->pending) {
1189 		err = hci_disable_ext_adv_instance_sync(hdev, instance);
1190 		if (err)
1191 			return err;
1192 	}
1193 
1194 	flags = hci_adv_instance_flags(hdev, instance);
1195 
1196 	/* If the "connectable" instance flag was not set, then choose between
1197 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1198 	 */
1199 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1200 		      mgmt_get_connectable(hdev);
1201 
1202 	if (!is_advertising_allowed(hdev, connectable))
1203 		return -EPERM;
1204 
1205 	/* Set require_privacy to true only when non-connectable
1206 	 * advertising is used. In that case it is fine to use a
1207 	 * non-resolvable private address.
1208 	 */
1209 	err = hci_get_random_address(hdev, !connectable,
1210 				     adv_use_rpa(hdev, flags), adv,
1211 				     &own_addr_type, &random_addr);
1212 	if (err < 0)
1213 		return err;
1214 
1215 	memset(&cp, 0, sizeof(cp));
1216 
1217 	if (adv) {
1218 		hci_cpu_to_le24(adv->min_interval, cp.min_interval);
1219 		hci_cpu_to_le24(adv->max_interval, cp.max_interval);
1220 		cp.tx_power = adv->tx_power;
1221 	} else {
1222 		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
1223 		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
1224 		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
1225 	}
1226 
1227 	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
1228 
1229 	if (connectable) {
1230 		if (secondary_adv)
1231 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
1232 		else
1233 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
1234 	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
1235 		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
1236 		if (secondary_adv)
1237 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
1238 		else
1239 			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
1240 	} else {
1241 		if (secondary_adv)
1242 			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
1243 		else
1244 			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
1245 	}
1246 
1247 	/* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
1248 	 * contains the peer’s Identity Address and the Peer_Address_Type
1249 	 * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01).
1250 	 * These parameters are used to locate the corresponding local IRK in
1251 	 * the resolving list; this IRK is used to generate their own address
1252 	 * used in the advertisement.
1253 	 */
1254 	if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
1255 		hci_copy_identity_address(hdev, &cp.peer_addr,
1256 					  &cp.peer_addr_type);
1257 
1258 	cp.own_addr_type = own_addr_type;
1259 	cp.channel_map = hdev->le_adv_channel_map;
1260 	cp.handle = adv ? adv->handle : instance;
1261 
1262 	if (flags & MGMT_ADV_FLAG_SEC_2M) {
1263 		cp.primary_phy = HCI_ADV_PHY_1M;
1264 		cp.secondary_phy = HCI_ADV_PHY_2M;
1265 	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
1266 		cp.primary_phy = HCI_ADV_PHY_CODED;
1267 		cp.secondary_phy = HCI_ADV_PHY_CODED;
1268 	} else {
1269 		/* In all other cases use 1M */
1270 		cp.primary_phy = HCI_ADV_PHY_1M;
1271 		cp.secondary_phy = HCI_ADV_PHY_1M;
1272 	}
1273 
1274 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
1275 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1276 	if (err)
1277 		return err;
1278 
1279 	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
1280 	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
1281 	    bacmp(&random_addr, BDADDR_ANY)) {
1282 		/* Check if the random address needs to be updated */
1283 		if (adv) {
1284 			if (!bacmp(&random_addr, &adv->random_addr))
1285 				return 0;
1286 		} else {
1287 			if (!bacmp(&random_addr, &hdev->random_addr))
1288 				return 0;
1289 		}
1290 
1291 		return hci_set_adv_set_random_addr_sync(hdev, instance,
1292 							&random_addr);
1293 	}
1294 
1295 	return 0;
1296 }
1297 
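/* Editor's summary of the evt_properties selection above:
 *
 *   connectable            -> LE_EXT_ADV_CONN_IND / LE_LEGACY_ADV_IND
 *   scannable or scan rsp  -> LE_EXT_ADV_SCAN_IND / LE_LEGACY_ADV_SCAN_IND
 *   otherwise              -> LE_EXT_ADV_NON_CONN_IND / LE_LEGACY_NONCONN_IND
 *
 * with the extended form used when MGMT_ADV_FLAG_SEC_MASK selects a
 * secondary PHY and the legacy form otherwise.
 */
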
1298 static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1299 {
1300 	DEFINE_FLEX(struct hci_cp_le_set_ext_scan_rsp_data, pdu, data, length,
1301 		    HCI_MAX_EXT_AD_LENGTH);
1302 	u8 len;
1303 	struct adv_info *adv = NULL;
1304 	int err;
1305 
1306 	if (instance) {
1307 		adv = hci_find_adv_instance(hdev, instance);
1308 		if (!adv || !adv->scan_rsp_changed)
1309 			return 0;
1310 	}
1311 
1312 	len = eir_create_scan_rsp(hdev, instance, pdu->data);
1313 
1314 	pdu->handle = adv ? adv->handle : instance;
1315 	pdu->length = len;
1316 	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
1317 	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1318 
1319 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
1320 				    struct_size(pdu, data, len), pdu,
1321 				    HCI_CMD_TIMEOUT);
1322 	if (err)
1323 		return err;
1324 
1325 	if (adv) {
1326 		adv->scan_rsp_changed = false;
1327 	} else {
1328 		memcpy(hdev->scan_rsp_data, pdu->data, len);
1329 		hdev->scan_rsp_data_len = len;
1330 	}
1331 
1332 	return 0;
1333 }
1334 
1335 static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1336 {
1337 	struct hci_cp_le_set_scan_rsp_data cp;
1338 	u8 len;
1339 
1340 	memset(&cp, 0, sizeof(cp));
1341 
1342 	len = eir_create_scan_rsp(hdev, instance, cp.data);
1343 
1344 	if (hdev->scan_rsp_data_len == len &&
1345 	    !memcmp(cp.data, hdev->scan_rsp_data, len))
1346 		return 0;
1347 
1348 	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
1349 	hdev->scan_rsp_data_len = len;
1350 
1351 	cp.length = len;
1352 
1353 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA,
1354 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1355 }
1356 
1357 int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
1358 {
1359 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1360 		return 0;
1361 
1362 	if (ext_adv_capable(hdev))
1363 		return hci_set_ext_scan_rsp_data_sync(hdev, instance);
1364 
1365 	return __hci_set_scan_rsp_data_sync(hdev, instance);
1366 }
1367 
1368 int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance)
1369 {
1370 	struct hci_cp_le_set_ext_adv_enable *cp;
1371 	struct hci_cp_ext_adv_set *set;
1372 	u8 data[sizeof(*cp) + sizeof(*set) * 1];
1373 	struct adv_info *adv;
1374 
1375 	if (instance > 0) {
1376 		adv = hci_find_adv_instance(hdev, instance);
1377 		if (!adv)
1378 			return -EINVAL;
1379 		/* If already enabled there is nothing to do */
1380 		if (adv->enabled)
1381 			return 0;
1382 	} else {
1383 		adv = NULL;
1384 	}
1385 
1386 	cp = (void *)data;
1387 	set = (void *)cp->data;
1388 
1389 	memset(cp, 0, sizeof(*cp));
1390 
1391 	cp->enable = 0x01;
1392 	cp->num_of_sets = 0x01;
1393 
1394 	memset(set, 0, sizeof(*set));
1395 
1396 	set->handle = adv ? adv->handle : instance;
1397 
1398 	/* Set duration per instance since controller is responsible for
1399 	 * scheduling it.
1400 	 */
1401 	if (adv && adv->timeout) {
1402 		u16 duration = adv->timeout * MSEC_PER_SEC;
1403 
1404 		/* Time = N * 10 ms */
1405 		set->duration = cpu_to_le16(duration / 10);
1406 	}
1407 
1408 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
1409 				     sizeof(*cp) +
1410 				     sizeof(*set) * cp->num_of_sets,
1411 				     data, HCI_CMD_TIMEOUT);
1412 }
1413 
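/* Editor's worked example for the duration conversion above: with
 * adv->timeout = 5 (seconds), duration = 5 * MSEC_PER_SEC = 5000 ms and
 * set->duration = 5000 / 10 = 500, since the HCI field counts 10 ms units.
 */
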
1414 int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance)
1415 {
1416 	int err;
1417 
1418 	err = hci_setup_ext_adv_instance_sync(hdev, instance);
1419 	if (err)
1420 		return err;
1421 
1422 	err = hci_set_ext_scan_rsp_data_sync(hdev, instance);
1423 	if (err)
1424 		return err;
1425 
1426 	return hci_enable_ext_advertising_sync(hdev, instance);
1427 }
1428 
1429 int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
1430 {
1431 	struct hci_cp_le_set_per_adv_enable cp;
1432 	struct adv_info *adv = NULL;
1433 
1434 	/* If periodic advertising is already disabled there is nothing to do. */
1435 	adv = hci_find_adv_instance(hdev, instance);
1436 	if (!adv || !adv->periodic || !adv->enabled)
1437 		return 0;
1438 
1439 	memset(&cp, 0, sizeof(cp));
1440 
1441 	cp.enable = 0x00;
1442 	cp.handle = instance;
1443 
1444 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
1445 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1446 }
1447 
1448 static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
1449 				       u16 min_interval, u16 max_interval)
1450 {
1451 	struct hci_cp_le_set_per_adv_params cp;
1452 
1453 	memset(&cp, 0, sizeof(cp));
1454 
1455 	if (!min_interval)
1456 		min_interval = DISCOV_LE_PER_ADV_INT_MIN;
1457 
1458 	if (!max_interval)
1459 		max_interval = DISCOV_LE_PER_ADV_INT_MAX;
1460 
1461 	cp.handle = instance;
1462 	cp.min_interval = cpu_to_le16(min_interval);
1463 	cp.max_interval = cpu_to_le16(max_interval);
1464 	cp.periodic_properties = 0x0000;
1465 
1466 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS,
1467 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1468 }
1469 
1470 static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
1471 {
1472 	DEFINE_FLEX(struct hci_cp_le_set_per_adv_data, pdu, data, length,
1473 		    HCI_MAX_PER_AD_LENGTH);
1474 	u8 len;
1475 	struct adv_info *adv = NULL;
1476 
1477 	if (instance) {
1478 		adv = hci_find_adv_instance(hdev, instance);
1479 		if (!adv || !adv->periodic)
1480 			return 0;
1481 	}
1482 
1483 	len = eir_create_per_adv_data(hdev, instance, pdu->data);
1484 
1485 	pdu->length = len;
1486 	pdu->handle = adv ? adv->handle : instance;
1487 	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
1488 
1489 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
1490 				     struct_size(pdu, data, len), pdu,
1491 				     HCI_CMD_TIMEOUT);
1492 }
1493 
1494 static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
1495 {
1496 	struct hci_cp_le_set_per_adv_enable cp;
1497 	struct adv_info *adv = NULL;
1498 
1499 	/* If periodic advertising is already enabled there is nothing to do. */
1500 	adv = hci_find_adv_instance(hdev, instance);
1501 	if (adv && adv->periodic && adv->enabled)
1502 		return 0;
1503 
1504 	memset(&cp, 0, sizeof(cp));
1505 
1506 	cp.enable = 0x01;
1507 	cp.handle = instance;
1508 
1509 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
1510 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1511 }
1512 
1513 /* Checks if the periodic advertising data contains a Basic Announcement and,
1514  * if it does, generates a Broadcast ID and adds a Broadcast Announcement.
1515  */
1516 static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv)
1517 {
1518 	u8 bid[3];
1519 	u8 ad[4 + 3];
1520 
1521 	/* Skip if NULL adv as instance 0x00 is used for general purpose
1522 	 * advertising, so it cannot be used for the likes of Broadcast Announcement
1523 	 * as it can be overwritten at any point.
1524 	 */
1525 	if (!adv)
1526 		return 0;
1527 
1528 	/* If the PA data doesn't contain a Basic Audio Announcement then
1529 	 * there is nothing to do.
1530 	 */
1531 	if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len,
1532 				  0x1851, NULL))
1533 		return 0;
1534 
1535 	/* Check if advertising data already has a Broadcast Announcement since
1536 	 * the process may want to control the Broadcast ID directly and in that
1537 	 * case the kernel shall not interfere.
1538 	 */
1539 	if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852,
1540 				 NULL))
1541 		return 0;
1542 
1543 	/* Generate Broadcast ID */
1544 	get_random_bytes(bid, sizeof(bid));
1545 	eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid));
1546 	hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL);
1547 
1548 	return hci_update_adv_data_sync(hdev, adv->instance);
1549 }
1550 
1551 int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,
1552 			   u8 *data, u32 flags, u16 min_interval,
1553 			   u16 max_interval, u16 sync_interval)
1554 {
1555 	struct adv_info *adv = NULL;
1556 	int err;
1557 	bool added = false;
1558 
1559 	hci_disable_per_advertising_sync(hdev, instance);
1560 
1561 	if (instance) {
1562 		adv = hci_find_adv_instance(hdev, instance);
1563 		/* Create an instance if one could not be found */
1564 		if (!adv) {
1565 			adv = hci_add_per_instance(hdev, instance, flags,
1566 						   data_len, data,
1567 						   sync_interval,
1568 						   sync_interval);
1569 			if (IS_ERR(adv))
1570 				return PTR_ERR(adv);
1571 			adv->pending = false;
1572 			added = true;
1573 		}
1574 	}
1575 
1576 	/* Start advertising */
1577 	err = hci_start_ext_adv_sync(hdev, instance);
1578 	if (err < 0)
1579 		goto fail;
1580 
1581 	err = hci_adv_bcast_annoucement(hdev, adv);
1582 	if (err < 0)
1583 		goto fail;
1584 
1585 	err = hci_set_per_adv_params_sync(hdev, instance, min_interval,
1586 					  max_interval);
1587 	if (err < 0)
1588 		goto fail;
1589 
1590 	err = hci_set_per_adv_data_sync(hdev, instance);
1591 	if (err < 0)
1592 		goto fail;
1593 
1594 	err = hci_enable_per_advertising_sync(hdev, instance);
1595 	if (err < 0)
1596 		goto fail;
1597 
1598 	return 0;
1599 
1600 fail:
1601 	if (added)
1602 		hci_remove_adv_instance(hdev, instance);
1603 
1604 	return err;
1605 }
1606 
1607 static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance)
1608 {
1609 	int err;
1610 
1611 	if (ext_adv_capable(hdev))
1612 		return hci_start_ext_adv_sync(hdev, instance);
1613 
1614 	err = hci_update_adv_data_sync(hdev, instance);
1615 	if (err)
1616 		return err;
1617 
1618 	err = hci_update_scan_rsp_data_sync(hdev, instance);
1619 	if (err)
1620 		return err;
1621 
1622 	return hci_enable_advertising_sync(hdev);
1623 }
1624 
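/* Editor's note: hci_start_adv_sync() above follows the same dispatch
 * pattern as hci_update_adv_data_sync() and hci_update_scan_rsp_data_sync()
 * -- use the extended advertising commands when ext_adv_capable(hdev) and
 * fall back to the legacy HCI_OP_LE_SET_ADV_* commands otherwise.
 */
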
1625 int hci_enable_advertising_sync(struct hci_dev *hdev)
1626 {
1627 	struct adv_info *adv_instance;
1628 	struct hci_cp_le_set_adv_param cp;
1629 	u8 own_addr_type, enable = 0x01;
1630 	bool connectable;
1631 	u16 adv_min_interval, adv_max_interval;
1632 	u32 flags;
1633 	u8 status;
1634 
1635 	if (ext_adv_capable(hdev))
1636 		return hci_enable_ext_advertising_sync(hdev,
1637 						       hdev->cur_adv_instance);
1638 
1639 	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance);
1640 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
1641 
1642 	/* If the "connectable" instance flag was not set, then choose between
1643 	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
1644 	 */
1645 	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
1646 		      mgmt_get_connectable(hdev);
1647 
1648 	if (!is_advertising_allowed(hdev, connectable))
1649 		return -EINVAL;
1650 
1651 	status = hci_disable_advertising_sync(hdev);
1652 	if (status)
1653 		return status;
1654 
1655 	/* Clear the HCI_LE_ADV bit temporarily so that the
1656 	 * hci_update_random_address knows that it's safe to go ahead
1657 	 * and write a new random address. The flag will be set back on
1658 	 * as soon as the SET_ADV_ENABLE HCI command completes.
1659 	 */
1660 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
1661 
1662 	/* Set require_privacy to true only when non-connectable
1663 	 * advertising is used. In that case it is fine to use a
1664 	 * non-resolvable private address.
1665 	 */
1666 	status = hci_update_random_address_sync(hdev, !connectable,
1667 						adv_use_rpa(hdev, flags),
1668 						&own_addr_type);
1669 	if (status)
1670 		return status;
1671 
1672 	memset(&cp, 0, sizeof(cp));
1673 
1674 	if (adv_instance) {
1675 		adv_min_interval = adv_instance->min_interval;
1676 		adv_max_interval = adv_instance->max_interval;
1677 	} else {
1678 		adv_min_interval = hdev->le_adv_min_interval;
1679 		adv_max_interval = hdev->le_adv_max_interval;
1680 	}
1681 
1682 	if (connectable) {
1683 		cp.type = LE_ADV_IND;
1684 	} else {
1685 		if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance))
1686 			cp.type = LE_ADV_SCAN_IND;
1687 		else
1688 			cp.type = LE_ADV_NONCONN_IND;
1689 
1690 		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) ||
1691 		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1692 			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN;
1693 			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX;
1694 		}
1695 	}
1696 
1697 	cp.min_interval = cpu_to_le16(adv_min_interval);
1698 	cp.max_interval = cpu_to_le16(adv_max_interval);
1699 	cp.own_address_type = own_addr_type;
1700 	cp.channel_map = hdev->le_adv_channel_map;
1701 
1702 	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
1703 				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1704 	if (status)
1705 		return status;
1706 
1707 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
1708 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
1709 }
1710 
1711 static int enable_advertising_sync(struct hci_dev *hdev, void *data)
1712 {
1713 	return hci_enable_advertising_sync(hdev);
1714 }
1715 
1716 int hci_enable_advertising(struct hci_dev *hdev)
1717 {
1718 	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1719 	    list_empty(&hdev->adv_instances))
1720 		return 0;
1721 
1722 	return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL);
1723 }
1724 
1725 int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1726 				     struct sock *sk)
1727 {
1728 	int err;
1729 
1730 	if (!ext_adv_capable(hdev))
1731 		return 0;
1732 
1733 	err = hci_disable_ext_adv_instance_sync(hdev, instance);
1734 	if (err)
1735 		return err;
1736 
1737 	/* If request specifies an instance that doesn't exist, fail */
1738 	if (instance > 0 && !hci_find_adv_instance(hdev, instance))
1739 		return -EINVAL;
1740 
1741 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET,
1742 					sizeof(instance), &instance, 0,
1743 					HCI_CMD_TIMEOUT, sk);
1744 }
1745 
1746 static int remove_ext_adv_sync(struct hci_dev *hdev, void *data)
1747 {
1748 	struct adv_info *adv = data;
1749 	u8 instance = 0;
1750 
1751 	if (adv)
1752 		instance = adv->instance;
1753 
1754 	return hci_remove_ext_adv_instance_sync(hdev, instance, NULL);
1755 }
1756 
1757 int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance)
1758 {
1759 	struct adv_info *adv = NULL;
1760 
1761 	if (instance) {
1762 		adv = hci_find_adv_instance(hdev, instance);
1763 		if (!adv)
1764 			return -EINVAL;
1765 	}
1766 
1767 	return hci_cmd_sync_queue(hdev, remove_ext_adv_sync, adv, NULL);
1768 }
1769 
1770 int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)
1771 {
1772 	struct hci_cp_le_term_big cp;
1773 
1774 	memset(&cp, 0, sizeof(cp));
1775 	cp.handle = handle;
1776 	cp.reason = reason;
1777 
1778 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG,
1779 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1780 }
1781 
1782 static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)
1783 {
1784 	DEFINE_FLEX(struct hci_cp_le_set_ext_adv_data, pdu, data, length,
1785 		    HCI_MAX_EXT_AD_LENGTH);
1786 	u8 len;
1787 	struct adv_info *adv = NULL;
1788 	int err;
1789 
1790 	if (instance) {
1791 		adv = hci_find_adv_instance(hdev, instance);
1792 		if (!adv || !adv->adv_data_changed)
1793 			return 0;
1794 	}
1795 
1796 	len = eir_create_adv_data(hdev, instance, pdu->data);
1797 
1798 	pdu->length = len;
1799 	pdu->handle = adv ? adv->handle : instance;
1800 	pdu->operation = LE_SET_ADV_DATA_OP_COMPLETE;
1801 	pdu->frag_pref = LE_SET_ADV_DATA_NO_FRAG;
1802 
1803 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA,
1804 				    struct_size(pdu, data, len), pdu,
1805 				    HCI_CMD_TIMEOUT);
1806 	if (err)
1807 		return err;
1808 
1809 	/* Update data if the command succeeds */
1810 	if (adv) {
1811 		adv->adv_data_changed = false;
1812 	} else {
1813 		memcpy(hdev->adv_data, pdu->data, len);
1814 		hdev->adv_data_len = len;
1815 	}
1816 
1817 	return 0;
1818 }
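
/* Editor's note (illustrative, not part of hci_sync.c): DEFINE_FLEX() from
 * <linux/overflow.h>, used above, declares zeroed on-stack storage for a
 * structure with a trailing flexible array member, sized for the given
 * element count, and presets the named counter field. A roughly equivalent
 * hand-rolled form, using a hypothetical demo struct:
 */
struct demo_pdu {
	u8 handle;
	u8 length;
	u8 data[];			/* flexible array member */
};

static void demo_flex(void)
{
	/* What DEFINE_FLEX(struct demo_pdu, pdu, data, length, 31) buys us */
	union {
		u8 bytes[sizeof(struct demo_pdu) + 31];
		struct demo_pdu obj;
	} pdu_u = { .obj.length = 31 };
	struct demo_pdu *pdu = &pdu_u.obj;

	pdu->handle = 0x00;	/* fill in as hci_set_ext_adv_data_sync() does */
}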
1819 
1820 static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
1821 {
1822 	struct hci_cp_le_set_adv_data cp;
1823 	u8 len;
1824 
1825 	memset(&cp, 0, sizeof(cp));
1826 
1827 	len = eir_create_adv_data(hdev, instance, cp.data);
1828 
1829 	/* There's nothing to do if the data hasn't changed */
1830 	if (hdev->adv_data_len == len &&
1831 	    memcmp(cp.data, hdev->adv_data, len) == 0)
1832 		return 0;
1833 
1834 	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1835 	hdev->adv_data_len = len;
1836 
1837 	cp.length = len;
1838 
1839 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
1840 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
1841 }
1842 
1843 int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
1844 {
1845 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1846 		return 0;
1847 
1848 	if (ext_adv_capable(hdev))
1849 		return hci_set_ext_adv_data_sync(hdev, instance);
1850 
1851 	return hci_set_adv_data_sync(hdev, instance);
1852 }
1853 
1854 int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
1855 				   bool force)
1856 {
1857 	struct adv_info *adv = NULL;
1858 	u16 timeout;
1859 
1860 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev))
1861 		return -EPERM;
1862 
1863 	if (hdev->adv_instance_timeout)
1864 		return -EBUSY;
1865 
1866 	adv = hci_find_adv_instance(hdev, instance);
1867 	if (!adv)
1868 		return -ENOENT;
1869 
1870 	/* A zero timeout means unlimited advertising. As long as there is
1871 	 * only one instance, duration should be ignored. We still set a timeout
1872 	 * in case further instances are being added later on.
1873 	 *
1874 	 * If the remaining lifetime of the instance is more than the duration
1875 	 * then the timeout corresponds to the duration, otherwise it will be
1876 	 * reduced to the remaining instance lifetime.
1877 	 */
1878 	if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
1879 		timeout = adv->duration;
1880 	else
1881 		timeout = adv->remaining_time;
1882 
1883 	/* The remaining time is being reduced unless the instance is being
1884 	 * advertised without time limit.
1885 	 */
1886 	if (adv->timeout)
1887 		adv->remaining_time = adv->remaining_time - timeout;
1888 
1889 	/* Only use work for scheduling instances with legacy advertising */
1890 	if (!ext_adv_capable(hdev)) {
1891 		hdev->adv_instance_timeout = timeout;
1892 		queue_delayed_work(hdev->req_workqueue,
1893 				   &hdev->adv_instance_expire,
1894 				   msecs_to_jiffies(timeout * 1000));
1895 	}
1896 
1897 	/* If we're just re-scheduling the same instance again then do not
1898 	 * execute any HCI commands. This happens when a single instance is
1899 	 * being advertised.
1900 	 */
1901 	if (!force && hdev->cur_adv_instance == instance &&
1902 	    hci_dev_test_flag(hdev, HCI_LE_ADV))
1903 		return 0;
1904 
1905 	hdev->cur_adv_instance = instance;
1906 
1907 	return hci_start_adv_sync(hdev, instance);
1908 }
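
/* Editor's sketch (not part of hci_sync.c): the duration bookkeeping above
 * reduces to a small pure helper. Example: duration 2s with 5s of lifetime
 * remaining yields a 2s timeout (3s remaining afterwards); with only 1s
 * remaining it yields a 1s timeout instead.
 */
static u16 adv_pick_timeout(u16 lifetime, u16 duration, u16 remaining)
{
	if (lifetime == 0 || duration <= remaining)
		return duration;

	return remaining;
}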
1909 
1910 static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
1911 {
1912 	int err;
1913 
1914 	if (!ext_adv_capable(hdev))
1915 		return 0;
1916 
1917 	/* Disable instance 0x00 to disable all instances */
1918 	err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
1919 	if (err)
1920 		return err;
1921 
1922 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS,
1923 					0, NULL, 0, HCI_CMD_TIMEOUT, sk);
1924 }
1925 
1926 static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
1927 {
1928 	struct adv_info *adv, *n;
1929 	int err = 0;
1930 
1931 	if (ext_adv_capable(hdev)) {
1932 		/* Remove all existing sets */
1933 		err = hci_clear_adv_sets_sync(hdev, sk);
1934 		return err;
1935 	}
1936 
1937 	/* This is safe as long as no command is sent while the lock is
1938 	 * held.
1939 	 */
1940 	hci_dev_lock(hdev);
1941 
1942 	/* Cleanup non-ext instances */
1943 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
1944 		u8 instance = adv->instance;
1945 		int err;
1946 
1947 		if (!(force || adv->timeout))
1948 			continue;
1949 
1950 		err = hci_remove_adv_instance(hdev, instance);
1951 		if (!err)
1952 			mgmt_advertising_removed(sk, hdev, instance);
1953 	}
1954 
1955 	hci_dev_unlock(hdev);
1956 
1957 	return 0;
1958 }
1959 
1960 static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
1961 			       struct sock *sk)
1962 {
1963 	int err = 0;
1964 
1965 	/* If we use extended advertising, instance has to be removed first. */
1966 	if (ext_adv_capable(hdev)) {
1967 		err = hci_remove_ext_adv_instance_sync(hdev, instance, sk);
1968 		return err;
1969 	}
1970 
1971 	/* This is safe as long as no command is sent while the lock is
1972 	 * held.
1973 	 */
1974 	hci_dev_lock(hdev);
1975 
1976 	err = hci_remove_adv_instance(hdev, instance);
1977 	if (!err)
1978 		mgmt_advertising_removed(sk, hdev, instance);
1979 
1980 	hci_dev_unlock(hdev);
1981 
1982 	return err;
1983 }
1984 
1985 /* For a single instance:
1986  * - force == true: The instance will be removed even when its remaining
1987  *   lifetime is not zero.
1988  * - force == false: the instance will be deactivated but kept stored unless
1989  *   the remaining lifetime is zero.
1990  *
1991  * For instance == 0x00:
1992  * - force == true: All instances will be removed regardless of their timeout
1993  *   setting.
1994  * - force == false: Only instances that have a timeout will be removed.
1995  */
1996 int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
1997 				u8 instance, bool force)
1998 {
1999 	struct adv_info *next = NULL;
2000 	int err;
2001 
2002 	/* Cancel any timeout concerning the removed instance(s). */
2003 	if (!instance || hdev->cur_adv_instance == instance)
2004 		cancel_adv_timeout(hdev);
2005 
2006 	/* Get the next instance to advertise BEFORE we remove
2007 	 * the current one. This can be the same instance again
2008 	 * if there is only one instance.
2009 	 */
2010 	if (hdev->cur_adv_instance == instance)
2011 		next = hci_get_next_instance(hdev, instance);
2012 
2013 	if (!instance) {
2014 		err = hci_clear_adv_sync(hdev, sk, force);
2015 		if (err)
2016 			return err;
2017 	} else {
2018 		struct adv_info *adv = hci_find_adv_instance(hdev, instance);
2019 
2020 		if (force || (adv && adv->timeout && !adv->remaining_time)) {
2021 			/* Don't advertise a removed instance. */
2022 			if (next && next->instance == instance)
2023 				next = NULL;
2024 
2025 			err = hci_remove_adv_sync(hdev, instance, sk);
2026 			if (err)
2027 				return err;
2028 		}
2029 	}
2030 
2031 	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING))
2032 		return 0;
2033 
2034 	if (next && !ext_adv_capable(hdev))
2035 		hci_schedule_adv_instance_sync(hdev, next->instance, false);
2036 
2037 	return 0;
2038 }
2039 
2040 int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle)
2041 {
2042 	struct hci_cp_read_rssi cp;
2043 
2044 	cp.handle = handle;
2045 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI,
2046 					sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2047 }
2048 
2049 int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp)
2050 {
2051 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK,
2052 					sizeof(*cp), cp, HCI_CMD_TIMEOUT);
2053 }
2054 
2055 int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)
2056 {
2057 	struct hci_cp_read_tx_power cp;
2058 
2059 	cp.handle = handle;
2060 	cp.type = type;
2061 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER,
2062 					sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2063 }
2064 
2065 int hci_disable_advertising_sync(struct hci_dev *hdev)
2066 {
2067 	u8 enable = 0x00;
2068 	int err = 0;
2069 
2070 	/* If controller is not advertising we are done. */
2071 	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))
2072 		return 0;
2073 
2074 	if (ext_adv_capable(hdev)) {
2075 		err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
2076 		return err;
2077 	}
2078 
2079 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
2080 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
2081 }
2082 
2083 static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,
2084 					   u8 filter_dup)
2085 {
2086 	struct hci_cp_le_set_ext_scan_enable cp;
2087 
2088 	memset(&cp, 0, sizeof(cp));
2089 	cp.enable = val;
2090 
2091 	if (hci_dev_test_flag(hdev, HCI_MESH))
2092 		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2093 	else
2094 		cp.filter_dup = filter_dup;
2095 
2096 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2097 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2098 }
2099 
2100 static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,
2101 				       u8 filter_dup)
2102 {
2103 	struct hci_cp_le_set_scan_enable cp;
2104 
2105 	if (use_ext_scan(hdev))
2106 		return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup);
2107 
2108 	memset(&cp, 0, sizeof(cp));
2109 	cp.enable = val;
2110 
2111 	if (val && hci_dev_test_flag(hdev, HCI_MESH))
2112 		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
2113 	else
2114 		cp.filter_dup = filter_dup;
2115 
2116 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,
2117 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2118 }
2119 
2120 static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val)
2121 {
2122 	if (!use_ll_privacy(hdev))
2123 		return 0;
2124 
2125 	/* If the controller is already in the requested state we are done. */
2126 	if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
2127 		return 0;
2128 
2129 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
2130 				     sizeof(val), &val, HCI_CMD_TIMEOUT);
2131 }
2132 
2133 static int hci_scan_disable_sync(struct hci_dev *hdev)
2134 {
2135 	int err;
2136 
2137 	/* If controller is not scanning we are done. */
2138 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2139 		return 0;
2140 
2141 	if (hdev->scanning_paused) {
2142 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2143 		return 0;
2144 	}
2145 
2146 	err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
2147 	if (err) {
2148 		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
2149 		return err;
2150 	}
2151 
2152 	return err;
2153 }
2154 
2155 static bool scan_use_rpa(struct hci_dev *hdev)
2156 {
2157 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
2158 }
2159 
2160 static void hci_start_interleave_scan(struct hci_dev *hdev)
2161 {
2162 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
2163 	queue_delayed_work(hdev->req_workqueue,
2164 			   &hdev->interleave_scan, 0);
2165 }
2166 
2167 static void cancel_interleave_scan(struct hci_dev *hdev)
2168 {
2169 	bt_dev_dbg(hdev, "cancelling interleave scan");
2170 
2171 	cancel_delayed_work_sync(&hdev->interleave_scan);
2172 
2173 	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
2174 }
2175 
2176 /* Return true if this function started the interleave scan (it was not
2177  * running on entry), otherwise return false.
2178  */
2179 static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
2180 {
2181 	/* Do interleaved scan only if all of the following are true:
2182 	 * - There is at least one ADV monitor
2183 	 * - At least one pending LE connection or one device to be scanned for
2184 	 * - Monitor offloading is not supported
2185 	 * If so, we should alternate between allowlist scan and one without
2186 	 * any filters to save power.
2187 	 */
2188 	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
2189 				!(list_empty(&hdev->pend_le_conns) &&
2190 				  list_empty(&hdev->pend_le_reports)) &&
2191 				hci_get_adv_monitor_offload_ext(hdev) ==
2192 				    HCI_ADV_MONITOR_EXT_NONE;
2193 	bool is_interleaving = is_interleave_scanning(hdev);
2194 
2195 	if (use_interleaving && !is_interleaving) {
2196 		hci_start_interleave_scan(hdev);
2197 		bt_dev_dbg(hdev, "starting interleave scan");
2198 		return true;
2199 	}
2200 
2201 	if (!use_interleaving && is_interleaving)
2202 		cancel_interleave_scan(hdev);
2203 
2204 	return false;
2205 }
2206 
2207 /* Removes device from the resolving list if needed. */
2208 static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
2209 					bdaddr_t *bdaddr, u8 bdaddr_type)
2210 {
2211 	struct hci_cp_le_del_from_resolv_list cp;
2212 	struct bdaddr_list_with_irk *entry;
2213 
2214 	if (!use_ll_privacy(hdev))
2215 		return 0;
2216 
2217 	/* Check if the IRK has been programmed */
2218 	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
2219 						bdaddr_type);
2220 	if (!entry)
2221 		return 0;
2222 
2223 	cp.bdaddr_type = bdaddr_type;
2224 	bacpy(&cp.bdaddr, bdaddr);
2225 
2226 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
2227 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2228 }
2229 
2230 static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
2231 				       bdaddr_t *bdaddr, u8 bdaddr_type)
2232 {
2233 	struct hci_cp_le_del_from_accept_list cp;
2234 	int err;
2235 
2236 	/* Check if device is on accept list before removing it */
2237 	if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
2238 		return 0;
2239 
2240 	cp.bdaddr_type = bdaddr_type;
2241 	bacpy(&cp.bdaddr, bdaddr);
2242 
2243 	/* Ignore errors when removing from the resolving list as it is
2244 	 * likely that the device was never added.
2245 	 */
2246 	hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2247 
2248 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
2249 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2250 	if (err) {
2251 		bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
2252 		return err;
2253 	}
2254 
2255 	bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
2256 		   cp.bdaddr_type);
2257 
2258 	return 0;
2259 }
2260 
2261 struct conn_params {
2262 	bdaddr_t addr;
2263 	u8 addr_type;
2264 	hci_conn_flags_t flags;
2265 	u8 privacy_mode;
2266 };
2267 
2268 /* Adds device to the resolving list if needed.
2269  * Setting params to NULL programs the local hdev->irk.
2270  */
2271 static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
2272 					struct conn_params *params)
2273 {
2274 	struct hci_cp_le_add_to_resolv_list cp;
2275 	struct smp_irk *irk;
2276 	struct bdaddr_list_with_irk *entry;
2277 	struct hci_conn_params *p;
2278 
2279 	if (!use_ll_privacy(hdev))
2280 		return 0;
2281 
2282 	/* Attempt to program local identity address, type and irk if params is
2283 	 * NULL.
2284 	 */
2285 	if (!params) {
2286 		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
2287 			return 0;
2288 
2289 		hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
2290 		memcpy(cp.peer_irk, hdev->irk, 16);
2291 		goto done;
2292 	}
2293 
2294 	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2295 	if (!irk)
2296 		return 0;
2297 
2298 	/* Check if the IRK has _not_ been programmed yet. */
2299 	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
2300 						&params->addr,
2301 						params->addr_type);
2302 	if (entry)
2303 		return 0;
2304 
2305 	cp.bdaddr_type = params->addr_type;
2306 	bacpy(&cp.bdaddr, &params->addr);
2307 	memcpy(cp.peer_irk, irk->val, 16);
2308 
2309 	/* Default privacy mode is always Network */
2310 	params->privacy_mode = HCI_NETWORK_PRIVACY;
2311 
2312 	rcu_read_lock();
2313 	p = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2314 				      &params->addr, params->addr_type);
2315 	if (!p)
2316 		p = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2317 					      &params->addr, params->addr_type);
2318 	if (p)
2319 		WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY);
2320 	rcu_read_unlock();
2321 
2322 done:
2323 	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
2324 		memcpy(cp.local_irk, hdev->irk, 16);
2325 	else
2326 		memset(cp.local_irk, 0, 16);
2327 
2328 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
2329 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2330 }
2331 
2332 /* Set Device Privacy Mode. */
2333 static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
2334 					struct conn_params *params)
2335 {
2336 	struct hci_cp_le_set_privacy_mode cp;
2337 	struct smp_irk *irk;
2338 
2339 	/* If device privacy mode has already been set there is nothing to do */
2340 	if (params->privacy_mode == HCI_DEVICE_PRIVACY)
2341 		return 0;
2342 
2343 	/* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also
2344 	 * indicates that LL Privacy has been enabled and
2345 	 * HCI_OP_LE_SET_PRIVACY_MODE is supported.
2346 	 */
2347 	if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY))
2348 		return 0;
2349 
2350 	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2351 	if (!irk)
2352 		return 0;
2353 
2354 	memset(&cp, 0, sizeof(cp));
2355 	cp.bdaddr_type = irk->addr_type;
2356 	bacpy(&cp.bdaddr, &irk->bdaddr);
2357 	cp.mode = HCI_DEVICE_PRIVACY;
2358 
2359 	/* Note: params->privacy_mode is not updated since it is a copy */
2360 
2361 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
2362 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2363 }
2364 
2365 /* Adds connection to allow list if needed, if the device uses RPA (has IRK)
2366  * this attempts to program the device in the resolving list as well and
2367  * properly set the privacy mode.
2368  */
2369 static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
2370 				       struct conn_params *params,
2371 				       u8 *num_entries)
2372 {
2373 	struct hci_cp_le_add_to_accept_list cp;
2374 	int err;
2375 
2376 	/* During suspend, only wakeable devices can be in acceptlist */
2377 	if (hdev->suspended &&
2378 	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) {
2379 		hci_le_del_accept_list_sync(hdev, &params->addr,
2380 					    params->addr_type);
2381 		return 0;
2382 	}
2383 
2384 	/* Select filter policy to accept all advertising */
2385 	if (*num_entries >= hdev->le_accept_list_size)
2386 		return -ENOSPC;
2387 
2388 	/* Accept list cannot be used with RPAs */
2389 	if (!use_ll_privacy(hdev) &&
2390 	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
2391 		return -EINVAL;
2392 
2393 	/* Attempt to program the device in the resolving list first, to avoid
2394 	 * having to roll back in case it fails, since the resolving list is
2395 	 * dynamic and can probably be smaller than the accept list.
2396 	 */
2397 	err = hci_le_add_resolve_list_sync(hdev, params);
2398 	if (err) {
2399 		bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
2400 		return err;
2401 	}
2402 
2403 	/* Set Privacy Mode */
2404 	err = hci_le_set_privacy_mode_sync(hdev, params);
2405 	if (err) {
2406 		bt_dev_err(hdev, "Unable to set privacy mode: %d", err);
2407 		return err;
2408 	}
2409 
2410 	/* Check if already in accept list */
2411 	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
2412 				   params->addr_type))
2413 		return 0;
2414 
2415 	*num_entries += 1;
2416 	cp.bdaddr_type = params->addr_type;
2417 	bacpy(&cp.bdaddr, &params->addr);
2418 
2419 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
2420 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2421 	if (err) {
2422 		bt_dev_err(hdev, "Unable to add to allow list: %d", err);
2423 		/* Rollback the device from the resolving list */
2424 		hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2425 		return err;
2426 	}
2427 
2428 	bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
2429 		   cp.bdaddr_type);
2430 
2431 	return 0;
2432 }
2433 
2434 /* This function disables/pauses all advertising instances */
2435 static int hci_pause_advertising_sync(struct hci_dev *hdev)
2436 {
2437 	int err;
2438 	int old_state;
2439 
2440 	/* If already been paused there is nothing to do. */
2441 	if (hdev->advertising_paused)
2442 		return 0;
2443 
2444 	bt_dev_dbg(hdev, "Pausing directed advertising");
2445 
2446 	/* Stop directed advertising */
2447 	old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
2448 	if (old_state) {
2449 		/* When discoverable timeout triggers, then just make sure
2450 		 * the limited discoverable flag is cleared. Even in the case
2451 		 * of a timeout triggered from general discoverable, it is
2452 		 * safe to unconditionally clear the flag.
2453 		 */
2454 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2455 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2456 		hdev->discov_timeout = 0;
2457 	}
2458 
2459 	bt_dev_dbg(hdev, "Pausing advertising instances");
2460 
2461 	/* Call to disable any advertisements active on the controller.
2462 	 * This will succeed even if no advertisements are configured.
2463 	 */
2464 	err = hci_disable_advertising_sync(hdev);
2465 	if (err)
2466 		return err;
2467 
2468 	/* If we are using software rotation, pause the loop */
2469 	if (!ext_adv_capable(hdev))
2470 		cancel_adv_timeout(hdev);
2471 
2472 	hdev->advertising_paused = true;
2473 	hdev->advertising_old_state = old_state;
2474 
2475 	return 0;
2476 }
2477 
2478 /* This function enables all user advertising instances */
2479 static int hci_resume_advertising_sync(struct hci_dev *hdev)
2480 {
2481 	struct adv_info *adv, *tmp;
2482 	int err;
2483 
2484 	/* If advertising has not been paused there is nothing to do. */
2485 	if (!hdev->advertising_paused)
2486 		return 0;
2487 
2488 	/* Resume directed advertising */
2489 	hdev->advertising_paused = false;
2490 	if (hdev->advertising_old_state) {
2491 		hci_dev_set_flag(hdev, HCI_ADVERTISING);
2492 		hdev->advertising_old_state = 0;
2493 	}
2494 
2495 	bt_dev_dbg(hdev, "Resuming advertising instances");
2496 
2497 	if (ext_adv_capable(hdev)) {
2498 		/* Call for each tracked instance to be re-enabled */
2499 		list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
2500 			err = hci_enable_ext_advertising_sync(hdev,
2501 							      adv->instance);
2502 			if (!err)
2503 				continue;
2504 
2505 			/* If the instance cannot be resumed remove it */
2506 			hci_remove_ext_adv_instance_sync(hdev, adv->instance,
2507 							 NULL);
2508 		}
2509 	} else {
2510 		/* Schedule for most recent instance to be restarted and begin
2511 		 * the software rotation loop
2512 		 */
2513 		err = hci_schedule_adv_instance_sync(hdev,
2514 						     hdev->cur_adv_instance,
2515 						     true);
2516 	}
2517 
2518 	hdev->advertising_paused = false;
2519 
2520 	return err;
2521 }
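
/* Editor's sketch (not part of hci_sync.c): the pause/resume pair above is
 * used to bracket operations the controller rejects while advertising, e.g.
 * the resolving-list updates in hci_update_accept_list_sync() further down.
 * Typical shape:
 */
static int demo_with_adv_paused(struct hci_dev *hdev)
{
	int err;

	err = hci_pause_advertising_sync(hdev);
	if (err)
		return err;

	/* ... modify accept/resolving lists here ... */

	return hci_resume_advertising_sync(hdev);
}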
2522 
2523 static int hci_pause_addr_resolution(struct hci_dev *hdev)
2524 {
2525 	int err;
2526 
2527 	if (!use_ll_privacy(hdev))
2528 		return 0;
2529 
2530 	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
2531 		return 0;
2532 
2533 	/* Cannot disable addr resolution if scanning is enabled or
2534 	 * when initiating an LE connection.
2535 	 */
2536 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2537 	    hci_lookup_le_connect(hdev)) {
2538 		bt_dev_err(hdev, "Command not allowed when scan/LE connect");
2539 		return -EPERM;
2540 	}
2541 
2542 	/* Cannot disable addr resolution if advertising is enabled. */
2543 	err = hci_pause_advertising_sync(hdev);
2544 	if (err) {
2545 		bt_dev_err(hdev, "Pause advertising failed: %d", err);
2546 		return err;
2547 	}
2548 
2549 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2550 	if (err)
2551 		bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
2552 			   err);
2553 
2554 	/* Return if address resolution was disabled and RPA is in use. */
2555 	if (!err && scan_use_rpa(hdev))
2556 		return 0;
2557 
2558 	hci_resume_advertising_sync(hdev);
2559 	return err;
2560 }
2561 
2562 struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
2563 					     bool extended, struct sock *sk)
2564 {
2565 	u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
2566 					HCI_OP_READ_LOCAL_OOB_DATA;
2567 
2568 	return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
2569 }
2570 
2571 static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
2572 {
2573 	struct hci_conn_params *params;
2574 	struct conn_params *p;
2575 	size_t i;
2576 
2577 	rcu_read_lock();
2578 
2579 	i = 0;
2580 	list_for_each_entry_rcu(params, list, action)
2581 		++i;
2582 	*n = i;
2583 
2584 	rcu_read_unlock();
2585 
2586 	p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL);
2587 	if (!p)
2588 		return NULL;
2589 
2590 	rcu_read_lock();
2591 
2592 	i = 0;
2593 	list_for_each_entry_rcu(params, list, action) {
2594 		/* Racing adds are handled in next scan update */
2595 		if (i >= *n)
2596 			break;
2597 
2598 		/* No hdev->lock, but: addr, addr_type are immutable.
2599 		 * privacy_mode is only written by us or in
2600 		 * hci_cc_le_set_privacy_mode that we wait for.
2601 		 * We should be idempotent so MGMT updating flags
2602 		 * while we are processing is OK.
2603 		 */
2604 		bacpy(&p[i].addr, &params->addr);
2605 		p[i].addr_type = params->addr_type;
2606 		p[i].flags = READ_ONCE(params->flags);
2607 		p[i].privacy_mode = READ_ONCE(params->privacy_mode);
2608 		++i;
2609 	}
2610 
2611 	rcu_read_unlock();
2612 
2613 	*n = i;
2614 	return p;
2615 }
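
/* Editor's sketch (not part of hci_sync.c): conn_params_copy() lets callers
 * iterate a stable plain-value snapshot without holding RCU or hdev->lock.
 * Hypothetical caller:
 */
static void demo_dump_pend_conns(struct hci_dev *hdev)
{
	struct conn_params *p;
	size_t i, n;

	p = conn_params_copy(&hdev->pend_le_conns, &n);
	if (!p)
		return;

	/* p[] is a private copy; the source list may mutate freely */
	for (i = 0; i < n; i++)
		bt_dev_dbg(hdev, "%pMR (0x%x)", &p[i].addr, p[i].addr_type);

	kvfree(p);
}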
2616 
2617 /* Clear LE Accept List */
2618 static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
2619 {
2620 	if (!(hdev->commands[26] & 0x80))
2621 		return 0;
2622 
2623 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
2624 				     HCI_CMD_TIMEOUT);
2625 }
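
/* Editor's note (illustrative, not part of hci_sync.c): hdev->commands[]
 * caches the Supported_Commands bitmask returned by Read Local Supported
 * Commands; the guard above checks octet 26 bit 7, which advertises the
 * LE Clear Accept List command. A generic form of the test (hypothetical
 * helper):
 */
static inline bool demo_cmd_supported(struct hci_dev *hdev, unsigned int octet,
				      unsigned int bit)
{
	return hdev->commands[octet] & BIT(bit);
}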
2626 
2627 /* Device must not be scanning when updating the accept list.
2628  *
2629  * Update is done using the following sequence:
2630  *
2631  * use_ll_privacy((Disable Advertising) -> Disable Resolving List) ->
2632  * Remove Devices From Accept List ->
2633  * (has IRK && use_ll_privacy(Remove Devices From Resolving List))->
2634  * Add Devices to Accept List ->
2635  * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) ->
2636  * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) ->
2637  * Enable Scanning
2638  *
2639  * In case of failure advertising shall be restored to its original state and
2640  * the returned filter policy will disable use of the accept list, since
2641  * either the accept list or the resolving list could not be programmed.
2642  *
2643  */
2644 static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
2645 {
2646 	struct conn_params *params;
2647 	struct bdaddr_list *b, *t;
2648 	u8 num_entries = 0;
2649 	bool pend_conn, pend_report;
2650 	u8 filter_policy;
2651 	size_t i, n;
2652 	int err;
2653 
2654 	/* Pause advertising if resolving list can be used as controllers
2655 	 * cannot accept resolving list modifications while advertising.
2656 	 */
2657 	if (use_ll_privacy(hdev)) {
2658 		err = hci_pause_advertising_sync(hdev);
2659 		if (err) {
2660 			bt_dev_err(hdev, "pause advertising failed: %d", err);
2661 			return 0x00;
2662 		}
2663 	}
2664 
2665 	/* Disable address resolution while reprogramming accept list since
2666 	 * devices that do have an IRK will be programmed in the resolving list
2667 	 * when LL Privacy is enabled.
2668 	 */
2669 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2670 	if (err) {
2671 		bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
2672 		goto done;
2673 	}
2674 
2675 	/* Force address filtering if PA Sync is in progress */
2676 	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
2677 		struct hci_cp_le_pa_create_sync *sent;
2678 
2679 		sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC);
2680 		if (sent) {
2681 			struct conn_params pa;
2682 
2683 			memset(&pa, 0, sizeof(pa));
2684 
2685 			bacpy(&pa.addr, &sent->addr);
2686 			pa.addr_type = sent->addr_type;
2687 
2688 			/* Clear first since there could be addresses left
2689 			 * behind.
2690 			 */
2691 			hci_le_clear_accept_list_sync(hdev);
2692 
2693 			num_entries = 1;
2694 			err = hci_le_add_accept_list_sync(hdev, &pa,
2695 							  &num_entries);
2696 			goto done;
2697 		}
2698 	}
2699 
2700 	/* Go through the current accept list programmed into the
2701 	 * controller one by one and check if that address is connected or is
2702 	 * still in the list of pending connections or list of devices to
2703 	 * report. If not present in either list, then remove it from
2704 	 * the controller.
2705 	 */
2706 	list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
2707 		if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type))
2708 			continue;
2709 
2710 		/* Pointers not dereferenced, no locks needed */
2711 		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2712 						      &b->bdaddr,
2713 						      b->bdaddr_type);
2714 		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2715 							&b->bdaddr,
2716 							b->bdaddr_type);
2717 
2718 		/* If the device is not likely to connect or report,
2719 		 * remove it from the acceptlist.
2720 		 */
2721 		if (!pend_conn && !pend_report) {
2722 			hci_le_del_accept_list_sync(hdev, &b->bdaddr,
2723 						    b->bdaddr_type);
2724 			continue;
2725 		}
2726 
2727 		num_entries++;
2728 	}
2729 
2730 	/* Since all no longer valid accept list entries have been
2731 	 * removed, walk through the list of pending connections
2732 	 * and ensure that any new device gets programmed into
2733 	 * the controller.
2734 	 *
2735 	 * If the list of the devices is larger than the list of
2736 	 * available accept list entries in the controller, then
2737 	 * just abort and return a filter policy value to not use the
2738 	 * accept list.
2739 	 *
2740 	 * The list and params may be mutated while we wait for events,
2741 	 * so make a copy and iterate it.
2742 	 */
2743 
2744 	params = conn_params_copy(&hdev->pend_le_conns, &n);
2745 	if (!params) {
2746 		err = -ENOMEM;
2747 		goto done;
2748 	}
2749 
2750 	for (i = 0; i < n; ++i) {
2751 		err = hci_le_add_accept_list_sync(hdev, &params[i],
2752 						  &num_entries);
2753 		if (err) {
2754 			kvfree(params);
2755 			goto done;
2756 		}
2757 	}
2758 
2759 	kvfree(params);
2760 
2761 	/* After adding all new pending connections, walk through
2762 	 * the list of pending reports and also add these to the
2763 	 * accept list if there is still space. Abort if space runs out.
2764 	 */
2765 
2766 	params = conn_params_copy(&hdev->pend_le_reports, &n);
2767 	if (!params) {
2768 		err = -ENOMEM;
2769 		goto done;
2770 	}
2771 
2772 	for (i = 0; i < n; ++i) {
2773 		err = hci_le_add_accept_list_sync(hdev, &params[i],
2774 						  &num_entries);
2775 		if (err) {
2776 			kvfree(params);
2777 			goto done;
2778 		}
2779 	}
2780 
2781 	kvfree(params);
2782 
2783 	/* Use the allowlist unless the following conditions are all true:
2784 	 * - We are not currently suspending
2785 	 * - There are 1 or more ADV monitors registered and it's not offloaded
2786 	 * - Interleaved scanning is not currently using the allowlist
2787 	 */
2788 	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
2789 	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
2790 	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
2791 		err = -EINVAL;
2792 
2793 done:
2794 	filter_policy = err ? 0x00 : 0x01;
2795 
2796 	/* Enable address resolution when LL Privacy is enabled. */
2797 	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
2798 	if (err)
2799 		bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);
2800 
2801 	/* Resume advertising if it was paused */
2802 	if (use_ll_privacy(hdev))
2803 		hci_resume_advertising_sync(hdev);
2804 
2805 	/* Select filter policy to use accept list */
2806 	return filter_policy;
2807 }
2808 
2809 static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp,
2810 				   u8 type, u16 interval, u16 window)
2811 {
2812 	cp->type = type;
2813 	cp->interval = cpu_to_le16(interval);
2814 	cp->window = cpu_to_le16(window);
2815 }
2816 
2817 static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
2818 					  u16 interval, u16 window,
2819 					  u8 own_addr_type, u8 filter_policy)
2820 {
2821 	struct hci_cp_le_set_ext_scan_params *cp;
2822 	struct hci_cp_le_scan_phy_params *phy;
2823 	u8 data[sizeof(*cp) + sizeof(*phy) * 2];
2824 	u8 num_phy = 0x00;
2825 
2826 	cp = (void *)data;
2827 	phy = (void *)cp->data;
2828 
2829 	memset(data, 0, sizeof(data));
2830 
2831 	cp->own_addr_type = own_addr_type;
2832 	cp->filter_policy = filter_policy;
2833 
2834 	/* If PA Sync is in progress, select the PHY based on the
2835 	 * hci_conn.iso_qos.
2836 	 */
2837 	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
2838 		struct hci_cp_le_add_to_accept_list *sent;
2839 
2840 		sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
2841 		if (sent) {
2842 			struct hci_conn *conn;
2843 
2844 			conn = hci_conn_hash_lookup_ba(hdev, ISO_LINK,
2845 						       &sent->bdaddr);
2846 			if (conn) {
2847 				struct bt_iso_qos *qos = &conn->iso_qos;
2848 
2849 				if (qos->bcast.in.phy & BT_ISO_PHY_1M ||
2850 				    qos->bcast.in.phy & BT_ISO_PHY_2M) {
2851 					cp->scanning_phys |= LE_SCAN_PHY_1M;
2852 					hci_le_scan_phy_params(phy, type,
2853 							       interval,
2854 							       window);
2855 					num_phy++;
2856 					phy++;
2857 				}
2858 
2859 				if (qos->bcast.in.phy & BT_ISO_PHY_CODED) {
2860 					cp->scanning_phys |= LE_SCAN_PHY_CODED;
2861 					hci_le_scan_phy_params(phy, type,
2862 							       interval * 3,
2863 							       window * 3);
2864 					num_phy++;
2865 					phy++;
2866 				}
2867 
2868 				if (num_phy)
2869 					goto done;
2870 			}
2871 		}
2872 	}
2873 
2874 	if (scan_1m(hdev) || scan_2m(hdev)) {
2875 		cp->scanning_phys |= LE_SCAN_PHY_1M;
2876 		hci_le_scan_phy_params(phy, type, interval, window);
2877 		num_phy++;
2878 		phy++;
2879 	}
2880 
2881 	if (scan_coded(hdev)) {
2882 		cp->scanning_phys |= LE_SCAN_PHY_CODED;
2883 		hci_le_scan_phy_params(phy, type, interval * 3, window * 3);
2884 		num_phy++;
2885 		phy++;
2886 	}
2887 
2888 done:
2889 	if (!num_phy)
2890 		return -EINVAL;
2891 
2892 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
2893 				     sizeof(*cp) + sizeof(*phy) * num_phy,
2894 				     data, HCI_CMD_TIMEOUT);
2895 }
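
/* Editor's note (illustrative, not part of hci_sync.c): scan interval and
 * window are in 0.625 ms units, and the Coded PHY values above are tripled,
 * presumably to offset the longer air time of coded packets. Worked example
 * with a common default interval of 0x0060:
 *
 *	1M PHY:    0x0060 * 0.625 ms       =  60 ms
 *	Coded PHY: 0x0060 * 3 * 0.625 ms   = 180 ms
 */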
2896 
2897 static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type,
2898 				      u16 interval, u16 window,
2899 				      u8 own_addr_type, u8 filter_policy)
2900 {
2901 	struct hci_cp_le_set_scan_param cp;
2902 
2903 	if (use_ext_scan(hdev))
2904 		return hci_le_set_ext_scan_param_sync(hdev, type, interval,
2905 						      window, own_addr_type,
2906 						      filter_policy);
2907 
2908 	memset(&cp, 0, sizeof(cp));
2909 	cp.type = type;
2910 	cp.interval = cpu_to_le16(interval);
2911 	cp.window = cpu_to_le16(window);
2912 	cp.own_address_type = own_addr_type;
2913 	cp.filter_policy = filter_policy;
2914 
2915 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM,
2916 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2917 }
2918 
2919 static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval,
2920 			       u16 window, u8 own_addr_type, u8 filter_policy,
2921 			       u8 filter_dup)
2922 {
2923 	int err;
2924 
2925 	if (hdev->scanning_paused) {
2926 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2927 		return 0;
2928 	}
2929 
2930 	err = hci_le_set_scan_param_sync(hdev, type, interval, window,
2931 					 own_addr_type, filter_policy);
2932 	if (err)
2933 		return err;
2934 
2935 	return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup);
2936 }
2937 
2938 static int hci_passive_scan_sync(struct hci_dev *hdev)
2939 {
2940 	u8 own_addr_type;
2941 	u8 filter_policy;
2942 	u16 window, interval;
2943 	u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE;
2944 	int err;
2945 
2946 	if (hdev->scanning_paused) {
2947 		bt_dev_dbg(hdev, "Scanning is paused for suspend");
2948 		return 0;
2949 	}
2950 
2951 	err = hci_scan_disable_sync(hdev);
2952 	if (err) {
2953 		bt_dev_err(hdev, "disable scanning failed: %d", err);
2954 		return err;
2955 	}
2956 
2957 	/* Set require_privacy to false since no SCAN_REQ are sent
2958 	 * during passive scanning. Not using a non-resolvable address
2959 	 * here is important so that peer devices using direct
2960 	 * advertising with our address will be correctly reported
2961 	 * by the controller.
2962 	 */
2963 	if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
2964 					   &own_addr_type))
2965 		return 0;
2966 
2967 	if (hdev->enable_advmon_interleave_scan &&
2968 	    hci_update_interleaved_scan_sync(hdev))
2969 		return 0;
2970 
2971 	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
2972 
2973 	/* Adding or removing entries from the accept list must
2974 	 * happen before enabling scanning. The controller does
2975 	 * not allow accept list modification while scanning.
2976 	 */
2977 	filter_policy = hci_update_accept_list_sync(hdev);
2978 
2979 	/* If suspended and filter_policy set to 0x00 (no acceptlist) then
2980 	 * passive scanning cannot be started since that would require the host
2981 	 * to be woken up to process the reports.
2982 	 */
2983 	if (hdev->suspended && !filter_policy) {
2984 		/* If the accept list is empty there is no need to scan
2985 		 * while suspended.
2986 		 */
2987 		if (list_empty(&hdev->le_accept_list))
2988 			return 0;
2989 
2990 		/* If there are devices in the accept_list it means some
2991 		 * devices could not be programmed, which in the non-suspended
2992 		 * case means filter_policy needs to be set to 0x00 so the host
2993 		 * does the filtering. Since this is the suspended case, we
2994 		 * ignore the devices needing host filtering to allow devices
2995 		 * in the acceptlist to be able to wake up the system.
2996 		 */
2997 		filter_policy = 0x01;
2998 	}
2999 
3000 	/* When the controller is using random resolvable addresses (i.e.
3001 	 * LE privacy is enabled), controllers that support Extended
3002 	 * Scanner Filter Policies can additionally handle directed
3003 	 * advertising.
3004 	 *
3005 	 * So instead of using filter policies 0x00 (no acceptlist)
3006 	 * and 0x01 (acceptlist enabled) use the new filter policies
3007 	 * 0x02 (no acceptlist) and 0x03 (acceptlist enabled).
3008 	 */
3009 	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
3010 	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
3011 		filter_policy |= 0x02;
3012 
3013 	if (hdev->suspended) {
3014 		window = hdev->le_scan_window_suspend;
3015 		interval = hdev->le_scan_int_suspend;
3016 	} else if (hci_is_le_conn_scanning(hdev)) {
3017 		window = hdev->le_scan_window_connect;
3018 		interval = hdev->le_scan_int_connect;
3019 	} else if (hci_is_adv_monitoring(hdev)) {
3020 		window = hdev->le_scan_window_adv_monitor;
3021 		interval = hdev->le_scan_int_adv_monitor;
3022 	} else {
3023 		window = hdev->le_scan_window;
3024 		interval = hdev->le_scan_interval;
3025 	}
3026 
3027 	/* Disable all filtering for Mesh */
3028 	if (hci_dev_test_flag(hdev, HCI_MESH)) {
3029 		filter_policy = 0;
3030 		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
3031 	}
3032 
3033 	bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);
3034 
3035 	return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
3036 				   own_addr_type, filter_policy, filter_dups);
3037 }
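
/* Editor's sketch (hypothetical enum, not part of hci_sync.c): the scanner
 * filter policy values composed above, as passed to the Set Scan Parameters
 * command:
 */
enum {
	DEMO_SCAN_FP_NO_ACCEPTLIST	= 0x00,	/* process all advertising */
	DEMO_SCAN_FP_ACCEPTLIST		= 0x01,	/* acceptlist only */
	DEMO_SCAN_FP_EXT_NO_ACCEPTLIST	= 0x02,	/* 0x00 + directed adv to our RPA */
	DEMO_SCAN_FP_EXT_ACCEPTLIST	= 0x03,	/* 0x01 + directed adv to our RPA */
};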
3038 
3039 /* This function controls the passive scanning based on hdev->pend_le_conns
3040  * list. If there are pending LE connections we start the background scanning,
3041  * otherwise we stop it in the following sequence:
3042  *
3043  * If there are devices to scan:
3044  *
3045  * Disable Scanning -> Update Accept List ->
3046  * use_ll_privacy((Disable Advertising) -> Disable Resolving List ->
3047  * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
3048  * Enable Scanning
3049  *
3050  * Otherwise:
3051  *
3052  * Disable Scanning
3053  */
3054 int hci_update_passive_scan_sync(struct hci_dev *hdev)
3055 {
3056 	int err;
3057 
3058 	if (!test_bit(HCI_UP, &hdev->flags) ||
3059 	    test_bit(HCI_INIT, &hdev->flags) ||
3060 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
3061 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
3062 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
3063 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
3064 		return 0;
3065 
3066 	/* No point in doing scanning if LE support hasn't been enabled */
3067 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
3068 		return 0;
3069 
3070 	/* If discovery is active don't interfere with it */
3071 	if (hdev->discovery.state != DISCOVERY_STOPPED)
3072 		return 0;
3073 
3074 	/* Reset RSSI and UUID filters when starting background scanning
3075 	 * since these filters are meant for service discovery only.
3076 	 *
3077 	 * The Start Discovery and Start Service Discovery operations
3078 	 * ensure to set proper values for RSSI threshold and UUID
3079 	 * filter list. So it is safe to just reset them here.
3080 	 */
3081 	hci_discovery_filter_clear(hdev);
3082 
3083 	bt_dev_dbg(hdev, "ADV monitoring is %s",
3084 		   hci_is_adv_monitoring(hdev) ? "on" : "off");
3085 
3086 	if (!hci_dev_test_flag(hdev, HCI_MESH) &&
3087 	    list_empty(&hdev->pend_le_conns) &&
3088 	    list_empty(&hdev->pend_le_reports) &&
3089 	    !hci_is_adv_monitoring(hdev) &&
3090 	    !hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
3091 		/* If there are no pending LE connections, no devices to be
3092 		 * scanned for and no ADV monitors, we should stop the
3093 		 * background scanning.
3094 		 */
3095 
3096 		bt_dev_dbg(hdev, "stopping background scanning");
3097 
3098 		err = hci_scan_disable_sync(hdev);
3099 		if (err)
3100 			bt_dev_err(hdev, "stop background scanning failed: %d",
3101 				   err);
3102 	} else {
3103 		/* If there is at least one pending LE connection, we should
3104 		 * keep the background scan running.
3105 		 */
3106 
3107 		/* If controller is connecting, we should not start scanning
3108 		 * since some controllers are not able to scan and connect at
3109 		 * the same time.
3110 		 */
3111 		if (hci_lookup_le_connect(hdev))
3112 			return 0;
3113 
3114 		bt_dev_dbg(hdev, "start background scanning");
3115 
3116 		err = hci_passive_scan_sync(hdev);
3117 		if (err)
3118 			bt_dev_err(hdev, "start background scanning failed: %d",
3119 				   err);
3120 	}
3121 
3122 	return err;
3123 }
3124 
3125 static int update_scan_sync(struct hci_dev *hdev, void *data)
3126 {
3127 	return hci_update_scan_sync(hdev);
3128 }
3129 
3130 int hci_update_scan(struct hci_dev *hdev)
3131 {
3132 	return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL);
3133 }
3134 
3135 static int update_passive_scan_sync(struct hci_dev *hdev, void *data)
3136 {
3137 	return hci_update_passive_scan_sync(hdev);
3138 }
3139 
3140 int hci_update_passive_scan(struct hci_dev *hdev)
3141 {
3142 	/* Only queue if it would have any effect */
3143 	if (!test_bit(HCI_UP, &hdev->flags) ||
3144 	    test_bit(HCI_INIT, &hdev->flags) ||
3145 	    hci_dev_test_flag(hdev, HCI_SETUP) ||
3146 	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
3147 	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
3148 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
3149 		return 0;
3150 
3151 	return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
3152 				       NULL);
3153 }
3154 
3155 int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
3156 {
3157 	int err;
3158 
3159 	if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev))
3160 		return 0;
3161 
3162 	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
3163 				    sizeof(val), &val, HCI_CMD_TIMEOUT);
3164 
3165 	if (!err) {
3166 		if (val) {
3167 			hdev->features[1][0] |= LMP_HOST_SC;
3168 			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
3169 		} else {
3170 			hdev->features[1][0] &= ~LMP_HOST_SC;
3171 			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
3172 		}
3173 	}
3174 
3175 	return err;
3176 }
3177 
3178 int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode)
3179 {
3180 	int err;
3181 
3182 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
3183 	    lmp_host_ssp_capable(hdev))
3184 		return 0;
3185 
3186 	if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) {
3187 		__hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
3188 				      sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3189 	}
3190 
3191 	err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
3192 				    sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3193 	if (err)
3194 		return err;
3195 
3196 	return hci_write_sc_support_sync(hdev, 0x01);
3197 }
3198 
3199 int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul)
3200 {
3201 	struct hci_cp_write_le_host_supported cp;
3202 
3203 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) ||
3204 	    !lmp_bredr_capable(hdev))
3205 		return 0;
3206 
3207 	/* Check first if we already have the right host state
3208 	 * (host features set)
3209 	 */
3210 	if (le == lmp_host_le_capable(hdev) &&
3211 	    simul == lmp_host_le_br_capable(hdev))
3212 		return 0;
3213 
3214 	memset(&cp, 0, sizeof(cp));
3215 
3216 	cp.le = le;
3217 	cp.simul = simul;
3218 
3219 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
3220 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3221 }
3222 
3223 static int hci_powered_update_adv_sync(struct hci_dev *hdev)
3224 {
3225 	struct adv_info *adv, *tmp;
3226 	int err;
3227 
3228 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
3229 		return 0;
3230 
3231 	/* If RPA Resolution has not been enabled yet it means the
3232 	 * resolving list is empty and we should attempt to program the
3233 	 * local IRK in order to support using own_addr_type
3234 	 * ADDR_LE_DEV_RANDOM_RESOLVED (0x03).
3235 	 */
3236 	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) {
3237 		hci_le_add_resolve_list_sync(hdev, NULL);
3238 		hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
3239 	}
3240 
3241 	/* Make sure the controller has a good default for
3242 	 * advertising data. This also applies to the case
3243 	 * where BR/EDR was toggled during the AUTO_OFF phase.
3244 	 */
3245 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
3246 	    list_empty(&hdev->adv_instances)) {
3247 		if (ext_adv_capable(hdev)) {
3248 			err = hci_setup_ext_adv_instance_sync(hdev, 0x00);
3249 			if (!err)
3250 				hci_update_scan_rsp_data_sync(hdev, 0x00);
3251 		} else {
3252 			err = hci_update_adv_data_sync(hdev, 0x00);
3253 			if (!err)
3254 				hci_update_scan_rsp_data_sync(hdev, 0x00);
3255 		}
3256 
3257 		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
3258 			hci_enable_advertising_sync(hdev);
3259 	}
3260 
3261 	/* Call for each tracked instance to be scheduled */
3262 	list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list)
3263 		hci_schedule_adv_instance_sync(hdev, adv->instance, true);
3264 
3265 	return 0;
3266 }
3267 
3268 static int hci_write_auth_enable_sync(struct hci_dev *hdev)
3269 {
3270 	u8 link_sec;
3271 
3272 	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
3273 	if (link_sec == test_bit(HCI_AUTH, &hdev->flags))
3274 		return 0;
3275 
3276 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE,
3277 				     sizeof(link_sec), &link_sec,
3278 				     HCI_CMD_TIMEOUT);
3279 }
3280 
3281 int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable)
3282 {
3283 	struct hci_cp_write_page_scan_activity cp;
3284 	u8 type;
3285 	int err = 0;
3286 
3287 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3288 		return 0;
3289 
3290 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
3291 		return 0;
3292 
3293 	memset(&cp, 0, sizeof(cp));
3294 
3295 	if (enable) {
3296 		type = PAGE_SCAN_TYPE_INTERLACED;
3297 
3298 		/* 160 msec page scan interval */
3299 		cp.interval = cpu_to_le16(0x0100);
3300 	} else {
3301 		type = hdev->def_page_scan_type;
3302 		cp.interval = cpu_to_le16(hdev->def_page_scan_int);
3303 	}
3304 
3305 	cp.window = cpu_to_le16(hdev->def_page_scan_window);
3306 
3307 	if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval ||
3308 	    __cpu_to_le16(hdev->page_scan_window) != cp.window) {
3309 		err = __hci_cmd_sync_status(hdev,
3310 					    HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
3311 					    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3312 		if (err)
3313 			return err;
3314 	}
3315 
3316 	if (hdev->page_scan_type != type)
3317 		err = __hci_cmd_sync_status(hdev,
3318 					    HCI_OP_WRITE_PAGE_SCAN_TYPE,
3319 					    sizeof(type), &type,
3320 					    HCI_CMD_TIMEOUT);
3321 
3322 	return err;
3323 }
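
/* Editor's note (illustrative, not part of hci_sync.c): page scan activity
 * is expressed in 0.625 ms baseband slots, so the fast-connectable interval
 * above works out as:
 *
 *	0x0100 = 256 slots * 0.625 ms = 160 ms
 */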
3324 
3325 static bool disconnected_accept_list_entries(struct hci_dev *hdev)
3326 {
3327 	struct bdaddr_list *b;
3328 
3329 	list_for_each_entry(b, &hdev->accept_list, list) {
3330 		struct hci_conn *conn;
3331 
3332 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
3333 		if (!conn)
3334 			return true;
3335 
3336 		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3337 			return true;
3338 	}
3339 
3340 	return false;
3341 }
3342 
3343 static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val)
3344 {
3345 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE,
3346 					    sizeof(val), &val,
3347 					    HCI_CMD_TIMEOUT);
3348 }
3349 
3350 int hci_update_scan_sync(struct hci_dev *hdev)
3351 {
3352 	u8 scan;
3353 
3354 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3355 		return 0;
3356 
3357 	if (!hdev_is_powered(hdev))
3358 		return 0;
3359 
3360 	if (mgmt_powering_down(hdev))
3361 		return 0;
3362 
3363 	if (hdev->scanning_paused)
3364 		return 0;
3365 
3366 	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
3367 	    disconnected_accept_list_entries(hdev))
3368 		scan = SCAN_PAGE;
3369 	else
3370 		scan = SCAN_DISABLED;
3371 
3372 	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
3373 		scan |= SCAN_INQUIRY;
3374 
3375 	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
3376 	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
3377 		return 0;
3378 
3379 	return hci_write_scan_enable_sync(hdev, scan);
3380 }
3381 
3382 int hci_update_name_sync(struct hci_dev *hdev)
3383 {
3384 	struct hci_cp_write_local_name cp;
3385 
3386 	memset(&cp, 0, sizeof(cp));
3387 
3388 	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
3389 
3390 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME,
3391 					    sizeof(cp), &cp,
3392 					    HCI_CMD_TIMEOUT);
3393 }
3394 
3395 /* This function performs the powered update HCI command sequence after the
3396  * HCI init sequence, which ends up resetting all states; the sequence is:
3397  *
3398  * HCI_SSP_ENABLED(Enable SSP)
3399  * HCI_LE_ENABLED(Enable LE)
3400  * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) ->
3401  * Update adv data)
3402  * Enable Authentication
3403  * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class ->
3404  * Set Name -> Set EIR)
3405  * HCI_FORCE_STATIC_ADDR | BDADDR_ANY && !HCI_BREDR_ENABLED (Set Static Address)
3406  */
3407 int hci_powered_update_sync(struct hci_dev *hdev)
3408 {
3409 	int err;
3410 
3411 	/* Register the available SMP channels (BR/EDR and LE) only when
3412 	 * successfully powering on the controller. This late
3413 	 * registration is required so that LE SMP can clearly decide if
3414 	 * the public address or static address is used.
3415 	 */
3416 	smp_register(hdev);
3417 
3418 	err = hci_write_ssp_mode_sync(hdev, 0x01);
3419 	if (err)
3420 		return err;
3421 
3422 	err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00);
3423 	if (err)
3424 		return err;
3425 
3426 	err = hci_powered_update_adv_sync(hdev);
3427 	if (err)
3428 		return err;
3429 
3430 	err = hci_write_auth_enable_sync(hdev);
3431 	if (err)
3432 		return err;
3433 
3434 	if (lmp_bredr_capable(hdev)) {
3435 		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
3436 			hci_write_fast_connectable_sync(hdev, true);
3437 		else
3438 			hci_write_fast_connectable_sync(hdev, false);
3439 		hci_update_scan_sync(hdev);
3440 		hci_update_class_sync(hdev);
3441 		hci_update_name_sync(hdev);
3442 		hci_update_eir_sync(hdev);
3443 	}
3444 
3445 	/* If forcing static address is in use or there is no public
3446 	 * address use the static address as random address (but skip
3447 	 * the HCI command if the current random address is already the
3448 	 * static one).
3449 	 *
3450 	 * In case BR/EDR has been disabled on a dual-mode controller
3451 	 * and a static address has been configured, then use that
3452 	 * address instead of the public BR/EDR address.
3453 	 */
3454 	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
3455 	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3456 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))) {
3457 		if (bacmp(&hdev->static_addr, BDADDR_ANY))
3458 			return hci_set_random_addr_sync(hdev,
3459 							&hdev->static_addr);
3460 	}
3461 
3462 	return 0;
3463 }
3464 
3465 /**
3466  * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address
3467  *				       (BD_ADDR) for an HCI device from
3468  *				       a firmware node property.
3469  * @hdev:	The HCI device
3470  *
3471  * Search the firmware node for 'local-bd-address'.
3472  *
3473  * All-zero BD addresses are rejected, because those could be properties
3474  * that exist in the firmware tables, but were not updated by the firmware. For
3475  * example, the DTS could define 'local-bd-address', with zero BD addresses.
3476  */
3477 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev)
3478 {
3479 	struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent);
3480 	bdaddr_t ba;
3481 	int ret;
3482 
3483 	ret = fwnode_property_read_u8_array(fwnode, "local-bd-address",
3484 					    (u8 *)&ba, sizeof(ba));
3485 	if (ret < 0 || !bacmp(&ba, BDADDR_ANY))
3486 		return;
3487 
3488 	if (test_bit(HCI_QUIRK_BDADDR_PROPERTY_BROKEN, &hdev->quirks))
3489 		baswap(&hdev->public_addr, &ba);
3490 	else
3491 		bacpy(&hdev->public_addr, &ba);
3492 }
3493 
3494 struct hci_init_stage {
3495 	int (*func)(struct hci_dev *hdev);
3496 };
3497 
3498 /* Run an init stage's NULL-terminated function table */
3499 static int hci_init_stage_sync(struct hci_dev *hdev,
3500 			       const struct hci_init_stage *stage)
3501 {
3502 	size_t i;
3503 
3504 	for (i = 0; stage[i].func; i++) {
3505 		int err;
3506 
3507 		err = stage[i].func(hdev);
3508 		if (err)
3509 			return err;
3510 	}
3511 
3512 	return 0;
3513 }
3514 
3515 /* Read Local Version */
3516 static int hci_read_local_version_sync(struct hci_dev *hdev)
3517 {
3518 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION,
3519 				     0, NULL, HCI_CMD_TIMEOUT);
3520 }
3521 
3522 /* Read BD Address */
3523 static int hci_read_bd_addr_sync(struct hci_dev *hdev)
3524 {
3525 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR,
3526 				     0, NULL, HCI_CMD_TIMEOUT);
3527 }
3528 
3529 #define HCI_INIT(_func) \
3530 { \
3531 	.func = _func, \
3532 }
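
/* For illustration: an entry such as HCI_INIT(hci_read_bd_addr_sync) expands
 * to { .func = hci_read_bd_addr_sync }, so each table below is an array of
 * callbacks terminated by an empty entry (.func == NULL), which is what
 * hci_init_stage_sync() iterates over.
 */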
3533 
3534 static const struct hci_init_stage hci_init0[] = {
3535 	/* HCI_OP_READ_LOCAL_VERSION */
3536 	HCI_INIT(hci_read_local_version_sync),
3537 	/* HCI_OP_READ_BD_ADDR */
3538 	HCI_INIT(hci_read_bd_addr_sync),
3539 	{}
3540 };
3541 
3542 int hci_reset_sync(struct hci_dev *hdev)
3543 {
3544 	int err;
3545 
3546 	set_bit(HCI_RESET, &hdev->flags);
3547 
3548 	err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL,
3549 				    HCI_CMD_TIMEOUT);
3550 	if (err)
3551 		return err;
3552 
3553 	return 0;
3554 }
3555 
3556 static int hci_init0_sync(struct hci_dev *hdev)
3557 {
3558 	int err;
3559 
3560 	bt_dev_dbg(hdev, "");
3561 
3562 	/* Reset */
3563 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
3564 		err = hci_reset_sync(hdev);
3565 		if (err)
3566 			return err;
3567 	}
3568 
3569 	return hci_init_stage_sync(hdev, hci_init0);
3570 }
3571 
3572 static int hci_unconf_init_sync(struct hci_dev *hdev)
3573 {
3574 	int err;
3575 
3576 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3577 		return 0;
3578 
3579 	err = hci_init0_sync(hdev);
3580 	if (err < 0)
3581 		return err;
3582 
3583 	if (hci_dev_test_flag(hdev, HCI_SETUP))
3584 		hci_debugfs_create_basic(hdev);
3585 
3586 	return 0;
3587 }
3588 
3589 /* Read Local Supported Features. */
3590 static int hci_read_local_features_sync(struct hci_dev *hdev)
3591 {
3592 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES,
3593 				     0, NULL, HCI_CMD_TIMEOUT);
3594 }
3595 
3596 /* BR Controller init stage 1 command sequence */
3597 static const struct hci_init_stage br_init1[] = {
3598 	/* HCI_OP_READ_LOCAL_FEATURES */
3599 	HCI_INIT(hci_read_local_features_sync),
3600 	/* HCI_OP_READ_LOCAL_VERSION */
3601 	HCI_INIT(hci_read_local_version_sync),
3602 	/* HCI_OP_READ_BD_ADDR */
3603 	HCI_INIT(hci_read_bd_addr_sync),
3604 	{}
3605 };
3606 
3607 /* Read Local Commands */
3608 static int hci_read_local_cmds_sync(struct hci_dev *hdev)
3609 {
3610 	/* All Bluetooth 1.2 and later controllers should support the
3611 	 * HCI command for reading the local supported commands.
3612 	 *
3613 	 * Unfortunately some controllers indicate Bluetooth 1.2 support,
3614 	 * but do not have support for this command. If that is the case,
3615 	 * the driver can quirk the behavior and skip reading the local
3616 	 * supported commands.
3617 	 */
3618 	if (hdev->hci_ver > BLUETOOTH_VER_1_1 &&
3619 	    !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks))
3620 		return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS,
3621 					     0, NULL, HCI_CMD_TIMEOUT);
3622 
3623 	return 0;
3624 }
3625 
3626 static int hci_init1_sync(struct hci_dev *hdev)
3627 {
3628 	int err;
3629 
3630 	bt_dev_dbg(hdev, "");
3631 
3632 	/* Reset */
3633 	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
3634 		err = hci_reset_sync(hdev);
3635 		if (err)
3636 			return err;
3637 	}
3638 
3639 	return hci_init_stage_sync(hdev, br_init1);
3640 }
3641 
3642 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
3643 static int hci_read_buffer_size_sync(struct hci_dev *hdev)
3644 {
3645 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE,
3646 				     0, NULL, HCI_CMD_TIMEOUT);
3647 }
3648 
3649 /* Read Class of Device */
3650 static int hci_read_dev_class_sync(struct hci_dev *hdev)
3651 {
3652 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV,
3653 				     0, NULL, HCI_CMD_TIMEOUT);
3654 }
3655 
3656 /* Read Local Name */
3657 static int hci_read_local_name_sync(struct hci_dev *hdev)
3658 {
3659 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME,
3660 				     0, NULL, HCI_CMD_TIMEOUT);
3661 }
3662 
3663 /* Read Voice Setting */
3664 static int hci_read_voice_setting_sync(struct hci_dev *hdev)
3665 {
3666 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING,
3667 				     0, NULL, HCI_CMD_TIMEOUT);
3668 }
3669 
3670 /* Read Number of Supported IAC */
3671 static int hci_read_num_supported_iac_sync(struct hci_dev *hdev)
3672 {
3673 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC,
3674 				     0, NULL, HCI_CMD_TIMEOUT);
3675 }
3676 
3677 /* Read Current IAC LAP */
3678 static int hci_read_current_iac_lap_sync(struct hci_dev *hdev)
3679 {
3680 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP,
3681 				     0, NULL, HCI_CMD_TIMEOUT);
3682 }
3683 
3684 static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type,
3685 				     u8 cond_type, bdaddr_t *bdaddr,
3686 				     u8 auto_accept)
3687 {
3688 	struct hci_cp_set_event_filter cp;
3689 
3690 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
3691 		return 0;
3692 
3693 	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
3694 		return 0;
3695 
3696 	memset(&cp, 0, sizeof(cp));
3697 	cp.flt_type = flt_type;
3698 
3699 	if (flt_type != HCI_FLT_CLEAR_ALL) {
3700 		cp.cond_type = cond_type;
3701 		bacpy(&cp.addr_conn_flt.bdaddr, bdaddr);
3702 		cp.addr_conn_flt.auto_accept = auto_accept;
3703 	}
3704 
3705 	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT,
3706 				     flt_type == HCI_FLT_CLEAR_ALL ?
3707 				     sizeof(cp.flt_type) : sizeof(cp), &cp,
3708 				     HCI_CMD_TIMEOUT);
3709 }
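
/* Usage sketch (illustrative only; the constants come from hci.h): a caller
 * could auto-accept connections from a single peer and later wipe all
 * filters again with
 *
 *	hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
 *				  HCI_CONN_SETUP_ALLOW_BDADDR, &bdaddr,
 *				  HCI_CONN_SETUP_AUTO_ON);
 *	hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
 *				  BDADDR_ANY, 0x00);
 *
 * Note that for HCI_FLT_CLEAR_ALL only cp.flt_type is put on the wire.
 */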
3710 
3711 static int hci_clear_event_filter_sync(struct hci_dev *hdev)
3712 {
3713 	if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED))
3714 		return 0;
3715 
3716 	/* In theory the state machine should not reach here unless
3717 	 * a hci_set_event_filter_sync() call succeeds, but we do
3718 	 * the check both for parity and as a future reminder.
3719 	 */
3720 	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
3721 		return 0;
3722 
3723 	return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
3724 					 BDADDR_ANY, 0x00);
3725 }
3726 
3727 /* Connection accept timeout ~20 secs */
3728 static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
3729 {
3730 	__le16 param = cpu_to_le16(0x7d00);
3731 
3732 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT,
3733 				     sizeof(param), &param, HCI_CMD_TIMEOUT);
3734 }
3735 
3736 /* BR Controller init stage 2 command sequence */
3737 static const struct hci_init_stage br_init2[] = {
3738 	/* HCI_OP_READ_BUFFER_SIZE */
3739 	HCI_INIT(hci_read_buffer_size_sync),
3740 	/* HCI_OP_READ_CLASS_OF_DEV */
3741 	HCI_INIT(hci_read_dev_class_sync),
3742 	/* HCI_OP_READ_LOCAL_NAME */
3743 	HCI_INIT(hci_read_local_name_sync),
3744 	/* HCI_OP_READ_VOICE_SETTING */
3745 	HCI_INIT(hci_read_voice_setting_sync),
3746 	/* HCI_OP_READ_NUM_SUPPORTED_IAC */
3747 	HCI_INIT(hci_read_num_supported_iac_sync),
3748 	/* HCI_OP_READ_CURRENT_IAC_LAP */
3749 	HCI_INIT(hci_read_current_iac_lap_sync),
3750 	/* HCI_OP_SET_EVENT_FLT */
3751 	HCI_INIT(hci_clear_event_filter_sync),
3752 	/* HCI_OP_WRITE_CA_TIMEOUT */
3753 	HCI_INIT(hci_write_ca_timeout_sync),
3754 	{}
3755 };
3756 
3757 static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev)
3758 {
3759 	u8 mode = 0x01;
3760 
3761 	if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
3762 		return 0;
3763 
3764 	/* When SSP is available, the host features page
3765 	 * should be available as well. However, some
3766 	 * controllers list the max_page as 0 as long as SSP
3767 	 * has not been enabled. To achieve proper debugging
3768 	 * output, force max_page to at least 1.
3769 	 */
3770 	hdev->max_page = 0x01;
3771 
3772 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
3773 				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3774 }
3775 
3776 static int hci_write_eir_sync(struct hci_dev *hdev)
3777 {
3778 	struct hci_cp_write_eir cp;
3779 
3780 	if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
3781 		return 0;
3782 
3783 	memset(hdev->eir, 0, sizeof(hdev->eir));
3784 	memset(&cp, 0, sizeof(cp));
3785 
3786 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
3787 				     HCI_CMD_TIMEOUT);
3788 }
3789 
3790 static int hci_write_inquiry_mode_sync(struct hci_dev *hdev)
3791 {
3792 	u8 mode;
3793 
3794 	if (!lmp_inq_rssi_capable(hdev) &&
3795 	    !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
3796 		return 0;
3797 
3798 	/* If Extended Inquiry Result events are supported, then
3799 	 * they are clearly preferred over Inquiry Result with RSSI
3800 	 * events.
3801 	 */
3802 	mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01;
3803 
3804 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE,
3805 				     sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3806 }
3807 
3808 static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev)
3809 {
3810 	if (!lmp_inq_tx_pwr_capable(hdev))
3811 		return 0;
3812 
3813 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER,
3814 				     0, NULL, HCI_CMD_TIMEOUT);
3815 }
3816 
3817 static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page)
3818 {
3819 	struct hci_cp_read_local_ext_features cp;
3820 
3821 	if (!lmp_ext_feat_capable(hdev))
3822 		return 0;
3823 
3824 	memset(&cp, 0, sizeof(cp));
3825 	cp.page = page;
3826 
3827 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
3828 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
3829 }
3830 
3831 static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev)
3832 {
3833 	return hci_read_local_ext_features_sync(hdev, 0x01);
3834 }
3835 
3836 /* HCI Controller init stage 2 command sequence */
3837 static const struct hci_init_stage hci_init2[] = {
3838 	/* HCI_OP_READ_LOCAL_COMMANDS */
3839 	HCI_INIT(hci_read_local_cmds_sync),
3840 	/* HCI_OP_WRITE_SSP_MODE */
3841 	HCI_INIT(hci_write_ssp_mode_1_sync),
3842 	/* HCI_OP_WRITE_EIR */
3843 	HCI_INIT(hci_write_eir_sync),
3844 	/* HCI_OP_WRITE_INQUIRY_MODE */
3845 	HCI_INIT(hci_write_inquiry_mode_sync),
3846 	/* HCI_OP_READ_INQ_RSP_TX_POWER */
3847 	HCI_INIT(hci_read_inq_rsp_tx_power_sync),
3848 	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
3849 	HCI_INIT(hci_read_local_ext_features_1_sync),
3850 	/* HCI_OP_WRITE_AUTH_ENABLE */
3851 	HCI_INIT(hci_write_auth_enable_sync),
3852 	{}
3853 };
3854 
3855 /* Read LE Buffer Size */
3856 static int hci_le_read_buffer_size_sync(struct hci_dev *hdev)
3857 {
3858 	/* Use Read LE Buffer Size V2 if supported */
3859 	if (iso_capable(hdev) && hdev->commands[41] & 0x20)
3860 		return __hci_cmd_sync_status(hdev,
3861 					     HCI_OP_LE_READ_BUFFER_SIZE_V2,
3862 					     0, NULL, HCI_CMD_TIMEOUT);
3863 
3864 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE,
3865 				     0, NULL, HCI_CMD_TIMEOUT);
3866 }
3867 
3868 /* Read LE Local Supported Features */
3869 static int hci_le_read_local_features_sync(struct hci_dev *hdev)
3870 {
3871 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES,
3872 				     0, NULL, HCI_CMD_TIMEOUT);
3873 }
3874 
3875 /* Read LE Supported States */
3876 static int hci_le_read_supported_states_sync(struct hci_dev *hdev)
3877 {
3878 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES,
3879 				     0, NULL, HCI_CMD_TIMEOUT);
3880 }
3881 
3882 /* LE Controller init stage 2 command sequence */
3883 static const struct hci_init_stage le_init2[] = {
3884 	/* HCI_OP_LE_READ_LOCAL_FEATURES */
3885 	HCI_INIT(hci_le_read_local_features_sync),
3886 	/* HCI_OP_LE_READ_BUFFER_SIZE */
3887 	HCI_INIT(hci_le_read_buffer_size_sync),
3888 	/* HCI_OP_LE_READ_SUPPORTED_STATES */
3889 	HCI_INIT(hci_le_read_supported_states_sync),
3890 	{}
3891 };
3892 
3893 static int hci_init2_sync(struct hci_dev *hdev)
3894 {
3895 	int err;
3896 
3897 	bt_dev_dbg(hdev, "");
3898 
3899 	err = hci_init_stage_sync(hdev, hci_init2);
3900 	if (err)
3901 		return err;
3902 
3903 	if (lmp_bredr_capable(hdev)) {
3904 		err = hci_init_stage_sync(hdev, br_init2);
3905 		if (err)
3906 			return err;
3907 	} else {
3908 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
3909 	}
3910 
3911 	if (lmp_le_capable(hdev)) {
3912 		err = hci_init_stage_sync(hdev, le_init2);
3913 		if (err)
3914 			return err;
3915 		/* LE-only controllers have LE implicitly enabled */
3916 		if (!lmp_bredr_capable(hdev))
3917 			hci_dev_set_flag(hdev, HCI_LE_ENABLED);
3918 	}
3919 
3920 	return 0;
3921 }
3922 
3923 static int hci_set_event_mask_sync(struct hci_dev *hdev)
3924 {
3925 	/* The second byte is 0xff instead of 0x9f (two reserved bits
3926 	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
3927 	 * command otherwise.
3928 	 */
3929 	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
3930 
3931 	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
3932 	 * an event mask for pre-1.2 devices.
3933 	 */
3934 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
3935 		return 0;
3936 
3937 	if (lmp_bredr_capable(hdev)) {
3938 		events[4] |= 0x01; /* Flow Specification Complete */
3939 
3940 		/* Don't set Disconnection Complete and Mode Change when
3941 		 * suspended, as that would wake up the host when disconnecting
3942 		 * due to suspend.
3943 		 */
3944 		if (hdev->suspended) {
3945 			events[0] &= 0xef;
3946 			events[2] &= 0xf7;
3947 		}
3948 	} else {
3949 		/* Use a different default for LE-only devices */
3950 		memset(events, 0, sizeof(events));
3951 		events[1] |= 0x20; /* Command Complete */
3952 		events[1] |= 0x40; /* Command Status */
3953 		events[1] |= 0x80; /* Hardware Error */
3954 
3955 		/* If the controller supports the Disconnect command, enable
3956 		 * the corresponding event. In addition enable packet flow
3957 		 * control related events.
3958 		 */
3959 		if (hdev->commands[0] & 0x20) {
3960 			/* Don't set Disconnection Complete when suspended, as
3961 			 * that would wake up the host when disconnecting due to
3962 			 * suspend.
3963 			 */
3964 			if (!hdev->suspended)
3965 				events[0] |= 0x10; /* Disconnection Complete */
3966 			events[2] |= 0x04; /* Number of Completed Packets */
3967 			events[3] |= 0x02; /* Data Buffer Overflow */
3968 		}
3969 
3970 		/* If the controller supports the Read Remote Version
3971 		 * Information command, enable the corresponding event.
3972 		 */
3973 		if (hdev->commands[2] & 0x80)
3974 			events[1] |= 0x08; /* Read Remote Version Information
3975 					    * Complete
3976 					    */
3977 
3978 		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
3979 			events[0] |= 0x80; /* Encryption Change */
3980 			events[5] |= 0x80; /* Encryption Key Refresh Complete */
3981 		}
3982 	}
3983 
3984 	if (lmp_inq_rssi_capable(hdev) ||
3985 	    test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
3986 		events[4] |= 0x02; /* Inquiry Result with RSSI */
3987 
3988 	if (lmp_ext_feat_capable(hdev))
3989 		events[4] |= 0x04; /* Read Remote Extended Features Complete */
3990 
3991 	if (lmp_esco_capable(hdev)) {
3992 		events[5] |= 0x08; /* Synchronous Connection Complete */
3993 		events[5] |= 0x10; /* Synchronous Connection Changed */
3994 	}
3995 
3996 	if (lmp_sniffsubr_capable(hdev))
3997 		events[5] |= 0x20; /* Sniff Subrating */
3998 
3999 	if (lmp_pause_enc_capable(hdev))
4000 		events[5] |= 0x80; /* Encryption Key Refresh Complete */
4001 
4002 	if (lmp_ext_inq_capable(hdev))
4003 		events[5] |= 0x40; /* Extended Inquiry Result */
4004 
4005 	if (lmp_no_flush_capable(hdev))
4006 		events[7] |= 0x01; /* Enhanced Flush Complete */
4007 
4008 	if (lmp_lsto_capable(hdev))
4009 		events[6] |= 0x80; /* Link Supervision Timeout Changed */
4010 
4011 	if (lmp_ssp_capable(hdev)) {
4012 		events[6] |= 0x01;	/* IO Capability Request */
4013 		events[6] |= 0x02;	/* IO Capability Response */
4014 		events[6] |= 0x04;	/* User Confirmation Request */
4015 		events[6] |= 0x08;	/* User Passkey Request */
4016 		events[6] |= 0x10;	/* Remote OOB Data Request */
4017 		events[6] |= 0x20;	/* Simple Pairing Complete */
4018 		events[7] |= 0x04;	/* User Passkey Notification */
4019 		events[7] |= 0x08;	/* Keypress Notification */
4020 		events[7] |= 0x10;	/* Remote Host Supported
4021 					 * Features Notification
4022 					 */
4023 	}
4024 
4025 	if (lmp_le_capable(hdev))
4026 		events[7] |= 0x20;	/* LE Meta-Event */
4027 
4028 	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK,
4029 				     sizeof(events), events, HCI_CMD_TIMEOUT);
4030 }
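
/* For reference (derived from the Core spec event mask layout, not new
 * logic): for the classic events set above, the mask bit index is
 * event_code - 1 counted across the eight bytes. E.g. Disconnection
 * Complete (event code 0x05) is bit 4:
 *
 *	events[(0x05 - 1) / 8] |= BIT((0x05 - 1) % 8);
 *
 * which is the events[0] |= 0x10 used above.
 */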
4031 
4032 static int hci_read_stored_link_key_sync(struct hci_dev *hdev)
4033 {
4034 	struct hci_cp_read_stored_link_key cp;
4035 
4036 	if (!(hdev->commands[6] & 0x20) ||
4037 	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
4038 		return 0;
4039 
4040 	memset(&cp, 0, sizeof(cp));
4041 	bacpy(&cp.bdaddr, BDADDR_ANY);
4042 	cp.read_all = 0x01;
4043 
4044 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY,
4045 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4046 }
4047 
4048 static int hci_setup_link_policy_sync(struct hci_dev *hdev)
4049 {
4050 	struct hci_cp_write_def_link_policy cp;
4051 	u16 link_policy = 0;
4052 
4053 	if (!(hdev->commands[5] & 0x10))
4054 		return 0;
4055 
4056 	memset(&cp, 0, sizeof(cp));
4057 
4058 	if (lmp_rswitch_capable(hdev))
4059 		link_policy |= HCI_LP_RSWITCH;
4060 	if (lmp_hold_capable(hdev))
4061 		link_policy |= HCI_LP_HOLD;
4062 	if (lmp_sniff_capable(hdev))
4063 		link_policy |= HCI_LP_SNIFF;
4064 	if (lmp_park_capable(hdev))
4065 		link_policy |= HCI_LP_PARK;
4066 
4067 	cp.policy = cpu_to_le16(link_policy);
4068 
4069 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
4070 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4071 }
4072 
4073 static int hci_read_page_scan_activity_sync(struct hci_dev *hdev)
4074 {
4075 	if (!(hdev->commands[8] & 0x01))
4076 		return 0;
4077 
4078 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY,
4079 				     0, NULL, HCI_CMD_TIMEOUT);
4080 }
4081 
4082 static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev)
4083 {
4084 	if (!(hdev->commands[18] & 0x04) ||
4085 	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
4086 	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
4087 		return 0;
4088 
4089 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING,
4090 				     0, NULL, HCI_CMD_TIMEOUT);
4091 }
4092 
4093 static int hci_read_page_scan_type_sync(struct hci_dev *hdev)
4094 {
4095 	/* Some older Broadcom based Bluetooth 1.2 controllers do not
4096 	 * support the Read Page Scan Type command. Check support for
4097 	 * this command in the bit mask of supported commands.
4098 	 */
4099 	if (!(hdev->commands[13] & 0x01))
4100 		return 0;
4101 
4102 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE,
4103 				     0, NULL, HCI_CMD_TIMEOUT);
4104 }
4105 
4106 /* Read features beyond page 1 if available */
4107 static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev)
4108 {
4109 	u8 page;
4110 	int err;
4111 
4112 	if (!lmp_ext_feat_capable(hdev))
4113 		return 0;
4114 
4115 	for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page;
4116 	     page++) {
4117 		err = hci_read_local_ext_features_sync(hdev, page);
4118 		if (err)
4119 			return err;
4120 	}
4121 
4122 	return 0;
4123 }
4124 
4125 /* HCI Controller init stage 3 command sequence */
4126 static const struct hci_init_stage hci_init3[] = {
4127 	/* HCI_OP_SET_EVENT_MASK */
4128 	HCI_INIT(hci_set_event_mask_sync),
4129 	/* HCI_OP_READ_STORED_LINK_KEY */
4130 	HCI_INIT(hci_read_stored_link_key_sync),
4131 	/* HCI_OP_WRITE_DEF_LINK_POLICY */
4132 	HCI_INIT(hci_setup_link_policy_sync),
4133 	/* HCI_OP_READ_PAGE_SCAN_ACTIVITY */
4134 	HCI_INIT(hci_read_page_scan_activity_sync),
4135 	/* HCI_OP_READ_DEF_ERR_DATA_REPORTING */
4136 	HCI_INIT(hci_read_def_err_data_reporting_sync),
4137 	/* HCI_OP_READ_PAGE_SCAN_TYPE */
4138 	HCI_INIT(hci_read_page_scan_type_sync),
4139 	/* HCI_OP_READ_LOCAL_EXT_FEATURES */
4140 	HCI_INIT(hci_read_local_ext_features_all_sync),
4141 	{}
4142 };
4143 
4144 static int hci_le_set_event_mask_sync(struct hci_dev *hdev)
4145 {
4146 	u8 events[8];
4147 
4148 	if (!lmp_le_capable(hdev))
4149 		return 0;
4150 
4151 	memset(events, 0, sizeof(events));
4152 
4153 	if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
4154 		events[0] |= 0x10;	/* LE Long Term Key Request */
4155 
4156 	/* If controller supports the Connection Parameters Request
4157 	 * Link Layer Procedure, enable the corresponding event.
4158 	 */
4159 	if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
4160 		/* LE Remote Connection Parameter Request */
4161 		events[0] |= 0x20;
4162 
4163 	/* If the controller supports the Data Length Extension
4164 	 * feature, enable the corresponding event.
4165 	 */
4166 	if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)
4167 		events[0] |= 0x40;	/* LE Data Length Change */
4168 
4169 	/* If the controller supports LL Privacy feature or LE Extended Adv,
4170 	 * enable the corresponding event.
4171 	 */
4172 	if (use_enhanced_conn_complete(hdev))
4173 		events[1] |= 0x02;	/* LE Enhanced Connection Complete */
4174 
4175 	/* If the controller supports Extended Scanner Filter
4176 	 * Policies, enable the corresponding event.
4177 	 */
4178 	if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)
4179 		events[1] |= 0x04;	/* LE Direct Advertising Report */
4180 
4181 	/* If the controller supports Channel Selection Algorithm #2
4182 	 * feature, enable the corresponding event.
4183 	 */
4184 	if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2)
4185 		events[2] |= 0x08;	/* LE Channel Selection Algorithm */
4186 
4187 	/* If the controller supports the LE Set Scan Enable command,
4188 	 * enable the corresponding advertising report event.
4189 	 */
4190 	if (hdev->commands[26] & 0x08)
4191 		events[0] |= 0x02;	/* LE Advertising Report */
4192 
4193 	/* If the controller supports the LE Create Connection
4194 	 * command, enable the corresponding event.
4195 	 */
4196 	if (hdev->commands[26] & 0x10)
4197 		events[0] |= 0x01;	/* LE Connection Complete */
4198 
4199 	/* If the controller supports the LE Connection Update
4200 	 * command, enable the corresponding event.
4201 	 */
4202 	if (hdev->commands[27] & 0x04)
4203 		events[0] |= 0x04;	/* LE Connection Update Complete */
4204 
4205 	/* If the controller supports the LE Read Remote Used Features
4206 	 * command, enable the corresponding event.
4207 	 */
4208 	if (hdev->commands[27] & 0x20)
4209 		/* LE Read Remote Used Features Complete */
4210 		events[0] |= 0x08;
4211 
4212 	/* If the controller supports the LE Read Local P-256
4213 	 * Public Key command, enable the corresponding event.
4214 	 */
4215 	if (hdev->commands[34] & 0x02)
4216 		/* LE Read Local P-256 Public Key Complete */
4217 		events[0] |= 0x80;
4218 
4219 	/* If the controller supports the LE Generate DHKey
4220 	 * command, enable the corresponding event.
4221 	 */
4222 	if (hdev->commands[34] & 0x04)
4223 		events[1] |= 0x01;	/* LE Generate DHKey Complete */
4224 
4225 	/* If the controller supports the LE Set Default PHY or
4226 	 * LE Set PHY commands, enable the corresponding event.
4227 	 */
4228 	if (hdev->commands[35] & (0x20 | 0x40))
4229 		events[1] |= 0x08;        /* LE PHY Update Complete */
4230 
4231 	/* If the controller supports LE Set Extended Scan Parameters
4232 	 * and LE Set Extended Scan Enable commands, enable the
4233 	 * corresponding event.
4234 	 */
4235 	if (use_ext_scan(hdev))
4236 		events[1] |= 0x10;	/* LE Extended Advertising Report */
4237 
4238 	/* If the controller supports the LE Extended Advertising
4239 	 * command, enable the corresponding event.
4240 	 */
4241 	if (ext_adv_capable(hdev))
4242 		events[2] |= 0x02;	/* LE Advertising Set Terminated */
4243 
4244 	if (cis_capable(hdev)) {
4245 		events[3] |= 0x01;	/* LE CIS Established */
4246 		if (cis_peripheral_capable(hdev))
4247 			events[3] |= 0x02; /* LE CIS Request */
4248 	}
4249 
4250 	if (bis_capable(hdev)) {
4251 		events[1] |= 0x20;	/* LE PA Report */
4252 		events[1] |= 0x40;	/* LE PA Sync Established */
4253 		events[3] |= 0x04;	/* LE Create BIG Complete */
4254 		events[3] |= 0x08;	/* LE Terminate BIG Complete */
4255 		events[3] |= 0x10;	/* LE BIG Sync Established */
4256 		events[3] |= 0x20;	/* LE BIG Sync Loss */
4257 		events[4] |= 0x02;	/* LE BIG Info Advertising Report */
4258 	}
4259 
4260 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK,
4261 				     sizeof(events), events, HCI_CMD_TIMEOUT);
4262 }
4263 
4264 /* Read LE Advertising Channel TX Power */
4265 static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev)
4266 {
4267 	if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) {
4268 		/* The HCI spec forbids mixing legacy and extended
4269 		 * advertising commands, and READ_ADV_TX_POWER is part
4270 		 * of the legacy set. So do not call it if extended
4271 		 * advertising is supported; otherwise the controller
4272 		 * will return COMMAND_DISALLOWED for the extended
4273 		 * commands.
4273 		 */
4274 		return __hci_cmd_sync_status(hdev,
4275 					       HCI_OP_LE_READ_ADV_TX_POWER,
4276 					       0, NULL, HCI_CMD_TIMEOUT);
4277 	}
4278 
4279 	return 0;
4280 }
4281 
4282 /* Read LE Min/Max Tx Power */
4283 static int hci_le_read_tx_power_sync(struct hci_dev *hdev)
4284 {
4285 	if (!(hdev->commands[38] & 0x80) ||
4286 	    test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks))
4287 		return 0;
4288 
4289 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER,
4290 				     0, NULL, HCI_CMD_TIMEOUT);
4291 }
4292 
4293 /* Read LE Accept List Size */
4294 static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
4295 {
4296 	if (!(hdev->commands[26] & 0x40))
4297 		return 0;
4298 
4299 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE,
4300 				     0, NULL, HCI_CMD_TIMEOUT);
4301 }
4302 
4303 /* Read LE Resolving List Size */
4304 static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
4305 {
4306 	if (!(hdev->commands[34] & 0x40))
4307 		return 0;
4308 
4309 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE,
4310 				     0, NULL, HCI_CMD_TIMEOUT);
4311 }
4312 
4313 /* Clear LE Resolving List */
4314 static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev)
4315 {
4316 	if (!(hdev->commands[34] & 0x20))
4317 		return 0;
4318 
4319 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL,
4320 				     HCI_CMD_TIMEOUT);
4321 }
4322 
4323 /* Set RPA timeout */
4324 static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev)
4325 {
4326 	__le16 timeout = cpu_to_le16(hdev->rpa_timeout);
4327 
4328 	if (!(hdev->commands[35] & 0x04) ||
4329 	    test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks))
4330 		return 0;
4331 
4332 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT,
4333 				     sizeof(timeout), &timeout,
4334 				     HCI_CMD_TIMEOUT);
4335 }
4336 
4337 /* Read LE Maximum Data Length */
4338 static int hci_le_read_max_data_len_sync(struct hci_dev *hdev)
4339 {
4340 	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4341 		return 0;
4342 
4343 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL,
4344 				     HCI_CMD_TIMEOUT);
4345 }
4346 
4347 /* Read LE Suggested Default Data Length */
4348 static int hci_le_read_def_data_len_sync(struct hci_dev *hdev)
4349 {
4350 	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4351 		return 0;
4352 
4353 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL,
4354 				     HCI_CMD_TIMEOUT);
4355 }
4356 
4357 /* Read LE Number of Supported Advertising Sets */
4358 static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev)
4359 {
4360 	if (!ext_adv_capable(hdev))
4361 		return 0;
4362 
4363 	return __hci_cmd_sync_status(hdev,
4364 				     HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS,
4365 				     0, NULL, HCI_CMD_TIMEOUT);
4366 }
4367 
4368 /* Write LE Host Supported */
4369 static int hci_set_le_support_sync(struct hci_dev *hdev)
4370 {
4371 	struct hci_cp_write_le_host_supported cp;
4372 
4373 	/* LE-only devices do not support explicit enablement */
4374 	if (!lmp_bredr_capable(hdev))
4375 		return 0;
4376 
4377 	memset(&cp, 0, sizeof(cp));
4378 
4379 	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
4380 		cp.le = 0x01;
4381 		cp.simul = 0x00;
4382 	}
4383 
4384 	if (cp.le == lmp_host_le_capable(hdev))
4385 		return 0;
4386 
4387 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
4388 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4389 }
4390 
4391 /* LE Set Host Feature */
4392 static int hci_le_set_host_feature_sync(struct hci_dev *hdev)
4393 {
4394 	struct hci_cp_le_set_host_feature cp;
4395 
4396 	if (!cis_capable(hdev))
4397 		return 0;
4398 
4399 	memset(&cp, 0, sizeof(cp));
4400 
4401 	/* Connected Isochronous Channels (Host Support) */
4402 	cp.bit_number = 32;
4403 	cp.bit_value = 1;
4404 
4405 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE,
4406 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4407 }
4408 
4409 /* LE Controller init stage 3 command sequence */
4410 static const struct hci_init_stage le_init3[] = {
4411 	/* HCI_OP_LE_SET_EVENT_MASK */
4412 	HCI_INIT(hci_le_set_event_mask_sync),
4413 	/* HCI_OP_LE_READ_ADV_TX_POWER */
4414 	HCI_INIT(hci_le_read_adv_tx_power_sync),
4415 	/* HCI_OP_LE_READ_TRANSMIT_POWER */
4416 	HCI_INIT(hci_le_read_tx_power_sync),
4417 	/* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */
4418 	HCI_INIT(hci_le_read_accept_list_size_sync),
4419 	/* HCI_OP_LE_CLEAR_ACCEPT_LIST */
4420 	HCI_INIT(hci_le_clear_accept_list_sync),
4421 	/* HCI_OP_LE_READ_RESOLV_LIST_SIZE */
4422 	HCI_INIT(hci_le_read_resolv_list_size_sync),
4423 	/* HCI_OP_LE_CLEAR_RESOLV_LIST */
4424 	HCI_INIT(hci_le_clear_resolv_list_sync),
4425 	/* HCI_OP_LE_SET_RPA_TIMEOUT */
4426 	HCI_INIT(hci_le_set_rpa_timeout_sync),
4427 	/* HCI_OP_LE_READ_MAX_DATA_LEN */
4428 	HCI_INIT(hci_le_read_max_data_len_sync),
4429 	/* HCI_OP_LE_READ_DEF_DATA_LEN */
4430 	HCI_INIT(hci_le_read_def_data_len_sync),
4431 	/* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */
4432 	HCI_INIT(hci_le_read_num_support_adv_sets_sync),
4433 	/* HCI_OP_WRITE_LE_HOST_SUPPORTED */
4434 	HCI_INIT(hci_set_le_support_sync),
4435 	/* HCI_OP_LE_SET_HOST_FEATURE */
4436 	HCI_INIT(hci_le_set_host_feature_sync),
4437 	{}
4438 };
4439 
4440 static int hci_init3_sync(struct hci_dev *hdev)
4441 {
4442 	int err;
4443 
4444 	bt_dev_dbg(hdev, "");
4445 
4446 	err = hci_init_stage_sync(hdev, hci_init3);
4447 	if (err)
4448 		return err;
4449 
4450 	if (lmp_le_capable(hdev))
4451 		return hci_init_stage_sync(hdev, le_init3);
4452 
4453 	return 0;
4454 }
4455 
4456 static int hci_delete_stored_link_key_sync(struct hci_dev *hdev)
4457 {
4458 	struct hci_cp_delete_stored_link_key cp;
4459 
4460 	/* Some Broadcom based Bluetooth controllers do not support the
4461 	 * Delete Stored Link Key command. They are clearly indicating its
4462 	 * absence in the bit mask of supported commands.
4463 	 *
4464 	 * Check the supported commands and only if the command is marked
4465 	 * as supported send it. If not supported assume that the controller
4466 	 * does not have actual support for stored link keys which makes this
4467 	 * command redundant anyway.
4468 	 *
4469 	 * Some controllers indicate that they support handling deleting
4470 	 * stored link keys, but they don't. The quirk lets a driver
4471 	 * just disable this command.
4472 	 */
4473 	if (!(hdev->commands[6] & 0x80) ||
4474 	    test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks))
4475 		return 0;
4476 
4477 	memset(&cp, 0, sizeof(cp));
4478 	bacpy(&cp.bdaddr, BDADDR_ANY);
4479 	cp.delete_all = 0x01;
4480 
4481 	return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY,
4482 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4483 }
4484 
4485 static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev)
4486 {
4487 	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
4488 	bool changed = false;
4489 
4490 	/* Set event mask page 2 if the HCI command for it is supported */
4491 	if (!(hdev->commands[22] & 0x04))
4492 		return 0;
4493 
4494 	/* If the Connectionless Peripheral Broadcast central role is supported,
4495 	 * enable all necessary events for it.
4496 	 */
4497 	if (lmp_cpb_central_capable(hdev)) {
4498 		events[1] |= 0x40;	/* Triggered Clock Capture */
4499 		events[1] |= 0x80;	/* Synchronization Train Complete */
4500 		events[2] |= 0x08;	/* Truncated Page Complete */
4501 		events[2] |= 0x20;	/* CPB Channel Map Change */
4502 		changed = true;
4503 	}
4504 
4505 	/* If the Connectionless Peripheral Broadcast peripheral role is supported,
4506 	 * enable all necessary events for it.
4507 	 */
4508 	if (lmp_cpb_peripheral_capable(hdev)) {
4509 		events[2] |= 0x01;	/* Synchronization Train Received */
4510 		events[2] |= 0x02;	/* CPB Receive */
4511 		events[2] |= 0x04;	/* CPB Timeout */
4512 		events[2] |= 0x10;	/* Peripheral Page Response Timeout */
4513 		changed = true;
4514 	}
4515 
4516 	/* Enable Authenticated Payload Timeout Expired event if supported */
4517 	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) {
4518 		events[2] |= 0x80;
4519 		changed = true;
4520 	}
4521 
4522 	/* Some Broadcom based controllers indicate support for Set Event
4523 	 * Mask Page 2 command, but then actually do not support it. Since
4524 	 * the default value is all bits set to zero, the command is only
4525 	 * required if the event mask has to be changed. In case no change
4526 	 * to the event mask is needed, skip this command.
4527 	 */
4528 	if (!changed)
4529 		return 0;
4530 
4531 	return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2,
4532 				     sizeof(events), events, HCI_CMD_TIMEOUT);
4533 }
4534 
4535 /* Read local codec list if the HCI command is supported */
4536 static int hci_read_local_codecs_sync(struct hci_dev *hdev)
4537 {
4538 	if (hdev->commands[45] & 0x04)
4539 		hci_read_supported_codecs_v2(hdev);
4540 	else if (hdev->commands[29] & 0x20)
4541 		hci_read_supported_codecs(hdev);
4542 
4543 	return 0;
4544 }
4545 
4546 /* Read local pairing options if the HCI command is supported */
4547 static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev)
4548 {
4549 	if (!(hdev->commands[41] & 0x08))
4550 		return 0;
4551 
4552 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS,
4553 				     0, NULL, HCI_CMD_TIMEOUT);
4554 }
4555 
4556 /* Get MWS transport configuration if the HCI command is supported */
4557 static int hci_get_mws_transport_config_sync(struct hci_dev *hdev)
4558 {
4559 	if (!mws_transport_config_capable(hdev))
4560 		return 0;
4561 
4562 	return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG,
4563 				     0, NULL, HCI_CMD_TIMEOUT);
4564 }
4565 
4566 /* Check for Synchronization Train support */
4567 static int hci_read_sync_train_params_sync(struct hci_dev *hdev)
4568 {
4569 	if (!lmp_sync_train_capable(hdev))
4570 		return 0;
4571 
4572 	return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS,
4573 				     0, NULL, HCI_CMD_TIMEOUT);
4574 }
4575 
4576 /* Enable Secure Connections if supported and configured */
4577 static int hci_write_sc_support_1_sync(struct hci_dev *hdev)
4578 {
4579 	u8 support = 0x01;
4580 
4581 	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) ||
4582 	    !bredr_sc_enabled(hdev))
4583 		return 0;
4584 
4585 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT,
4586 				     sizeof(support), &support,
4587 				     HCI_CMD_TIMEOUT);
4588 }
4589 
4590 /* Set erroneous data reporting, if supported, to match the wideband
4591  * speech setting value
4592  */
4593 static int hci_set_err_data_report_sync(struct hci_dev *hdev)
4594 {
4595 	struct hci_cp_write_def_err_data_reporting cp;
4596 	bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED);
4597 
4598 	if (!(hdev->commands[18] & 0x08) ||
4599 	    !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) ||
4600 	    test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks))
4601 		return 0;
4602 
4603 	if (enabled == hdev->err_data_reporting)
4604 		return 0;
4605 
4606 	memset(&cp, 0, sizeof(cp));
4607 	cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED :
4608 				ERR_DATA_REPORTING_DISABLED;
4609 
4610 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
4611 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4612 }
4613 
4614 static const struct hci_init_stage hci_init4[] = {
4615 	 /* HCI_OP_DELETE_STORED_LINK_KEY */
4616 	HCI_INIT(hci_delete_stored_link_key_sync),
4617 	/* HCI_OP_SET_EVENT_MASK_PAGE_2 */
4618 	HCI_INIT(hci_set_event_mask_page_2_sync),
4619 	/* HCI_OP_READ_LOCAL_CODECS */
4620 	HCI_INIT(hci_read_local_codecs_sync),
4621 	 /* HCI_OP_READ_LOCAL_PAIRING_OPTS */
4622 	HCI_INIT(hci_read_local_pairing_opts_sync),
4623 	 /* HCI_OP_GET_MWS_TRANSPORT_CONFIG */
4624 	HCI_INIT(hci_get_mws_transport_config_sync),
4625 	 /* HCI_OP_READ_SYNC_TRAIN_PARAMS */
4626 	HCI_INIT(hci_read_sync_train_params_sync),
4627 	/* HCI_OP_WRITE_SC_SUPPORT */
4628 	HCI_INIT(hci_write_sc_support_1_sync),
4629 	/* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */
4630 	HCI_INIT(hci_set_err_data_report_sync),
4631 	{}
4632 };
4633 
4634 /* Set Suggested Default Data Length to maximum if supported */
4635 static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev)
4636 {
4637 	struct hci_cp_le_write_def_data_len cp;
4638 
4639 	if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT))
4640 		return 0;
4641 
4642 	memset(&cp, 0, sizeof(cp));
4643 	cp.tx_len = cpu_to_le16(hdev->le_max_tx_len);
4644 	cp.tx_time = cpu_to_le16(hdev->le_max_tx_time);
4645 
4646 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN,
4647 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4648 }
4649 
4650 /* Set Default PHY parameters if the command is supported; this enables all
4651  * supported PHYs according to the LE feature bits.
4652  */
4653 static int hci_le_set_default_phy_sync(struct hci_dev *hdev)
4654 {
4655 	struct hci_cp_le_set_default_phy cp;
4656 
4657 	if (!(hdev->commands[35] & 0x20)) {
4658 		/* If the command is not supported it means only 1M PHY is
4659 		 * supported.
4660 		 */
4661 		hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
4662 		hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
4663 		return 0;
4664 	}
4665 
4666 	memset(&cp, 0, sizeof(cp));
4667 	cp.all_phys = 0x00;
4668 	cp.tx_phys = HCI_LE_SET_PHY_1M;
4669 	cp.rx_phys = HCI_LE_SET_PHY_1M;
4670 
4671 	/* Enables 2M PHY if supported */
4672 	if (le_2m_capable(hdev)) {
4673 		cp.tx_phys |= HCI_LE_SET_PHY_2M;
4674 		cp.rx_phys |= HCI_LE_SET_PHY_2M;
4675 	}
4676 
4677 	/* Enables Coded PHY if supported */
4678 	if (le_coded_capable(hdev)) {
4679 		cp.tx_phys |= HCI_LE_SET_PHY_CODED;
4680 		cp.rx_phys |= HCI_LE_SET_PHY_CODED;
4681 	}
4682 
4683 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
4684 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
4685 }
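
/* Illustrative result (PHY constants as defined in hci.h): on a controller
 * that supports both the 2M and Coded PHYs, the command ends up carrying
 *
 *	cp.tx_phys == cp.rx_phys ==
 *		HCI_LE_SET_PHY_1M | HCI_LE_SET_PHY_2M | HCI_LE_SET_PHY_CODED
 *
 * i.e. 0x01 | 0x02 | 0x04 == 0x07, while all_phys == 0x00 indicates the host
 * has a preference for both TX and RX.
 */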
4686 
4687 static const struct hci_init_stage le_init4[] = {
4688 	/* HCI_OP_LE_WRITE_DEF_DATA_LEN */
4689 	HCI_INIT(hci_le_set_write_def_data_len_sync),
4690 	/* HCI_OP_LE_SET_DEFAULT_PHY */
4691 	HCI_INIT(hci_le_set_default_phy_sync),
4692 	{}
4693 };
4694 
4695 static int hci_init4_sync(struct hci_dev *hdev)
4696 {
4697 	int err;
4698 
4699 	bt_dev_dbg(hdev, "");
4700 
4701 	err = hci_init_stage_sync(hdev, hci_init4);
4702 	if (err)
4703 		return err;
4704 
4705 	if (lmp_le_capable(hdev))
4706 		return hci_init_stage_sync(hdev, le_init4);
4707 
4708 	return 0;
4709 }
4710 
4711 static int hci_init_sync(struct hci_dev *hdev)
4712 {
4713 	int err;
4714 
4715 	err = hci_init1_sync(hdev);
4716 	if (err < 0)
4717 		return err;
4718 
4719 	if (hci_dev_test_flag(hdev, HCI_SETUP))
4720 		hci_debugfs_create_basic(hdev);
4721 
4722 	err = hci_init2_sync(hdev);
4723 	if (err < 0)
4724 		return err;
4725 
4726 	err = hci_init3_sync(hdev);
4727 	if (err < 0)
4728 		return err;
4729 
4730 	err = hci_init4_sync(hdev);
4731 	if (err < 0)
4732 		return err;
4733 
4734 	/* This function is only called when the controller is actually in
4735 	 * configured state. When the controller is marked as unconfigured,
4736 	 * this initialization procedure is not run.
4737 	 *
4738 	 * It means that it is possible that a controller runs through its
4739 	 * setup phase and then discovers missing settings. If that is the
4740 	 * case, then this function will not be called. It then will only
4741 	 * be called during the config phase.
4742 	 *
4743 	 * So only when in setup phase or config phase, create the debugfs
4744 	 * entries and register the SMP channels.
4745 	 */
4746 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4747 	    !hci_dev_test_flag(hdev, HCI_CONFIG))
4748 		return 0;
4749 
4750 	if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED))
4751 		return 0;
4752 
4753 	hci_debugfs_create_common(hdev);
4754 
4755 	if (lmp_bredr_capable(hdev))
4756 		hci_debugfs_create_bredr(hdev);
4757 
4758 	if (lmp_le_capable(hdev))
4759 		hci_debugfs_create_le(hdev);
4760 
4761 	return 0;
4762 }
4763 
4764 #define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc }
4765 
4766 static const struct {
4767 	unsigned long quirk;
4768 	const char *desc;
4769 } hci_broken_table[] = {
4770 	HCI_QUIRK_BROKEN(LOCAL_COMMANDS,
4771 			 "HCI Read Local Supported Commands not supported"),
4772 	HCI_QUIRK_BROKEN(STORED_LINK_KEY,
4773 			 "HCI Delete Stored Link Key command is advertised, "
4774 			 "but not supported."),
4775 	HCI_QUIRK_BROKEN(ERR_DATA_REPORTING,
4776 			 "HCI Read Default Erroneous Data Reporting command is "
4777 			 "advertised, but not supported."),
4778 	HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER,
4779 			 "HCI Read Transmit Power Level command is advertised, "
4780 			 "but not supported."),
4781 	HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL,
4782 			 "HCI Set Event Filter command not supported."),
4783 	HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN,
4784 			 "HCI Enhanced Setup Synchronous Connection command is "
4785 			 "advertised, but not supported."),
4786 	HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT,
4787 			 "HCI LE Set Random Private Address Timeout command is "
4788 			 "advertised, but not supported."),
4789 	HCI_QUIRK_BROKEN(LE_CODED,
4790 			 "HCI LE Coded PHY feature bit is set, "
4791 			 "but its usage is not supported.")
4792 };
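
/* For illustration: the first entry above, HCI_QUIRK_BROKEN(LOCAL_COMMANDS,
 * ...), expands to
 *
 *	{ HCI_QUIRK_BROKEN_LOCAL_COMMANDS,
 *	  "HCI Read Local Supported Commands not supported" }
 *
 * which lets hci_dev_setup_sync() below warn once for every broken-feature
 * quirk a driver has set.
 */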
4793 
4794 /* This function handles hdev setup stage:
4795  *
4796  * Calls hdev->setup
4797  * Setup address if HCI_QUIRK_USE_BDADDR_PROPERTY is set.
4798  */
4799 static int hci_dev_setup_sync(struct hci_dev *hdev)
4800 {
4801 	int ret = 0;
4802 	bool invalid_bdaddr;
4803 	size_t i;
4804 
4805 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4806 	    !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks))
4807 		return 0;
4808 
4809 	bt_dev_dbg(hdev, "");
4810 
4811 	hci_sock_dev_event(hdev, HCI_DEV_SETUP);
4812 
4813 	if (hdev->setup)
4814 		ret = hdev->setup(hdev);
4815 
4816 	for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) {
4817 		if (test_bit(hci_broken_table[i].quirk, &hdev->quirks))
4818 			bt_dev_warn(hdev, "%s", hci_broken_table[i].desc);
4819 	}
4820 
4821 	/* The transport driver can set the quirk to mark the
4822 	 * BD_ADDR invalid before creating the HCI device or in
4823 	 * its setup callback.
4824 	 */
4825 	invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
4826 			 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks);
4827 	if (!ret) {
4828 		if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) &&
4829 		    !bacmp(&hdev->public_addr, BDADDR_ANY))
4830 			hci_dev_get_bd_addr_from_property(hdev);
4831 
4832 		if (invalid_bdaddr && bacmp(&hdev->public_addr, BDADDR_ANY) &&
4833 		    hdev->set_bdaddr) {
4834 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
4835 			if (!ret)
4836 				invalid_bdaddr = false;
4837 		}
4838 	}
4839 
4840 	/* The transport driver can set these quirks before
4841 	 * creating the HCI device or in its setup callback.
4842 	 *
4843 	 * For the invalid BD_ADDR quirk it is possible that
4844 	 * it becomes a valid address if the bootloader does
4845 	 * provide it (see above).
4846 	 *
4847 	 * In case any of them is set, the controller has to
4848 	 * start up as unconfigured.
4849 	 */
4850 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
4851 	    invalid_bdaddr)
4852 		hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
4853 
4854 	/* For an unconfigured controller it is required to
4855 	 * read at least the version information provided by
4856 	 * the Read Local Version Information command.
4857 	 *
4858 	 * If the set_bdaddr driver callback is provided, then
4859 	 * also the original Bluetooth public device address
4860 	 * will be read using the Read BD Address command.
4861 	 */
4862 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
4863 		return hci_unconf_init_sync(hdev);
4864 
4865 	return ret;
4866 }
4867 
4868 /* This function handles hdev init stage:
4869  *
4870  * Calls hci_dev_setup_sync to perform setup stage
4871  * Calls hci_init_sync to perform HCI command init sequence
4872  */
4873 static int hci_dev_init_sync(struct hci_dev *hdev)
4874 {
4875 	int ret;
4876 
4877 	bt_dev_dbg(hdev, "");
4878 
4879 	atomic_set(&hdev->cmd_cnt, 1);
4880 	set_bit(HCI_INIT, &hdev->flags);
4881 
4882 	ret = hci_dev_setup_sync(hdev);
4883 
4884 	if (hci_dev_test_flag(hdev, HCI_CONFIG)) {
4885 		/* If public address change is configured, ensure that
4886 		 * the address gets programmed. If the driver does not
4887 		 * support changing the public address, fail the power
4888 		 * on procedure.
4889 		 */
4890 		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
4891 		    hdev->set_bdaddr)
4892 			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
4893 		else
4894 			ret = -EADDRNOTAVAIL;
4895 	}
4896 
4897 	if (!ret) {
4898 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
4899 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4900 			ret = hci_init_sync(hdev);
4901 			if (!ret && hdev->post_init)
4902 				ret = hdev->post_init(hdev);
4903 		}
4904 	}
4905 
4906 	/* If the HCI Reset command is clearing all diagnostic settings,
4907 	 * then they need to be reprogrammed after the init procedure
4908 	 * completed.
4909 	 */
4910 	if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
4911 	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4912 	    hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
4913 		ret = hdev->set_diag(hdev, true);
4914 
4915 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
4916 		msft_do_open(hdev);
4917 		aosp_do_open(hdev);
4918 	}
4919 
4920 	clear_bit(HCI_INIT, &hdev->flags);
4921 
4922 	return ret;
4923 }
4924 
4925 int hci_dev_open_sync(struct hci_dev *hdev)
4926 {
4927 	int ret;
4928 
4929 	bt_dev_dbg(hdev, "");
4930 
4931 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
4932 		ret = -ENODEV;
4933 		goto done;
4934 	}
4935 
4936 	if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4937 	    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
4938 		/* Check for rfkill but allow the HCI setup stage to
4939 		 * proceed (which in itself doesn't cause any RF activity).
4940 		 */
4941 		if (hci_dev_test_flag(hdev, HCI_RFKILLED)) {
4942 			ret = -ERFKILL;
4943 			goto done;
4944 		}
4945 
4946 		/* Check for valid public address or a configured static
4947 		 * random address, but let the HCI setup proceed to
4948 		 * be able to determine if there is a public address
4949 		 * or not.
4950 		 *
4951 		 * In case of user channel usage, it is not important
4952 		 * if a public address or static random address is
4953 		 * available.
4954 		 */
4955 		if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4956 		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
4957 		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
4958 			ret = -EADDRNOTAVAIL;
4959 			goto done;
4960 		}
4961 	}
4962 
4963 	if (test_bit(HCI_UP, &hdev->flags)) {
4964 		ret = -EALREADY;
4965 		goto done;
4966 	}
4967 
4968 	if (hdev->open(hdev)) {
4969 		ret = -EIO;
4970 		goto done;
4971 	}
4972 
4973 	hci_devcd_reset(hdev);
4974 
4975 	set_bit(HCI_RUNNING, &hdev->flags);
4976 	hci_sock_dev_event(hdev, HCI_DEV_OPEN);
4977 
4978 	ret = hci_dev_init_sync(hdev);
4979 	if (!ret) {
4980 		hci_dev_hold(hdev);
4981 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
4982 		hci_adv_instances_set_rpa_expired(hdev, true);
4983 		set_bit(HCI_UP, &hdev->flags);
4984 		hci_sock_dev_event(hdev, HCI_DEV_UP);
4985 		hci_leds_update_powered(hdev, true);
4986 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
4987 		    !hci_dev_test_flag(hdev, HCI_CONFIG) &&
4988 		    !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
4989 		    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4990 		    hci_dev_test_flag(hdev, HCI_MGMT)) {
4991 			ret = hci_powered_update_sync(hdev);
4992 			mgmt_power_on(hdev, ret);
4993 		}
4994 	} else {
4995 		/* Init failed, cleanup */
4996 		flush_work(&hdev->tx_work);
4997 
4998 		/* Since hci_rx_work() may queue new cmd_work, it should
4999 		 * be flushed first to avoid an unexpected call of
5000 		 * hci_cmd_work().
5001 		 */
5002 		flush_work(&hdev->rx_work);
5003 		flush_work(&hdev->cmd_work);
5004 
5005 		skb_queue_purge(&hdev->cmd_q);
5006 		skb_queue_purge(&hdev->rx_q);
5007 
5008 		if (hdev->flush)
5009 			hdev->flush(hdev);
5010 
5011 		if (hdev->sent_cmd) {
5012 			cancel_delayed_work_sync(&hdev->cmd_timer);
5013 			kfree_skb(hdev->sent_cmd);
5014 			hdev->sent_cmd = NULL;
5015 		}
5016 
5017 		if (hdev->req_skb) {
5018 			kfree_skb(hdev->req_skb);
5019 			hdev->req_skb = NULL;
5020 		}
5021 
5022 		clear_bit(HCI_RUNNING, &hdev->flags);
5023 		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
5024 
5025 		hdev->close(hdev);
5026 		hdev->flags &= BIT(HCI_RAW);
5027 	}
5028 
5029 done:
5030 	return ret;
5031 }
5032 
5033 /* This function requires the caller holds hdev->lock */
5034 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
5035 {
5036 	struct hci_conn_params *p;
5037 
5038 	list_for_each_entry(p, &hdev->le_conn_params, list) {
5039 		hci_pend_le_list_del_init(p);
5040 		if (p->conn) {
5041 			hci_conn_drop(p->conn);
5042 			hci_conn_put(p->conn);
5043 			p->conn = NULL;
5044 		}
5045 	}
5046 
5047 	BT_DBG("All LE pending actions cleared");
5048 }
5049 
5050 static int hci_dev_shutdown(struct hci_dev *hdev)
5051 {
5052 	int err = 0;
5053 	/* Similar to how we first do setup and then set the exclusive access
5054 	 * bit for userspace, we must first unset userchannel and then clean up.
5055 	 * Otherwise, the kernel can't properly use the hci channel to clean up
5056 	 * the controller (some shutdown routines require sending additional
5057 	 * commands to the controller for example).
5058 	 */
5059 	bool was_userchannel =
5060 		hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL);
5061 
5062 	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
5063 	    test_bit(HCI_UP, &hdev->flags)) {
5064 		/* Execute vendor specific shutdown routine */
5065 		if (hdev->shutdown)
5066 			err = hdev->shutdown(hdev);
5067 	}
5068 
5069 	if (was_userchannel)
5070 		hci_dev_set_flag(hdev, HCI_USER_CHANNEL);
5071 
5072 	return err;
5073 }
5074 
5075 int hci_dev_close_sync(struct hci_dev *hdev)
5076 {
5077 	bool auto_off;
5078 	int err = 0;
5079 
5080 	bt_dev_dbg(hdev, "");
5081 
5082 	cancel_delayed_work(&hdev->power_off);
5083 	cancel_delayed_work(&hdev->ncmd_timer);
5084 	cancel_delayed_work(&hdev->le_scan_disable);
5085 
5086 	hci_cmd_sync_cancel_sync(hdev, ENODEV);
5087 
5088 	cancel_interleave_scan(hdev);
5089 
5090 	if (hdev->adv_instance_timeout) {
5091 		cancel_delayed_work_sync(&hdev->adv_instance_expire);
5092 		hdev->adv_instance_timeout = 0;
5093 	}
5094 
5095 	err = hci_dev_shutdown(hdev);
5096 
5097 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
5098 		cancel_delayed_work_sync(&hdev->cmd_timer);
5099 		return err;
5100 	}
5101 
5102 	hci_leds_update_powered(hdev, false);
5103 
5104 	/* Flush RX and TX works */
5105 	flush_work(&hdev->tx_work);
5106 	flush_work(&hdev->rx_work);
5107 
5108 	if (hdev->discov_timeout > 0) {
5109 		hdev->discov_timeout = 0;
5110 		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
5111 		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
5112 	}
5113 
5114 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
5115 		cancel_delayed_work(&hdev->service_cache);
5116 
5117 	if (hci_dev_test_flag(hdev, HCI_MGMT)) {
5118 		struct adv_info *adv_instance;
5119 
5120 		cancel_delayed_work_sync(&hdev->rpa_expired);
5121 
5122 		list_for_each_entry(adv_instance, &hdev->adv_instances, list)
5123 			cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
5124 	}
5125 
5126 	/* Avoid potential lockdep warnings from the *_flush() calls by
5127 	 * ensuring the workqueue is empty up front.
5128 	 */
5129 	drain_workqueue(hdev->workqueue);
5130 
5131 	hci_dev_lock(hdev);
5132 
5133 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5134 
5135 	auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF);
5136 
5137 	if (!auto_off && !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
5138 	    hci_dev_test_flag(hdev, HCI_MGMT))
5139 		__mgmt_power_off(hdev);
5140 
5141 	hci_inquiry_cache_flush(hdev);
5142 	hci_pend_le_actions_clear(hdev);
5143 	hci_conn_hash_flush(hdev);
5144 	/* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */
5145 	smp_unregister(hdev);
5146 	hci_dev_unlock(hdev);
5147 
5148 	hci_sock_dev_event(hdev, HCI_DEV_DOWN);
5149 
5150 	if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
5151 		aosp_do_close(hdev);
5152 		msft_do_close(hdev);
5153 	}
5154 
5155 	if (hdev->flush)
5156 		hdev->flush(hdev);
5157 
5158 	/* Reset device */
5159 	skb_queue_purge(&hdev->cmd_q);
5160 	atomic_set(&hdev->cmd_cnt, 1);
5161 	if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) &&
5162 	    !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
5163 		set_bit(HCI_INIT, &hdev->flags);
5164 		hci_reset_sync(hdev);
5165 		clear_bit(HCI_INIT, &hdev->flags);
5166 	}
5167 
5168 	/* Flush cmd work */
5169 	flush_work(&hdev->cmd_work);
5170 
5171 	/* Drop queues */
5172 	skb_queue_purge(&hdev->rx_q);
5173 	skb_queue_purge(&hdev->cmd_q);
5174 	skb_queue_purge(&hdev->raw_q);
5175 
5176 	/* Drop last sent command */
5177 	if (hdev->sent_cmd) {
5178 		cancel_delayed_work_sync(&hdev->cmd_timer);
5179 		kfree_skb(hdev->sent_cmd);
5180 		hdev->sent_cmd = NULL;
5181 	}
5182 
5183 	/* Drop last request */
5184 	if (hdev->req_skb) {
5185 		kfree_skb(hdev->req_skb);
5186 		hdev->req_skb = NULL;
5187 	}
5188 
5189 	clear_bit(HCI_RUNNING, &hdev->flags);
5190 	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
5191 
5192 	/* After this point our queues are empty and no tasks are scheduled. */
5193 	hdev->close(hdev);
5194 
5195 	/* Clear flags */
5196 	hdev->flags &= BIT(HCI_RAW);
5197 	hci_dev_clear_volatile_flags(hdev);
5198 
5199 	memset(hdev->eir, 0, sizeof(hdev->eir));
5200 	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
5201 	bacpy(&hdev->random_addr, BDADDR_ANY);
5202 	hci_codec_list_clear(&hdev->local_codecs);
5203 
5204 	hci_dev_put(hdev);
5205 	return err;
5206 }
5207 
5208 /* This function performs the HCI power on command sequence as follows:
5209  *
5210  * If the controller is already up (HCI_UP), perform the
5211  * hci_powered_update_sync sequence; otherwise run hci_dev_open_sync, which
5212  * follows up with hci_powered_update_sync once the init sequence completes.
5213  */
5214 static int hci_power_on_sync(struct hci_dev *hdev)
5215 {
5216 	int err;
5217 
5218 	if (test_bit(HCI_UP, &hdev->flags) &&
5219 	    hci_dev_test_flag(hdev, HCI_MGMT) &&
5220 	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
5221 		cancel_delayed_work(&hdev->power_off);
5222 		return hci_powered_update_sync(hdev);
5223 	}
5224 
5225 	err = hci_dev_open_sync(hdev);
5226 	if (err < 0)
5227 		return err;
5228 
5229 	/* During the HCI setup phase, a few error conditions are
5230 	 * ignored and they need to be checked now. If they are still
5231 	 * valid, it is important to return the device back off.
5232 	 */
5233 	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
5234 	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
5235 	    (!bacmp(&hdev->bdaddr, BDADDR_ANY) &&
5236 	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
5237 		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
5238 		hci_dev_close_sync(hdev);
5239 	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
5240 		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
5241 				   HCI_AUTO_OFF_TIMEOUT);
5242 	}
5243 
5244 	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
5245 		/* For unconfigured devices, set the HCI_RAW flag
5246 		 * so that userspace can easily identify them.
5247 		 */
5248 		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5249 			set_bit(HCI_RAW, &hdev->flags);
5250 
5251 		/* For fully configured devices, this will send
5252 		 * the Index Added event. For unconfigured devices,
5253 		 * it will send the Unconfigured Index Added event.
5254 		 *
5255 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
5256 		 * and no event will be sent.
5257 		 */
5258 		mgmt_index_added(hdev);
5259 	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
5260 		/* Now that the controller is configured, it is
5261 		 * important to clear the HCI_RAW flag.
5262 		 */
5263 		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
5264 			clear_bit(HCI_RAW, &hdev->flags);
5265 
5266 		/* Powering on the controller with HCI_CONFIG set only
5267 		 * happens with the transition from unconfigured to
5268 		 * configured. This will send the Index Added event.
5269 		 */
5270 		mgmt_index_added(hdev);
5271 	}
5272 
5273 	return 0;
5274 }
5275 
5276 static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr)
5277 {
5278 	struct hci_cp_remote_name_req_cancel cp;
5279 
5280 	memset(&cp, 0, sizeof(cp));
5281 	bacpy(&cp.bdaddr, addr);
5282 
5283 	return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL,
5284 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5285 }
5286 
5287 int hci_stop_discovery_sync(struct hci_dev *hdev)
5288 {
5289 	struct discovery_state *d = &hdev->discovery;
5290 	struct inquiry_entry *e;
5291 	int err;
5292 
5293 	bt_dev_dbg(hdev, "state %u", hdev->discovery.state);
5294 
5295 	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
5296 		if (test_bit(HCI_INQUIRY, &hdev->flags)) {
5297 			err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL,
5298 						    0, NULL, HCI_CMD_TIMEOUT);
5299 			if (err)
5300 				return err;
5301 		}
5302 
5303 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
5304 			cancel_delayed_work(&hdev->le_scan_disable);
5305 
5306 			err = hci_scan_disable_sync(hdev);
5307 			if (err)
5308 				return err;
5309 		}
5310 
5311 	} else {
5312 		err = hci_scan_disable_sync(hdev);
5313 		if (err)
5314 			return err;
5315 	}
5316 
5317 	/* Resume advertising if it was paused */
5318 	if (use_ll_privacy(hdev))
5319 		hci_resume_advertising_sync(hdev);
5320 
5321 	/* No further actions needed for LE-only discovery */
5322 	if (d->type == DISCOV_TYPE_LE)
5323 		return 0;
5324 
5325 	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
5326 		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
5327 						     NAME_PENDING);
5328 		if (!e)
5329 			return 0;
5330 
5331 		return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr);
5332 	}
5333 
5334 	return 0;
5335 }
5336 
5337 static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn,
5338 			       u8 reason)
5339 {
5340 	struct hci_cp_disconnect cp;
5341 
5342 	if (test_bit(HCI_CONN_BIG_CREATED, &conn->flags)) {
5343 		/* This is a BIS connection, hci_conn_del will
5344 		 * do the necessary cleanup.
5345 		 */
5346 		hci_dev_lock(hdev);
5347 		hci_conn_failed(conn, reason);
5348 		hci_dev_unlock(hdev);
5349 
5350 		return 0;
5351 	}
5352 
5353 	memset(&cp, 0, sizeof(cp));
5354 	cp.handle = cpu_to_le16(conn->handle);
5355 	cp.reason = reason;
5356 
5357 	/* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
5358 	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
5359 	 * used when suspending or powering off, where we don't want to wait
5360 	 * for the peer's response.
5361 	 */
5362 	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
5363 		return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT,
5364 						sizeof(cp), &cp,
5365 						HCI_EV_DISCONN_COMPLETE,
5366 						HCI_CMD_TIMEOUT, NULL);
5367 
5368 	return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp,
5369 				     HCI_CMD_TIMEOUT);
5370 }
5371 
5372 static int hci_le_connect_cancel_sync(struct hci_dev *hdev,
5373 				      struct hci_conn *conn, u8 reason)
5374 {
5375 	/* Return the reason if still scanning, since the connection shall
5376 	 * probably be cleaned up directly.
5377 	 */
5378 	if (test_bit(HCI_CONN_SCANNING, &conn->flags))
5379 		return reason;
5380 
5381 	if (conn->role == HCI_ROLE_SLAVE ||
5382 	    test_and_set_bit(HCI_CONN_CANCEL, &conn->flags))
5383 		return 0;
5384 
5385 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL,
5386 				     0, NULL, HCI_CMD_TIMEOUT);
5387 }
5388 
5389 static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn,
5390 				   u8 reason)
5391 {
5392 	if (conn->type == LE_LINK)
5393 		return hci_le_connect_cancel_sync(hdev, conn, reason);
5394 
5395 	if (conn->type == ISO_LINK) {
5396 		/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
5397 		 * page 1857:
5398 		 *
5399 		 * If this command is issued for a CIS on the Central and the
5400 		 * CIS is successfully terminated before being established,
5401 		 * then an HCI_LE_CIS_Established event shall also be sent for
5402 		 * this CIS with the Status Operation Cancelled by Host (0x44).
5403 		 */
5404 		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
5405 			return hci_disconnect_sync(hdev, conn, reason);
5406 
5407 		/* A CIS for which no Create CIS was sent has nothing to cancel */
5408 		if (bacmp(&conn->dst, BDADDR_ANY))
5409 			return HCI_ERROR_LOCAL_HOST_TERM;
5410 
5411 		/* There is no way to cancel a BIS without terminating the BIG
5412 		 * which is done later on connection cleanup.
5413 		 */
5414 		return 0;
5415 	}
5416 
5417 	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
5418 		return 0;
5419 
5420 	/* Wait for HCI_EV_CONN_COMPLETE, not HCI_EV_CMD_STATUS, when the
5421 	 * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is
5422 	 * used when suspending or powering off, where we don't want to wait
5423 	 * for the peer's response.
5424 	 */
5425 	if (reason != HCI_ERROR_REMOTE_POWER_OFF)
5426 		return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN_CANCEL,
5427 						6, &conn->dst,
5428 						HCI_EV_CONN_COMPLETE,
5429 						HCI_CMD_TIMEOUT, NULL);
5430 
5431 	return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL,
5432 				     6, &conn->dst, HCI_CMD_TIMEOUT);
5433 }
5434 
5435 static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn,
5436 			       u8 reason)
5437 {
5438 	struct hci_cp_reject_sync_conn_req cp;
5439 
5440 	memset(&cp, 0, sizeof(cp));
5441 	bacpy(&cp.bdaddr, &conn->dst);
5442 	cp.reason = reason;
5443 
5444 	/* SCO rejection has its own limited set of
5445 	 * allowed error values (0x0D-0x0F).
5446 	 */
5447 	if (reason < 0x0d || reason > 0x0f)
5448 		cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
5449 
5450 	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ,
5451 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5452 }
5453 
5454 static int hci_le_reject_cis_sync(struct hci_dev *hdev, struct hci_conn *conn,
5455 				  u8 reason)
5456 {
5457 	struct hci_cp_le_reject_cis cp;
5458 
5459 	memset(&cp, 0, sizeof(cp));
5460 	cp.handle = cpu_to_le16(conn->handle);
5461 	cp.reason = reason;
5462 
5463 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REJECT_CIS,
5464 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5465 }
5466 
5467 static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
5468 				u8 reason)
5469 {
5470 	struct hci_cp_reject_conn_req cp;
5471 
5472 	if (conn->type == ISO_LINK)
5473 		return hci_le_reject_cis_sync(hdev, conn, reason);
5474 
5475 	if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
5476 		return hci_reject_sco_sync(hdev, conn, reason);
5477 
5478 	memset(&cp, 0, sizeof(cp));
5479 	bacpy(&cp.bdaddr, &conn->dst);
5480 	cp.reason = reason;
5481 
5482 	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ,
5483 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5484 }
5485 
5486 int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
5487 {
5488 	int err = 0;
5489 	u16 handle = conn->handle;
5490 	bool disconnect = false;
5491 	struct hci_conn *c;
5492 
5493 	switch (conn->state) {
5494 	case BT_CONNECTED:
5495 	case BT_CONFIG:
5496 		err = hci_disconnect_sync(hdev, conn, reason);
5497 		break;
5498 	case BT_CONNECT:
5499 		err = hci_connect_cancel_sync(hdev, conn, reason);
5500 		break;
5501 	case BT_CONNECT2:
5502 		err = hci_reject_conn_sync(hdev, conn, reason);
5503 		break;
5504 	case BT_OPEN:
5505 	case BT_BOUND:
5506 		break;
5507 	default:
5508 		disconnect = true;
5509 		break;
5510 	}
5511 
5512 	hci_dev_lock(hdev);
5513 
5514 	/* Check if the connection has been cleaned up concurrently */
5515 	c = hci_conn_hash_lookup_handle(hdev, handle);
5516 	if (!c || c != conn) {
5517 		err = 0;
5518 		goto unlock;
5519 	}
5520 
5521 	/* Clean up the hci_conn object if it cannot be cancelled, as that
5522 	 * likely means the controller and host stack are out of sync, or,
5523 	 * in the LE case, it was still scanning so it can be cleaned up
5524 	 * safely.
5525 	 */
5526 	if (disconnect) {
5527 		conn->state = BT_CLOSED;
5528 		hci_disconn_cfm(conn, reason);
5529 		hci_conn_del(conn);
5530 	} else {
5531 		hci_conn_failed(conn, reason);
5532 	}
5533 
5534 unlock:
5535 	hci_dev_unlock(hdev);
5536 	return err;
5537 }
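
/* Editor's sketch: a typical caller of hci_abort_conn_sync() passes
 * HCI_ERROR_REMOTE_USER_TERM (0x13) for a user-initiated teardown; the
 * function then waits for the completion event matching the connection
 * state. The wrapper name below is illustrative, not part of this file.
 */
static int example_user_disconnect(struct hci_dev *hdev,
				   struct hci_conn *conn)
{
	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM);
}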
5538 
5539 static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
5540 {
5541 	struct list_head *head = &hdev->conn_hash.list;
5542 	struct hci_conn *conn;
5543 
5544 	rcu_read_lock();
5545 	while ((conn = list_first_or_null_rcu(head, struct hci_conn, list))) {
5546 		/* Make sure the connection is not freed while unlocking */
5547 		conn = hci_conn_get(conn);
5548 		rcu_read_unlock();
5549 		/* Disregard possible errors since hci_conn_del shall have been
5550 		 * called even if an error occurred: the error path causes
5551 		 * hci_conn_failed to be called, which calls hci_conn_del
5552 		 * internally.
5553 		 */
5554 		hci_abort_conn_sync(hdev, conn, reason);
5555 		hci_conn_put(conn);
5556 		rcu_read_lock();
5557 	}
5558 	rcu_read_unlock();
5559 
5560 	return 0;
5561 }
5562 
5563 /* This function performs the HCI power off command sequence as follows:
5564  *
5565  * Clear Advertising
5566  * Stop Discovery
5567  * Disconnect all connections
5568  * hci_dev_close_sync
5569  */
5570 static int hci_power_off_sync(struct hci_dev *hdev)
5571 {
5572 	int err;
5573 
5574 	/* If controller is already down there is nothing to do */
5575 	if (!test_bit(HCI_UP, &hdev->flags))
5576 		return 0;
5577 
5578 	hci_dev_set_flag(hdev, HCI_POWERING_DOWN);
5579 
5580 	if (test_bit(HCI_ISCAN, &hdev->flags) ||
5581 	    test_bit(HCI_PSCAN, &hdev->flags)) {
5582 		err = hci_write_scan_enable_sync(hdev, 0x00);
5583 		if (err)
5584 			goto out;
5585 	}
5586 
5587 	err = hci_clear_adv_sync(hdev, NULL, false);
5588 	if (err)
5589 		goto out;
5590 
5591 	err = hci_stop_discovery_sync(hdev);
5592 	if (err)
5593 		goto out;
5594 
5595 	/* Terminated due to Power Off */
5596 	err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
5597 	if (err)
5598 		goto out;
5599 
5600 	err = hci_dev_close_sync(hdev);
5601 
5602 out:
5603 	hci_dev_clear_flag(hdev, HCI_POWERING_DOWN);
5604 	return err;
5605 }
5606 
5607 int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
5608 {
5609 	if (val)
5610 		return hci_power_on_sync(hdev);
5611 
5612 	return hci_power_off_sync(hdev);
5613 }
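
/* Editor's sketch: hci_set_powered_sync() is meant to run from cmd_sync
 * context, so callers such as the MGMT Set Powered handler queue it via
 * hci_cmd_sync_queue() rather than calling it directly. A hedged outline
 * of such a wrapper (the name and data layout are illustrative):
 */
static int example_set_powered_sync(struct hci_dev *hdev, void *data)
{
	u8 *val = data;

	return hci_set_powered_sync(hdev, *val);
}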
5614 
5615 static int hci_write_iac_sync(struct hci_dev *hdev)
5616 {
5617 	struct hci_cp_write_current_iac_lap cp;
5618 
5619 	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
5620 		return 0;
5621 
5622 	memset(&cp, 0, sizeof(cp));
5623 
5624 	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
5625 		/* Limited discoverable mode */
5626 		cp.num_iac = min_t(u8, hdev->num_iac, 2);
5627 		cp.iac_lap[0] = 0x00;	/* LIAC */
5628 		cp.iac_lap[1] = 0x8b;
5629 		cp.iac_lap[2] = 0x9e;
5630 		cp.iac_lap[3] = 0x33;	/* GIAC */
5631 		cp.iac_lap[4] = 0x8b;
5632 		cp.iac_lap[5] = 0x9e;
5633 	} else {
5634 		/* General discoverable mode */
5635 		cp.num_iac = 1;
5636 		cp.iac_lap[0] = 0x33;	/* GIAC */
5637 		cp.iac_lap[1] = 0x8b;
5638 		cp.iac_lap[2] = 0x9e;
5639 	}
5640 
5641 	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
5642 				     (cp.num_iac * 3) + 1, &cp,
5643 				     HCI_CMD_TIMEOUT);
5644 }
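
/* Editor's note: the LIAC and GIAC above are the 24-bit LAPs 0x9E8B00
 * and 0x9E8B33, serialized least-significant byte first as the command
 * layout requires. An illustrative helper that packs one LAP:
 */
static void example_pack_lap(u8 dst[3], u32 lap)
{
	dst[0] = lap & 0xff;		/* 0x33 for the GIAC */
	dst[1] = (lap >> 8) & 0xff;	/* 0x8b */
	dst[2] = (lap >> 16) & 0xff;	/* 0x9e */
}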
5645 
5646 int hci_update_discoverable_sync(struct hci_dev *hdev)
5647 {
5648 	int err = 0;
5649 
5650 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
5651 		err = hci_write_iac_sync(hdev);
5652 		if (err)
5653 			return err;
5654 
5655 		err = hci_update_scan_sync(hdev);
5656 		if (err)
5657 			return err;
5658 
5659 		err = hci_update_class_sync(hdev);
5660 		if (err)
5661 			return err;
5662 	}
5663 
5664 	/* Advertising instances don't use the global discoverable setting, so
5665 	 * only update AD if advertising was enabled using Set Advertising.
5666 	 */
5667 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
5668 		err = hci_update_adv_data_sync(hdev, 0x00);
5669 		if (err)
5670 			return err;
5671 
5672 		/* Discoverable mode affects the local advertising
5673 		 * address in limited privacy mode.
5674 		 */
5675 		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
5676 			if (ext_adv_capable(hdev))
5677 				err = hci_start_ext_adv_sync(hdev, 0x00);
5678 			else
5679 				err = hci_enable_advertising_sync(hdev);
5680 		}
5681 	}
5682 
5683 	return err;
5684 }
5685 
5686 static int update_discoverable_sync(struct hci_dev *hdev, void *data)
5687 {
5688 	return hci_update_discoverable_sync(hdev);
5689 }
5690 
5691 int hci_update_discoverable(struct hci_dev *hdev)
5692 {
5693 	/* Only queue if it would have any effect */
5694 	if (hdev_is_powered(hdev) &&
5695 	    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
5696 	    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
5697 	    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
5698 		return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL,
5699 					  NULL);
5700 
5701 	return 0;
5702 }
5703 
5704 int hci_update_connectable_sync(struct hci_dev *hdev)
5705 {
5706 	int err;
5707 
5708 	err = hci_update_scan_sync(hdev);
5709 	if (err)
5710 		return err;
5711 
5712 	/* If BR/EDR is not enabled and we disable advertising as a
5713 	 * by-product of disabling connectable, we need to update the
5714 	 * advertising flags.
5715 	 */
5716 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
5717 		err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
5718 
5719 	/* Update the advertising parameters if necessary */
5720 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
5721 	    !list_empty(&hdev->adv_instances)) {
5722 		if (ext_adv_capable(hdev))
5723 			err = hci_start_ext_adv_sync(hdev,
5724 						     hdev->cur_adv_instance);
5725 		else
5726 			err = hci_enable_advertising_sync(hdev);
5727 
5728 		if (err)
5729 			return err;
5730 	}
5731 
5732 	return hci_update_passive_scan_sync(hdev);
5733 }
5734 
5735 int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp)
5736 {
5737 	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
5738 	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
5739 	struct hci_cp_inquiry cp;
5740 
5741 	bt_dev_dbg(hdev, "");
5742 
5743 	if (test_bit(HCI_INQUIRY, &hdev->flags))
5744 		return 0;
5745 
5746 	hci_dev_lock(hdev);
5747 	hci_inquiry_cache_flush(hdev);
5748 	hci_dev_unlock(hdev);
5749 
5750 	memset(&cp, 0, sizeof(cp));
5751 
5752 	if (hdev->discovery.limited)
5753 		memcpy(&cp.lap, liac, sizeof(cp.lap));
5754 	else
5755 		memcpy(&cp.lap, giac, sizeof(cp.lap));
5756 
5757 	cp.length = length;
5758 	cp.num_rsp = num_rsp;
5759 
5760 	return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
5761 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
5762 }
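
/* Editor's note: cp.length is in units of 1.28 s, so
 * DISCOV_BREDR_INQUIRY_LEN (0x08) requests roughly 10.24 s of inquiry,
 * and num_rsp == 0 places no limit on the number of responses. An
 * illustrative call (the wrapper name is hypothetical):
 */
static int example_bredr_inquiry(struct hci_dev *hdev)
{
	/* 0x08 * 1.28 s ~= 10.24 s, unlimited responses */
	return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
}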
5763 
5764 static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
5765 {
5766 	u8 own_addr_type;
5767 	/* Accept list is not used for discovery */
5768 	u8 filter_policy = 0x00;
5769 	/* Default is to enable duplicates filter */
5770 	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5771 	int err;
5772 
5773 	bt_dev_dbg(hdev, "");
5774 
5775 	/* If controller is scanning, it means the passive scanning is
5776 	 * running. Thus, we should temporarily stop it in order to set the
5777 	 * discovery scanning parameters.
5778 	 */
5779 	err = hci_scan_disable_sync(hdev);
5780 	if (err) {
5781 		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
5782 		return err;
5783 	}
5784 
5785 	cancel_interleave_scan(hdev);
5786 
5787 	/* Pause address resolution for active scan and stop advertising if
5788 	 * privacy is enabled.
5789 	 */
5790 	err = hci_pause_addr_resolution(hdev);
5791 	if (err)
5792 		goto failed;
5793 
5794 	/* All active scans will be done with either a resolvable private
5795 	 * address (when privacy feature has been enabled) or non-resolvable
5796 	 * private address.
5797 	 */
5798 	err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev),
5799 					     &own_addr_type);
5800 	if (err < 0)
5801 		own_addr_type = ADDR_LE_DEV_PUBLIC;
5802 
5803 	if (hci_is_adv_monitoring(hdev) ||
5804 	    (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
5805 	    hdev->discovery.result_filtering)) {
5806 		/* The duplicates filter should be disabled when some advertisement
5807 		 * monitor is activated, otherwise AdvMon can only receive one
5808 		 * advertisement for one peer during active scanning, and
5809 		 * might report loss to these peers.
5810 		 *
5811 		 * If the controller does strict duplicate filtering and the
5812 		 * discovery requires result filtering, disable controller-based
5813 		 * filtering, since it can suppress reports that would match the
5814 		 * host filter.
5815 		 */
5816 		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
5817 	}
5818 
5819 	err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval,
5820 				  hdev->le_scan_window_discovery,
5821 				  own_addr_type, filter_policy, filter_dup);
5822 	if (!err)
5823 		return err;
5824 
5825 failed:
5826 	/* Resume advertising if it was paused */
5827 	if (use_ll_privacy(hdev))
5828 		hci_resume_advertising_sync(hdev);
5829 
5830 	/* Resume passive scanning */
5831 	hci_update_passive_scan_sync(hdev);
5832 	return err;
5833 }
5834 
5835 static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
5836 {
5837 	int err;
5838 
5839 	bt_dev_dbg(hdev, "");
5840 
5841 	err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2);
5842 	if (err)
5843 		return err;
5844 
5845 	return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
5846 }
5847 
5848 int hci_start_discovery_sync(struct hci_dev *hdev)
5849 {
5850 	unsigned long timeout;
5851 	int err;
5852 
5853 	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
5854 
5855 	switch (hdev->discovery.type) {
5856 	case DISCOV_TYPE_BREDR:
5857 		return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0);
5858 	case DISCOV_TYPE_INTERLEAVED:
5859 		/* When running simultaneous discovery, the LE scanning time
5860 		 * should occupy the whole discovery time since BR/EDR inquiry
5861 		 * and LE scanning are scheduled by the controller.
5862 		 *
5863 		 * For interleaving discovery in comparison, BR/EDR inquiry
5864 		 * and LE scanning are done sequentially with separate
5865 		 * timeouts.
5866 		 */
5867 		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
5868 			     &hdev->quirks)) {
5869 			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
5870 			/* During simultaneous discovery, we double the LE scan
5871 			 * interval. We must leave some time for the controller
5872 			 * to do BR/EDR inquiry.
5873 			 */
5874 			err = hci_start_interleaved_discovery_sync(hdev);
5875 			break;
5876 		}
5877 
5878 		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
5879 		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
5880 		break;
5881 	case DISCOV_TYPE_LE:
5882 		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
5883 		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
5884 		break;
5885 	default:
5886 		return -EINVAL;
5887 	}
5888 
5889 	if (err)
5890 		return err;
5891 
5892 	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
5893 
5894 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
5895 			   timeout);
5896 	return 0;
5897 }
5898 
5899 static void hci_suspend_monitor_sync(struct hci_dev *hdev)
5900 {
5901 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
5902 	case HCI_ADV_MONITOR_EXT_MSFT:
5903 		msft_suspend_sync(hdev);
5904 		break;
5905 	default:
5906 		return;
5907 	}
5908 }
5909 
5910 /* This function disables discovery and marks it as paused */
5911 static int hci_pause_discovery_sync(struct hci_dev *hdev)
5912 {
5913 	int old_state = hdev->discovery.state;
5914 	int err;
5915 
5916 	/* If discovery already stopped/stopping/paused, there is nothing to do */
5917 	if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING ||
5918 	    hdev->discovery_paused)
5919 		return 0;
5920 
5921 	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
5922 	err = hci_stop_discovery_sync(hdev);
5923 	if (err)
5924 		return err;
5925 
5926 	hdev->discovery_paused = true;
5927 	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
5928 
5929 	return 0;
5930 }
5931 
5932 static int hci_update_event_filter_sync(struct hci_dev *hdev)
5933 {
5934 	struct bdaddr_list_with_flags *b;
5935 	u8 scan = SCAN_DISABLED;
5936 	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
5937 	int err;
5938 
5939 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
5940 		return 0;
5941 
5942 	/* Some fake CSR controllers lock up after setting this type of
5943 	 * filter, so avoid sending the request altogether.
5944 	 */
5945 	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
5946 		return 0;
5947 
5948 	/* Always clear event filter when starting */
5949 	hci_clear_event_filter_sync(hdev);
5950 
5951 	list_for_each_entry(b, &hdev->accept_list, list) {
5952 		if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
5953 			continue;
5954 
5955 		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
5956 
5957 		err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
5958 						 HCI_CONN_SETUP_ALLOW_BDADDR,
5959 						 &b->bdaddr,
5960 						 HCI_CONN_SETUP_AUTO_ON);
5961 		if (err)
5962 			bt_dev_dbg(hdev, "Failed to set event filter for %pMR",
5963 				   &b->bdaddr);
5964 		else
5965 			scan = SCAN_PAGE;
5966 	}
5967 
5968 	if (scan && !scanning)
5969 		hci_write_scan_enable_sync(hdev, scan);
5970 	else if (!scan && scanning)
5971 		hci_write_scan_enable_sync(hdev, scan);
5972 
5973 	return 0;
5974 }
5975 
5976 /* This function disables scanning (BR and LE) and marks it as paused */
5977 static int hci_pause_scan_sync(struct hci_dev *hdev)
5978 {
5979 	if (hdev->scanning_paused)
5980 		return 0;
5981 
5982 	/* Disable page scan if enabled */
5983 	if (test_bit(HCI_PSCAN, &hdev->flags))
5984 		hci_write_scan_enable_sync(hdev, SCAN_DISABLED);
5985 
5986 	hci_scan_disable_sync(hdev);
5987 
5988 	hdev->scanning_paused = true;
5989 
5990 	return 0;
5991 }
5992 
5993 /* This function performs the HCI suspend procedures in the following order:
5994  *
5995  * Pause discovery (active scanning/inquiry)
5996  * Pause Directed Advertising/Advertising
5997  * Pause Scanning (passive scanning in case discovery was not active)
5998  * Disconnect all connections
5999  * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wake up,
6000  * otherwise:
6001  * Update event mask (only set events that are allowed to wake up the host)
6002  * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP)
6003  * Update passive scanning (lower duty cycle)
6004  * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE
6005  */
6006 int hci_suspend_sync(struct hci_dev *hdev)
6007 {
6008 	int err;
6009 
6010 	/* If marked as suspended there is nothing to do */
6011 	if (hdev->suspended)
6012 		return 0;
6013 
6014 	/* Mark device as suspended */
6015 	hdev->suspended = true;
6016 
6017 	/* Pause discovery if not already stopped */
6018 	hci_pause_discovery_sync(hdev);
6019 
6020 	/* Pause other advertisements */
6021 	hci_pause_advertising_sync(hdev);
6022 
6023 	/* Suspend monitor filters */
6024 	hci_suspend_monitor_sync(hdev);
6025 
6026 	/* Prevent disconnects from causing scanning to be re-enabled */
6027 	hci_pause_scan_sync(hdev);
6028 
6029 	if (hci_conn_count(hdev)) {
6030 		/* Soft disconnect everything (power off) */
6031 		err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
6032 		if (err) {
6033 			/* Set state to BT_RUNNING so resume doesn't notify */
6034 			hdev->suspend_state = BT_RUNNING;
6035 			hci_resume_sync(hdev);
6036 			return err;
6037 		}
6038 
6039 		/* Update event mask so only the allowed event can wakeup the
6040 		 * host.
6041 		 */
6042 		hci_set_event_mask_sync(hdev);
6043 	}
6044 
6045 	/* Only configure accept list if disconnect succeeded and wake
6046 	 * isn't being prevented.
6047 	 */
6048 	if (!hdev->wakeup || !hdev->wakeup(hdev)) {
6049 		hdev->suspend_state = BT_SUSPEND_DISCONNECT;
6050 		return 0;
6051 	}
6052 
6053 	/* Unpause to take care of updating scanning params */
6054 	hdev->scanning_paused = false;
6055 
6056 	/* Enable event filter for paired devices */
6057 	hci_update_event_filter_sync(hdev);
6058 
6059 	/* Update LE passive scan if enabled */
6060 	hci_update_passive_scan_sync(hdev);
6061 
6062 	/* Pause scan changes again. */
6063 	hdev->scanning_paused = true;
6064 
6065 	hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE;
6066 
6067 	return 0;
6068 }
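
/* Editor's sketch: in the wider stack these entry points are driven
 * through the cmd_sync machinery; the PM notifier below is an assumption
 * for illustration only and is not how this file wires things up. It
 * relies on PM_SUSPEND_PREPARE/PM_POST_SUSPEND from <linux/suspend.h>.
 */
static int example_pm_notifier(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct hci_dev *hdev = container_of(nb, struct hci_dev,
					    suspend_notifier);

	switch (action) {
	case PM_SUSPEND_PREPARE:
		return hci_suspend_sync(hdev) ? NOTIFY_BAD : NOTIFY_DONE;
	case PM_POST_SUSPEND:
		hci_resume_sync(hdev);
		return NOTIFY_DONE;
	}

	return NOTIFY_DONE;
}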
6069 
6070 /* This function resumes discovery */
6071 static int hci_resume_discovery_sync(struct hci_dev *hdev)
6072 {
6073 	int err;
6074 
6075 	/* If discovery was not paused there is nothing to do */
6076 	if (!hdev->discovery_paused)
6077 		return 0;
6078 
6079 	hdev->discovery_paused = false;
6080 
6081 	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
6082 
6083 	err = hci_start_discovery_sync(hdev);
6084 
6085 	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
6086 				DISCOVERY_FINDING);
6087 
6088 	return err;
6089 }
6090 
6091 static void hci_resume_monitor_sync(struct hci_dev *hdev)
6092 {
6093 	switch (hci_get_adv_monitor_offload_ext(hdev)) {
6094 	case HCI_ADV_MONITOR_EXT_MSFT:
6095 		msft_resume_sync(hdev);
6096 		break;
6097 	default:
6098 		return;
6099 	}
6100 }
6101 
6102 /* This function resumes scanning and resets the paused flag */
6103 static int hci_resume_scan_sync(struct hci_dev *hdev)
6104 {
6105 	if (!hdev->scanning_paused)
6106 		return 0;
6107 
6108 	hdev->scanning_paused = false;
6109 
6110 	hci_update_scan_sync(hdev);
6111 
6112 	/* Reset passive scanning to normal */
6113 	hci_update_passive_scan_sync(hdev);
6114 
6115 	return 0;
6116 }
6117 
6118 /* This function performs the HCI resume procedures in the following order:
6119  *
6120  * Restore event mask
6121  * Clear event filter
6122  * Update passive scanning (normal duty cycle)
6123  * Resume Directed Advertising/Advertising
6124  * Resume discovery (active scanning/inquiry)
6125  */
6126 int hci_resume_sync(struct hci_dev *hdev)
6127 {
6128 	/* If not marked as suspended there is nothing to do */
6129 	if (!hdev->suspended)
6130 		return 0;
6131 
6132 	hdev->suspended = false;
6133 
6134 	/* Restore event mask */
6135 	hci_set_event_mask_sync(hdev);
6136 
6137 	/* Clear any event filters and restore scan state */
6138 	hci_clear_event_filter_sync(hdev);
6139 
6140 	/* Resume scanning */
6141 	hci_resume_scan_sync(hdev);
6142 
6143 	/* Resume monitor filters */
6144 	hci_resume_monitor_sync(hdev);
6145 
6146 	/* Resume other advertisements */
6147 	hci_resume_advertising_sync(hdev);
6148 
6149 	/* Resume discovery */
6150 	hci_resume_discovery_sync(hdev);
6151 
6152 	return 0;
6153 }
6154 
6155 static bool conn_use_rpa(struct hci_conn *conn)
6156 {
6157 	struct hci_dev *hdev = conn->hdev;
6158 
6159 	return hci_dev_test_flag(hdev, HCI_PRIVACY);
6160 }
6161 
6162 static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
6163 						struct hci_conn *conn)
6164 {
6165 	struct hci_cp_le_set_ext_adv_params cp;
6166 	int err;
6167 	bdaddr_t random_addr;
6168 	u8 own_addr_type;
6169 
6170 	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6171 					     &own_addr_type);
6172 	if (err)
6173 		return err;
6174 
6175 	/* Set require_privacy to false so that the remote device has a
6176 	 * chance of identifying us.
6177 	 */
6178 	err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
6179 				     &own_addr_type, &random_addr);
6180 	if (err)
6181 		return err;
6182 
6183 	memset(&cp, 0, sizeof(cp));
6184 
6185 	cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
6186 	cp.channel_map = hdev->le_adv_channel_map;
6187 	cp.tx_power = HCI_TX_POWER_INVALID;
6188 	cp.primary_phy = HCI_ADV_PHY_1M;
6189 	cp.secondary_phy = HCI_ADV_PHY_1M;
6190 	cp.handle = 0x00; /* Use instance 0 for directed adv */
6191 	cp.own_addr_type = own_addr_type;
6192 	cp.peer_addr_type = conn->dst_type;
6193 	bacpy(&cp.peer_addr, &conn->dst);
6194 
6195 	/* As per Core Spec 5.2, Vol 2, Part E, Sec 7.8.53, the
6196 	 * advertising_event_property LE_LEGACY_ADV_DIRECT_IND
6197 	 * does not support advertising data; when the advertising set already
6198 	 * contains some, the controller shall return the error code Invalid
6199 	 * HCI Command Parameters (0x12).
6200 	 * So the adv set for handle 0x00 has to be removed first, since we
6201 	 * use instance 0 for directed adv.
6202 	 */
6203 	err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL);
6204 	if (err)
6205 		return err;
6206 
6207 	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
6208 				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6209 	if (err)
6210 		return err;
6211 
6212 	/* Check if the random address needs to be updated */
6213 	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
6214 	    bacmp(&random_addr, BDADDR_ANY) &&
6215 	    bacmp(&random_addr, &hdev->random_addr)) {
6216 		err = hci_set_adv_set_random_addr_sync(hdev, 0x00,
6217 						       &random_addr);
6218 		if (err)
6219 			return err;
6220 	}
6221 
6222 	return hci_enable_ext_advertising_sync(hdev, 0x00);
6223 }
6224 
6225 static int hci_le_directed_advertising_sync(struct hci_dev *hdev,
6226 					    struct hci_conn *conn)
6227 {
6228 	struct hci_cp_le_set_adv_param cp;
6229 	u8 status;
6230 	u8 own_addr_type;
6231 	u8 enable;
6232 
6233 	if (ext_adv_capable(hdev))
6234 		return hci_le_ext_directed_advertising_sync(hdev, conn);
6235 
6236 	/* Clear the HCI_LE_ADV bit temporarily so that the
6237 	 * hci_update_random_address knows that it's safe to go ahead
6238 	 * and write a new random address. The flag will be set back on
6239 	 * as soon as the SET_ADV_ENABLE HCI command completes.
6240 	 */
6241 	hci_dev_clear_flag(hdev, HCI_LE_ADV);
6242 
6243 	/* Set require_privacy to false so that the remote device has a
6244 	 * chance of identifying us.
6245 	 */
6246 	status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6247 						&own_addr_type);
6248 	if (status)
6249 		return status;
6250 
6251 	memset(&cp, 0, sizeof(cp));
6252 
6253 	/* Some controllers might reject command if intervals are not
6254 	 * within range for undirected advertising.
6255 	 * BCM20702A0 is known to be affected by this.
6256 	 */
6257 	cp.min_interval = cpu_to_le16(0x0020);
6258 	cp.max_interval = cpu_to_le16(0x0020);
6259 
6260 	cp.type = LE_ADV_DIRECT_IND;
6261 	cp.own_address_type = own_addr_type;
6262 	cp.direct_addr_type = conn->dst_type;
6263 	bacpy(&cp.direct_addr, &conn->dst);
6264 	cp.channel_map = hdev->le_adv_channel_map;
6265 
6266 	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
6267 				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6268 	if (status)
6269 		return status;
6270 
6271 	enable = 0x01;
6272 
6273 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
6274 				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
6275 }
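
/* Editor's note: min_interval/max_interval above are in 0.625 ms units,
 * so 0x0020 is 32 * 0.625 ms = 20 ms. A hypothetical conversion helper:
 */
static inline __le16 example_adv_interval_from_ms(unsigned int ms)
{
	/* interval = ms / 0.625 ms = ms * 8 / 5 */
	return cpu_to_le16(ms * 8 / 5);
}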
6276 
6277 static void set_ext_conn_params(struct hci_conn *conn,
6278 				struct hci_cp_le_ext_conn_param *p)
6279 {
6280 	struct hci_dev *hdev = conn->hdev;
6281 
6282 	memset(p, 0, sizeof(*p));
6283 
6284 	p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
6285 	p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
6286 	p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
6287 	p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
6288 	p->conn_latency = cpu_to_le16(conn->le_conn_latency);
6289 	p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
6290 	p->min_ce_len = cpu_to_le16(0x0000);
6291 	p->max_ce_len = cpu_to_le16(0x0000);
6292 }
6293 
6294 static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
6295 				       struct hci_conn *conn, u8 own_addr_type)
6296 {
6297 	struct hci_cp_le_ext_create_conn *cp;
6298 	struct hci_cp_le_ext_conn_param *p;
6299 	u8 data[sizeof(*cp) + sizeof(*p) * 3];
6300 	u32 plen;
6301 
6302 	cp = (void *)data;
6303 	p = (void *)cp->data;
6304 
6305 	memset(cp, 0, sizeof(*cp));
6306 
6307 	bacpy(&cp->peer_addr, &conn->dst);
6308 	cp->peer_addr_type = conn->dst_type;
6309 	cp->own_addr_type = own_addr_type;
6310 
6311 	plen = sizeof(*cp);
6312 
6313 	if (scan_1m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_1M ||
6314 			      conn->le_adv_sec_phy == HCI_ADV_PHY_1M)) {
6315 		cp->phys |= LE_SCAN_PHY_1M;
6316 		set_ext_conn_params(conn, p);
6317 
6318 		p++;
6319 		plen += sizeof(*p);
6320 	}
6321 
6322 	if (scan_2m(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_2M ||
6323 			      conn->le_adv_sec_phy == HCI_ADV_PHY_2M)) {
6324 		cp->phys |= LE_SCAN_PHY_2M;
6325 		set_ext_conn_params(conn, p);
6326 
6327 		p++;
6328 		plen += sizeof(*p);
6329 	}
6330 
6331 	if (scan_coded(hdev) && (conn->le_adv_phy == HCI_ADV_PHY_CODED ||
6332 				 conn->le_adv_sec_phy == HCI_ADV_PHY_CODED)) {
6333 		cp->phys |= LE_SCAN_PHY_CODED;
6334 		set_ext_conn_params(conn, p);
6335 
6336 		plen += sizeof(*p);
6337 	}
6338 
6339 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
6340 					plen, data,
6341 					HCI_EV_LE_ENHANCED_CONN_COMPLETE,
6342 					conn->conn_timeout, NULL);
6343 }
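
/* Editor's note: the command built above carries one
 * hci_cp_le_ext_conn_param block per PHY bit set in cp->phys, appended
 * in 1M, 2M, Coded order, which is why plen grows inside each branch.
 * An illustrative size computation (hweight8() counts the set bits):
 */
static u32 example_ext_create_conn_plen(u8 phys)
{
	return sizeof(struct hci_cp_le_ext_create_conn) +
	       hweight8(phys) * sizeof(struct hci_cp_le_ext_conn_param);
}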
6344 
6345 static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
6346 {
6347 	struct hci_cp_le_create_conn cp;
6348 	struct hci_conn_params *params;
6349 	u8 own_addr_type;
6350 	int err;
6351 	struct hci_conn *conn = data;
6352 
6353 	if (!hci_conn_valid(hdev, conn))
6354 		return -ECANCELED;
6355 
6356 	bt_dev_dbg(hdev, "conn %p", conn);
6357 
6358 	clear_bit(HCI_CONN_SCANNING, &conn->flags);
6359 	conn->state = BT_CONNECT;
6360 
6361 	/* If requested to connect as peripheral, use directed advertising */
6362 	if (conn->role == HCI_ROLE_SLAVE) {
6363 		/* If we're active scanning and simultaneous roles is not
6364 		 * enabled, simply reject the attempt.
6365 		 */
6366 		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6367 		    hdev->le_scan_type == LE_SCAN_ACTIVE &&
6368 		    !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) {
6369 			hci_conn_del(conn);
6370 			return -EBUSY;
6371 		}
6372 
6373 		/* Pause advertising while doing directed advertising. */
6374 		hci_pause_advertising_sync(hdev);
6375 
6376 		err = hci_le_directed_advertising_sync(hdev, conn);
6377 		goto done;
6378 	}
6379 
6380 	/* Disable advertising if simultaneous roles is not in use. */
6381 	if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
6382 		hci_pause_advertising_sync(hdev);
6383 
6384 	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
6385 	if (params) {
6386 		conn->le_conn_min_interval = params->conn_min_interval;
6387 		conn->le_conn_max_interval = params->conn_max_interval;
6388 		conn->le_conn_latency = params->conn_latency;
6389 		conn->le_supv_timeout = params->supervision_timeout;
6390 	} else {
6391 		conn->le_conn_min_interval = hdev->le_conn_min_interval;
6392 		conn->le_conn_max_interval = hdev->le_conn_max_interval;
6393 		conn->le_conn_latency = hdev->le_conn_latency;
6394 		conn->le_supv_timeout = hdev->le_supv_timeout;
6395 	}
6396 
6397 	/* If controller is scanning, we stop it since some controllers are
6398 	 * not able to scan and connect at the same time. Also set the
6399 	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
6400 	 * handler for scan disabling knows to set the correct discovery
6401 	 * state.
6402 	 */
6403 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
6404 		hci_scan_disable_sync(hdev);
6405 		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
6406 	}
6407 
6408 	/* Update random address, but set require_privacy to false so
6409 	 * that we never connect with a non-resolvable address.
6410 	 */
6411 	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
6412 					     &own_addr_type);
6413 	if (err)
6414 		goto done;
6415 
6416 	if (use_ext_conn(hdev)) {
6417 		err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
6418 		goto done;
6419 	}
6420 
6421 	memset(&cp, 0, sizeof(cp));
6422 
6423 	cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
6424 	cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
6425 
6426 	bacpy(&cp.peer_addr, &conn->dst);
6427 	cp.peer_addr_type = conn->dst_type;
6428 	cp.own_address_type = own_addr_type;
6429 	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
6430 	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
6431 	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
6432 	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
6433 	cp.min_ce_len = cpu_to_le16(0x0000);
6434 	cp.max_ce_len = cpu_to_le16(0x0000);
6435 
6436 	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261:
6437 	 *
6438 	 * If this event is unmasked and the HCI_LE_Connection_Complete event
6439 	 * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is
6440 	 * sent when a new connection has been created.
6441 	 */
6442 	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
6443 				       sizeof(cp), &cp,
6444 				       use_enhanced_conn_complete(hdev) ?
6445 				       HCI_EV_LE_ENHANCED_CONN_COMPLETE :
6446 				       HCI_EV_LE_CONN_COMPLETE,
6447 				       conn->conn_timeout, NULL);
6448 
6449 done:
6450 	if (err == -ETIMEDOUT)
6451 		hci_le_connect_cancel_sync(hdev, conn, 0x00);
6452 
6453 	/* Re-enable advertising after the connection attempt is finished. */
6454 	hci_resume_advertising_sync(hdev);
6455 	return err;
6456 }
6457 
6458 int hci_le_create_cis_sync(struct hci_dev *hdev)
6459 {
6460 	DEFINE_FLEX(struct hci_cp_le_create_cis, cmd, cis, num_cis, 0x1f);
6461 	size_t aux_num_cis = 0;
6462 	struct hci_conn *conn;
6463 	u8 cig = BT_ISO_QOS_CIG_UNSET;
6464 
6465 	/* The spec allows only one pending LE Create CIS command at a time. If
6466 	 * the command is pending now, don't do anything. We check for pending
6467 	 * connections after each CIS Established event.
6468 	 *
6469 	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
6470 	 * page 2566:
6471 	 *
6472 	 * If the Host issues this command before all the
6473 	 * HCI_LE_CIS_Established events from the previous use of the
6474 	 * command have been generated, the Controller shall return the
6475 	 * error code Command Disallowed (0x0C).
6476 	 *
6477 	 * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E
6478 	 * page 2567:
6479 	 *
6480 	 * When the Controller receives the HCI_LE_Create_CIS command, the
6481 	 * Controller sends the HCI_Command_Status event to the Host. An
6482 	 * HCI_LE_CIS_Established event will be generated for each CIS when it
6483 	 * is established or if it is disconnected or considered lost before
6484 	 * being established; until all the events are generated, the command
6485 	 * remains pending.
6486 	 */
6487 
6488 	hci_dev_lock(hdev);
6489 
6490 	rcu_read_lock();
6491 
6492 	/* Wait until previous Create CIS has completed */
6493 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6494 		if (test_bit(HCI_CONN_CREATE_CIS, &conn->flags))
6495 			goto done;
6496 	}
6497 
6498 	/* Find CIG with all CIS ready */
6499 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6500 		struct hci_conn *link;
6501 
6502 		if (hci_conn_check_create_cis(conn))
6503 			continue;
6504 
6505 		cig = conn->iso_qos.ucast.cig;
6506 
6507 		list_for_each_entry_rcu(link, &hdev->conn_hash.list, list) {
6508 			if (hci_conn_check_create_cis(link) > 0 &&
6509 			    link->iso_qos.ucast.cig == cig &&
6510 			    link->state != BT_CONNECTED) {
6511 				cig = BT_ISO_QOS_CIG_UNSET;
6512 				break;
6513 			}
6514 		}
6515 
6516 		if (cig != BT_ISO_QOS_CIG_UNSET)
6517 			break;
6518 	}
6519 
6520 	if (cig == BT_ISO_QOS_CIG_UNSET)
6521 		goto done;
6522 
6523 	list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) {
6524 		struct hci_cis *cis = &cmd->cis[aux_num_cis];
6525 
6526 		if (hci_conn_check_create_cis(conn) ||
6527 		    conn->iso_qos.ucast.cig != cig)
6528 			continue;
6529 
6530 		set_bit(HCI_CONN_CREATE_CIS, &conn->flags);
6531 		cis->acl_handle = cpu_to_le16(conn->parent->handle);
6532 		cis->cis_handle = cpu_to_le16(conn->handle);
6533 		aux_num_cis++;
6534 
6535 		if (aux_num_cis >= cmd->num_cis)
6536 			break;
6537 	}
6538 	cmd->num_cis = aux_num_cis;
6539 
6540 done:
6541 	rcu_read_unlock();
6542 
6543 	hci_dev_unlock(hdev);
6544 
6545 	if (!aux_num_cis)
6546 		return 0;
6547 
6548 	/* Wait for HCI_LE_CIS_Established */
6549 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS,
6550 					struct_size(cmd, cis, cmd->num_cis),
6551 					cmd, HCI_EVT_LE_CIS_ESTABLISHED,
6552 					conn->conn_timeout, NULL);
6553 }
6554 
6555 int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle)
6556 {
6557 	struct hci_cp_le_remove_cig cp;
6558 
6559 	memset(&cp, 0, sizeof(cp));
6560 	cp.cig_id = handle;
6561 
6562 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp),
6563 				     &cp, HCI_CMD_TIMEOUT);
6564 }
6565 
6566 int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle)
6567 {
6568 	struct hci_cp_le_big_term_sync cp;
6569 
6570 	memset(&cp, 0, sizeof(cp));
6571 	cp.handle = handle;
6572 
6573 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC,
6574 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6575 }
6576 
6577 int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle)
6578 {
6579 	struct hci_cp_le_pa_term_sync cp;
6580 
6581 	memset(&cp, 0, sizeof(cp));
6582 	cp.handle = cpu_to_le16(handle);
6583 
6584 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC,
6585 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6586 }
6587 
6588 int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
6589 			   bool use_rpa, struct adv_info *adv_instance,
6590 			   u8 *own_addr_type, bdaddr_t *rand_addr)
6591 {
6592 	int err;
6593 
6594 	bacpy(rand_addr, BDADDR_ANY);
6595 
6596 	/* If privacy is enabled use a resolvable private address. If
6597 	 * current RPA has expired then generate a new one.
6598 	 */
6599 	if (use_rpa) {
6600 		/* If the controller supports LL Privacy, use own address
6601 		 * type 0x03 (resolvable private address)
6602 		 */
6603 		if (use_ll_privacy(hdev))
6604 			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
6605 		else
6606 			*own_addr_type = ADDR_LE_DEV_RANDOM;
6607 
6608 		if (adv_instance) {
6609 			if (adv_rpa_valid(adv_instance))
6610 				return 0;
6611 		} else {
6612 			if (rpa_valid(hdev))
6613 				return 0;
6614 		}
6615 
6616 		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
6617 		if (err < 0) {
6618 			bt_dev_err(hdev, "failed to generate new RPA");
6619 			return err;
6620 		}
6621 
6622 		bacpy(rand_addr, &hdev->rpa);
6623 
6624 		return 0;
6625 	}
6626 
6627 	/* In case of required privacy without resolvable private address,
6628 	 * use a non-resolvable private address. This is useful for
6629 	 * non-connectable advertising.
6630 	 */
6631 	if (require_privacy) {
6632 		bdaddr_t nrpa;
6633 
6634 		while (true) {
6635 			/* The non-resolvable private address is generated
6636 			 * from six random bytes with the two most significant
6637 			 * bits cleared.
6638 			 */
6639 			get_random_bytes(&nrpa, 6);
6640 			nrpa.b[5] &= 0x3f;
6641 
6642 			/* The non-resolvable private address shall not be
6643 			 * equal to the public address.
6644 			 */
6645 			if (bacmp(&hdev->bdaddr, &nrpa))
6646 				break;
6647 		}
6648 
6649 		*own_addr_type = ADDR_LE_DEV_RANDOM;
6650 		bacpy(rand_addr, &nrpa);
6651 
6652 		return 0;
6653 	}
6654 
6655 	/* No privacy so use a public address. */
6656 	*own_addr_type = ADDR_LE_DEV_PUBLIC;
6657 
6658 	return 0;
6659 }
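
/* Editor's note: the two most significant bits of a random address
 * select its subtype (Core Spec Vol 6, Part B, Sec 1.3.2): 0b00
 * non-resolvable, 0b01 resolvable, 0b11 static. Since bdaddr_t stores
 * the address little-endian, those bits live in b[5], which is why the
 * NRPA loop above masks with 0x3f. An illustrative classifier:
 */
static const char *example_random_addr_subtype(const bdaddr_t *addr)
{
	switch (addr->b[5] >> 6) {
	case 0x00:
		return "non-resolvable private";
	case 0x01:
		return "resolvable private";
	case 0x03:
		return "static random";
	}

	return "reserved";
}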
6660 
6661 static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
6662 {
6663 	u8 instance = PTR_UINT(data);
6664 
6665 	return hci_update_adv_data_sync(hdev, instance);
6666 }
6667 
6668 int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
6669 {
6670 	return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
6671 				  UINT_PTR(instance), NULL);
6672 }
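
/* Editor's note: UINT_PTR()/PTR_UINT() stash a small integer in the
 * opaque void *data cookie, so no heap allocation is needed for one
 * byte of context: pack with UINT_PTR() at queue time, unpack with
 * PTR_UINT() in the sync callback. They are defined in hci_core.h
 * along these lines (the EXAMPLE_ names are illustrative):
 */
#define EXAMPLE_UINT_PTR(_val)	((void *)((uintptr_t)(_val)))
#define EXAMPLE_PTR_UINT(_ptr)	((uintptr_t)((void *)(_ptr)))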
6673 
6674 static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data)
6675 {
6676 	struct hci_conn *conn = data;
6677 	struct inquiry_entry *ie;
6678 	struct hci_cp_create_conn cp;
6679 	int err;
6680 
6681 	if (!hci_conn_valid(hdev, conn))
6682 		return -ECANCELED;
6683 
6684 	/* Many controllers disallow HCI Create Connection while they are doing
6685 	 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
6686 	 * Connection. This may cause the MGMT discovering state to become false
6687 	 * without user space's request but it is okay since the MGMT Discovery
6688 	 * APIs do not promise that discovery should be done forever. Instead,
6689 	 * the user space monitors the status of MGMT discovering and it may
6690 	 * request for discovery again when this flag becomes false.
6691 	 */
6692 	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
6693 		err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0,
6694 					    NULL, HCI_CMD_TIMEOUT);
6695 		if (err)
6696 			bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
6697 	}
6698 
6699 	conn->state = BT_CONNECT;
6700 	conn->out = true;
6701 	conn->role = HCI_ROLE_MASTER;
6702 
6703 	conn->attempt++;
6704 
6705 	conn->link_policy = hdev->link_policy;
6706 
6707 	memset(&cp, 0, sizeof(cp));
6708 	bacpy(&cp.bdaddr, &conn->dst);
6709 	cp.pscan_rep_mode = 0x02;
6710 
6711 	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
6712 	if (ie) {
6713 		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
6714 			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
6715 			cp.pscan_mode     = ie->data.pscan_mode;
6716 			cp.clock_offset   = ie->data.clock_offset |
6717 					    cpu_to_le16(0x8000);
6718 		}
6719 
6720 		memcpy(conn->dev_class, ie->data.dev_class, 3);
6721 	}
6722 
6723 	cp.pkt_type = cpu_to_le16(conn->pkt_type);
6724 	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
6725 		cp.role_switch = 0x01;
6726 	else
6727 		cp.role_switch = 0x00;
6728 
6729 	return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN,
6730 					sizeof(cp), &cp,
6731 					HCI_EV_CONN_COMPLETE,
6732 					conn->conn_timeout, NULL);
6733 }
6734 
6735 int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn)
6736 {
6737 	return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn,
6738 				       NULL);
6739 }
6740 
6741 static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
6742 {
6743 	struct hci_conn *conn = data;
6744 
6745 	bt_dev_dbg(hdev, "err %d", err);
6746 
6747 	if (err == -ECANCELED)
6748 		return;
6749 
6750 	hci_dev_lock(hdev);
6751 
6752 	if (!hci_conn_valid(hdev, conn))
6753 		goto done;
6754 
6755 	if (!err) {
6756 		hci_connect_le_scan_cleanup(conn, 0x00);
6757 		goto done;
6758 	}
6759 
6760 	/* Check if connection is still pending */
6761 	if (conn != hci_lookup_le_connect(hdev))
6762 		goto done;
6763 
6764 	/* Flush to make sure we send create conn cancel command if needed */
6765 	flush_delayed_work(&conn->le_conn_timeout);
6766 	hci_conn_failed(conn, bt_status(err));
6767 
6768 done:
6769 	hci_dev_unlock(hdev);
6770 }
6771 
6772 int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn)
6773 {
6774 	return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
6775 				       create_le_conn_complete);
6776 }
6777 
6778 int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
6779 {
6780 	if (conn->state != BT_OPEN)
6781 		return -EINVAL;
6782 
6783 	switch (conn->type) {
6784 	case ACL_LINK:
6785 		return !hci_cmd_sync_dequeue_once(hdev,
6786 						  hci_acl_create_conn_sync,
6787 						  conn, NULL);
6788 	case LE_LINK:
6789 		return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync,
6790 						  conn, create_le_conn_complete);
6791 	}
6792 
6793 	return -ENOENT;
6794 }
6795 
6796 int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn,
6797 			    struct hci_conn_params *params)
6798 {
6799 	struct hci_cp_le_conn_update cp;
6800 
6801 	memset(&cp, 0, sizeof(cp));
6802 	cp.handle		= cpu_to_le16(conn->handle);
6803 	cp.conn_interval_min	= cpu_to_le16(params->conn_min_interval);
6804 	cp.conn_interval_max	= cpu_to_le16(params->conn_max_interval);
6805 	cp.conn_latency		= cpu_to_le16(params->conn_latency);
6806 	cp.supervision_timeout	= cpu_to_le16(params->supervision_timeout);
6807 	cp.min_ce_len		= cpu_to_le16(0x0000);
6808 	cp.max_ce_len		= cpu_to_le16(0x0000);
6809 
6810 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE,
6811 				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
6812 }
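
/* Editor's note on units for the parameters above:
 * conn_interval_{min,max} are in 1.25 ms steps, conn_latency counts
 * connection events the peripheral may skip, and supervision_timeout is
 * in 10 ms steps. A hypothetical helper requesting a 30-50 ms interval
 * with a 4 s timeout:
 */
static void example_fill_conn_params(struct hci_conn_params *params)
{
	params->conn_min_interval = 0x0018;	/* 24 * 1.25 ms = 30 ms */
	params->conn_max_interval = 0x0028;	/* 40 * 1.25 ms = 50 ms */
	params->conn_latency = 0x0000;		/* no skipped events */
	params->supervision_timeout = 0x0190;	/* 400 * 10 ms = 4 s */
}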
6813