1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * BlueZ - Bluetooth protocol stack for Linux 4 * 5 * Copyright (C) 2021 Intel Corporation 6 */ 7 8 #include <linux/property.h> 9 10 #include <net/bluetooth/bluetooth.h> 11 #include <net/bluetooth/hci_core.h> 12 #include <net/bluetooth/mgmt.h> 13 14 #include "hci_request.h" 15 #include "hci_codec.h" 16 #include "hci_debugfs.h" 17 #include "smp.h" 18 #include "eir.h" 19 #include "msft.h" 20 #include "aosp.h" 21 #include "leds.h" 22 23 static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, 24 struct sk_buff *skb) 25 { 26 bt_dev_dbg(hdev, "result 0x%2.2x", result); 27 28 if (hdev->req_status != HCI_REQ_PEND) 29 return; 30 31 hdev->req_result = result; 32 hdev->req_status = HCI_REQ_DONE; 33 34 if (skb) { 35 struct sock *sk = hci_skb_sk(skb); 36 37 /* Drop sk reference if set */ 38 if (sk) 39 sock_put(sk); 40 41 hdev->req_skb = skb_get(skb); 42 } 43 44 wake_up_interruptible(&hdev->req_wait_q); 45 } 46 47 static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, 48 u32 plen, const void *param, 49 struct sock *sk) 50 { 51 int len = HCI_COMMAND_HDR_SIZE + plen; 52 struct hci_command_hdr *hdr; 53 struct sk_buff *skb; 54 55 skb = bt_skb_alloc(len, GFP_ATOMIC); 56 if (!skb) 57 return NULL; 58 59 hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE); 60 hdr->opcode = cpu_to_le16(opcode); 61 hdr->plen = plen; 62 63 if (plen) 64 skb_put_data(skb, param, plen); 65 66 bt_dev_dbg(hdev, "skb len %d", skb->len); 67 68 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; 69 hci_skb_opcode(skb) = opcode; 70 71 /* Grab a reference if command needs to be associated with a sock (e.g. 72 * likely mgmt socket that initiated the command). 73 */ 74 if (sk) { 75 hci_skb_sk(skb) = sk; 76 sock_hold(sk); 77 } 78 79 return skb; 80 } 81 82 static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen, 83 const void *param, u8 event, struct sock *sk) 84 { 85 struct hci_dev *hdev = req->hdev; 86 struct sk_buff *skb; 87 88 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); 89 90 /* If an error occurred during request building, there is no point in 91 * queueing the HCI command. We can simply return. 92 */ 93 if (req->err) 94 return; 95 96 skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk); 97 if (!skb) { 98 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", 99 opcode); 100 req->err = -ENOMEM; 101 return; 102 } 103 104 if (skb_queue_empty(&req->cmd_q)) 105 bt_cb(skb)->hci.req_flags |= HCI_REQ_START; 106 107 hci_skb_event(skb) = event; 108 109 skb_queue_tail(&req->cmd_q, skb); 110 } 111 112 static int hci_cmd_sync_run(struct hci_request *req) 113 { 114 struct hci_dev *hdev = req->hdev; 115 struct sk_buff *skb; 116 unsigned long flags; 117 118 bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q)); 119 120 /* If an error occurred during request building, remove all HCI 121 * commands queued on the HCI request queue. 
122 */ 123 if (req->err) { 124 skb_queue_purge(&req->cmd_q); 125 return req->err; 126 } 127 128 /* Do not allow empty requests */ 129 if (skb_queue_empty(&req->cmd_q)) 130 return -ENODATA; 131 132 skb = skb_peek_tail(&req->cmd_q); 133 bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete; 134 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB; 135 136 spin_lock_irqsave(&hdev->cmd_q.lock, flags); 137 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); 138 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); 139 140 queue_work(hdev->workqueue, &hdev->cmd_work); 141 142 return 0; 143 } 144 145 /* This function requires the caller holds hdev->req_lock. */ 146 struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, 147 const void *param, u8 event, u32 timeout, 148 struct sock *sk) 149 { 150 struct hci_request req; 151 struct sk_buff *skb; 152 int err = 0; 153 154 bt_dev_dbg(hdev, "Opcode 0x%4x", opcode); 155 156 hci_req_init(&req, hdev); 157 158 hci_cmd_sync_add(&req, opcode, plen, param, event, sk); 159 160 hdev->req_status = HCI_REQ_PEND; 161 162 err = hci_cmd_sync_run(&req); 163 if (err < 0) 164 return ERR_PTR(err); 165 166 err = wait_event_interruptible_timeout(hdev->req_wait_q, 167 hdev->req_status != HCI_REQ_PEND, 168 timeout); 169 170 if (err == -ERESTARTSYS) 171 return ERR_PTR(-EINTR); 172 173 switch (hdev->req_status) { 174 case HCI_REQ_DONE: 175 err = -bt_to_errno(hdev->req_result); 176 break; 177 178 case HCI_REQ_CANCELED: 179 err = -hdev->req_result; 180 break; 181 182 default: 183 err = -ETIMEDOUT; 184 break; 185 } 186 187 hdev->req_status = 0; 188 hdev->req_result = 0; 189 skb = hdev->req_skb; 190 hdev->req_skb = NULL; 191 192 bt_dev_dbg(hdev, "end: err %d", err); 193 194 if (err < 0) { 195 kfree_skb(skb); 196 return ERR_PTR(err); 197 } 198 199 return skb; 200 } 201 EXPORT_SYMBOL(__hci_cmd_sync_sk); 202 203 /* This function requires the caller holds hdev->req_lock. */ 204 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, 205 const void *param, u32 timeout) 206 { 207 return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL); 208 } 209 EXPORT_SYMBOL(__hci_cmd_sync); 210 211 /* Send HCI command and wait for command complete event */ 212 struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, 213 const void *param, u32 timeout) 214 { 215 struct sk_buff *skb; 216 217 if (!test_bit(HCI_UP, &hdev->flags)) 218 return ERR_PTR(-ENETDOWN); 219 220 bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); 221 222 hci_req_sync_lock(hdev); 223 skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout); 224 hci_req_sync_unlock(hdev); 225 226 return skb; 227 } 228 EXPORT_SYMBOL(hci_cmd_sync); 229 230 /* This function requires the caller holds hdev->req_lock. */ 231 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, 232 const void *param, u8 event, u32 timeout) 233 { 234 return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, 235 NULL); 236 } 237 EXPORT_SYMBOL(__hci_cmd_sync_ev); 238 239 /* This function requires the caller holds hdev->req_lock. 
 */
int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
			     const void *param, u8 event, u32 timeout,
			     struct sock *sk)
{
	struct sk_buff *skb;
	u8 status;

	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
	if (IS_ERR(skb)) {
		bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode,
			   PTR_ERR(skb));
		return PTR_ERR(skb);
	}

	/* If the command returns a status event, skb is set to NULL as there
	 * are no parameters; in case of failure IS_ERR(skb) is true and the
	 * actual error can be retrieved with PTR_ERR(skb).
	 */
	if (!skb)
		return 0;

	status = skb->data[0];

	kfree_skb(skb);

	return status;
}
EXPORT_SYMBOL(__hci_cmd_sync_status_sk);

int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
			  const void *param, u32 timeout)
{
	return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
					NULL);
}
EXPORT_SYMBOL(__hci_cmd_sync_status);

static void hci_cmd_sync_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);

	bt_dev_dbg(hdev, "");

	/* Dequeue all entries and run them */
	while (1) {
		struct hci_cmd_sync_work_entry *entry;

		mutex_lock(&hdev->cmd_sync_work_lock);
		entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
						 struct hci_cmd_sync_work_entry,
						 list);
		if (entry)
			list_del(&entry->list);
		mutex_unlock(&hdev->cmd_sync_work_lock);

		if (!entry)
			break;

		bt_dev_dbg(hdev, "entry %p", entry);

		if (entry->func) {
			int err;

			hci_req_sync_lock(hdev);
			err = entry->func(hdev, entry->data);
			if (entry->destroy)
				entry->destroy(hdev, entry->data, err);
			hci_req_sync_unlock(hdev);
		}

		kfree(entry);
	}
}

static void hci_cmd_sync_cancel_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);

	cancel_delayed_work_sync(&hdev->cmd_timer);
	cancel_delayed_work_sync(&hdev->ncmd_timer);
	atomic_set(&hdev->cmd_cnt, 1);

	wake_up_interruptible(&hdev->req_wait_q);
}

static int hci_scan_disable_sync(struct hci_dev *hdev);
static int scan_disable_sync(struct hci_dev *hdev, void *data)
{
	return hci_scan_disable_sync(hdev);
}

static int hci_inquiry_sync(struct hci_dev *hdev, u8 length);
static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
{
	return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN);
}

static void le_scan_disable(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	int status;

	bt_dev_dbg(hdev, "");
	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		goto _return;

	cancel_delayed_work(&hdev->le_scan_restart);

	status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: %d", status);
		goto _return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
363 * If we will resolve remote device name, do not change 364 * discovery state. 365 */ 366 367 if (hdev->discovery.type == DISCOV_TYPE_LE) 368 goto discov_stopped; 369 370 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED) 371 goto _return; 372 373 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { 374 if (!test_bit(HCI_INQUIRY, &hdev->flags) && 375 hdev->discovery.state != DISCOVERY_RESOLVING) 376 goto discov_stopped; 377 378 goto _return; 379 } 380 381 status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL); 382 if (status) { 383 bt_dev_err(hdev, "inquiry failed: status %d", status); 384 goto discov_stopped; 385 } 386 387 goto _return; 388 389 discov_stopped: 390 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 391 392 _return: 393 hci_dev_unlock(hdev); 394 } 395 396 static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, 397 u8 filter_dup); 398 static int hci_le_scan_restart_sync(struct hci_dev *hdev) 399 { 400 /* If controller is not scanning we are done. */ 401 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) 402 return 0; 403 404 if (hdev->scanning_paused) { 405 bt_dev_dbg(hdev, "Scanning is paused for suspend"); 406 return 0; 407 } 408 409 hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00); 410 return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, 411 LE_SCAN_FILTER_DUP_ENABLE); 412 } 413 414 static int le_scan_restart_sync(struct hci_dev *hdev, void *data) 415 { 416 return hci_le_scan_restart_sync(hdev); 417 } 418 419 static void le_scan_restart(struct work_struct *work) 420 { 421 struct hci_dev *hdev = container_of(work, struct hci_dev, 422 le_scan_restart.work); 423 unsigned long timeout, duration, scan_start, now; 424 int status; 425 426 bt_dev_dbg(hdev, ""); 427 428 hci_dev_lock(hdev); 429 430 status = hci_cmd_sync_queue(hdev, le_scan_restart_sync, NULL, NULL); 431 if (status) { 432 bt_dev_err(hdev, "failed to restart LE scan: status %d", 433 status); 434 goto unlock; 435 } 436 437 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) || 438 !hdev->discovery.scan_start) 439 goto unlock; 440 441 /* When the scan was started, hdev->le_scan_disable has been queued 442 * after duration from scan_start. During scan restart this job 443 * has been canceled, and we need to queue it again after proper 444 * timeout, to make sure that scan does not run indefinitely. 
445 */ 446 duration = hdev->discovery.scan_duration; 447 scan_start = hdev->discovery.scan_start; 448 now = jiffies; 449 if (now - scan_start <= duration) { 450 int elapsed; 451 452 if (now >= scan_start) 453 elapsed = now - scan_start; 454 else 455 elapsed = ULONG_MAX - scan_start + now; 456 457 timeout = duration - elapsed; 458 } else { 459 timeout = 0; 460 } 461 462 queue_delayed_work(hdev->req_workqueue, 463 &hdev->le_scan_disable, timeout); 464 465 unlock: 466 hci_dev_unlock(hdev); 467 } 468 469 static int reenable_adv_sync(struct hci_dev *hdev, void *data) 470 { 471 bt_dev_dbg(hdev, ""); 472 473 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && 474 list_empty(&hdev->adv_instances)) 475 return 0; 476 477 if (hdev->cur_adv_instance) { 478 return hci_schedule_adv_instance_sync(hdev, 479 hdev->cur_adv_instance, 480 true); 481 } else { 482 if (ext_adv_capable(hdev)) { 483 hci_start_ext_adv_sync(hdev, 0x00); 484 } else { 485 hci_update_adv_data_sync(hdev, 0x00); 486 hci_update_scan_rsp_data_sync(hdev, 0x00); 487 hci_enable_advertising_sync(hdev); 488 } 489 } 490 491 return 0; 492 } 493 494 static void reenable_adv(struct work_struct *work) 495 { 496 struct hci_dev *hdev = container_of(work, struct hci_dev, 497 reenable_adv_work); 498 int status; 499 500 bt_dev_dbg(hdev, ""); 501 502 hci_dev_lock(hdev); 503 504 status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL); 505 if (status) 506 bt_dev_err(hdev, "failed to reenable ADV: %d", status); 507 508 hci_dev_unlock(hdev); 509 } 510 511 static void cancel_adv_timeout(struct hci_dev *hdev) 512 { 513 if (hdev->adv_instance_timeout) { 514 hdev->adv_instance_timeout = 0; 515 cancel_delayed_work(&hdev->adv_instance_expire); 516 } 517 } 518 519 /* For a single instance: 520 * - force == true: The instance will be removed even when its remaining 521 * lifetime is not zero. 522 * - force == false: the instance will be deactivated but kept stored unless 523 * the remaining lifetime is zero. 524 * 525 * For instance == 0x00: 526 * - force == true: All instances will be removed regardless of their timeout 527 * setting. 528 * - force == false: Only instances that have a timeout will be removed. 529 */ 530 int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk, 531 u8 instance, bool force) 532 { 533 struct adv_info *adv_instance, *n, *next_instance = NULL; 534 int err; 535 u8 rem_inst; 536 537 /* Cancel any timeout concerning the removed instance(s). */ 538 if (!instance || hdev->cur_adv_instance == instance) 539 cancel_adv_timeout(hdev); 540 541 /* Get the next instance to advertise BEFORE we remove 542 * the current one. This can be the same instance again 543 * if there is only one instance. 544 */ 545 if (instance && hdev->cur_adv_instance == instance) 546 next_instance = hci_get_next_instance(hdev, instance); 547 548 if (instance == 0x00) { 549 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, 550 list) { 551 if (!(force || adv_instance->timeout)) 552 continue; 553 554 rem_inst = adv_instance->instance; 555 err = hci_remove_adv_instance(hdev, rem_inst); 556 if (!err) 557 mgmt_advertising_removed(sk, hdev, rem_inst); 558 } 559 } else { 560 adv_instance = hci_find_adv_instance(hdev, instance); 561 562 if (force || (adv_instance && adv_instance->timeout && 563 !adv_instance->remaining_time)) { 564 /* Don't advertise a removed instance. 
*/ 565 if (next_instance && 566 next_instance->instance == instance) 567 next_instance = NULL; 568 569 err = hci_remove_adv_instance(hdev, instance); 570 if (!err) 571 mgmt_advertising_removed(sk, hdev, instance); 572 } 573 } 574 575 if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) 576 return 0; 577 578 if (next_instance && !ext_adv_capable(hdev)) 579 return hci_schedule_adv_instance_sync(hdev, 580 next_instance->instance, 581 false); 582 583 return 0; 584 } 585 586 static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data) 587 { 588 u8 instance = *(u8 *)data; 589 590 kfree(data); 591 592 hci_clear_adv_instance_sync(hdev, NULL, instance, false); 593 594 if (list_empty(&hdev->adv_instances)) 595 return hci_disable_advertising_sync(hdev); 596 597 return 0; 598 } 599 600 static void adv_timeout_expire(struct work_struct *work) 601 { 602 u8 *inst_ptr; 603 struct hci_dev *hdev = container_of(work, struct hci_dev, 604 adv_instance_expire.work); 605 606 bt_dev_dbg(hdev, ""); 607 608 hci_dev_lock(hdev); 609 610 hdev->adv_instance_timeout = 0; 611 612 if (hdev->cur_adv_instance == 0x00) 613 goto unlock; 614 615 inst_ptr = kmalloc(1, GFP_KERNEL); 616 if (!inst_ptr) 617 goto unlock; 618 619 *inst_ptr = hdev->cur_adv_instance; 620 hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL); 621 622 unlock: 623 hci_dev_unlock(hdev); 624 } 625 626 void hci_cmd_sync_init(struct hci_dev *hdev) 627 { 628 INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); 629 INIT_LIST_HEAD(&hdev->cmd_sync_work_list); 630 mutex_init(&hdev->cmd_sync_work_lock); 631 632 INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work); 633 INIT_WORK(&hdev->reenable_adv_work, reenable_adv); 634 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable); 635 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart); 636 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); 637 } 638 639 void hci_cmd_sync_clear(struct hci_dev *hdev) 640 { 641 struct hci_cmd_sync_work_entry *entry, *tmp; 642 643 cancel_work_sync(&hdev->cmd_sync_work); 644 cancel_work_sync(&hdev->reenable_adv_work); 645 646 list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) { 647 if (entry->destroy) 648 entry->destroy(hdev, entry->data, -ECANCELED); 649 650 list_del(&entry->list); 651 kfree(entry); 652 } 653 } 654 655 void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err) 656 { 657 bt_dev_dbg(hdev, "err 0x%2.2x", err); 658 659 if (hdev->req_status == HCI_REQ_PEND) { 660 hdev->req_result = err; 661 hdev->req_status = HCI_REQ_CANCELED; 662 663 cancel_delayed_work_sync(&hdev->cmd_timer); 664 cancel_delayed_work_sync(&hdev->ncmd_timer); 665 atomic_set(&hdev->cmd_cnt, 1); 666 667 wake_up_interruptible(&hdev->req_wait_q); 668 } 669 } 670 671 void hci_cmd_sync_cancel(struct hci_dev *hdev, int err) 672 { 673 bt_dev_dbg(hdev, "err 0x%2.2x", err); 674 675 if (hdev->req_status == HCI_REQ_PEND) { 676 hdev->req_result = err; 677 hdev->req_status = HCI_REQ_CANCELED; 678 679 queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work); 680 } 681 } 682 EXPORT_SYMBOL(hci_cmd_sync_cancel); 683 684 int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, 685 void *data, hci_cmd_sync_work_destroy_t destroy) 686 { 687 struct hci_cmd_sync_work_entry *entry; 688 689 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) 690 return -ENODEV; 691 692 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 693 if (!entry) 694 return -ENOMEM; 695 696 entry->func = func; 697 entry->data = data; 698 entry->destroy = destroy; 
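	/* The entry is handed over to hci_cmd_sync_work(), which runs
	 * entry->func under the request sync lock, passes the result to
	 * entry->destroy (if set) and then frees the entry.
	 */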
699 700 mutex_lock(&hdev->cmd_sync_work_lock); 701 list_add_tail(&entry->list, &hdev->cmd_sync_work_list); 702 mutex_unlock(&hdev->cmd_sync_work_lock); 703 704 queue_work(hdev->req_workqueue, &hdev->cmd_sync_work); 705 706 return 0; 707 } 708 EXPORT_SYMBOL(hci_cmd_sync_queue); 709 710 int hci_update_eir_sync(struct hci_dev *hdev) 711 { 712 struct hci_cp_write_eir cp; 713 714 bt_dev_dbg(hdev, ""); 715 716 if (!hdev_is_powered(hdev)) 717 return 0; 718 719 if (!lmp_ext_inq_capable(hdev)) 720 return 0; 721 722 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) 723 return 0; 724 725 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) 726 return 0; 727 728 memset(&cp, 0, sizeof(cp)); 729 730 eir_create(hdev, cp.data); 731 732 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) 733 return 0; 734 735 memcpy(hdev->eir, cp.data, sizeof(cp.data)); 736 737 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp, 738 HCI_CMD_TIMEOUT); 739 } 740 741 static u8 get_service_classes(struct hci_dev *hdev) 742 { 743 struct bt_uuid *uuid; 744 u8 val = 0; 745 746 list_for_each_entry(uuid, &hdev->uuids, list) 747 val |= uuid->svc_hint; 748 749 return val; 750 } 751 752 int hci_update_class_sync(struct hci_dev *hdev) 753 { 754 u8 cod[3]; 755 756 bt_dev_dbg(hdev, ""); 757 758 if (!hdev_is_powered(hdev)) 759 return 0; 760 761 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) 762 return 0; 763 764 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) 765 return 0; 766 767 cod[0] = hdev->minor_class; 768 cod[1] = hdev->major_class; 769 cod[2] = get_service_classes(hdev); 770 771 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) 772 cod[1] |= 0x20; 773 774 if (memcmp(cod, hdev->dev_class, 3) == 0) 775 return 0; 776 777 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV, 778 sizeof(cod), cod, HCI_CMD_TIMEOUT); 779 } 780 781 static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable) 782 { 783 /* If there is no connection we are OK to advertise. */ 784 if (hci_conn_num(hdev, LE_LINK) == 0) 785 return true; 786 787 /* Check le_states if there is any connection in peripheral role. */ 788 if (hdev->conn_hash.le_num_peripheral > 0) { 789 /* Peripheral connection state and non connectable mode 790 * bit 20. 791 */ 792 if (!connectable && !(hdev->le_states[2] & 0x10)) 793 return false; 794 795 /* Peripheral connection state and connectable mode bit 38 796 * and scannable bit 21. 797 */ 798 if (connectable && (!(hdev->le_states[4] & 0x40) || 799 !(hdev->le_states[2] & 0x20))) 800 return false; 801 } 802 803 /* Check le_states if there is any connection in central role. */ 804 if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) { 805 /* Central connection state and non connectable mode bit 18. */ 806 if (!connectable && !(hdev->le_states[2] & 0x02)) 807 return false; 808 809 /* Central connection state and connectable mode bit 35 and 810 * scannable 19. 811 */ 812 if (connectable && (!(hdev->le_states[4] & 0x08) || 813 !(hdev->le_states[2] & 0x08))) 814 return false; 815 } 816 817 return true; 818 } 819 820 static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags) 821 { 822 /* If privacy is not enabled don't use RPA */ 823 if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) 824 return false; 825 826 /* If basic privacy mode is enabled use RPA */ 827 if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) 828 return true; 829 830 /* If limited privacy mode is enabled don't use RPA if we're 831 * both discoverable and bondable. 
832 */ 833 if ((flags & MGMT_ADV_FLAG_DISCOV) && 834 hci_dev_test_flag(hdev, HCI_BONDABLE)) 835 return false; 836 837 /* We're neither bondable nor discoverable in the limited 838 * privacy mode, therefore use RPA. 839 */ 840 return true; 841 } 842 843 static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa) 844 { 845 /* If we're advertising or initiating an LE connection we can't 846 * go ahead and change the random address at this time. This is 847 * because the eventual initiator address used for the 848 * subsequently created connection will be undefined (some 849 * controllers use the new address and others the one we had 850 * when the operation started). 851 * 852 * In this kind of scenario skip the update and let the random 853 * address be updated at the next cycle. 854 */ 855 if (hci_dev_test_flag(hdev, HCI_LE_ADV) || 856 hci_lookup_le_connect(hdev)) { 857 bt_dev_dbg(hdev, "Deferring random address update"); 858 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 859 return 0; 860 } 861 862 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR, 863 6, rpa, HCI_CMD_TIMEOUT); 864 } 865 866 int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy, 867 bool rpa, u8 *own_addr_type) 868 { 869 int err; 870 871 /* If privacy is enabled use a resolvable private address. If 872 * current RPA has expired or there is something else than 873 * the current RPA in use, then generate a new one. 874 */ 875 if (rpa) { 876 /* If Controller supports LL Privacy use own address type is 877 * 0x03 878 */ 879 if (use_ll_privacy(hdev)) 880 *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; 881 else 882 *own_addr_type = ADDR_LE_DEV_RANDOM; 883 884 /* Check if RPA is valid */ 885 if (rpa_valid(hdev)) 886 return 0; 887 888 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); 889 if (err < 0) { 890 bt_dev_err(hdev, "failed to generate new RPA"); 891 return err; 892 } 893 894 err = hci_set_random_addr_sync(hdev, &hdev->rpa); 895 if (err) 896 return err; 897 898 return 0; 899 } 900 901 /* In case of required privacy without resolvable private address, 902 * use an non-resolvable private address. This is useful for active 903 * scanning and non-connectable advertising. 904 */ 905 if (require_privacy) { 906 bdaddr_t nrpa; 907 908 while (true) { 909 /* The non-resolvable private address is generated 910 * from random six bytes with the two most significant 911 * bits cleared. 912 */ 913 get_random_bytes(&nrpa, 6); 914 nrpa.b[5] &= 0x3f; 915 916 /* The non-resolvable private address shall not be 917 * equal to the public address. 918 */ 919 if (bacmp(&hdev->bdaddr, &nrpa)) 920 break; 921 } 922 923 *own_addr_type = ADDR_LE_DEV_RANDOM; 924 925 return hci_set_random_addr_sync(hdev, &nrpa); 926 } 927 928 /* If forcing static address is in use or there is no public 929 * address use the static address as random address (but skip 930 * the HCI command if the current random address is already the 931 * static one. 932 * 933 * In case BR/EDR has been disabled on a dual-mode controller 934 * and a static address has been configured, then use that 935 * address instead of the public BR/EDR address. 
936 */ 937 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || 938 !bacmp(&hdev->bdaddr, BDADDR_ANY) || 939 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && 940 bacmp(&hdev->static_addr, BDADDR_ANY))) { 941 *own_addr_type = ADDR_LE_DEV_RANDOM; 942 if (bacmp(&hdev->static_addr, &hdev->random_addr)) 943 return hci_set_random_addr_sync(hdev, 944 &hdev->static_addr); 945 return 0; 946 } 947 948 /* Neither privacy nor static address is being used so use a 949 * public address. 950 */ 951 *own_addr_type = ADDR_LE_DEV_PUBLIC; 952 953 return 0; 954 } 955 956 static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) 957 { 958 struct hci_cp_le_set_ext_adv_enable *cp; 959 struct hci_cp_ext_adv_set *set; 960 u8 data[sizeof(*cp) + sizeof(*set) * 1]; 961 u8 size; 962 963 /* If request specifies an instance that doesn't exist, fail */ 964 if (instance > 0) { 965 struct adv_info *adv; 966 967 adv = hci_find_adv_instance(hdev, instance); 968 if (!adv) 969 return -EINVAL; 970 971 /* If not enabled there is nothing to do */ 972 if (!adv->enabled) 973 return 0; 974 } 975 976 memset(data, 0, sizeof(data)); 977 978 cp = (void *)data; 979 set = (void *)cp->data; 980 981 /* Instance 0x00 indicates all advertising instances will be disabled */ 982 cp->num_of_sets = !!instance; 983 cp->enable = 0x00; 984 985 set->handle = instance; 986 987 size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets; 988 989 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, 990 size, data, HCI_CMD_TIMEOUT); 991 } 992 993 static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance, 994 bdaddr_t *random_addr) 995 { 996 struct hci_cp_le_set_adv_set_rand_addr cp; 997 int err; 998 999 if (!instance) { 1000 /* Instance 0x00 doesn't have an adv_info, instead it uses 1001 * hdev->random_addr to track its address so whenever it needs 1002 * to be updated this also set the random address since 1003 * hdev->random_addr is shared with scan state machine. 1004 */ 1005 err = hci_set_random_addr_sync(hdev, random_addr); 1006 if (err) 1007 return err; 1008 } 1009 1010 memset(&cp, 0, sizeof(cp)); 1011 1012 cp.handle = instance; 1013 bacpy(&cp.bdaddr, random_addr); 1014 1015 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR, 1016 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 1017 } 1018 1019 int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance) 1020 { 1021 struct hci_cp_le_set_ext_adv_params cp; 1022 bool connectable; 1023 u32 flags; 1024 bdaddr_t random_addr; 1025 u8 own_addr_type; 1026 int err; 1027 struct adv_info *adv; 1028 bool secondary_adv; 1029 1030 if (instance > 0) { 1031 adv = hci_find_adv_instance(hdev, instance); 1032 if (!adv) 1033 return -EINVAL; 1034 } else { 1035 adv = NULL; 1036 } 1037 1038 /* Updating parameters of an active instance will return a 1039 * Command Disallowed error, so we must first disable the 1040 * instance if it is active. 1041 */ 1042 if (adv && !adv->pending) { 1043 err = hci_disable_ext_adv_instance_sync(hdev, instance); 1044 if (err) 1045 return err; 1046 } 1047 1048 flags = hci_adv_instance_flags(hdev, instance); 1049 1050 /* If the "connectable" instance flag was not set, then choose between 1051 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. 1052 */ 1053 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || 1054 mgmt_get_connectable(hdev); 1055 1056 if (!is_advertising_allowed(hdev, connectable)) 1057 return -EPERM; 1058 1059 /* Set require_privacy to true only when non-connectable 1060 * advertising is used. 
In that case it is fine to use a 1061 * non-resolvable private address. 1062 */ 1063 err = hci_get_random_address(hdev, !connectable, 1064 adv_use_rpa(hdev, flags), adv, 1065 &own_addr_type, &random_addr); 1066 if (err < 0) 1067 return err; 1068 1069 memset(&cp, 0, sizeof(cp)); 1070 1071 if (adv) { 1072 hci_cpu_to_le24(adv->min_interval, cp.min_interval); 1073 hci_cpu_to_le24(adv->max_interval, cp.max_interval); 1074 cp.tx_power = adv->tx_power; 1075 } else { 1076 hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); 1077 hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); 1078 cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; 1079 } 1080 1081 secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); 1082 1083 if (connectable) { 1084 if (secondary_adv) 1085 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND); 1086 else 1087 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND); 1088 } else if (hci_adv_instance_is_scannable(hdev, instance) || 1089 (flags & MGMT_ADV_PARAM_SCAN_RSP)) { 1090 if (secondary_adv) 1091 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND); 1092 else 1093 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND); 1094 } else { 1095 if (secondary_adv) 1096 cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND); 1097 else 1098 cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND); 1099 } 1100 1101 /* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter 1102 * contains the peer’s Identity Address and the Peer_Address_Type 1103 * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01). 1104 * These parameters are used to locate the corresponding local IRK in 1105 * the resolving list; this IRK is used to generate their own address 1106 * used in the advertisement. 1107 */ 1108 if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) 1109 hci_copy_identity_address(hdev, &cp.peer_addr, 1110 &cp.peer_addr_type); 1111 1112 cp.own_addr_type = own_addr_type; 1113 cp.channel_map = hdev->le_adv_channel_map; 1114 cp.handle = instance; 1115 1116 if (flags & MGMT_ADV_FLAG_SEC_2M) { 1117 cp.primary_phy = HCI_ADV_PHY_1M; 1118 cp.secondary_phy = HCI_ADV_PHY_2M; 1119 } else if (flags & MGMT_ADV_FLAG_SEC_CODED) { 1120 cp.primary_phy = HCI_ADV_PHY_CODED; 1121 cp.secondary_phy = HCI_ADV_PHY_CODED; 1122 } else { 1123 /* In all other cases use 1M */ 1124 cp.primary_phy = HCI_ADV_PHY_1M; 1125 cp.secondary_phy = HCI_ADV_PHY_1M; 1126 } 1127 1128 err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, 1129 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 1130 if (err) 1131 return err; 1132 1133 if ((own_addr_type == ADDR_LE_DEV_RANDOM || 1134 own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) && 1135 bacmp(&random_addr, BDADDR_ANY)) { 1136 /* Check if random address need to be updated */ 1137 if (adv) { 1138 if (!bacmp(&random_addr, &adv->random_addr)) 1139 return 0; 1140 } else { 1141 if (!bacmp(&random_addr, &hdev->random_addr)) 1142 return 0; 1143 } 1144 1145 return hci_set_adv_set_random_addr_sync(hdev, instance, 1146 &random_addr); 1147 } 1148 1149 return 0; 1150 } 1151 1152 static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) 1153 { 1154 struct { 1155 struct hci_cp_le_set_ext_scan_rsp_data cp; 1156 u8 data[HCI_MAX_EXT_AD_LENGTH]; 1157 } pdu; 1158 u8 len; 1159 struct adv_info *adv = NULL; 1160 int err; 1161 1162 memset(&pdu, 0, sizeof(pdu)); 1163 1164 if (instance) { 1165 adv = hci_find_adv_instance(hdev, instance); 1166 if (!adv || !adv->scan_rsp_changed) 1167 return 0; 1168 } 1169 1170 len = eir_create_scan_rsp(hdev, instance, pdu.data); 1171 1172 
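	/* Fill in the Set Extended Scan Response Data command; only the
	 * length generated by eir_create_scan_rsp() is sent to the
	 * controller.
	 */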
pdu.cp.handle = instance; 1173 pdu.cp.length = len; 1174 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; 1175 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; 1176 1177 err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, 1178 sizeof(pdu.cp) + len, &pdu.cp, 1179 HCI_CMD_TIMEOUT); 1180 if (err) 1181 return err; 1182 1183 if (adv) { 1184 adv->scan_rsp_changed = false; 1185 } else { 1186 memcpy(hdev->scan_rsp_data, pdu.data, len); 1187 hdev->scan_rsp_data_len = len; 1188 } 1189 1190 return 0; 1191 } 1192 1193 static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) 1194 { 1195 struct hci_cp_le_set_scan_rsp_data cp; 1196 u8 len; 1197 1198 memset(&cp, 0, sizeof(cp)); 1199 1200 len = eir_create_scan_rsp(hdev, instance, cp.data); 1201 1202 if (hdev->scan_rsp_data_len == len && 1203 !memcmp(cp.data, hdev->scan_rsp_data, len)) 1204 return 0; 1205 1206 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); 1207 hdev->scan_rsp_data_len = len; 1208 1209 cp.length = len; 1210 1211 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA, 1212 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 1213 } 1214 1215 int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) 1216 { 1217 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) 1218 return 0; 1219 1220 if (ext_adv_capable(hdev)) 1221 return hci_set_ext_scan_rsp_data_sync(hdev, instance); 1222 1223 return __hci_set_scan_rsp_data_sync(hdev, instance); 1224 } 1225 1226 int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance) 1227 { 1228 struct hci_cp_le_set_ext_adv_enable *cp; 1229 struct hci_cp_ext_adv_set *set; 1230 u8 data[sizeof(*cp) + sizeof(*set) * 1]; 1231 struct adv_info *adv; 1232 1233 if (instance > 0) { 1234 adv = hci_find_adv_instance(hdev, instance); 1235 if (!adv) 1236 return -EINVAL; 1237 /* If already enabled there is nothing to do */ 1238 if (adv->enabled) 1239 return 0; 1240 } else { 1241 adv = NULL; 1242 } 1243 1244 cp = (void *)data; 1245 set = (void *)cp->data; 1246 1247 memset(cp, 0, sizeof(*cp)); 1248 1249 cp->enable = 0x01; 1250 cp->num_of_sets = 0x01; 1251 1252 memset(set, 0, sizeof(*set)); 1253 1254 set->handle = instance; 1255 1256 /* Set duration per instance since controller is responsible for 1257 * scheduling it. 1258 */ 1259 if (adv && adv->timeout) { 1260 u16 duration = adv->timeout * MSEC_PER_SEC; 1261 1262 /* Time = N * 10 ms */ 1263 set->duration = cpu_to_le16(duration / 10); 1264 } 1265 1266 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, 1267 sizeof(*cp) + 1268 sizeof(*set) * cp->num_of_sets, 1269 data, HCI_CMD_TIMEOUT); 1270 } 1271 1272 int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance) 1273 { 1274 int err; 1275 1276 err = hci_setup_ext_adv_instance_sync(hdev, instance); 1277 if (err) 1278 return err; 1279 1280 err = hci_set_ext_scan_rsp_data_sync(hdev, instance); 1281 if (err) 1282 return err; 1283 1284 return hci_enable_ext_advertising_sync(hdev, instance); 1285 } 1286 1287 static int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance) 1288 { 1289 struct hci_cp_le_set_per_adv_enable cp; 1290 1291 /* If periodic advertising already disabled there is nothing to do. 
 */
	if (!hci_dev_test_flag(hdev, HCI_LE_PER_ADV))
		return 0;

	memset(&cp, 0, sizeof(cp));

	cp.enable = 0x00;
	cp.handle = instance;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
				       u16 min_interval, u16 max_interval)
{
	struct hci_cp_le_set_per_adv_params cp;

	memset(&cp, 0, sizeof(cp));

	if (!min_interval)
		min_interval = DISCOV_LE_PER_ADV_INT_MIN;

	if (!max_interval)
		max_interval = DISCOV_LE_PER_ADV_INT_MAX;

	cp.handle = instance;
	cp.min_interval = cpu_to_le16(min_interval);
	cp.max_interval = cpu_to_le16(max_interval);
	cp.periodic_properties = 0x0000;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
{
	struct {
		struct hci_cp_le_set_per_adv_data cp;
		u8 data[HCI_MAX_PER_AD_LENGTH];
	} pdu;
	u8 len;

	memset(&pdu, 0, sizeof(pdu));

	if (instance) {
		struct adv_info *adv = hci_find_adv_instance(hdev, instance);

		if (!adv || !adv->periodic)
			return 0;
	}

	len = eir_create_per_adv_data(hdev, instance, pdu.data);

	pdu.cp.length = len;
	pdu.cp.handle = instance;
	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
				     sizeof(pdu.cp) + len, &pdu,
				     HCI_CMD_TIMEOUT);
}

static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
{
	struct hci_cp_le_set_per_adv_enable cp;

	/* If periodic advertising already enabled there is nothing to do. */
	if (hci_dev_test_flag(hdev, HCI_LE_PER_ADV))
		return 0;

	memset(&cp, 0, sizeof(cp));

	cp.enable = 0x01;
	cp.handle = instance;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Checks if the periodic advertising data contains a Basic Audio Announcement
 * and, if it does, generates a Broadcast ID and adds a Broadcast Announcement.
 */
static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv)
{
	u8 bid[3];
	u8 ad[4 + 3];

	/* Skip if NULL adv as instance 0x00 is used for general purpose
	 * advertising so it cannot be used for the likes of Broadcast
	 * Announcement as it can be overwritten at any point.
	 */
	if (!adv)
		return 0;

	/* If the PA data doesn't contain a Basic Audio Announcement there is
	 * nothing to do.
	 */
	if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len,
				  0x1851, NULL))
		return 0;

	/* Check if advertising data already has a Broadcast Announcement since
	 * the process may want to control the Broadcast ID directly and in
	 * that case the kernel shall not interfere.
1396 */ 1397 if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852, 1398 NULL)) 1399 return 0; 1400 1401 /* Generate Broadcast ID */ 1402 get_random_bytes(bid, sizeof(bid)); 1403 eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid)); 1404 hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL); 1405 1406 return hci_update_adv_data_sync(hdev, adv->instance); 1407 } 1408 1409 int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len, 1410 u8 *data, u32 flags, u16 min_interval, 1411 u16 max_interval, u16 sync_interval) 1412 { 1413 struct adv_info *adv = NULL; 1414 int err; 1415 bool added = false; 1416 1417 hci_disable_per_advertising_sync(hdev, instance); 1418 1419 if (instance) { 1420 adv = hci_find_adv_instance(hdev, instance); 1421 /* Create an instance if that could not be found */ 1422 if (!adv) { 1423 adv = hci_add_per_instance(hdev, instance, flags, 1424 data_len, data, 1425 sync_interval, 1426 sync_interval); 1427 if (IS_ERR(adv)) 1428 return PTR_ERR(adv); 1429 added = true; 1430 } 1431 } 1432 1433 /* Only start advertising if instance 0 or if a dedicated instance has 1434 * been added. 1435 */ 1436 if (!adv || added) { 1437 err = hci_start_ext_adv_sync(hdev, instance); 1438 if (err < 0) 1439 goto fail; 1440 1441 err = hci_adv_bcast_annoucement(hdev, adv); 1442 if (err < 0) 1443 goto fail; 1444 } 1445 1446 err = hci_set_per_adv_params_sync(hdev, instance, min_interval, 1447 max_interval); 1448 if (err < 0) 1449 goto fail; 1450 1451 err = hci_set_per_adv_data_sync(hdev, instance); 1452 if (err < 0) 1453 goto fail; 1454 1455 err = hci_enable_per_advertising_sync(hdev, instance); 1456 if (err < 0) 1457 goto fail; 1458 1459 return 0; 1460 1461 fail: 1462 if (added) 1463 hci_remove_adv_instance(hdev, instance); 1464 1465 return err; 1466 } 1467 1468 static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance) 1469 { 1470 int err; 1471 1472 if (ext_adv_capable(hdev)) 1473 return hci_start_ext_adv_sync(hdev, instance); 1474 1475 err = hci_update_adv_data_sync(hdev, instance); 1476 if (err) 1477 return err; 1478 1479 err = hci_update_scan_rsp_data_sync(hdev, instance); 1480 if (err) 1481 return err; 1482 1483 return hci_enable_advertising_sync(hdev); 1484 } 1485 1486 int hci_enable_advertising_sync(struct hci_dev *hdev) 1487 { 1488 struct adv_info *adv_instance; 1489 struct hci_cp_le_set_adv_param cp; 1490 u8 own_addr_type, enable = 0x01; 1491 bool connectable; 1492 u16 adv_min_interval, adv_max_interval; 1493 u32 flags; 1494 u8 status; 1495 1496 if (ext_adv_capable(hdev)) 1497 return hci_enable_ext_advertising_sync(hdev, 1498 hdev->cur_adv_instance); 1499 1500 flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance); 1501 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); 1502 1503 /* If the "connectable" instance flag was not set, then choose between 1504 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. 1505 */ 1506 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || 1507 mgmt_get_connectable(hdev); 1508 1509 if (!is_advertising_allowed(hdev, connectable)) 1510 return -EINVAL; 1511 1512 status = hci_disable_advertising_sync(hdev); 1513 if (status) 1514 return status; 1515 1516 /* Clear the HCI_LE_ADV bit temporarily so that the 1517 * hci_update_random_address knows that it's safe to go ahead 1518 * and write a new random address. The flag will be set back on 1519 * as soon as the SET_ADV_ENABLE HCI command completes. 
1520 */ 1521 hci_dev_clear_flag(hdev, HCI_LE_ADV); 1522 1523 /* Set require_privacy to true only when non-connectable 1524 * advertising is used. In that case it is fine to use a 1525 * non-resolvable private address. 1526 */ 1527 status = hci_update_random_address_sync(hdev, !connectable, 1528 adv_use_rpa(hdev, flags), 1529 &own_addr_type); 1530 if (status) 1531 return status; 1532 1533 memset(&cp, 0, sizeof(cp)); 1534 1535 if (adv_instance) { 1536 adv_min_interval = adv_instance->min_interval; 1537 adv_max_interval = adv_instance->max_interval; 1538 } else { 1539 adv_min_interval = hdev->le_adv_min_interval; 1540 adv_max_interval = hdev->le_adv_max_interval; 1541 } 1542 1543 if (connectable) { 1544 cp.type = LE_ADV_IND; 1545 } else { 1546 if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance)) 1547 cp.type = LE_ADV_SCAN_IND; 1548 else 1549 cp.type = LE_ADV_NONCONN_IND; 1550 1551 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) || 1552 hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { 1553 adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN; 1554 adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX; 1555 } 1556 } 1557 1558 cp.min_interval = cpu_to_le16(adv_min_interval); 1559 cp.max_interval = cpu_to_le16(adv_max_interval); 1560 cp.own_address_type = own_addr_type; 1561 cp.channel_map = hdev->le_adv_channel_map; 1562 1563 status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM, 1564 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 1565 if (status) 1566 return status; 1567 1568 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, 1569 sizeof(enable), &enable, HCI_CMD_TIMEOUT); 1570 } 1571 1572 static int enable_advertising_sync(struct hci_dev *hdev, void *data) 1573 { 1574 return hci_enable_advertising_sync(hdev); 1575 } 1576 1577 int hci_enable_advertising(struct hci_dev *hdev) 1578 { 1579 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && 1580 list_empty(&hdev->adv_instances)) 1581 return 0; 1582 1583 return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL); 1584 } 1585 1586 int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance, 1587 struct sock *sk) 1588 { 1589 int err; 1590 1591 if (!ext_adv_capable(hdev)) 1592 return 0; 1593 1594 err = hci_disable_ext_adv_instance_sync(hdev, instance); 1595 if (err) 1596 return err; 1597 1598 /* If request specifies an instance that doesn't exist, fail */ 1599 if (instance > 0 && !hci_find_adv_instance(hdev, instance)) 1600 return -EINVAL; 1601 1602 return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET, 1603 sizeof(instance), &instance, 0, 1604 HCI_CMD_TIMEOUT, sk); 1605 } 1606 1607 static int remove_ext_adv_sync(struct hci_dev *hdev, void *data) 1608 { 1609 struct adv_info *adv = data; 1610 u8 instance = 0; 1611 1612 if (adv) 1613 instance = adv->instance; 1614 1615 return hci_remove_ext_adv_instance_sync(hdev, instance, NULL); 1616 } 1617 1618 int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance) 1619 { 1620 struct adv_info *adv = NULL; 1621 1622 if (instance) { 1623 adv = hci_find_adv_instance(hdev, instance); 1624 if (!adv) 1625 return -EINVAL; 1626 } 1627 1628 return hci_cmd_sync_queue(hdev, remove_ext_adv_sync, adv, NULL); 1629 } 1630 1631 int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason) 1632 { 1633 struct hci_cp_le_term_big cp; 1634 1635 memset(&cp, 0, sizeof(cp)); 1636 cp.handle = handle; 1637 cp.reason = reason; 1638 1639 return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG, 1640 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 1641 } 1642 1643 static int 
hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance) 1644 { 1645 struct { 1646 struct hci_cp_le_set_ext_adv_data cp; 1647 u8 data[HCI_MAX_EXT_AD_LENGTH]; 1648 } pdu; 1649 u8 len; 1650 struct adv_info *adv = NULL; 1651 int err; 1652 1653 memset(&pdu, 0, sizeof(pdu)); 1654 1655 if (instance) { 1656 adv = hci_find_adv_instance(hdev, instance); 1657 if (!adv || !adv->adv_data_changed) 1658 return 0; 1659 } 1660 1661 len = eir_create_adv_data(hdev, instance, pdu.data); 1662 1663 pdu.cp.length = len; 1664 pdu.cp.handle = instance; 1665 pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; 1666 pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; 1667 1668 err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_DATA, 1669 sizeof(pdu.cp) + len, &pdu.cp, 1670 HCI_CMD_TIMEOUT); 1671 if (err) 1672 return err; 1673 1674 /* Update data if the command succeed */ 1675 if (adv) { 1676 adv->adv_data_changed = false; 1677 } else { 1678 memcpy(hdev->adv_data, pdu.data, len); 1679 hdev->adv_data_len = len; 1680 } 1681 1682 return 0; 1683 } 1684 1685 static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance) 1686 { 1687 struct hci_cp_le_set_adv_data cp; 1688 u8 len; 1689 1690 memset(&cp, 0, sizeof(cp)); 1691 1692 len = eir_create_adv_data(hdev, instance, cp.data); 1693 1694 /* There's nothing to do if the data hasn't changed */ 1695 if (hdev->adv_data_len == len && 1696 memcmp(cp.data, hdev->adv_data, len) == 0) 1697 return 0; 1698 1699 memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); 1700 hdev->adv_data_len = len; 1701 1702 cp.length = len; 1703 1704 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA, 1705 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 1706 } 1707 1708 int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance) 1709 { 1710 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) 1711 return 0; 1712 1713 if (ext_adv_capable(hdev)) 1714 return hci_set_ext_adv_data_sync(hdev, instance); 1715 1716 return hci_set_adv_data_sync(hdev, instance); 1717 } 1718 1719 int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance, 1720 bool force) 1721 { 1722 struct adv_info *adv = NULL; 1723 u16 timeout; 1724 1725 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev)) 1726 return -EPERM; 1727 1728 if (hdev->adv_instance_timeout) 1729 return -EBUSY; 1730 1731 adv = hci_find_adv_instance(hdev, instance); 1732 if (!adv) 1733 return -ENOENT; 1734 1735 /* A zero timeout means unlimited advertising. As long as there is 1736 * only one instance, duration should be ignored. We still set a timeout 1737 * in case further instances are being added later on. 1738 * 1739 * If the remaining lifetime of the instance is more than the duration 1740 * then the timeout corresponds to the duration, otherwise it will be 1741 * reduced to the remaining instance lifetime. 1742 */ 1743 if (adv->timeout == 0 || adv->duration <= adv->remaining_time) 1744 timeout = adv->duration; 1745 else 1746 timeout = adv->remaining_time; 1747 1748 /* The remaining time is being reduced unless the instance is being 1749 * advertised without time limit. 1750 */ 1751 if (adv->timeout) 1752 adv->remaining_time = adv->remaining_time - timeout; 1753 1754 /* Only use work for scheduling instances with legacy advertising */ 1755 if (!ext_adv_capable(hdev)) { 1756 hdev->adv_instance_timeout = timeout; 1757 queue_delayed_work(hdev->req_workqueue, 1758 &hdev->adv_instance_expire, 1759 msecs_to_jiffies(timeout * 1000)); 1760 } 1761 1762 /* If we're just re-scheduling the same instance again then do not 1763 * execute any HCI commands. 
This happens when a single instance is 1764 * being advertised. 1765 */ 1766 if (!force && hdev->cur_adv_instance == instance && 1767 hci_dev_test_flag(hdev, HCI_LE_ADV)) 1768 return 0; 1769 1770 hdev->cur_adv_instance = instance; 1771 1772 return hci_start_adv_sync(hdev, instance); 1773 } 1774 1775 static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk) 1776 { 1777 int err; 1778 1779 if (!ext_adv_capable(hdev)) 1780 return 0; 1781 1782 /* Disable instance 0x00 to disable all instances */ 1783 err = hci_disable_ext_adv_instance_sync(hdev, 0x00); 1784 if (err) 1785 return err; 1786 1787 return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS, 1788 0, NULL, 0, HCI_CMD_TIMEOUT, sk); 1789 } 1790 1791 static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force) 1792 { 1793 struct adv_info *adv, *n; 1794 int err = 0; 1795 1796 if (ext_adv_capable(hdev)) 1797 /* Remove all existing sets */ 1798 err = hci_clear_adv_sets_sync(hdev, sk); 1799 if (ext_adv_capable(hdev)) 1800 return err; 1801 1802 /* This is safe as long as there is no command send while the lock is 1803 * held. 1804 */ 1805 hci_dev_lock(hdev); 1806 1807 /* Cleanup non-ext instances */ 1808 list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { 1809 u8 instance = adv->instance; 1810 int err; 1811 1812 if (!(force || adv->timeout)) 1813 continue; 1814 1815 err = hci_remove_adv_instance(hdev, instance); 1816 if (!err) 1817 mgmt_advertising_removed(sk, hdev, instance); 1818 } 1819 1820 hci_dev_unlock(hdev); 1821 1822 return 0; 1823 } 1824 1825 static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance, 1826 struct sock *sk) 1827 { 1828 int err = 0; 1829 1830 /* If we use extended advertising, instance has to be removed first. */ 1831 if (ext_adv_capable(hdev)) 1832 err = hci_remove_ext_adv_instance_sync(hdev, instance, sk); 1833 if (ext_adv_capable(hdev)) 1834 return err; 1835 1836 /* This is safe as long as there is no command send while the lock is 1837 * held. 1838 */ 1839 hci_dev_lock(hdev); 1840 1841 err = hci_remove_adv_instance(hdev, instance); 1842 if (!err) 1843 mgmt_advertising_removed(sk, hdev, instance); 1844 1845 hci_dev_unlock(hdev); 1846 1847 return err; 1848 } 1849 1850 /* For a single instance: 1851 * - force == true: The instance will be removed even when its remaining 1852 * lifetime is not zero. 1853 * - force == false: the instance will be deactivated but kept stored unless 1854 * the remaining lifetime is zero. 1855 * 1856 * For instance == 0x00: 1857 * - force == true: All instances will be removed regardless of their timeout 1858 * setting. 1859 * - force == false: Only instances that have a timeout will be removed. 1860 */ 1861 int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk, 1862 u8 instance, bool force) 1863 { 1864 struct adv_info *next = NULL; 1865 int err; 1866 1867 /* Cancel any timeout concerning the removed instance(s). */ 1868 if (!instance || hdev->cur_adv_instance == instance) 1869 cancel_adv_timeout(hdev); 1870 1871 /* Get the next instance to advertise BEFORE we remove 1872 * the current one. This can be the same instance again 1873 * if there is only one instance. 
1874 */ 1875 if (hdev->cur_adv_instance == instance) 1876 next = hci_get_next_instance(hdev, instance); 1877 1878 if (!instance) { 1879 err = hci_clear_adv_sync(hdev, sk, force); 1880 if (err) 1881 return err; 1882 } else { 1883 struct adv_info *adv = hci_find_adv_instance(hdev, instance); 1884 1885 if (force || (adv && adv->timeout && !adv->remaining_time)) { 1886 /* Don't advertise a removed instance. */ 1887 if (next && next->instance == instance) 1888 next = NULL; 1889 1890 err = hci_remove_adv_sync(hdev, instance, sk); 1891 if (err) 1892 return err; 1893 } 1894 } 1895 1896 if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) 1897 return 0; 1898 1899 if (next && !ext_adv_capable(hdev)) 1900 hci_schedule_adv_instance_sync(hdev, next->instance, false); 1901 1902 return 0; 1903 } 1904 1905 int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle) 1906 { 1907 struct hci_cp_read_rssi cp; 1908 1909 cp.handle = handle; 1910 return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI, 1911 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 1912 } 1913 1914 int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp) 1915 { 1916 return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK, 1917 sizeof(*cp), cp, HCI_CMD_TIMEOUT); 1918 } 1919 1920 int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type) 1921 { 1922 struct hci_cp_read_tx_power cp; 1923 1924 cp.handle = handle; 1925 cp.type = type; 1926 return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER, 1927 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 1928 } 1929 1930 int hci_disable_advertising_sync(struct hci_dev *hdev) 1931 { 1932 u8 enable = 0x00; 1933 int err = 0; 1934 1935 /* If controller is not advertising we are done. */ 1936 if (!hci_dev_test_flag(hdev, HCI_LE_ADV)) 1937 return 0; 1938 1939 if (ext_adv_capable(hdev)) 1940 err = hci_disable_ext_adv_instance_sync(hdev, 0x00); 1941 if (ext_adv_capable(hdev)) 1942 return err; 1943 1944 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, 1945 sizeof(enable), &enable, HCI_CMD_TIMEOUT); 1946 } 1947 1948 static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val, 1949 u8 filter_dup) 1950 { 1951 struct hci_cp_le_set_ext_scan_enable cp; 1952 1953 memset(&cp, 0, sizeof(cp)); 1954 cp.enable = val; 1955 1956 if (hci_dev_test_flag(hdev, HCI_MESH)) 1957 cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; 1958 else 1959 cp.filter_dup = filter_dup; 1960 1961 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE, 1962 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 1963 } 1964 1965 static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, 1966 u8 filter_dup) 1967 { 1968 struct hci_cp_le_set_scan_enable cp; 1969 1970 if (use_ext_scan(hdev)) 1971 return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup); 1972 1973 memset(&cp, 0, sizeof(cp)); 1974 cp.enable = val; 1975 1976 if (val && hci_dev_test_flag(hdev, HCI_MESH)) 1977 cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; 1978 else 1979 cp.filter_dup = filter_dup; 1980 1981 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE, 1982 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 1983 } 1984 1985 static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val) 1986 { 1987 if (!use_ll_privacy(hdev)) 1988 return 0; 1989 1990 /* If controller is not/already resolving we are done. 
 */
	if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
		return 0;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE,
				     sizeof(val), &val, HCI_CMD_TIMEOUT);
}

static int hci_scan_disable_sync(struct hci_dev *hdev)
{
	int err;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	if (hdev->scanning_paused) {
		bt_dev_dbg(hdev, "Scanning is paused for suspend");
		return 0;
	}

	err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
	if (err) {
		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
		return err;
	}

	return err;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

static void hci_start_interleave_scan(struct hci_dev *hdev)
{
	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->interleave_scan, 0);
}

static bool is_interleave_scanning(struct hci_dev *hdev)
{
	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
}

static void cancel_interleave_scan(struct hci_dev *hdev)
{
	bt_dev_dbg(hdev, "cancelling interleave scan");

	cancel_delayed_work_sync(&hdev->interleave_scan);

	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
}

/* Return true if an interleave scan was started by this function, otherwise
 * return false.
 */
static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
{
	/* Do interleaved scan only if all of the following are true:
	 * - There is at least one ADV monitor
	 * - At least one pending LE connection or one device to be scanned for
	 * - Monitor offloading is not supported
	 * If so, we should alternate between allowlist scan and one without
	 * any filters to save power.
2057 */
2058 bool use_interleaving = hci_is_adv_monitoring(hdev) &&
2059 !(list_empty(&hdev->pend_le_conns) &&
2060 list_empty(&hdev->pend_le_reports)) &&
2061 hci_get_adv_monitor_offload_ext(hdev) ==
2062 HCI_ADV_MONITOR_EXT_NONE;
2063 bool is_interleaving = is_interleave_scanning(hdev);
2064
2065 if (use_interleaving && !is_interleaving) {
2066 hci_start_interleave_scan(hdev);
2067 bt_dev_dbg(hdev, "starting interleave scan");
2068 return true;
2069 }
2070
2071 if (!use_interleaving && is_interleaving)
2072 cancel_interleave_scan(hdev);
2073
2074 return false;
2075 }
2076
2077 /* Removes connection from the resolving list if needed. */
2078 static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
2079 bdaddr_t *bdaddr, u8 bdaddr_type)
2080 {
2081 struct hci_cp_le_del_from_resolv_list cp;
2082 struct bdaddr_list_with_irk *entry;
2083
2084 if (!use_ll_privacy(hdev))
2085 return 0;
2086
2087 /* Check if the IRK has been programmed */
2088 entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
2089 bdaddr_type);
2090 if (!entry)
2091 return 0;
2092
2093 cp.bdaddr_type = bdaddr_type;
2094 bacpy(&cp.bdaddr, bdaddr);
2095
2096 return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
2097 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2098 }
2099
2100 static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
2101 bdaddr_t *bdaddr, u8 bdaddr_type)
2102 {
2103 struct hci_cp_le_del_from_accept_list cp;
2104 int err;
2105
2106 /* Check if device is on accept list before removing it */
2107 if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
2108 return 0;
2109
2110 cp.bdaddr_type = bdaddr_type;
2111 bacpy(&cp.bdaddr, bdaddr);
2112
2113 /* Ignore errors when removing from resolving list as it is likely
2114 * that the device was never added.
2115 */
2116 hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2117
2118 err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
2119 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2120 if (err) {
2121 bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
2122 return err;
2123 }
2124
2125 bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
2126 cp.bdaddr_type);
2127
2128 return 0;
2129 }
2130
2131 /* Adds connection to the resolving list if needed.
2132 * Setting params to NULL programs local hdev->irk
2133 */
2134 static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
2135 struct hci_conn_params *params)
2136 {
2137 struct hci_cp_le_add_to_resolv_list cp;
2138 struct smp_irk *irk;
2139 struct bdaddr_list_with_irk *entry;
2140
2141 if (!use_ll_privacy(hdev))
2142 return 0;
2143
2144 /* Attempt to program local identity address, type and irk if params is
2145 * NULL.
2146 */
2147 if (!params) {
2148 if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
2149 return 0;
2150
2151 hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
2152 memcpy(cp.peer_irk, hdev->irk, 16);
2153 goto done;
2154 }
2155
2156 irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2157 if (!irk)
2158 return 0;
2159
2160 /* Check if the IRK has _not_ been programmed yet. */
2161 entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
2162 &params->addr,
2163 params->addr_type);
2164 if (entry)
2165 return 0;
2166
2167 cp.bdaddr_type = params->addr_type;
2168 bacpy(&cp.bdaddr, &params->addr);
2169 memcpy(cp.peer_irk, irk->val, 16);
2170
2171 /* Default privacy mode is always Network */
2172 params->privacy_mode = HCI_NETWORK_PRIVACY;
2173
2174 done:
2175 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
2176 memcpy(cp.local_irk, hdev->irk, 16);
2177 else
2178 memset(cp.local_irk, 0, 16);
2179
2180 return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
2181 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2182 }
2183
2184 /* Set Device Privacy Mode. */
2185 static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
2186 struct hci_conn_params *params)
2187 {
2188 struct hci_cp_le_set_privacy_mode cp;
2189 struct smp_irk *irk;
2190
2191 /* If device privacy mode has already been set there is nothing to do */
2192 if (params->privacy_mode == HCI_DEVICE_PRIVACY)
2193 return 0;
2194
2195 /* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also
2196 * indicates that LL Privacy has been enabled and
2197 * HCI_OP_LE_SET_PRIVACY_MODE is supported.
2198 */
2199 if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY))
2200 return 0;
2201
2202 irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
2203 if (!irk)
2204 return 0;
2205
2206 memset(&cp, 0, sizeof(cp));
2207 cp.bdaddr_type = irk->addr_type;
2208 bacpy(&cp.bdaddr, &irk->bdaddr);
2209 cp.mode = HCI_DEVICE_PRIVACY;
2210
2211 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
2212 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2213 }
2214
2215 /* Adds connection to allow list if needed. If the device uses RPA (has IRK)
2216 * this attempts to program the device in the resolving list as well and
2217 * properly set the privacy mode.
2218 */
2219 static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
2220 struct hci_conn_params *params,
2221 u8 *num_entries)
2222 {
2223 struct hci_cp_le_add_to_accept_list cp;
2224 int err;
2225
2226 /* During suspend, only wakeable devices can be in acceptlist */
2227 if (hdev->suspended &&
2228 !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
2229 return 0;
2230
2231 /* Select filter policy to accept all advertising */
2232 if (*num_entries >= hdev->le_accept_list_size)
2233 return -ENOSPC;
2234
2235 /* Accept list cannot be used with RPAs */
2236 if (!use_ll_privacy(hdev) &&
2237 hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
2238 return -EINVAL;
2239
2240 /* Attempt to program the device in the resolving list first to avoid
2241 * having to rollback in case it fails; since the resolving list is
2242 * dynamic it can probably be smaller than the accept list.
2243 */
2244 err = hci_le_add_resolve_list_sync(hdev, params);
2245 if (err) {
2246 bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
2247 return err;
2248 }
2249
2250 /* Set Privacy Mode */
2251 err = hci_le_set_privacy_mode_sync(hdev, params);
2252 if (err) {
2253 bt_dev_err(hdev, "Unable to set privacy mode: %d", err);
2254 return err;
2255 }
2256
2257 /* Check if already in accept list */
2258 if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
2259 params->addr_type))
2260 return 0;
2261
2262 *num_entries += 1;
2263 cp.bdaddr_type = params->addr_type;
2264 bacpy(&cp.bdaddr, &params->addr);
2265
2266 err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
2267 sizeof(cp), &cp, HCI_CMD_TIMEOUT);
2268 if (err) {
2269 bt_dev_err(hdev, "Unable to add to allow list: %d", err);
2270 /* Rollback the device from the resolving list */
2271 hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
2272 return err;
2273 }
2274
2275 bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
2276 cp.bdaddr_type);
2277
2278 return 0;
2279 }
2280
2281 /* This function disables/pauses all advertising instances */
2282 static int hci_pause_advertising_sync(struct hci_dev *hdev)
2283 {
2284 int err;
2285 int old_state;
2286
2287 /* If advertising has already been paused there is nothing to do. */
2288 if (hdev->advertising_paused)
2289 return 0;
2290
2291 bt_dev_dbg(hdev, "Pausing directed advertising");
2292
2293 /* Stop directed advertising */
2294 old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
2295 if (old_state) {
2296 /* When discoverable timeout triggers, then just make sure
2297 * the limited discoverable flag is cleared. Even in the case
2298 * of a timeout triggered from general discoverable, it is
2299 * safe to unconditionally clear the flag.
2300 */
2301 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2302 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2303 hdev->discov_timeout = 0;
2304 }
2305
2306 bt_dev_dbg(hdev, "Pausing advertising instances");
2307
2308 /* Call to disable any advertisements active on the controller.
2309 * This will succeed even if no advertisements are configured.
2310 */
2311 err = hci_disable_advertising_sync(hdev);
2312 if (err)
2313 return err;
2314
2315 /* If we are using software rotation, pause the loop */
2316 if (!ext_adv_capable(hdev))
2317 cancel_adv_timeout(hdev);
2318
2319 hdev->advertising_paused = true;
2320 hdev->advertising_old_state = old_state;
2321
2322 return 0;
2323 }
2324
2325 /* This function enables all user advertising instances */
2326 static int hci_resume_advertising_sync(struct hci_dev *hdev)
2327 {
2328 struct adv_info *adv, *tmp;
2329 int err;
2330
2331 /* If advertising has not been paused there is nothing to do. */
2332 if (!hdev->advertising_paused)
2333 return 0;
2334
2335 /* Resume directed advertising */
2336 hdev->advertising_paused = false;
2337 if (hdev->advertising_old_state) {
2338 hci_dev_set_flag(hdev, HCI_ADVERTISING);
2339 hdev->advertising_old_state = 0;
2340 }
2341
2342 bt_dev_dbg(hdev, "Resuming advertising instances");
2343
2344 if (ext_adv_capable(hdev)) {
2345 /* Call for each tracked instance to be re-enabled */
2346 list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
2347 err = hci_enable_ext_advertising_sync(hdev,
2348 adv->instance);
2349 if (!err)
2350 continue;
2351
2352 /* If the instance cannot be resumed remove it */
2353 hci_remove_ext_adv_instance_sync(hdev, adv->instance,
2354 NULL);
2355 }
2356 } else {
2357 /* Schedule for most recent instance to be restarted and begin
2358 * the software rotation loop
2359 */
2360 err = hci_schedule_adv_instance_sync(hdev,
2361 hdev->cur_adv_instance,
2362 true);
2363 }
2364
2365 hdev->advertising_paused = false;
2366
2367 return err;
2368 }
2369
2370 struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
2371 bool extended, struct sock *sk)
2372 {
2373 u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
2374 HCI_OP_READ_LOCAL_OOB_DATA;
2375
2376 return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
2377 }
2378
2379 /* Device must not be scanning when updating the accept list.
2380 *
2381 * Update is done using the following sequence:
2382 *
2383 * use_ll_privacy((Disable Advertising) -> Disable Resolving List) ->
2384 * Remove Devices From Accept List ->
2385 * (has IRK && use_ll_privacy(Remove Devices From Resolving List))->
2386 * Add Devices to Accept List ->
2387 * (has IRK && use_ll_privacy(Add Devices to Resolving List)) ->
2388 * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) ->
2389 * Enable Scanning
2390 *
2391 * In case of failure advertising shall be restored to its original state and
2392 * the returned filter policy will disable use of the accept list, since either
2393 * the accept list or the resolving list could not be programmed.
2394 *
2395 */
2396 static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
2397 {
2398 struct hci_conn_params *params;
2399 struct bdaddr_list *b, *t;
2400 u8 num_entries = 0;
2401 bool pend_conn, pend_report;
2402 u8 filter_policy;
2403 int err;
2404
2405 /* Pause advertising if the resolving list can be used, as controllers
2406 * cannot accept resolving list modifications while advertising.
2407 */
2408 if (use_ll_privacy(hdev)) {
2409 err = hci_pause_advertising_sync(hdev);
2410 if (err) {
2411 bt_dev_err(hdev, "pause advertising failed: %d", err);
2412 return 0x00;
2413 }
2414 }
2415
2416 /* Disable address resolution while reprogramming accept list since
2417 * devices that do have an IRK will be programmed in the resolving list
2418 * when LL Privacy is enabled.
2419 */
2420 err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
2421 if (err) {
2422 bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
2423 goto done;
2424 }
2425
2426 /* Go through the current accept list programmed into the
2427 * controller one by one and check if that address is connected or is
2428 * still in the list of pending connections or list of devices to
2429 * report. If not present in either list, then remove it from
2430 * the controller.
2431 */
2432 list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
2433 if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type))
2434 continue;
2435
2436 pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
2437 &b->bdaddr,
2438 b->bdaddr_type);
2439 pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
2440 &b->bdaddr,
2441 b->bdaddr_type);
2442
2443 /* If the device is not likely to connect or report,
2444 * remove it from the acceptlist.
2445 */
2446 if (!pend_conn && !pend_report) {
2447 hci_le_del_accept_list_sync(hdev, &b->bdaddr,
2448 b->bdaddr_type);
2449 continue;
2450 }
2451
2452 num_entries++;
2453 }
2454
2455 /* Since all no longer valid accept list entries have been
2456 * removed, walk through the list of pending connections
2457 * and ensure that any new device gets programmed into
2458 * the controller.
2459 *
2460 * If the list of the devices is larger than the list of
2461 * available accept list entries in the controller, then
2462 * just abort and return filter policy value to not use the
2463 * accept list.
2464 */
2465 list_for_each_entry(params, &hdev->pend_le_conns, action) {
2466 err = hci_le_add_accept_list_sync(hdev, params, &num_entries);
2467 if (err)
2468 goto done;
2469 }
2470
2471 /* After adding all new pending connections, walk through
2472 * the list of pending reports and also add these to the
2473 * accept list if there is still space. Abort if space runs out.
2474 */
2475 list_for_each_entry(params, &hdev->pend_le_reports, action) {
2476 err = hci_le_add_accept_list_sync(hdev, params, &num_entries);
2477 if (err)
2478 goto done;
2479 }
2480
2481 /* Use the allowlist unless the following conditions are all true:
2482 * - We are not currently suspending
2483 * - There are 1 or more ADV monitors registered and it's not offloaded
2484 * - Interleaved scanning is not currently using the allowlist
2485 */
2486 if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
2487 hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
2488 hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
2489 err = -EINVAL;
2490
2491 done:
2492 filter_policy = err ? 0x00 : 0x01;
2493
2494 /* Enable address resolution when LL Privacy is enabled.
*/ 2495 err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01); 2496 if (err) 2497 bt_dev_err(hdev, "Unable to enable LL privacy: %d", err); 2498 2499 /* Resume advertising if it was paused */ 2500 if (use_ll_privacy(hdev)) 2501 hci_resume_advertising_sync(hdev); 2502 2503 /* Select filter policy to use accept list */ 2504 return filter_policy; 2505 } 2506 2507 /* Returns true if an le connection is in the scanning state */ 2508 static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev) 2509 { 2510 struct hci_conn_hash *h = &hdev->conn_hash; 2511 struct hci_conn *c; 2512 2513 rcu_read_lock(); 2514 2515 list_for_each_entry_rcu(c, &h->list, list) { 2516 if (c->type == LE_LINK && c->state == BT_CONNECT && 2517 test_bit(HCI_CONN_SCANNING, &c->flags)) { 2518 rcu_read_unlock(); 2519 return true; 2520 } 2521 } 2522 2523 rcu_read_unlock(); 2524 2525 return false; 2526 } 2527 2528 static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type, 2529 u16 interval, u16 window, 2530 u8 own_addr_type, u8 filter_policy) 2531 { 2532 struct hci_cp_le_set_ext_scan_params *cp; 2533 struct hci_cp_le_scan_phy_params *phy; 2534 u8 data[sizeof(*cp) + sizeof(*phy) * 2]; 2535 u8 num_phy = 0; 2536 2537 cp = (void *)data; 2538 phy = (void *)cp->data; 2539 2540 memset(data, 0, sizeof(data)); 2541 2542 cp->own_addr_type = own_addr_type; 2543 cp->filter_policy = filter_policy; 2544 2545 if (scan_1m(hdev) || scan_2m(hdev)) { 2546 cp->scanning_phys |= LE_SCAN_PHY_1M; 2547 2548 phy->type = type; 2549 phy->interval = cpu_to_le16(interval); 2550 phy->window = cpu_to_le16(window); 2551 2552 num_phy++; 2553 phy++; 2554 } 2555 2556 if (scan_coded(hdev)) { 2557 cp->scanning_phys |= LE_SCAN_PHY_CODED; 2558 2559 phy->type = type; 2560 phy->interval = cpu_to_le16(interval); 2561 phy->window = cpu_to_le16(window); 2562 2563 num_phy++; 2564 phy++; 2565 } 2566 2567 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS, 2568 sizeof(*cp) + sizeof(*phy) * num_phy, 2569 data, HCI_CMD_TIMEOUT); 2570 } 2571 2572 static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type, 2573 u16 interval, u16 window, 2574 u8 own_addr_type, u8 filter_policy) 2575 { 2576 struct hci_cp_le_set_scan_param cp; 2577 2578 if (use_ext_scan(hdev)) 2579 return hci_le_set_ext_scan_param_sync(hdev, type, interval, 2580 window, own_addr_type, 2581 filter_policy); 2582 2583 memset(&cp, 0, sizeof(cp)); 2584 cp.type = type; 2585 cp.interval = cpu_to_le16(interval); 2586 cp.window = cpu_to_le16(window); 2587 cp.own_address_type = own_addr_type; 2588 cp.filter_policy = filter_policy; 2589 2590 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM, 2591 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 2592 } 2593 2594 static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval, 2595 u16 window, u8 own_addr_type, u8 filter_policy, 2596 u8 filter_dup) 2597 { 2598 int err; 2599 2600 if (hdev->scanning_paused) { 2601 bt_dev_dbg(hdev, "Scanning is paused for suspend"); 2602 return 0; 2603 } 2604 2605 err = hci_le_set_scan_param_sync(hdev, type, interval, window, 2606 own_addr_type, filter_policy); 2607 if (err) 2608 return err; 2609 2610 return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup); 2611 } 2612 2613 static int hci_passive_scan_sync(struct hci_dev *hdev) 2614 { 2615 u8 own_addr_type; 2616 u8 filter_policy; 2617 u16 window, interval; 2618 u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE; 2619 int err; 2620 2621 if (hdev->scanning_paused) { 2622 bt_dev_dbg(hdev, "Scanning is paused for suspend"); 2623 return 0; 2624 } 
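/* Any ongoing scan has to be stopped first: the accept list and scan
 * parameters below cannot be reprogrammed while the controller is
 * scanning.
 */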
2625 2626 err = hci_scan_disable_sync(hdev); 2627 if (err) { 2628 bt_dev_err(hdev, "disable scanning failed: %d", err); 2629 return err; 2630 } 2631 2632 /* Set require_privacy to false since no SCAN_REQ are send 2633 * during passive scanning. Not using an non-resolvable address 2634 * here is important so that peer devices using direct 2635 * advertising with our address will be correctly reported 2636 * by the controller. 2637 */ 2638 if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev), 2639 &own_addr_type)) 2640 return 0; 2641 2642 if (hdev->enable_advmon_interleave_scan && 2643 hci_update_interleaved_scan_sync(hdev)) 2644 return 0; 2645 2646 bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state); 2647 2648 /* Adding or removing entries from the accept list must 2649 * happen before enabling scanning. The controller does 2650 * not allow accept list modification while scanning. 2651 */ 2652 filter_policy = hci_update_accept_list_sync(hdev); 2653 2654 /* When the controller is using random resolvable addresses and 2655 * with that having LE privacy enabled, then controllers with 2656 * Extended Scanner Filter Policies support can now enable support 2657 * for handling directed advertising. 2658 * 2659 * So instead of using filter polices 0x00 (no acceptlist) 2660 * and 0x01 (acceptlist enabled) use the new filter policies 2661 * 0x02 (no acceptlist) and 0x03 (acceptlist enabled). 2662 */ 2663 if (hci_dev_test_flag(hdev, HCI_PRIVACY) && 2664 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) 2665 filter_policy |= 0x02; 2666 2667 if (hdev->suspended) { 2668 window = hdev->le_scan_window_suspend; 2669 interval = hdev->le_scan_int_suspend; 2670 } else if (hci_is_le_conn_scanning(hdev)) { 2671 window = hdev->le_scan_window_connect; 2672 interval = hdev->le_scan_int_connect; 2673 } else if (hci_is_adv_monitoring(hdev)) { 2674 window = hdev->le_scan_window_adv_monitor; 2675 interval = hdev->le_scan_int_adv_monitor; 2676 } else { 2677 window = hdev->le_scan_window; 2678 interval = hdev->le_scan_interval; 2679 } 2680 2681 /* Disable all filtering for Mesh */ 2682 if (hci_dev_test_flag(hdev, HCI_MESH)) { 2683 filter_policy = 0; 2684 filter_dups = LE_SCAN_FILTER_DUP_DISABLE; 2685 } 2686 2687 bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy); 2688 2689 return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window, 2690 own_addr_type, filter_policy, filter_dups); 2691 } 2692 2693 /* This function controls the passive scanning based on hdev->pend_le_conns 2694 * list. 
If there are pending LE connection we start the background scanning, 2695 * otherwise we stop it in the following sequence: 2696 * 2697 * If there are devices to scan: 2698 * 2699 * Disable Scanning -> Update Accept List -> 2700 * use_ll_privacy((Disable Advertising) -> Disable Resolving List -> 2701 * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) -> 2702 * Enable Scanning 2703 * 2704 * Otherwise: 2705 * 2706 * Disable Scanning 2707 */ 2708 int hci_update_passive_scan_sync(struct hci_dev *hdev) 2709 { 2710 int err; 2711 2712 if (!test_bit(HCI_UP, &hdev->flags) || 2713 test_bit(HCI_INIT, &hdev->flags) || 2714 hci_dev_test_flag(hdev, HCI_SETUP) || 2715 hci_dev_test_flag(hdev, HCI_CONFIG) || 2716 hci_dev_test_flag(hdev, HCI_AUTO_OFF) || 2717 hci_dev_test_flag(hdev, HCI_UNREGISTER)) 2718 return 0; 2719 2720 /* No point in doing scanning if LE support hasn't been enabled */ 2721 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) 2722 return 0; 2723 2724 /* If discovery is active don't interfere with it */ 2725 if (hdev->discovery.state != DISCOVERY_STOPPED) 2726 return 0; 2727 2728 /* Reset RSSI and UUID filters when starting background scanning 2729 * since these filters are meant for service discovery only. 2730 * 2731 * The Start Discovery and Start Service Discovery operations 2732 * ensure to set proper values for RSSI threshold and UUID 2733 * filter list. So it is safe to just reset them here. 2734 */ 2735 hci_discovery_filter_clear(hdev); 2736 2737 bt_dev_dbg(hdev, "ADV monitoring is %s", 2738 hci_is_adv_monitoring(hdev) ? "on" : "off"); 2739 2740 if (!hci_dev_test_flag(hdev, HCI_MESH) && 2741 list_empty(&hdev->pend_le_conns) && 2742 list_empty(&hdev->pend_le_reports) && 2743 !hci_is_adv_monitoring(hdev) && 2744 !hci_dev_test_flag(hdev, HCI_PA_SYNC)) { 2745 /* If there is no pending LE connections or devices 2746 * to be scanned for or no ADV monitors, we should stop the 2747 * background scanning. 2748 */ 2749 2750 bt_dev_dbg(hdev, "stopping background scanning"); 2751 2752 err = hci_scan_disable_sync(hdev); 2753 if (err) 2754 bt_dev_err(hdev, "stop background scanning failed: %d", 2755 err); 2756 } else { 2757 /* If there is at least one pending LE connection, we should 2758 * keep the background scan running. 2759 */ 2760 2761 /* If controller is connecting, we should not start scanning 2762 * since some controllers are not able to scan and connect at 2763 * the same time. 
2764 */ 2765 if (hci_lookup_le_connect(hdev)) 2766 return 0; 2767 2768 bt_dev_dbg(hdev, "start background scanning"); 2769 2770 err = hci_passive_scan_sync(hdev); 2771 if (err) 2772 bt_dev_err(hdev, "start background scanning failed: %d", 2773 err); 2774 } 2775 2776 return err; 2777 } 2778 2779 static int update_scan_sync(struct hci_dev *hdev, void *data) 2780 { 2781 return hci_update_scan_sync(hdev); 2782 } 2783 2784 int hci_update_scan(struct hci_dev *hdev) 2785 { 2786 return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL); 2787 } 2788 2789 static int update_passive_scan_sync(struct hci_dev *hdev, void *data) 2790 { 2791 return hci_update_passive_scan_sync(hdev); 2792 } 2793 2794 int hci_update_passive_scan(struct hci_dev *hdev) 2795 { 2796 /* Only queue if it would have any effect */ 2797 if (!test_bit(HCI_UP, &hdev->flags) || 2798 test_bit(HCI_INIT, &hdev->flags) || 2799 hci_dev_test_flag(hdev, HCI_SETUP) || 2800 hci_dev_test_flag(hdev, HCI_CONFIG) || 2801 hci_dev_test_flag(hdev, HCI_AUTO_OFF) || 2802 hci_dev_test_flag(hdev, HCI_UNREGISTER)) 2803 return 0; 2804 2805 return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL); 2806 } 2807 2808 int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val) 2809 { 2810 int err; 2811 2812 if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev)) 2813 return 0; 2814 2815 err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT, 2816 sizeof(val), &val, HCI_CMD_TIMEOUT); 2817 2818 if (!err) { 2819 if (val) { 2820 hdev->features[1][0] |= LMP_HOST_SC; 2821 hci_dev_set_flag(hdev, HCI_SC_ENABLED); 2822 } else { 2823 hdev->features[1][0] &= ~LMP_HOST_SC; 2824 hci_dev_clear_flag(hdev, HCI_SC_ENABLED); 2825 } 2826 } 2827 2828 return err; 2829 } 2830 2831 int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode) 2832 { 2833 int err; 2834 2835 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) || 2836 lmp_host_ssp_capable(hdev)) 2837 return 0; 2838 2839 if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) { 2840 __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, 2841 sizeof(mode), &mode, HCI_CMD_TIMEOUT); 2842 } 2843 2844 err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE, 2845 sizeof(mode), &mode, HCI_CMD_TIMEOUT); 2846 if (err) 2847 return err; 2848 2849 return hci_write_sc_support_sync(hdev, 0x01); 2850 } 2851 2852 int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul) 2853 { 2854 struct hci_cp_write_le_host_supported cp; 2855 2856 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) || 2857 !lmp_bredr_capable(hdev)) 2858 return 0; 2859 2860 /* Check first if we already have the right host state 2861 * (host features set) 2862 */ 2863 if (le == lmp_host_le_capable(hdev) && 2864 simul == lmp_host_le_br_capable(hdev)) 2865 return 0; 2866 2867 memset(&cp, 0, sizeof(cp)); 2868 2869 cp.le = le; 2870 cp.simul = simul; 2871 2872 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, 2873 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 2874 } 2875 2876 static int hci_powered_update_adv_sync(struct hci_dev *hdev) 2877 { 2878 struct adv_info *adv, *tmp; 2879 int err; 2880 2881 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) 2882 return 0; 2883 2884 /* If RPA Resolution has not been enable yet it means the 2885 * resolving list is empty and we should attempt to program the 2886 * local IRK in order to support using own_addr_type 2887 * ADDR_LE_DEV_RANDOM_RESOLVED (0x03). 
2888 */ 2889 if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) { 2890 hci_le_add_resolve_list_sync(hdev, NULL); 2891 hci_le_set_addr_resolution_enable_sync(hdev, 0x01); 2892 } 2893 2894 /* Make sure the controller has a good default for 2895 * advertising data. This also applies to the case 2896 * where BR/EDR was toggled during the AUTO_OFF phase. 2897 */ 2898 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || 2899 list_empty(&hdev->adv_instances)) { 2900 if (ext_adv_capable(hdev)) { 2901 err = hci_setup_ext_adv_instance_sync(hdev, 0x00); 2902 if (!err) 2903 hci_update_scan_rsp_data_sync(hdev, 0x00); 2904 } else { 2905 err = hci_update_adv_data_sync(hdev, 0x00); 2906 if (!err) 2907 hci_update_scan_rsp_data_sync(hdev, 0x00); 2908 } 2909 2910 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) 2911 hci_enable_advertising_sync(hdev); 2912 } 2913 2914 /* Call for each tracked instance to be scheduled */ 2915 list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) 2916 hci_schedule_adv_instance_sync(hdev, adv->instance, true); 2917 2918 return 0; 2919 } 2920 2921 static int hci_write_auth_enable_sync(struct hci_dev *hdev) 2922 { 2923 u8 link_sec; 2924 2925 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); 2926 if (link_sec == test_bit(HCI_AUTH, &hdev->flags)) 2927 return 0; 2928 2929 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, 2930 sizeof(link_sec), &link_sec, 2931 HCI_CMD_TIMEOUT); 2932 } 2933 2934 int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable) 2935 { 2936 struct hci_cp_write_page_scan_activity cp; 2937 u8 type; 2938 int err = 0; 2939 2940 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) 2941 return 0; 2942 2943 if (hdev->hci_ver < BLUETOOTH_VER_1_2) 2944 return 0; 2945 2946 memset(&cp, 0, sizeof(cp)); 2947 2948 if (enable) { 2949 type = PAGE_SCAN_TYPE_INTERLACED; 2950 2951 /* 160 msec page scan interval */ 2952 cp.interval = cpu_to_le16(0x0100); 2953 } else { 2954 type = hdev->def_page_scan_type; 2955 cp.interval = cpu_to_le16(hdev->def_page_scan_int); 2956 } 2957 2958 cp.window = cpu_to_le16(hdev->def_page_scan_window); 2959 2960 if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval || 2961 __cpu_to_le16(hdev->page_scan_window) != cp.window) { 2962 err = __hci_cmd_sync_status(hdev, 2963 HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, 2964 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 2965 if (err) 2966 return err; 2967 } 2968 2969 if (hdev->page_scan_type != type) 2970 err = __hci_cmd_sync_status(hdev, 2971 HCI_OP_WRITE_PAGE_SCAN_TYPE, 2972 sizeof(type), &type, 2973 HCI_CMD_TIMEOUT); 2974 2975 return err; 2976 } 2977 2978 static bool disconnected_accept_list_entries(struct hci_dev *hdev) 2979 { 2980 struct bdaddr_list *b; 2981 2982 list_for_each_entry(b, &hdev->accept_list, list) { 2983 struct hci_conn *conn; 2984 2985 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); 2986 if (!conn) 2987 return true; 2988 2989 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) 2990 return true; 2991 } 2992 2993 return false; 2994 } 2995 2996 static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val) 2997 { 2998 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, 2999 sizeof(val), &val, 3000 HCI_CMD_TIMEOUT); 3001 } 3002 3003 int hci_update_scan_sync(struct hci_dev *hdev) 3004 { 3005 u8 scan; 3006 3007 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) 3008 return 0; 3009 3010 if (!hdev_is_powered(hdev)) 3011 return 0; 3012 3013 if (mgmt_powering_down(hdev)) 3014 return 0; 3015 3016 if (hdev->scanning_paused) 3017 return 0; 3018 3019 if 
(hci_dev_test_flag(hdev, HCI_CONNECTABLE) || 3020 disconnected_accept_list_entries(hdev)) 3021 scan = SCAN_PAGE; 3022 else 3023 scan = SCAN_DISABLED; 3024 3025 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) 3026 scan |= SCAN_INQUIRY; 3027 3028 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) && 3029 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY)) 3030 return 0; 3031 3032 return hci_write_scan_enable_sync(hdev, scan); 3033 } 3034 3035 int hci_update_name_sync(struct hci_dev *hdev) 3036 { 3037 struct hci_cp_write_local_name cp; 3038 3039 memset(&cp, 0, sizeof(cp)); 3040 3041 memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); 3042 3043 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME, 3044 sizeof(cp), &cp, 3045 HCI_CMD_TIMEOUT); 3046 } 3047 3048 /* This function perform powered update HCI command sequence after the HCI init 3049 * sequence which end up resetting all states, the sequence is as follows: 3050 * 3051 * HCI_SSP_ENABLED(Enable SSP) 3052 * HCI_LE_ENABLED(Enable LE) 3053 * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) -> 3054 * Update adv data) 3055 * Enable Authentication 3056 * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class -> 3057 * Set Name -> Set EIR) 3058 */ 3059 int hci_powered_update_sync(struct hci_dev *hdev) 3060 { 3061 int err; 3062 3063 /* Register the available SMP channels (BR/EDR and LE) only when 3064 * successfully powering on the controller. This late 3065 * registration is required so that LE SMP can clearly decide if 3066 * the public address or static address is used. 3067 */ 3068 smp_register(hdev); 3069 3070 err = hci_write_ssp_mode_sync(hdev, 0x01); 3071 if (err) 3072 return err; 3073 3074 err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00); 3075 if (err) 3076 return err; 3077 3078 err = hci_powered_update_adv_sync(hdev); 3079 if (err) 3080 return err; 3081 3082 err = hci_write_auth_enable_sync(hdev); 3083 if (err) 3084 return err; 3085 3086 if (lmp_bredr_capable(hdev)) { 3087 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) 3088 hci_write_fast_connectable_sync(hdev, true); 3089 else 3090 hci_write_fast_connectable_sync(hdev, false); 3091 hci_update_scan_sync(hdev); 3092 hci_update_class_sync(hdev); 3093 hci_update_name_sync(hdev); 3094 hci_update_eir_sync(hdev); 3095 } 3096 3097 return 0; 3098 } 3099 3100 /** 3101 * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address 3102 * (BD_ADDR) for a HCI device from 3103 * a firmware node property. 3104 * @hdev: The HCI device 3105 * 3106 * Search the firmware node for 'local-bd-address'. 3107 * 3108 * All-zero BD addresses are rejected, because those could be properties 3109 * that exist in the firmware tables, but were not updated by the firmware. For 3110 * example, the DTS could define 'local-bd-address', with zero BD addresses. 
3111 */ 3112 static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev) 3113 { 3114 struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent); 3115 bdaddr_t ba; 3116 int ret; 3117 3118 ret = fwnode_property_read_u8_array(fwnode, "local-bd-address", 3119 (u8 *)&ba, sizeof(ba)); 3120 if (ret < 0 || !bacmp(&ba, BDADDR_ANY)) 3121 return; 3122 3123 bacpy(&hdev->public_addr, &ba); 3124 } 3125 3126 struct hci_init_stage { 3127 int (*func)(struct hci_dev *hdev); 3128 }; 3129 3130 /* Run init stage NULL terminated function table */ 3131 static int hci_init_stage_sync(struct hci_dev *hdev, 3132 const struct hci_init_stage *stage) 3133 { 3134 size_t i; 3135 3136 for (i = 0; stage[i].func; i++) { 3137 int err; 3138 3139 err = stage[i].func(hdev); 3140 if (err) 3141 return err; 3142 } 3143 3144 return 0; 3145 } 3146 3147 /* Read Local Version */ 3148 static int hci_read_local_version_sync(struct hci_dev *hdev) 3149 { 3150 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION, 3151 0, NULL, HCI_CMD_TIMEOUT); 3152 } 3153 3154 /* Read BD Address */ 3155 static int hci_read_bd_addr_sync(struct hci_dev *hdev) 3156 { 3157 return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR, 3158 0, NULL, HCI_CMD_TIMEOUT); 3159 } 3160 3161 #define HCI_INIT(_func) \ 3162 { \ 3163 .func = _func, \ 3164 } 3165 3166 static const struct hci_init_stage hci_init0[] = { 3167 /* HCI_OP_READ_LOCAL_VERSION */ 3168 HCI_INIT(hci_read_local_version_sync), 3169 /* HCI_OP_READ_BD_ADDR */ 3170 HCI_INIT(hci_read_bd_addr_sync), 3171 {} 3172 }; 3173 3174 int hci_reset_sync(struct hci_dev *hdev) 3175 { 3176 int err; 3177 3178 set_bit(HCI_RESET, &hdev->flags); 3179 3180 err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL, 3181 HCI_CMD_TIMEOUT); 3182 if (err) 3183 return err; 3184 3185 return 0; 3186 } 3187 3188 static int hci_init0_sync(struct hci_dev *hdev) 3189 { 3190 int err; 3191 3192 bt_dev_dbg(hdev, ""); 3193 3194 /* Reset */ 3195 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { 3196 err = hci_reset_sync(hdev); 3197 if (err) 3198 return err; 3199 } 3200 3201 return hci_init_stage_sync(hdev, hci_init0); 3202 } 3203 3204 static int hci_unconf_init_sync(struct hci_dev *hdev) 3205 { 3206 int err; 3207 3208 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) 3209 return 0; 3210 3211 err = hci_init0_sync(hdev); 3212 if (err < 0) 3213 return err; 3214 3215 if (hci_dev_test_flag(hdev, HCI_SETUP)) 3216 hci_debugfs_create_basic(hdev); 3217 3218 return 0; 3219 } 3220 3221 /* Read Local Supported Features. */ 3222 static int hci_read_local_features_sync(struct hci_dev *hdev) 3223 { 3224 /* Not all AMP controllers support this command */ 3225 if (hdev->dev_type == HCI_AMP && !(hdev->commands[14] & 0x20)) 3226 return 0; 3227 3228 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES, 3229 0, NULL, HCI_CMD_TIMEOUT); 3230 } 3231 3232 /* BR Controller init stage 1 command sequence */ 3233 static const struct hci_init_stage br_init1[] = { 3234 /* HCI_OP_READ_LOCAL_FEATURES */ 3235 HCI_INIT(hci_read_local_features_sync), 3236 /* HCI_OP_READ_LOCAL_VERSION */ 3237 HCI_INIT(hci_read_local_version_sync), 3238 /* HCI_OP_READ_BD_ADDR */ 3239 HCI_INIT(hci_read_bd_addr_sync), 3240 {} 3241 }; 3242 3243 /* Read Local Commands */ 3244 static int hci_read_local_cmds_sync(struct hci_dev *hdev) 3245 { 3246 /* All Bluetooth 1.2 and later controllers should support the 3247 * HCI command for reading the local supported commands. 
3248 * 3249 * Unfortunately some controllers indicate Bluetooth 1.2 support, 3250 * but do not have support for this command. If that is the case, 3251 * the driver can quirk the behavior and skip reading the local 3252 * supported commands. 3253 */ 3254 if (hdev->hci_ver > BLUETOOTH_VER_1_1 && 3255 !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks)) 3256 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS, 3257 0, NULL, HCI_CMD_TIMEOUT); 3258 3259 return 0; 3260 } 3261 3262 /* Read Local AMP Info */ 3263 static int hci_read_local_amp_info_sync(struct hci_dev *hdev) 3264 { 3265 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 3266 0, NULL, HCI_CMD_TIMEOUT); 3267 } 3268 3269 /* Read Data Blk size */ 3270 static int hci_read_data_block_size_sync(struct hci_dev *hdev) 3271 { 3272 return __hci_cmd_sync_status(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 3273 0, NULL, HCI_CMD_TIMEOUT); 3274 } 3275 3276 /* Read Flow Control Mode */ 3277 static int hci_read_flow_control_mode_sync(struct hci_dev *hdev) 3278 { 3279 return __hci_cmd_sync_status(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, 3280 0, NULL, HCI_CMD_TIMEOUT); 3281 } 3282 3283 /* Read Location Data */ 3284 static int hci_read_location_data_sync(struct hci_dev *hdev) 3285 { 3286 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCATION_DATA, 3287 0, NULL, HCI_CMD_TIMEOUT); 3288 } 3289 3290 /* AMP Controller init stage 1 command sequence */ 3291 static const struct hci_init_stage amp_init1[] = { 3292 /* HCI_OP_READ_LOCAL_VERSION */ 3293 HCI_INIT(hci_read_local_version_sync), 3294 /* HCI_OP_READ_LOCAL_COMMANDS */ 3295 HCI_INIT(hci_read_local_cmds_sync), 3296 /* HCI_OP_READ_LOCAL_AMP_INFO */ 3297 HCI_INIT(hci_read_local_amp_info_sync), 3298 /* HCI_OP_READ_DATA_BLOCK_SIZE */ 3299 HCI_INIT(hci_read_data_block_size_sync), 3300 /* HCI_OP_READ_FLOW_CONTROL_MODE */ 3301 HCI_INIT(hci_read_flow_control_mode_sync), 3302 /* HCI_OP_READ_LOCATION_DATA */ 3303 HCI_INIT(hci_read_location_data_sync), 3304 }; 3305 3306 static int hci_init1_sync(struct hci_dev *hdev) 3307 { 3308 int err; 3309 3310 bt_dev_dbg(hdev, ""); 3311 3312 /* Reset */ 3313 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { 3314 err = hci_reset_sync(hdev); 3315 if (err) 3316 return err; 3317 } 3318 3319 switch (hdev->dev_type) { 3320 case HCI_PRIMARY: 3321 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; 3322 return hci_init_stage_sync(hdev, br_init1); 3323 case HCI_AMP: 3324 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; 3325 return hci_init_stage_sync(hdev, amp_init1); 3326 default: 3327 bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type); 3328 break; 3329 } 3330 3331 return 0; 3332 } 3333 3334 /* AMP Controller init stage 2 command sequence */ 3335 static const struct hci_init_stage amp_init2[] = { 3336 /* HCI_OP_READ_LOCAL_FEATURES */ 3337 HCI_INIT(hci_read_local_features_sync), 3338 }; 3339 3340 /* Read Buffer Size (ACL mtu, max pkt, etc.) 
*/ 3341 static int hci_read_buffer_size_sync(struct hci_dev *hdev) 3342 { 3343 return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE, 3344 0, NULL, HCI_CMD_TIMEOUT); 3345 } 3346 3347 /* Read Class of Device */ 3348 static int hci_read_dev_class_sync(struct hci_dev *hdev) 3349 { 3350 return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV, 3351 0, NULL, HCI_CMD_TIMEOUT); 3352 } 3353 3354 /* Read Local Name */ 3355 static int hci_read_local_name_sync(struct hci_dev *hdev) 3356 { 3357 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME, 3358 0, NULL, HCI_CMD_TIMEOUT); 3359 } 3360 3361 /* Read Voice Setting */ 3362 static int hci_read_voice_setting_sync(struct hci_dev *hdev) 3363 { 3364 return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING, 3365 0, NULL, HCI_CMD_TIMEOUT); 3366 } 3367 3368 /* Read Number of Supported IAC */ 3369 static int hci_read_num_supported_iac_sync(struct hci_dev *hdev) 3370 { 3371 return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC, 3372 0, NULL, HCI_CMD_TIMEOUT); 3373 } 3374 3375 /* Read Current IAC LAP */ 3376 static int hci_read_current_iac_lap_sync(struct hci_dev *hdev) 3377 { 3378 return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP, 3379 0, NULL, HCI_CMD_TIMEOUT); 3380 } 3381 3382 static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type, 3383 u8 cond_type, bdaddr_t *bdaddr, 3384 u8 auto_accept) 3385 { 3386 struct hci_cp_set_event_filter cp; 3387 3388 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) 3389 return 0; 3390 3391 if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) 3392 return 0; 3393 3394 memset(&cp, 0, sizeof(cp)); 3395 cp.flt_type = flt_type; 3396 3397 if (flt_type != HCI_FLT_CLEAR_ALL) { 3398 cp.cond_type = cond_type; 3399 bacpy(&cp.addr_conn_flt.bdaddr, bdaddr); 3400 cp.addr_conn_flt.auto_accept = auto_accept; 3401 } 3402 3403 return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT, 3404 flt_type == HCI_FLT_CLEAR_ALL ? 3405 sizeof(cp.flt_type) : sizeof(cp), &cp, 3406 HCI_CMD_TIMEOUT); 3407 } 3408 3409 static int hci_clear_event_filter_sync(struct hci_dev *hdev) 3410 { 3411 if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED)) 3412 return 0; 3413 3414 /* In theory the state machine should not reach here unless 3415 * a hci_set_event_filter_sync() call succeeds, but we do 3416 * the check both for parity and as a future reminder. 
3417 */
3418 if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
3419 return 0;
3420
3421 return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00,
3422 BDADDR_ANY, 0x00);
3423 }
3424
3425 /* Connection accept timeout ~20 secs */
3426 static int hci_write_ca_timeout_sync(struct hci_dev *hdev)
3427 {
3428 __le16 param = cpu_to_le16(0x7d00);
3429
3430 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT,
3431 sizeof(param), &param, HCI_CMD_TIMEOUT);
3432 }
3433
3434 /* BR Controller init stage 2 command sequence */
3435 static const struct hci_init_stage br_init2[] = {
3436 /* HCI_OP_READ_BUFFER_SIZE */
3437 HCI_INIT(hci_read_buffer_size_sync),
3438 /* HCI_OP_READ_CLASS_OF_DEV */
3439 HCI_INIT(hci_read_dev_class_sync),
3440 /* HCI_OP_READ_LOCAL_NAME */
3441 HCI_INIT(hci_read_local_name_sync),
3442 /* HCI_OP_READ_VOICE_SETTING */
3443 HCI_INIT(hci_read_voice_setting_sync),
3444 /* HCI_OP_READ_NUM_SUPPORTED_IAC */
3445 HCI_INIT(hci_read_num_supported_iac_sync),
3446 /* HCI_OP_READ_CURRENT_IAC_LAP */
3447 HCI_INIT(hci_read_current_iac_lap_sync),
3448 /* HCI_OP_SET_EVENT_FLT */
3449 HCI_INIT(hci_clear_event_filter_sync),
3450 /* HCI_OP_WRITE_CA_TIMEOUT */
3451 HCI_INIT(hci_write_ca_timeout_sync),
3452 {}
3453 };
3454
3455 static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev)
3456 {
3457 u8 mode = 0x01;
3458
3459 if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
3460 return 0;
3461
3462 /* When SSP is available, then the host features page
3463 * should also be available as well. However some
3464 * controllers list the max_page as 0 as long as SSP
3465 * has not been enabled. To achieve proper debugging
3466 * output, force the minimum max_page to 1 at least.
3467 */
3468 hdev->max_page = 0x01;
3469
3470 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE,
3471 sizeof(mode), &mode, HCI_CMD_TIMEOUT);
3472 }
3473
3474 static int hci_write_eir_sync(struct hci_dev *hdev)
3475 {
3476 struct hci_cp_write_eir cp;
3477
3478 if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
3479 return 0;
3480
3481 memset(hdev->eir, 0, sizeof(hdev->eir));
3482 memset(&cp, 0, sizeof(cp));
3483
3484 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp,
3485 HCI_CMD_TIMEOUT);
3486 }
3487
3488 static int hci_write_inquiry_mode_sync(struct hci_dev *hdev)
3489 {
3490 u8 mode;
3491
3492 if (!lmp_inq_rssi_capable(hdev) &&
3493 !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks))
3494 return 0;
3495
3496 /* If Extended Inquiry Result events are supported, then
3497 * they are clearly preferred over Inquiry Result with RSSI
3498 * events.
3499 */
3500 mode = lmp_ext_inq_capable(hdev) ?
0x02 : 0x01; 3501 3502 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE, 3503 sizeof(mode), &mode, HCI_CMD_TIMEOUT); 3504 } 3505 3506 static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev) 3507 { 3508 if (!lmp_inq_tx_pwr_capable(hdev)) 3509 return 0; 3510 3511 return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 3512 0, NULL, HCI_CMD_TIMEOUT); 3513 } 3514 3515 static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page) 3516 { 3517 struct hci_cp_read_local_ext_features cp; 3518 3519 if (!lmp_ext_feat_capable(hdev)) 3520 return 0; 3521 3522 memset(&cp, 0, sizeof(cp)); 3523 cp.page = page; 3524 3525 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, 3526 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 3527 } 3528 3529 static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev) 3530 { 3531 return hci_read_local_ext_features_sync(hdev, 0x01); 3532 } 3533 3534 /* HCI Controller init stage 2 command sequence */ 3535 static const struct hci_init_stage hci_init2[] = { 3536 /* HCI_OP_READ_LOCAL_COMMANDS */ 3537 HCI_INIT(hci_read_local_cmds_sync), 3538 /* HCI_OP_WRITE_SSP_MODE */ 3539 HCI_INIT(hci_write_ssp_mode_1_sync), 3540 /* HCI_OP_WRITE_EIR */ 3541 HCI_INIT(hci_write_eir_sync), 3542 /* HCI_OP_WRITE_INQUIRY_MODE */ 3543 HCI_INIT(hci_write_inquiry_mode_sync), 3544 /* HCI_OP_READ_INQ_RSP_TX_POWER */ 3545 HCI_INIT(hci_read_inq_rsp_tx_power_sync), 3546 /* HCI_OP_READ_LOCAL_EXT_FEATURES */ 3547 HCI_INIT(hci_read_local_ext_features_1_sync), 3548 /* HCI_OP_WRITE_AUTH_ENABLE */ 3549 HCI_INIT(hci_write_auth_enable_sync), 3550 {} 3551 }; 3552 3553 /* Read LE Buffer Size */ 3554 static int hci_le_read_buffer_size_sync(struct hci_dev *hdev) 3555 { 3556 /* Use Read LE Buffer Size V2 if supported */ 3557 if (hdev->commands[41] & 0x20) 3558 return __hci_cmd_sync_status(hdev, 3559 HCI_OP_LE_READ_BUFFER_SIZE_V2, 3560 0, NULL, HCI_CMD_TIMEOUT); 3561 3562 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 3563 0, NULL, HCI_CMD_TIMEOUT); 3564 } 3565 3566 /* Read LE Local Supported Features */ 3567 static int hci_le_read_local_features_sync(struct hci_dev *hdev) 3568 { 3569 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, 3570 0, NULL, HCI_CMD_TIMEOUT); 3571 } 3572 3573 /* Read LE Supported States */ 3574 static int hci_le_read_supported_states_sync(struct hci_dev *hdev) 3575 { 3576 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, 3577 0, NULL, HCI_CMD_TIMEOUT); 3578 } 3579 3580 /* LE Controller init stage 2 command sequence */ 3581 static const struct hci_init_stage le_init2[] = { 3582 /* HCI_OP_LE_READ_BUFFER_SIZE */ 3583 HCI_INIT(hci_le_read_buffer_size_sync), 3584 /* HCI_OP_LE_READ_LOCAL_FEATURES */ 3585 HCI_INIT(hci_le_read_local_features_sync), 3586 /* HCI_OP_LE_READ_SUPPORTED_STATES */ 3587 HCI_INIT(hci_le_read_supported_states_sync), 3588 {} 3589 }; 3590 3591 static int hci_init2_sync(struct hci_dev *hdev) 3592 { 3593 int err; 3594 3595 bt_dev_dbg(hdev, ""); 3596 3597 if (hdev->dev_type == HCI_AMP) 3598 return hci_init_stage_sync(hdev, amp_init2); 3599 3600 err = hci_init_stage_sync(hdev, hci_init2); 3601 if (err) 3602 return err; 3603 3604 if (lmp_bredr_capable(hdev)) { 3605 err = hci_init_stage_sync(hdev, br_init2); 3606 if (err) 3607 return err; 3608 } else { 3609 hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); 3610 } 3611 3612 if (lmp_le_capable(hdev)) { 3613 err = hci_init_stage_sync(hdev, le_init2); 3614 if (err) 3615 return err; 3616 /* LE-only controllers have LE implicitly enabled */ 3617 if 
(!lmp_bredr_capable(hdev)) 3618 hci_dev_set_flag(hdev, HCI_LE_ENABLED); 3619 } 3620 3621 return 0; 3622 } 3623 3624 static int hci_set_event_mask_sync(struct hci_dev *hdev) 3625 { 3626 /* The second byte is 0xff instead of 0x9f (two reserved bits 3627 * disabled) since a Broadcom 1.2 dongle doesn't respond to the 3628 * command otherwise. 3629 */ 3630 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; 3631 3632 /* CSR 1.1 dongles does not accept any bitfield so don't try to set 3633 * any event mask for pre 1.2 devices. 3634 */ 3635 if (hdev->hci_ver < BLUETOOTH_VER_1_2) 3636 return 0; 3637 3638 if (lmp_bredr_capable(hdev)) { 3639 events[4] |= 0x01; /* Flow Specification Complete */ 3640 3641 /* Don't set Disconnect Complete when suspended as that 3642 * would wakeup the host when disconnecting due to 3643 * suspend. 3644 */ 3645 if (hdev->suspended) 3646 events[0] &= 0xef; 3647 } else { 3648 /* Use a different default for LE-only devices */ 3649 memset(events, 0, sizeof(events)); 3650 events[1] |= 0x20; /* Command Complete */ 3651 events[1] |= 0x40; /* Command Status */ 3652 events[1] |= 0x80; /* Hardware Error */ 3653 3654 /* If the controller supports the Disconnect command, enable 3655 * the corresponding event. In addition enable packet flow 3656 * control related events. 3657 */ 3658 if (hdev->commands[0] & 0x20) { 3659 /* Don't set Disconnect Complete when suspended as that 3660 * would wakeup the host when disconnecting due to 3661 * suspend. 3662 */ 3663 if (!hdev->suspended) 3664 events[0] |= 0x10; /* Disconnection Complete */ 3665 events[2] |= 0x04; /* Number of Completed Packets */ 3666 events[3] |= 0x02; /* Data Buffer Overflow */ 3667 } 3668 3669 /* If the controller supports the Read Remote Version 3670 * Information command, enable the corresponding event. 
3671 */ 3672 if (hdev->commands[2] & 0x80) 3673 events[1] |= 0x08; /* Read Remote Version Information 3674 * Complete 3675 */ 3676 3677 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) { 3678 events[0] |= 0x80; /* Encryption Change */ 3679 events[5] |= 0x80; /* Encryption Key Refresh Complete */ 3680 } 3681 } 3682 3683 if (lmp_inq_rssi_capable(hdev) || 3684 test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) 3685 events[4] |= 0x02; /* Inquiry Result with RSSI */ 3686 3687 if (lmp_ext_feat_capable(hdev)) 3688 events[4] |= 0x04; /* Read Remote Extended Features Complete */ 3689 3690 if (lmp_esco_capable(hdev)) { 3691 events[5] |= 0x08; /* Synchronous Connection Complete */ 3692 events[5] |= 0x10; /* Synchronous Connection Changed */ 3693 } 3694 3695 if (lmp_sniffsubr_capable(hdev)) 3696 events[5] |= 0x20; /* Sniff Subrating */ 3697 3698 if (lmp_pause_enc_capable(hdev)) 3699 events[5] |= 0x80; /* Encryption Key Refresh Complete */ 3700 3701 if (lmp_ext_inq_capable(hdev)) 3702 events[5] |= 0x40; /* Extended Inquiry Result */ 3703 3704 if (lmp_no_flush_capable(hdev)) 3705 events[7] |= 0x01; /* Enhanced Flush Complete */ 3706 3707 if (lmp_lsto_capable(hdev)) 3708 events[6] |= 0x80; /* Link Supervision Timeout Changed */ 3709 3710 if (lmp_ssp_capable(hdev)) { 3711 events[6] |= 0x01; /* IO Capability Request */ 3712 events[6] |= 0x02; /* IO Capability Response */ 3713 events[6] |= 0x04; /* User Confirmation Request */ 3714 events[6] |= 0x08; /* User Passkey Request */ 3715 events[6] |= 0x10; /* Remote OOB Data Request */ 3716 events[6] |= 0x20; /* Simple Pairing Complete */ 3717 events[7] |= 0x04; /* User Passkey Notification */ 3718 events[7] |= 0x08; /* Keypress Notification */ 3719 events[7] |= 0x10; /* Remote Host Supported 3720 * Features Notification 3721 */ 3722 } 3723 3724 if (lmp_le_capable(hdev)) 3725 events[7] |= 0x20; /* LE Meta-Event */ 3726 3727 return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK, 3728 sizeof(events), events, HCI_CMD_TIMEOUT); 3729 } 3730 3731 static int hci_read_stored_link_key_sync(struct hci_dev *hdev) 3732 { 3733 struct hci_cp_read_stored_link_key cp; 3734 3735 if (!(hdev->commands[6] & 0x20) || 3736 test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) 3737 return 0; 3738 3739 memset(&cp, 0, sizeof(cp)); 3740 bacpy(&cp.bdaddr, BDADDR_ANY); 3741 cp.read_all = 0x01; 3742 3743 return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY, 3744 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 3745 } 3746 3747 static int hci_setup_link_policy_sync(struct hci_dev *hdev) 3748 { 3749 struct hci_cp_write_def_link_policy cp; 3750 u16 link_policy = 0; 3751 3752 if (!(hdev->commands[5] & 0x10)) 3753 return 0; 3754 3755 memset(&cp, 0, sizeof(cp)); 3756 3757 if (lmp_rswitch_capable(hdev)) 3758 link_policy |= HCI_LP_RSWITCH; 3759 if (lmp_hold_capable(hdev)) 3760 link_policy |= HCI_LP_HOLD; 3761 if (lmp_sniff_capable(hdev)) 3762 link_policy |= HCI_LP_SNIFF; 3763 if (lmp_park_capable(hdev)) 3764 link_policy |= HCI_LP_PARK; 3765 3766 cp.policy = cpu_to_le16(link_policy); 3767 3768 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 3769 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 3770 } 3771 3772 static int hci_read_page_scan_activity_sync(struct hci_dev *hdev) 3773 { 3774 if (!(hdev->commands[8] & 0x01)) 3775 return 0; 3776 3777 return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 3778 0, NULL, HCI_CMD_TIMEOUT); 3779 } 3780 3781 static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev) 3782 { 3783 if (!(hdev->commands[18] & 0x04) || 3784 
!(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) || 3785 test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) 3786 return 0; 3787 3788 return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 3789 0, NULL, HCI_CMD_TIMEOUT); 3790 } 3791 3792 static int hci_read_page_scan_type_sync(struct hci_dev *hdev) 3793 { 3794 /* Some older Broadcom based Bluetooth 1.2 controllers do not 3795 * support the Read Page Scan Type command. Check support for 3796 * this command in the bit mask of supported commands. 3797 */ 3798 if (!(hdev->commands[13] & 0x01)) 3799 return 0; 3800 3801 return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE, 3802 0, NULL, HCI_CMD_TIMEOUT); 3803 } 3804 3805 /* Read features beyond page 1 if available */ 3806 static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev) 3807 { 3808 u8 page; 3809 int err; 3810 3811 if (!lmp_ext_feat_capable(hdev)) 3812 return 0; 3813 3814 for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page; 3815 page++) { 3816 err = hci_read_local_ext_features_sync(hdev, page); 3817 if (err) 3818 return err; 3819 } 3820 3821 return 0; 3822 } 3823 3824 /* HCI Controller init stage 3 command sequence */ 3825 static const struct hci_init_stage hci_init3[] = { 3826 /* HCI_OP_SET_EVENT_MASK */ 3827 HCI_INIT(hci_set_event_mask_sync), 3828 /* HCI_OP_READ_STORED_LINK_KEY */ 3829 HCI_INIT(hci_read_stored_link_key_sync), 3830 /* HCI_OP_WRITE_DEF_LINK_POLICY */ 3831 HCI_INIT(hci_setup_link_policy_sync), 3832 /* HCI_OP_READ_PAGE_SCAN_ACTIVITY */ 3833 HCI_INIT(hci_read_page_scan_activity_sync), 3834 /* HCI_OP_READ_DEF_ERR_DATA_REPORTING */ 3835 HCI_INIT(hci_read_def_err_data_reporting_sync), 3836 /* HCI_OP_READ_PAGE_SCAN_TYPE */ 3837 HCI_INIT(hci_read_page_scan_type_sync), 3838 /* HCI_OP_READ_LOCAL_EXT_FEATURES */ 3839 HCI_INIT(hci_read_local_ext_features_all_sync), 3840 {} 3841 }; 3842 3843 static int hci_le_set_event_mask_sync(struct hci_dev *hdev) 3844 { 3845 u8 events[8]; 3846 3847 if (!lmp_le_capable(hdev)) 3848 return 0; 3849 3850 memset(events, 0, sizeof(events)); 3851 3852 if (hdev->le_features[0] & HCI_LE_ENCRYPTION) 3853 events[0] |= 0x10; /* LE Long Term Key Request */ 3854 3855 /* If controller supports the Connection Parameters Request 3856 * Link Layer Procedure, enable the corresponding event. 3857 */ 3858 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC) 3859 /* LE Remote Connection Parameter Request */ 3860 events[0] |= 0x20; 3861 3862 /* If the controller supports the Data Length Extension 3863 * feature, enable the corresponding event. 3864 */ 3865 if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) 3866 events[0] |= 0x40; /* LE Data Length Change */ 3867 3868 /* If the controller supports LL Privacy feature or LE Extended Adv, 3869 * enable the corresponding event. 3870 */ 3871 if (use_enhanced_conn_complete(hdev)) 3872 events[1] |= 0x02; /* LE Enhanced Connection Complete */ 3873 3874 /* If the controller supports Extended Scanner Filter 3875 * Policies, enable the corresponding event. 3876 */ 3877 if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY) 3878 events[1] |= 0x04; /* LE Direct Advertising Report */ 3879 3880 /* If the controller supports Channel Selection Algorithm #2 3881 * feature, enable the corresponding event. 3882 */ 3883 if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2) 3884 events[2] |= 0x08; /* LE Channel Selection Algorithm */ 3885 3886 /* If the controller supports the LE Set Scan Enable command, 3887 * enable the corresponding advertising report event. 
3888 */ 3889 if (hdev->commands[26] & 0x08) 3890 events[0] |= 0x02; /* LE Advertising Report */ 3891 3892 /* If the controller supports the LE Create Connection 3893 * command, enable the corresponding event. 3894 */ 3895 if (hdev->commands[26] & 0x10) 3896 events[0] |= 0x01; /* LE Connection Complete */ 3897 3898 /* If the controller supports the LE Connection Update 3899 * command, enable the corresponding event. 3900 */ 3901 if (hdev->commands[27] & 0x04) 3902 events[0] |= 0x04; /* LE Connection Update Complete */ 3903 3904 /* If the controller supports the LE Read Remote Used Features 3905 * command, enable the corresponding event. 3906 */ 3907 if (hdev->commands[27] & 0x20) 3908 /* LE Read Remote Used Features Complete */ 3909 events[0] |= 0x08; 3910 3911 /* If the controller supports the LE Read Local P-256 3912 * Public Key command, enable the corresponding event. 3913 */ 3914 if (hdev->commands[34] & 0x02) 3915 /* LE Read Local P-256 Public Key Complete */ 3916 events[0] |= 0x80; 3917 3918 /* If the controller supports the LE Generate DHKey 3919 * command, enable the corresponding event. 3920 */ 3921 if (hdev->commands[34] & 0x04) 3922 events[1] |= 0x01; /* LE Generate DHKey Complete */ 3923 3924 /* If the controller supports the LE Set Default PHY or 3925 * LE Set PHY commands, enable the corresponding event. 3926 */ 3927 if (hdev->commands[35] & (0x20 | 0x40)) 3928 events[1] |= 0x08; /* LE PHY Update Complete */ 3929 3930 /* If the controller supports LE Set Extended Scan Parameters 3931 * and LE Set Extended Scan Enable commands, enable the 3932 * corresponding event. 3933 */ 3934 if (use_ext_scan(hdev)) 3935 events[1] |= 0x10; /* LE Extended Advertising Report */ 3936 3937 /* If the controller supports the LE Extended Advertising 3938 * command, enable the corresponding event. 3939 */ 3940 if (ext_adv_capable(hdev)) 3941 events[2] |= 0x02; /* LE Advertising Set Terminated */ 3942 3943 if (cis_capable(hdev)) { 3944 events[3] |= 0x01; /* LE CIS Established */ 3945 if (cis_peripheral_capable(hdev)) 3946 events[3] |= 0x02; /* LE CIS Request */ 3947 } 3948 3949 if (bis_capable(hdev)) { 3950 events[3] |= 0x04; /* LE Create BIG Complete */ 3951 events[3] |= 0x08; /* LE Terminate BIG Complete */ 3952 events[3] |= 0x10; /* LE BIG Sync Established */ 3953 events[3] |= 0x20; /* LE BIG Sync Loss */ 3954 } 3955 3956 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK, 3957 sizeof(events), events, HCI_CMD_TIMEOUT); 3958 } 3959 3960 /* Read LE Advertising Channel TX Power */ 3961 static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev) 3962 { 3963 if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) { 3964 /* HCI TS spec forbids mixing of legacy and extended 3965 * advertising commands wherein READ_ADV_TX_POWER is 3966 * also included. So do not call it if extended adv 3967 * is supported otherwise controller will return 3968 * COMMAND_DISALLOWED for extended commands. 
3969 */ 3970 return __hci_cmd_sync_status(hdev, 3971 HCI_OP_LE_READ_ADV_TX_POWER, 3972 0, NULL, HCI_CMD_TIMEOUT); 3973 } 3974 3975 return 0; 3976 } 3977 3978 /* Read LE Min/Max Tx Power*/ 3979 static int hci_le_read_tx_power_sync(struct hci_dev *hdev) 3980 { 3981 if (!(hdev->commands[38] & 0x80) || 3982 test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks)) 3983 return 0; 3984 3985 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER, 3986 0, NULL, HCI_CMD_TIMEOUT); 3987 } 3988 3989 /* Read LE Accept List Size */ 3990 static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev) 3991 { 3992 if (!(hdev->commands[26] & 0x40)) 3993 return 0; 3994 3995 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE, 3996 0, NULL, HCI_CMD_TIMEOUT); 3997 } 3998 3999 /* Clear LE Accept List */ 4000 static int hci_le_clear_accept_list_sync(struct hci_dev *hdev) 4001 { 4002 if (!(hdev->commands[26] & 0x80)) 4003 return 0; 4004 4005 return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL, 4006 HCI_CMD_TIMEOUT); 4007 } 4008 4009 /* Read LE Resolving List Size */ 4010 static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev) 4011 { 4012 if (!(hdev->commands[34] & 0x40)) 4013 return 0; 4014 4015 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE, 4016 0, NULL, HCI_CMD_TIMEOUT); 4017 } 4018 4019 /* Clear LE Resolving List */ 4020 static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev) 4021 { 4022 if (!(hdev->commands[34] & 0x20)) 4023 return 0; 4024 4025 return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL, 4026 HCI_CMD_TIMEOUT); 4027 } 4028 4029 /* Set RPA timeout */ 4030 static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev) 4031 { 4032 __le16 timeout = cpu_to_le16(hdev->rpa_timeout); 4033 4034 if (!(hdev->commands[35] & 0x04)) 4035 return 0; 4036 4037 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT, 4038 sizeof(timeout), &timeout, 4039 HCI_CMD_TIMEOUT); 4040 } 4041 4042 /* Read LE Maximum Data Length */ 4043 static int hci_le_read_max_data_len_sync(struct hci_dev *hdev) 4044 { 4045 if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) 4046 return 0; 4047 4048 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL, 4049 HCI_CMD_TIMEOUT); 4050 } 4051 4052 /* Read LE Suggested Default Data Length */ 4053 static int hci_le_read_def_data_len_sync(struct hci_dev *hdev) 4054 { 4055 if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) 4056 return 0; 4057 4058 return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL, 4059 HCI_CMD_TIMEOUT); 4060 } 4061 4062 /* Read LE Number of Supported Advertising Sets */ 4063 static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev) 4064 { 4065 if (!ext_adv_capable(hdev)) 4066 return 0; 4067 4068 return __hci_cmd_sync_status(hdev, 4069 HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, 4070 0, NULL, HCI_CMD_TIMEOUT); 4071 } 4072 4073 /* Write LE Host Supported */ 4074 static int hci_set_le_support_sync(struct hci_dev *hdev) 4075 { 4076 struct hci_cp_write_le_host_supported cp; 4077 4078 /* LE-only devices do not support explicit enablement */ 4079 if (!lmp_bredr_capable(hdev)) 4080 return 0; 4081 4082 memset(&cp, 0, sizeof(cp)); 4083 4084 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { 4085 cp.le = 0x01; 4086 cp.simul = 0x00; 4087 } 4088 4089 if (cp.le == lmp_host_le_capable(hdev)) 4090 return 0; 4091 4092 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, 4093 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 4094 } 4095 4096 /* LE 
Set Host Feature */ 4097 static int hci_le_set_host_feature_sync(struct hci_dev *hdev) 4098 { 4099 struct hci_cp_le_set_host_feature cp; 4100 4101 if (!iso_capable(hdev)) 4102 return 0; 4103 4104 memset(&cp, 0, sizeof(cp)); 4105 4106 /* Isochronous Channels (Host Support) */ 4107 cp.bit_number = 32; 4108 cp.bit_value = 1; 4109 4110 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE, 4111 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 4112 } 4113 4114 /* LE Controller init stage 3 command sequence */ 4115 static const struct hci_init_stage le_init3[] = { 4116 /* HCI_OP_LE_SET_EVENT_MASK */ 4117 HCI_INIT(hci_le_set_event_mask_sync), 4118 /* HCI_OP_LE_READ_ADV_TX_POWER */ 4119 HCI_INIT(hci_le_read_adv_tx_power_sync), 4120 /* HCI_OP_LE_READ_TRANSMIT_POWER */ 4121 HCI_INIT(hci_le_read_tx_power_sync), 4122 /* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */ 4123 HCI_INIT(hci_le_read_accept_list_size_sync), 4124 /* HCI_OP_LE_CLEAR_ACCEPT_LIST */ 4125 HCI_INIT(hci_le_clear_accept_list_sync), 4126 /* HCI_OP_LE_READ_RESOLV_LIST_SIZE */ 4127 HCI_INIT(hci_le_read_resolv_list_size_sync), 4128 /* HCI_OP_LE_CLEAR_RESOLV_LIST */ 4129 HCI_INIT(hci_le_clear_resolv_list_sync), 4130 /* HCI_OP_LE_SET_RPA_TIMEOUT */ 4131 HCI_INIT(hci_le_set_rpa_timeout_sync), 4132 /* HCI_OP_LE_READ_MAX_DATA_LEN */ 4133 HCI_INIT(hci_le_read_max_data_len_sync), 4134 /* HCI_OP_LE_READ_DEF_DATA_LEN */ 4135 HCI_INIT(hci_le_read_def_data_len_sync), 4136 /* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */ 4137 HCI_INIT(hci_le_read_num_support_adv_sets_sync), 4138 /* HCI_OP_WRITE_LE_HOST_SUPPORTED */ 4139 HCI_INIT(hci_set_le_support_sync), 4140 /* HCI_OP_LE_SET_HOST_FEATURE */ 4141 HCI_INIT(hci_le_set_host_feature_sync), 4142 {} 4143 }; 4144 4145 static int hci_init3_sync(struct hci_dev *hdev) 4146 { 4147 int err; 4148 4149 bt_dev_dbg(hdev, ""); 4150 4151 err = hci_init_stage_sync(hdev, hci_init3); 4152 if (err) 4153 return err; 4154 4155 if (lmp_le_capable(hdev)) 4156 return hci_init_stage_sync(hdev, le_init3); 4157 4158 return 0; 4159 } 4160 4161 static int hci_delete_stored_link_key_sync(struct hci_dev *hdev) 4162 { 4163 struct hci_cp_delete_stored_link_key cp; 4164 4165 /* Some Broadcom based Bluetooth controllers do not support the 4166 * Delete Stored Link Key command. They are clearly indicating its 4167 * absence in the bit mask of supported commands. 4168 * 4169 * Check the supported commands and only if the command is marked 4170 * as supported send it. If not supported assume that the controller 4171 * does not have actual support for stored link keys which makes this 4172 * command redundant anyway. 4173 * 4174 * Some controllers indicate that they support handling deleting 4175 * stored link keys, but they don't. The quirk lets a driver 4176 * just disable this command. 4177 */ 4178 if (!(hdev->commands[6] & 0x80) || 4179 test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) 4180 return 0; 4181 4182 memset(&cp, 0, sizeof(cp)); 4183 bacpy(&cp.bdaddr, BDADDR_ANY); 4184 cp.delete_all = 0x01; 4185 4186 return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY, 4187 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 4188 } 4189 4190 static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev) 4191 { 4192 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; 4193 bool changed = false; 4194 4195 /* Set event mask page 2 if the HCI command for it is supported */ 4196 if (!(hdev->commands[22] & 0x04)) 4197 return 0; 4198 4199 /* If Connectionless Peripheral Broadcast central role is supported 4200 * enable all necessary events for it. 
4201 */ 4202 if (lmp_cpb_central_capable(hdev)) { 4203 events[1] |= 0x40; /* Triggered Clock Capture */ 4204 events[1] |= 0x80; /* Synchronization Train Complete */ 4205 events[2] |= 0x08; /* Truncated Page Complete */ 4206 events[2] |= 0x20; /* CPB Channel Map Change */ 4207 changed = true; 4208 } 4209 4210 /* If Connectionless Peripheral Broadcast peripheral role is supported 4211 * enable all necessary events for it. 4212 */ 4213 if (lmp_cpb_peripheral_capable(hdev)) { 4214 events[2] |= 0x01; /* Synchronization Train Received */ 4215 events[2] |= 0x02; /* CPB Receive */ 4216 events[2] |= 0x04; /* CPB Timeout */ 4217 events[2] |= 0x10; /* Peripheral Page Response Timeout */ 4218 changed = true; 4219 } 4220 4221 /* Enable Authenticated Payload Timeout Expired event if supported */ 4222 if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) { 4223 events[2] |= 0x80; 4224 changed = true; 4225 } 4226 4227 /* Some Broadcom based controllers indicate support for Set Event 4228 * Mask Page 2 command, but then actually do not support it. Since 4229 * the default value is all bits set to zero, the command is only 4230 * required if the event mask has to be changed. In case no change 4231 * to the event mask is needed, skip this command. 4232 */ 4233 if (!changed) 4234 return 0; 4235 4236 return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2, 4237 sizeof(events), events, HCI_CMD_TIMEOUT); 4238 } 4239 4240 /* Read local codec list if the HCI command is supported */ 4241 static int hci_read_local_codecs_sync(struct hci_dev *hdev) 4242 { 4243 if (hdev->commands[45] & 0x04) 4244 hci_read_supported_codecs_v2(hdev); 4245 else if (hdev->commands[29] & 0x20) 4246 hci_read_supported_codecs(hdev); 4247 4248 return 0; 4249 } 4250 4251 /* Read local pairing options if the HCI command is supported */ 4252 static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev) 4253 { 4254 if (!(hdev->commands[41] & 0x08)) 4255 return 0; 4256 4257 return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS, 4258 0, NULL, HCI_CMD_TIMEOUT); 4259 } 4260 4261 /* Get MWS transport configuration if the HCI command is supported */ 4262 static int hci_get_mws_transport_config_sync(struct hci_dev *hdev) 4263 { 4264 if (!(hdev->commands[30] & 0x08)) 4265 return 0; 4266 4267 return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 4268 0, NULL, HCI_CMD_TIMEOUT); 4269 } 4270 4271 /* Check for Synchronization Train support */ 4272 static int hci_read_sync_train_params_sync(struct hci_dev *hdev) 4273 { 4274 if (!lmp_sync_train_capable(hdev)) 4275 return 0; 4276 4277 return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS, 4278 0, NULL, HCI_CMD_TIMEOUT); 4279 } 4280 4281 /* Enable Secure Connections if supported and configured */ 4282 static int hci_write_sc_support_1_sync(struct hci_dev *hdev) 4283 { 4284 u8 support = 0x01; 4285 4286 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) || 4287 !bredr_sc_enabled(hdev)) 4288 return 0; 4289 4290 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT, 4291 sizeof(support), &support, 4292 HCI_CMD_TIMEOUT); 4293 } 4294 4295 /* Set erroneous data reporting if supported to the wideband speech 4296 * setting value 4297 */ 4298 static int hci_set_err_data_report_sync(struct hci_dev *hdev) 4299 { 4300 struct hci_cp_write_def_err_data_reporting cp; 4301 bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED); 4302 4303 if (!(hdev->commands[18] & 0x08) || 4304 !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) || 4305 
test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) 4306 return 0; 4307 4308 if (enabled == hdev->err_data_reporting) 4309 return 0; 4310 4311 memset(&cp, 0, sizeof(cp)); 4312 cp.err_data_reporting = enabled ? ERR_DATA_REPORTING_ENABLED : 4313 ERR_DATA_REPORTING_DISABLED; 4314 4315 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, 4316 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 4317 } 4318 4319 static const struct hci_init_stage hci_init4[] = { 4320 /* HCI_OP_DELETE_STORED_LINK_KEY */ 4321 HCI_INIT(hci_delete_stored_link_key_sync), 4322 /* HCI_OP_SET_EVENT_MASK_PAGE_2 */ 4323 HCI_INIT(hci_set_event_mask_page_2_sync), 4324 /* HCI_OP_READ_LOCAL_CODECS */ 4325 HCI_INIT(hci_read_local_codecs_sync), 4326 /* HCI_OP_READ_LOCAL_PAIRING_OPTS */ 4327 HCI_INIT(hci_read_local_pairing_opts_sync), 4328 /* HCI_OP_GET_MWS_TRANSPORT_CONFIG */ 4329 HCI_INIT(hci_get_mws_transport_config_sync), 4330 /* HCI_OP_READ_SYNC_TRAIN_PARAMS */ 4331 HCI_INIT(hci_read_sync_train_params_sync), 4332 /* HCI_OP_WRITE_SC_SUPPORT */ 4333 HCI_INIT(hci_write_sc_support_1_sync), 4334 /* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */ 4335 HCI_INIT(hci_set_err_data_report_sync), 4336 {} 4337 }; 4338 4339 /* Set Suggested Default Data Length to maximum if supported */ 4340 static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev) 4341 { 4342 struct hci_cp_le_write_def_data_len cp; 4343 4344 if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) 4345 return 0; 4346 4347 memset(&cp, 0, sizeof(cp)); 4348 cp.tx_len = cpu_to_le16(hdev->le_max_tx_len); 4349 cp.tx_time = cpu_to_le16(hdev->le_max_tx_time); 4350 4351 return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN, 4352 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 4353 } 4354 4355 /* Set Default PHY parameters if command is supported */ 4356 static int hci_le_set_default_phy_sync(struct hci_dev *hdev) 4357 { 4358 struct hci_cp_le_set_default_phy cp; 4359 4360 if (!(hdev->commands[35] & 0x20)) 4361 return 0; 4362 4363 memset(&cp, 0, sizeof(cp)); 4364 cp.all_phys = 0x00; 4365 cp.tx_phys = hdev->le_tx_def_phys; 4366 cp.rx_phys = hdev->le_rx_def_phys; 4367 4368 return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY, 4369 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 4370 } 4371 4372 static const struct hci_init_stage le_init4[] = { 4373 /* HCI_OP_LE_WRITE_DEF_DATA_LEN */ 4374 HCI_INIT(hci_le_set_write_def_data_len_sync), 4375 /* HCI_OP_LE_SET_DEFAULT_PHY */ 4376 HCI_INIT(hci_le_set_default_phy_sync), 4377 {} 4378 }; 4379 4380 static int hci_init4_sync(struct hci_dev *hdev) 4381 { 4382 int err; 4383 4384 bt_dev_dbg(hdev, ""); 4385 4386 err = hci_init_stage_sync(hdev, hci_init4); 4387 if (err) 4388 return err; 4389 4390 if (lmp_le_capable(hdev)) 4391 return hci_init_stage_sync(hdev, le_init4); 4392 4393 return 0; 4394 } 4395 4396 static int hci_init_sync(struct hci_dev *hdev) 4397 { 4398 int err; 4399 4400 err = hci_init1_sync(hdev); 4401 if (err < 0) 4402 return err; 4403 4404 if (hci_dev_test_flag(hdev, HCI_SETUP)) 4405 hci_debugfs_create_basic(hdev); 4406 4407 err = hci_init2_sync(hdev); 4408 if (err < 0) 4409 return err; 4410 4411 /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode 4412 * BR/EDR/LE type controllers. AMP controllers only need the 4413 * first two stages of init. 
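* Stages 3 and 4 (hci_init3_sync and hci_init4_sync, called below) contain the BR/EDR and LE specific command sequences, which do not apply to AMP controllers.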
4414 */ 4415 if (hdev->dev_type != HCI_PRIMARY) 4416 return 0; 4417 4418 err = hci_init3_sync(hdev); 4419 if (err < 0) 4420 return err; 4421 4422 err = hci_init4_sync(hdev); 4423 if (err < 0) 4424 return err; 4425 4426 /* This function is only called when the controller is actually in 4427 * configured state. When the controller is marked as unconfigured, 4428 * this initialization procedure is not run. 4429 * 4430 * It means that it is possible that a controller runs through its 4431 * setup phase and then discovers missing settings. If that is the 4432 * case, then this function will not be called. It then will only 4433 * be called during the config phase. 4434 * 4435 * So only when in setup phase or config phase, create the debugfs 4436 * entries and register the SMP channels. 4437 */ 4438 if (!hci_dev_test_flag(hdev, HCI_SETUP) && 4439 !hci_dev_test_flag(hdev, HCI_CONFIG)) 4440 return 0; 4441 4442 hci_debugfs_create_common(hdev); 4443 4444 if (lmp_bredr_capable(hdev)) 4445 hci_debugfs_create_bredr(hdev); 4446 4447 if (lmp_le_capable(hdev)) 4448 hci_debugfs_create_le(hdev); 4449 4450 return 0; 4451 } 4452 4453 #define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc } 4454 4455 static const struct { 4456 unsigned long quirk; 4457 const char *desc; 4458 } hci_broken_table[] = { 4459 HCI_QUIRK_BROKEN(LOCAL_COMMANDS, 4460 "HCI Read Local Supported Commands not supported"), 4461 HCI_QUIRK_BROKEN(STORED_LINK_KEY, 4462 "HCI Delete Stored Link Key command is advertised, " 4463 "but not supported."), 4464 HCI_QUIRK_BROKEN(ERR_DATA_REPORTING, 4465 "HCI Read Default Erroneous Data Reporting command is " 4466 "advertised, but not supported."), 4467 HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER, 4468 "HCI Read Transmit Power Level command is advertised, " 4469 "but not supported."), 4470 HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL, 4471 "HCI Set Event Filter command not supported."), 4472 HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN, 4473 "HCI Enhanced Setup Synchronous Connection command is " 4474 "advertised, but not supported.") 4475 }; 4476 4477 /* This function handles hdev setup stage: 4478 * 4479 * Calls hdev->setup 4480 * Setup address if HCI_QUIRK_USE_BDADDR_PROPERTY is set. 4481 */ 4482 static int hci_dev_setup_sync(struct hci_dev *hdev) 4483 { 4484 int ret = 0; 4485 bool invalid_bdaddr; 4486 size_t i; 4487 4488 if (!hci_dev_test_flag(hdev, HCI_SETUP) && 4489 !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) 4490 return 0; 4491 4492 bt_dev_dbg(hdev, ""); 4493 4494 hci_sock_dev_event(hdev, HCI_DEV_SETUP); 4495 4496 if (hdev->setup) 4497 ret = hdev->setup(hdev); 4498 4499 for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) { 4500 if (test_bit(hci_broken_table[i].quirk, &hdev->quirks)) 4501 bt_dev_warn(hdev, "%s", hci_broken_table[i].desc); 4502 } 4503 4504 /* The transport driver can set the quirk to mark the 4505 * BD_ADDR invalid before creating the HCI device or in 4506 * its setup callback. 4507 */ 4508 invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); 4509 4510 if (!ret) { 4511 if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) { 4512 if (!bacmp(&hdev->public_addr, BDADDR_ANY)) 4513 hci_dev_get_bd_addr_from_property(hdev); 4514 4515 if (bacmp(&hdev->public_addr, BDADDR_ANY) && 4516 hdev->set_bdaddr) { 4517 ret = hdev->set_bdaddr(hdev, 4518 &hdev->public_addr); 4519 4520 /* If setting of the BD_ADDR from the device 4521 * property succeeds, then treat the address 4522 * as valid even if the invalid BD_ADDR 4523 * quirk indicates otherwise. 
4524 */ 4525 if (!ret) 4526 invalid_bdaddr = false; 4527 } 4528 } 4529 } 4530 4531 /* The transport driver can set these quirks before 4532 * creating the HCI device or in its setup callback. 4533 * 4534 * For the invalid BD_ADDR quirk it is possible that 4535 * it becomes a valid address if the bootloader does 4536 * provide it (see above). 4537 * 4538 * In case any of them is set, the controller has to 4539 * start up as unconfigured. 4540 */ 4541 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || 4542 invalid_bdaddr) 4543 hci_dev_set_flag(hdev, HCI_UNCONFIGURED); 4544 4545 /* For an unconfigured controller it is required to 4546 * read at least the version information provided by 4547 * the Read Local Version Information command. 4548 * 4549 * If the set_bdaddr driver callback is provided, then 4550 * also the original Bluetooth public device address 4551 * will be read using the Read BD Address command. 4552 */ 4553 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) 4554 return hci_unconf_init_sync(hdev); 4555 4556 return ret; 4557 } 4558 4559 /* This function handles hdev init stage: 4560 * 4561 * Calls hci_dev_setup_sync to perform setup stage 4562 * Calls hci_init_sync to perform HCI command init sequence 4563 */ 4564 static int hci_dev_init_sync(struct hci_dev *hdev) 4565 { 4566 int ret; 4567 4568 bt_dev_dbg(hdev, ""); 4569 4570 atomic_set(&hdev->cmd_cnt, 1); 4571 set_bit(HCI_INIT, &hdev->flags); 4572 4573 ret = hci_dev_setup_sync(hdev); 4574 4575 if (hci_dev_test_flag(hdev, HCI_CONFIG)) { 4576 /* If public address change is configured, ensure that 4577 * the address gets programmed. If the driver does not 4578 * support changing the public address, fail the power 4579 * on procedure. 4580 */ 4581 if (bacmp(&hdev->public_addr, BDADDR_ANY) && 4582 hdev->set_bdaddr) 4583 ret = hdev->set_bdaddr(hdev, &hdev->public_addr); 4584 else 4585 ret = -EADDRNOTAVAIL; 4586 } 4587 4588 if (!ret) { 4589 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && 4590 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 4591 ret = hci_init_sync(hdev); 4592 if (!ret && hdev->post_init) 4593 ret = hdev->post_init(hdev); 4594 } 4595 } 4596 4597 /* If the HCI Reset command is clearing all diagnostic settings, 4598 * then they need to be reprogrammed after the init procedure 4599 * completed. 4600 */ 4601 if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && 4602 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 4603 hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag) 4604 ret = hdev->set_diag(hdev, true); 4605 4606 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 4607 msft_do_open(hdev); 4608 aosp_do_open(hdev); 4609 } 4610 4611 clear_bit(HCI_INIT, &hdev->flags); 4612 4613 return ret; 4614 } 4615 4616 int hci_dev_open_sync(struct hci_dev *hdev) 4617 { 4618 int ret; 4619 4620 bt_dev_dbg(hdev, ""); 4621 4622 if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { 4623 ret = -ENODEV; 4624 goto done; 4625 } 4626 4627 if (!hci_dev_test_flag(hdev, HCI_SETUP) && 4628 !hci_dev_test_flag(hdev, HCI_CONFIG)) { 4629 /* Check for rfkill but allow the HCI setup stage to 4630 * proceed (which in itself doesn't cause any RF activity). 4631 */ 4632 if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { 4633 ret = -ERFKILL; 4634 goto done; 4635 } 4636 4637 /* Check for valid public address or a configured static 4638 * random address, but let the HCI setup proceed to 4639 * be able to determine if there is a public address 4640 * or not. 
4641 * 4642 * In case of user channel usage, it is not important 4643 * if a public address or static random address is 4644 * available. 4645 * 4646 * This check is only valid for BR/EDR controllers 4647 * since AMP controllers do not have an address. 4648 */ 4649 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 4650 hdev->dev_type == HCI_PRIMARY && 4651 !bacmp(&hdev->bdaddr, BDADDR_ANY) && 4652 !bacmp(&hdev->static_addr, BDADDR_ANY)) { 4653 ret = -EADDRNOTAVAIL; 4654 goto done; 4655 } 4656 } 4657 4658 if (test_bit(HCI_UP, &hdev->flags)) { 4659 ret = -EALREADY; 4660 goto done; 4661 } 4662 4663 if (hdev->open(hdev)) { 4664 ret = -EIO; 4665 goto done; 4666 } 4667 4668 set_bit(HCI_RUNNING, &hdev->flags); 4669 hci_sock_dev_event(hdev, HCI_DEV_OPEN); 4670 4671 ret = hci_dev_init_sync(hdev); 4672 if (!ret) { 4673 hci_dev_hold(hdev); 4674 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); 4675 hci_adv_instances_set_rpa_expired(hdev, true); 4676 set_bit(HCI_UP, &hdev->flags); 4677 hci_sock_dev_event(hdev, HCI_DEV_UP); 4678 hci_leds_update_powered(hdev, true); 4679 if (!hci_dev_test_flag(hdev, HCI_SETUP) && 4680 !hci_dev_test_flag(hdev, HCI_CONFIG) && 4681 !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && 4682 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 4683 hci_dev_test_flag(hdev, HCI_MGMT) && 4684 hdev->dev_type == HCI_PRIMARY) { 4685 ret = hci_powered_update_sync(hdev); 4686 mgmt_power_on(hdev, ret); 4687 } 4688 } else { 4689 /* Init failed, cleanup */ 4690 flush_work(&hdev->tx_work); 4691 4692 /* Since hci_rx_work() is possible to awake new cmd_work 4693 * it should be flushed first to avoid unexpected call of 4694 * hci_cmd_work() 4695 */ 4696 flush_work(&hdev->rx_work); 4697 flush_work(&hdev->cmd_work); 4698 4699 skb_queue_purge(&hdev->cmd_q); 4700 skb_queue_purge(&hdev->rx_q); 4701 4702 if (hdev->flush) 4703 hdev->flush(hdev); 4704 4705 if (hdev->sent_cmd) { 4706 kfree_skb(hdev->sent_cmd); 4707 hdev->sent_cmd = NULL; 4708 } 4709 4710 clear_bit(HCI_RUNNING, &hdev->flags); 4711 hci_sock_dev_event(hdev, HCI_DEV_CLOSE); 4712 4713 hdev->close(hdev); 4714 hdev->flags &= BIT(HCI_RAW); 4715 } 4716 4717 done: 4718 return ret; 4719 } 4720 4721 /* This function requires the caller holds hdev->lock */ 4722 static void hci_pend_le_actions_clear(struct hci_dev *hdev) 4723 { 4724 struct hci_conn_params *p; 4725 4726 list_for_each_entry(p, &hdev->le_conn_params, list) { 4727 if (p->conn) { 4728 hci_conn_drop(p->conn); 4729 hci_conn_put(p->conn); 4730 p->conn = NULL; 4731 } 4732 list_del_init(&p->action); 4733 } 4734 4735 BT_DBG("All LE pending actions cleared"); 4736 } 4737 4738 static int hci_dev_shutdown(struct hci_dev *hdev) 4739 { 4740 int err = 0; 4741 /* Similar to how we first do setup and then set the exclusive access 4742 * bit for userspace, we must first unset userchannel and then clean up. 4743 * Otherwise, the kernel can't properly use the hci channel to clean up 4744 * the controller (some shutdown routines require sending additional 4745 * commands to the controller for example). 
4746 */ 4747 bool was_userchannel = 4748 hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL); 4749 4750 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && 4751 test_bit(HCI_UP, &hdev->flags)) { 4752 /* Execute vendor specific shutdown routine */ 4753 if (hdev->shutdown) 4754 err = hdev->shutdown(hdev); 4755 } 4756 4757 if (was_userchannel) 4758 hci_dev_set_flag(hdev, HCI_USER_CHANNEL); 4759 4760 return err; 4761 } 4762 4763 int hci_dev_close_sync(struct hci_dev *hdev) 4764 { 4765 bool auto_off; 4766 int err = 0; 4767 4768 bt_dev_dbg(hdev, ""); 4769 4770 cancel_delayed_work(&hdev->power_off); 4771 cancel_delayed_work(&hdev->ncmd_timer); 4772 cancel_delayed_work(&hdev->le_scan_disable); 4773 cancel_delayed_work(&hdev->le_scan_restart); 4774 4775 hci_request_cancel_all(hdev); 4776 4777 if (hdev->adv_instance_timeout) { 4778 cancel_delayed_work_sync(&hdev->adv_instance_expire); 4779 hdev->adv_instance_timeout = 0; 4780 } 4781 4782 err = hci_dev_shutdown(hdev); 4783 4784 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { 4785 cancel_delayed_work_sync(&hdev->cmd_timer); 4786 return err; 4787 } 4788 4789 hci_leds_update_powered(hdev, false); 4790 4791 /* Flush RX and TX works */ 4792 flush_work(&hdev->tx_work); 4793 flush_work(&hdev->rx_work); 4794 4795 if (hdev->discov_timeout > 0) { 4796 hdev->discov_timeout = 0; 4797 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); 4798 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); 4799 } 4800 4801 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) 4802 cancel_delayed_work(&hdev->service_cache); 4803 4804 if (hci_dev_test_flag(hdev, HCI_MGMT)) { 4805 struct adv_info *adv_instance; 4806 4807 cancel_delayed_work_sync(&hdev->rpa_expired); 4808 4809 list_for_each_entry(adv_instance, &hdev->adv_instances, list) 4810 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); 4811 } 4812 4813 /* Avoid potential lockdep warnings from the *_flush() calls by 4814 * ensuring the workqueue is empty up front. 
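* drain_workqueue() keeps flushing until the queue is empty, so work items that re-queue themselves are waited for as well.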
4815 */ 4816 drain_workqueue(hdev->workqueue); 4817 4818 hci_dev_lock(hdev); 4819 4820 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 4821 4822 auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF); 4823 4824 if (!auto_off && hdev->dev_type == HCI_PRIMARY && 4825 !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && 4826 hci_dev_test_flag(hdev, HCI_MGMT)) 4827 __mgmt_power_off(hdev); 4828 4829 hci_inquiry_cache_flush(hdev); 4830 hci_pend_le_actions_clear(hdev); 4831 hci_conn_hash_flush(hdev); 4832 /* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */ 4833 smp_unregister(hdev); 4834 hci_dev_unlock(hdev); 4835 4836 hci_sock_dev_event(hdev, HCI_DEV_DOWN); 4837 4838 if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { 4839 aosp_do_close(hdev); 4840 msft_do_close(hdev); 4841 } 4842 4843 if (hdev->flush) 4844 hdev->flush(hdev); 4845 4846 /* Reset device */ 4847 skb_queue_purge(&hdev->cmd_q); 4848 atomic_set(&hdev->cmd_cnt, 1); 4849 if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) && 4850 !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 4851 set_bit(HCI_INIT, &hdev->flags); 4852 hci_reset_sync(hdev); 4853 clear_bit(HCI_INIT, &hdev->flags); 4854 } 4855 4856 /* flush cmd work */ 4857 flush_work(&hdev->cmd_work); 4858 4859 /* Drop queues */ 4860 skb_queue_purge(&hdev->rx_q); 4861 skb_queue_purge(&hdev->cmd_q); 4862 skb_queue_purge(&hdev->raw_q); 4863 4864 /* Drop last sent command */ 4865 if (hdev->sent_cmd) { 4866 cancel_delayed_work_sync(&hdev->cmd_timer); 4867 kfree_skb(hdev->sent_cmd); 4868 hdev->sent_cmd = NULL; 4869 } 4870 4871 clear_bit(HCI_RUNNING, &hdev->flags); 4872 hci_sock_dev_event(hdev, HCI_DEV_CLOSE); 4873 4874 /* After this point our queues are empty and no tasks are scheduled. */ 4875 hdev->close(hdev); 4876 4877 /* Clear flags */ 4878 hdev->flags &= BIT(HCI_RAW); 4879 hci_dev_clear_volatile_flags(hdev); 4880 4881 /* Controller radio is available but is currently powered down */ 4882 hdev->amp_status = AMP_STATUS_POWERED_DOWN; 4883 4884 memset(hdev->eir, 0, sizeof(hdev->eir)); 4885 memset(hdev->dev_class, 0, sizeof(hdev->dev_class)); 4886 bacpy(&hdev->random_addr, BDADDR_ANY); 4887 4888 hci_dev_put(hdev); 4889 return err; 4890 } 4891 4892 /* This function performs the power-on HCI command sequence as follows: 4893 * 4894 * If the controller is already up (HCI_UP), run the hci_powered_update_sync 4895 * sequence; otherwise run hci_dev_open_sync, which follows up with 4896 * hci_powered_update_sync after the init sequence has completed. 4897 */ 4898 static int hci_power_on_sync(struct hci_dev *hdev) 4899 { 4900 int err; 4901 4902 if (test_bit(HCI_UP, &hdev->flags) && 4903 hci_dev_test_flag(hdev, HCI_MGMT) && 4904 hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { 4905 cancel_delayed_work(&hdev->power_off); 4906 return hci_powered_update_sync(hdev); 4907 } 4908 4909 err = hci_dev_open_sync(hdev); 4910 if (err < 0) 4911 return err; 4912 4913 /* During the HCI setup phase, a few error conditions are 4914 * ignored and they need to be checked now. If they are still 4915 * valid, it is important to turn the device back off.
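* The conditions re-checked below are rfkill, an unconfigured controller, and a primary controller without a valid public or static random address.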
4916 */ 4917 if (hci_dev_test_flag(hdev, HCI_RFKILLED) || 4918 hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || 4919 (hdev->dev_type == HCI_PRIMARY && 4920 !bacmp(&hdev->bdaddr, BDADDR_ANY) && 4921 !bacmp(&hdev->static_addr, BDADDR_ANY))) { 4922 hci_dev_clear_flag(hdev, HCI_AUTO_OFF); 4923 hci_dev_close_sync(hdev); 4924 } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) { 4925 queue_delayed_work(hdev->req_workqueue, &hdev->power_off, 4926 HCI_AUTO_OFF_TIMEOUT); 4927 } 4928 4929 if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) { 4930 /* For unconfigured devices, set the HCI_RAW flag 4931 * so that userspace can easily identify them. 4932 */ 4933 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) 4934 set_bit(HCI_RAW, &hdev->flags); 4935 4936 /* For fully configured devices, this will send 4937 * the Index Added event. For unconfigured devices, 4938 * it will send the Unconfigured Index Added event. 4939 * 4940 * Devices with HCI_QUIRK_RAW_DEVICE are ignored 4941 * and no event will be sent. 4942 */ 4943 mgmt_index_added(hdev); 4944 } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) { 4945 /* Now that the controller is configured, it 4946 * is important to clear the HCI_RAW flag. 4947 */ 4948 if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) 4949 clear_bit(HCI_RAW, &hdev->flags); 4950 4951 /* Powering on the controller with HCI_CONFIG set only 4952 * happens with the transition from unconfigured to 4953 * configured. This will send the Index Added event. 4954 */ 4955 mgmt_index_added(hdev); 4956 } 4957 4958 return 0; 4959 } 4960 4961 static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr) 4962 { 4963 struct hci_cp_remote_name_req_cancel cp; 4964 4965 memset(&cp, 0, sizeof(cp)); 4966 bacpy(&cp.bdaddr, addr); 4967 4968 return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, 4969 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 4970 } 4971 4972 int hci_stop_discovery_sync(struct hci_dev *hdev) 4973 { 4974 struct discovery_state *d = &hdev->discovery; 4975 struct inquiry_entry *e; 4976 int err; 4977 4978 bt_dev_dbg(hdev, "state %u", hdev->discovery.state); 4979 4980 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) { 4981 if (test_bit(HCI_INQUIRY, &hdev->flags)) { 4982 err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 4983 0, NULL, HCI_CMD_TIMEOUT); 4984 if (err) 4985 return err; 4986 } 4987 4988 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { 4989 cancel_delayed_work(&hdev->le_scan_disable); 4990 cancel_delayed_work(&hdev->le_scan_restart); 4991 4992 err = hci_scan_disable_sync(hdev); 4993 if (err) 4994 return err; 4995 } 4996 4997 } else { 4998 err = hci_scan_disable_sync(hdev); 4999 if (err) 5000 return err; 5001 } 5002 5003 /* Resume advertising if it was paused */ 5004 if (use_ll_privacy(hdev)) 5005 hci_resume_advertising_sync(hdev); 5006 5007 /* No further actions needed for LE-only discovery */ 5008 if (d->type == DISCOV_TYPE_LE) 5009 return 0; 5010 5011 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) { 5012 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, 5013 NAME_PENDING); 5014 if (!e) 5015 return 0; 5016 5017 return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr); 5018 } 5019 5020 return 0; 5021 } 5022 5023 static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle, 5024 u8 reason) 5025 { 5026 struct hci_cp_disconn_phy_link cp; 5027 5028 memset(&cp, 0, sizeof(cp)); 5029 cp.phy_handle = HCI_PHY_HANDLE(handle); 5030 cp.reason = reason; 5031 5032 return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK,
5033 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 5034 } 5035 5036 static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn, 5037 u8 reason) 5038 { 5039 struct hci_cp_disconnect cp; 5040 5041 if (conn->type == AMP_LINK) 5042 return hci_disconnect_phy_link_sync(hdev, conn->handle, reason); 5043 5044 memset(&cp, 0, sizeof(cp)); 5045 cp.handle = cpu_to_le16(conn->handle); 5046 cp.reason = reason; 5047 5048 /* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when not 5049 * suspending. 5050 */ 5051 if (!hdev->suspended) 5052 return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT, 5053 sizeof(cp), &cp, 5054 HCI_EV_DISCONN_COMPLETE, 5055 HCI_CMD_TIMEOUT, NULL); 5056 5057 return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp, 5058 HCI_CMD_TIMEOUT); 5059 } 5060 5061 static int hci_le_connect_cancel_sync(struct hci_dev *hdev, 5062 struct hci_conn *conn) 5063 { 5064 if (test_bit(HCI_CONN_SCANNING, &conn->flags)) 5065 return 0; 5066 5067 return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 5068 6, &conn->dst, HCI_CMD_TIMEOUT); 5069 } 5070 5071 static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn) 5072 { 5073 if (conn->type == LE_LINK) 5074 return hci_le_connect_cancel_sync(hdev, conn); 5075 5076 if (hdev->hci_ver < BLUETOOTH_VER_1_2) 5077 return 0; 5078 5079 return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL, 5080 6, &conn->dst, HCI_CMD_TIMEOUT); 5081 } 5082 5083 static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn, 5084 u8 reason) 5085 { 5086 struct hci_cp_reject_sync_conn_req cp; 5087 5088 memset(&cp, 0, sizeof(cp)); 5089 bacpy(&cp.bdaddr, &conn->dst); 5090 cp.reason = reason; 5091 5092 /* SCO rejection has its own limited set of 5093 * allowed error values (0x0D-0x0F). 5094 */ 5095 if (reason < 0x0d || reason > 0x0f) 5096 cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES; 5097 5098 return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ, 5099 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 5100 } 5101 5102 static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, 5103 u8 reason) 5104 { 5105 struct hci_cp_reject_conn_req cp; 5106 5107 if (conn->type == SCO_LINK || conn->type == ESCO_LINK) 5108 return hci_reject_sco_sync(hdev, conn, reason); 5109 5110 memset(&cp, 0, sizeof(cp)); 5111 bacpy(&cp.bdaddr, &conn->dst); 5112 cp.reason = reason; 5113 5114 return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ, 5115 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 5116 } 5117 5118 int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason) 5119 { 5120 int err; 5121 5122 switch (conn->state) { 5123 case BT_CONNECTED: 5124 case BT_CONFIG: 5125 return hci_disconnect_sync(hdev, conn, reason); 5126 case BT_CONNECT: 5127 err = hci_connect_cancel_sync(hdev, conn); 5128 /* Clean up the hci_conn object if the attempt cannot be cancelled, as that 5129 * likely means the controller and host stack are out of sync.
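* In that case hci_conn_failed() is called under hdev->lock to clean up the stale connection object.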
5130 */ 5131 if (err) { 5132 hci_dev_lock(hdev); 5133 hci_conn_failed(conn, err); 5134 hci_dev_unlock(hdev); 5135 } 5136 return err; 5137 case BT_CONNECT2: 5138 return hci_reject_conn_sync(hdev, conn, reason); 5139 default: 5140 conn->state = BT_CLOSED; 5141 break; 5142 } 5143 5144 return 0; 5145 } 5146 5147 static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason) 5148 { 5149 struct hci_conn *conn, *tmp; 5150 int err; 5151 5152 list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) { 5153 err = hci_abort_conn_sync(hdev, conn, reason); 5154 if (err) 5155 return err; 5156 } 5157 5158 return 0; 5159 } 5160 5161 /* This function performs the power-off HCI command sequence as follows: 5162 * 5163 * Clear Advertising 5164 * Stop Discovery 5165 * Disconnect all connections 5166 * hci_dev_close_sync 5167 */ 5168 static int hci_power_off_sync(struct hci_dev *hdev) 5169 { 5170 int err; 5171 5172 /* If the controller is already down there is nothing to do */ 5173 if (!test_bit(HCI_UP, &hdev->flags)) 5174 return 0; 5175 5176 if (test_bit(HCI_ISCAN, &hdev->flags) || 5177 test_bit(HCI_PSCAN, &hdev->flags)) { 5178 err = hci_write_scan_enable_sync(hdev, 0x00); 5179 if (err) 5180 return err; 5181 } 5182 5183 err = hci_clear_adv_sync(hdev, NULL, false); 5184 if (err) 5185 return err; 5186 5187 err = hci_stop_discovery_sync(hdev); 5188 if (err) 5189 return err; 5190 5191 /* Terminated due to Power Off */ 5192 err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); 5193 if (err) 5194 return err; 5195 5196 return hci_dev_close_sync(hdev); 5197 } 5198 5199 int hci_set_powered_sync(struct hci_dev *hdev, u8 val) 5200 { 5201 if (val) 5202 return hci_power_on_sync(hdev); 5203 5204 return hci_power_off_sync(hdev); 5205 } 5206 5207 static int hci_write_iac_sync(struct hci_dev *hdev) 5208 { 5209 struct hci_cp_write_current_iac_lap cp; 5210 5211 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) 5212 return 0; 5213 5214 memset(&cp, 0, sizeof(cp)); 5215 5216 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { 5217 /* Limited discoverable mode */ 5218 cp.num_iac = min_t(u8, hdev->num_iac, 2); 5219 cp.iac_lap[0] = 0x00; /* LIAC */ 5220 cp.iac_lap[1] = 0x8b; 5221 cp.iac_lap[2] = 0x9e; 5222 cp.iac_lap[3] = 0x33; /* GIAC */ 5223 cp.iac_lap[4] = 0x8b; 5224 cp.iac_lap[5] = 0x9e; 5225 } else { 5226 /* General discoverable mode */ 5227 cp.num_iac = 1; 5228 cp.iac_lap[0] = 0x33; /* GIAC */ 5229 cp.iac_lap[1] = 0x8b; 5230 cp.iac_lap[2] = 0x9e; 5231 } 5232 5233 return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP, 5234 (cp.num_iac * 3) + 1, &cp, 5235 HCI_CMD_TIMEOUT); 5236 } 5237 5238 int hci_update_discoverable_sync(struct hci_dev *hdev) 5239 { 5240 int err = 0; 5241 5242 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { 5243 err = hci_write_iac_sync(hdev); 5244 if (err) 5245 return err; 5246 5247 err = hci_update_scan_sync(hdev); 5248 if (err) 5249 return err; 5250 5251 err = hci_update_class_sync(hdev); 5252 if (err) 5253 return err; 5254 } 5255 5256 /* Advertising instances don't use the global discoverable setting, so 5257 * only update AD if advertising was enabled using Set Advertising. 5258 */ 5259 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { 5260 err = hci_update_adv_data_sync(hdev, 0x00); 5261 if (err) 5262 return err; 5263 5264 /* Discoverable mode affects the local advertising 5265 * address in limited privacy mode.
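* Restart advertising below so the controller uses the address that matches the current discoverable setting.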
5266 */ 5267 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) { 5268 if (ext_adv_capable(hdev)) 5269 err = hci_start_ext_adv_sync(hdev, 0x00); 5270 else 5271 err = hci_enable_advertising_sync(hdev); 5272 } 5273 } 5274 5275 return err; 5276 } 5277 5278 static int update_discoverable_sync(struct hci_dev *hdev, void *data) 5279 { 5280 return hci_update_discoverable_sync(hdev); 5281 } 5282 5283 int hci_update_discoverable(struct hci_dev *hdev) 5284 { 5285 /* Only queue if it would have any effect */ 5286 if (hdev_is_powered(hdev) && 5287 hci_dev_test_flag(hdev, HCI_ADVERTISING) && 5288 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && 5289 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) 5290 return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL, 5291 NULL); 5292 5293 return 0; 5294 } 5295 5296 int hci_update_connectable_sync(struct hci_dev *hdev) 5297 { 5298 int err; 5299 5300 err = hci_update_scan_sync(hdev); 5301 if (err) 5302 return err; 5303 5304 /* If BR/EDR is not enabled and we disable advertising as a 5305 * by-product of disabling connectable, we need to update the 5306 * advertising flags. 5307 */ 5308 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) 5309 err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance); 5310 5311 /* Update the advertising parameters if necessary */ 5312 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || 5313 !list_empty(&hdev->adv_instances)) { 5314 if (ext_adv_capable(hdev)) 5315 err = hci_start_ext_adv_sync(hdev, 5316 hdev->cur_adv_instance); 5317 else 5318 err = hci_enable_advertising_sync(hdev); 5319 5320 if (err) 5321 return err; 5322 } 5323 5324 return hci_update_passive_scan_sync(hdev); 5325 } 5326 5327 static int hci_inquiry_sync(struct hci_dev *hdev, u8 length) 5328 { 5329 const u8 giac[3] = { 0x33, 0x8b, 0x9e }; 5330 const u8 liac[3] = { 0x00, 0x8b, 0x9e }; 5331 struct hci_cp_inquiry cp; 5332 5333 bt_dev_dbg(hdev, ""); 5334 5335 if (hci_dev_test_flag(hdev, HCI_INQUIRY)) 5336 return 0; 5337 5338 hci_dev_lock(hdev); 5339 hci_inquiry_cache_flush(hdev); 5340 hci_dev_unlock(hdev); 5341 5342 memset(&cp, 0, sizeof(cp)); 5343 5344 if (hdev->discovery.limited) 5345 memcpy(&cp.lap, liac, sizeof(cp.lap)); 5346 else 5347 memcpy(&cp.lap, giac, sizeof(cp.lap)); 5348 5349 cp.length = length; 5350 5351 return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY, 5352 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 5353 } 5354 5355 static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval) 5356 { 5357 u8 own_addr_type; 5358 /* Accept list is not used for discovery */ 5359 u8 filter_policy = 0x00; 5360 /* Default is to enable duplicates filter */ 5361 u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE; 5362 int err; 5363 5364 bt_dev_dbg(hdev, ""); 5365 5366 /* If controller is scanning, it means the passive scanning is 5367 * running. Thus, we should temporarily stop it in order to set the 5368 * discovery scanning parameters. 5369 */ 5370 err = hci_scan_disable_sync(hdev); 5371 if (err) { 5372 bt_dev_err(hdev, "Unable to disable scanning: %d", err); 5373 return err; 5374 } 5375 5376 cancel_interleave_scan(hdev); 5377 5378 /* Pause advertising since active scanning disables address resolution 5379 * which advertising depend on in order to generate its RPAs. 
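* Advertising is resumed in the failed label below and again by hci_stop_discovery_sync() once discovery ends.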
5380 */ 5381 if (use_ll_privacy(hdev) && hci_dev_test_flag(hdev, HCI_PRIVACY)) { 5382 err = hci_pause_advertising_sync(hdev); 5383 if (err) { 5384 bt_dev_err(hdev, "pause advertising failed: %d", err); 5385 goto failed; 5386 } 5387 } 5388 5389 /* Disable address resolution while doing active scanning since the 5390 * accept list shall not be used and all reports shall reach the host 5391 * anyway. 5392 */ 5393 err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00); 5394 if (err) { 5395 bt_dev_err(hdev, "Unable to disable Address Resolution: %d", 5396 err); 5397 goto failed; 5398 } 5399 5400 /* All active scans will be done with either a resolvable private 5401 * address (when privacy feature has been enabled) or non-resolvable 5402 * private address. 5403 */ 5404 err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev), 5405 &own_addr_type); 5406 if (err < 0) 5407 own_addr_type = ADDR_LE_DEV_PUBLIC; 5408 5409 if (hci_is_adv_monitoring(hdev)) { 5410 /* The duplicate filter should be disabled when an advertisement 5411 * monitor is activated, otherwise AdvMon can only receive one 5412 * advertisement per peer during active scanning, and 5413 * might wrongly report those peers as lost. 5414 * 5415 * Note that different controllers have different meanings of 5416 * |duplicate|. Some of them consider packets with the same 5417 * address as duplicate, and others consider packets with the 5418 * same address and the same RSSI as duplicate. Although in the 5419 * latter case we would not need to disable the duplicate filter, 5420 * active scanning is usually done only for a short period of 5421 * time, so the power impact should be negligible. 5422 */ 5423 filter_dup = LE_SCAN_FILTER_DUP_DISABLE; 5424 } 5425 5426 err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval, 5427 hdev->le_scan_window_discovery, 5428 own_addr_type, filter_policy, filter_dup); 5429 if (!err) 5430 return err; 5431 5432 failed: 5433 /* Resume advertising if it was paused */ 5434 if (use_ll_privacy(hdev)) 5435 hci_resume_advertising_sync(hdev); 5436 5437 /* Resume passive scanning */ 5438 hci_update_passive_scan_sync(hdev); 5439 return err; 5440 } 5441 5442 static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev) 5443 { 5444 int err; 5445 5446 bt_dev_dbg(hdev, ""); 5447 5448 err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2); 5449 if (err) 5450 return err; 5451 5452 return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN); 5453 } 5454 5455 int hci_start_discovery_sync(struct hci_dev *hdev) 5456 { 5457 unsigned long timeout; 5458 int err; 5459 5460 bt_dev_dbg(hdev, "type %u", hdev->discovery.type); 5461 5462 switch (hdev->discovery.type) { 5463 case DISCOV_TYPE_BREDR: 5464 return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN); 5465 case DISCOV_TYPE_INTERLEAVED: 5466 /* When running simultaneous discovery, the LE scanning time 5467 * should occupy the whole discovery time since BR/EDR inquiry 5468 * and LE scanning are scheduled by the controller. 5469 * 5470 * For interleaving discovery in comparison, BR/EDR inquiry 5471 * and LE scanning are done sequentially with separate 5472 * timeouts. 5473 */ 5474 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, 5475 &hdev->quirks)) { 5476 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); 5477 /* During simultaneous discovery, we double the LE scan 5478 * interval. We must leave some time for the controller 5479 * to do BR/EDR inquiry.
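* hci_start_interleaved_discovery_sync() therefore passes le_scan_int_discovery * 2 to hci_active_scan_sync().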
5480 */ 5481 err = hci_start_interleaved_discovery_sync(hdev); 5482 break; 5483 } 5484 5485 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); 5486 err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery); 5487 break; 5488 case DISCOV_TYPE_LE: 5489 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); 5490 err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery); 5491 break; 5492 default: 5493 return -EINVAL; 5494 } 5495 5496 if (err) 5497 return err; 5498 5499 bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout)); 5500 5501 /* When service discovery is used and the controller has a 5502 * strict duplicate filter, it is important to remember the 5503 * start and duration of the scan. This is required for 5504 * restarting scanning during the discovery phase. 5505 */ 5506 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && 5507 hdev->discovery.result_filtering) { 5508 hdev->discovery.scan_start = jiffies; 5509 hdev->discovery.scan_duration = timeout; 5510 } 5511 5512 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, 5513 timeout); 5514 return 0; 5515 } 5516 5517 static void hci_suspend_monitor_sync(struct hci_dev *hdev) 5518 { 5519 switch (hci_get_adv_monitor_offload_ext(hdev)) { 5520 case HCI_ADV_MONITOR_EXT_MSFT: 5521 msft_suspend_sync(hdev); 5522 break; 5523 default: 5524 return; 5525 } 5526 } 5527 5528 /* This function disables discovery and marks it as paused */ 5529 static int hci_pause_discovery_sync(struct hci_dev *hdev) 5530 { 5531 int old_state = hdev->discovery.state; 5532 int err; 5533 5534 /* If discovery is already stopped/stopping/paused there is nothing to do */ 5535 if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING || 5536 hdev->discovery_paused) 5537 return 0; 5538 5539 hci_discovery_set_state(hdev, DISCOVERY_STOPPING); 5540 err = hci_stop_discovery_sync(hdev); 5541 if (err) 5542 return err; 5543 5544 hdev->discovery_paused = true; 5545 hdev->discovery_old_state = old_state; 5546 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 5547 5548 return 0; 5549 } 5550 5551 static int hci_update_event_filter_sync(struct hci_dev *hdev) 5552 { 5553 struct bdaddr_list_with_flags *b; 5554 u8 scan = SCAN_DISABLED; 5555 bool scanning = test_bit(HCI_PSCAN, &hdev->flags); 5556 int err; 5557 5558 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) 5559 return 0; 5560 5561 /* Some fake CSR controllers lock up after setting this type of 5562 * filter, so avoid sending the request altogether.
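* Such controllers are expected to be flagged with HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, which is checked below.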
5563 */ 5564 if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) 5565 return 0; 5566 5567 /* Always clear event filter when starting */ 5568 hci_clear_event_filter_sync(hdev); 5569 5570 list_for_each_entry(b, &hdev->accept_list, list) { 5571 if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) 5572 continue; 5573 5574 bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr); 5575 5576 err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP, 5577 HCI_CONN_SETUP_ALLOW_BDADDR, 5578 &b->bdaddr, 5579 HCI_CONN_SETUP_AUTO_ON); 5580 if (err) 5581 bt_dev_dbg(hdev, "Failed to set event filter for %pMR", 5582 &b->bdaddr); 5583 else 5584 scan = SCAN_PAGE; 5585 } 5586 5587 if (scan && !scanning) 5588 hci_write_scan_enable_sync(hdev, scan); 5589 else if (!scan && scanning) 5590 hci_write_scan_enable_sync(hdev, scan); 5591 5592 return 0; 5593 } 5594 5595 /* This function disables scanning (BR/EDR and LE) and marks it as paused */ 5596 static int hci_pause_scan_sync(struct hci_dev *hdev) 5597 { 5598 if (hdev->scanning_paused) 5599 return 0; 5600 5601 /* Disable page scan if enabled */ 5602 if (test_bit(HCI_PSCAN, &hdev->flags)) 5603 hci_write_scan_enable_sync(hdev, SCAN_DISABLED); 5604 5605 hci_scan_disable_sync(hdev); 5606 5607 hdev->scanning_paused = true; 5608 5609 return 0; 5610 } 5611 5612 /* This function performs the HCI suspend procedures in the following order: 5613 * 5614 * Pause discovery (active scanning/inquiry) 5615 * Pause Directed Advertising/Advertising 5616 * Pause Scanning (passive scanning in case discovery was not active) 5617 * Disconnect all connections 5618 * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup 5619 * otherwise: 5620 * Update event mask (only set events that are allowed to wake up the host) 5621 * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP) 5622 * Update passive scanning (lower duty cycle) 5623 * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE 5624 */ 5625 int hci_suspend_sync(struct hci_dev *hdev) 5626 { 5627 int err; 5628 5629 /* If already marked as suspended there is nothing to do */ 5630 if (hdev->suspended) 5631 return 0; 5632 5633 /* Mark device as suspended */ 5634 hdev->suspended = true; 5635 5636 /* Pause discovery if not already stopped */ 5637 hci_pause_discovery_sync(hdev); 5638 5639 /* Pause other advertisements */ 5640 hci_pause_advertising_sync(hdev); 5641 5642 /* Suspend monitor filters */ 5643 hci_suspend_monitor_sync(hdev); 5644 5645 /* Prevent disconnects from causing scanning to be re-enabled */ 5646 hci_pause_scan_sync(hdev); 5647 5648 if (hci_conn_count(hdev)) { 5649 /* Soft disconnect everything (power off) */ 5650 err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF); 5651 if (err) { 5652 /* Set state to BT_RUNNING so resume doesn't notify */ 5653 hdev->suspend_state = BT_RUNNING; 5654 hci_resume_sync(hdev); 5655 return err; 5656 } 5657 5658 /* Update event mask so only the allowed events can wake up the 5659 * host. 5660 */ 5661 hci_set_event_mask_sync(hdev); 5662 } 5663 5664 /* Only configure accept list if disconnect succeeded and wake 5665 * isn't being prevented.
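* If the driver provides no wakeup callback, or the callback reports that the controller cannot wake the host, suspend stops below with BT_SUSPEND_DISCONNECT.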
5666 */ 5667 if (!hdev->wakeup || !hdev->wakeup(hdev)) { 5668 hdev->suspend_state = BT_SUSPEND_DISCONNECT; 5669 return 0; 5670 } 5671 5672 /* Unpause to take care of updating scanning params */ 5673 hdev->scanning_paused = false; 5674 5675 /* Enable event filter for paired devices */ 5676 hci_update_event_filter_sync(hdev); 5677 5678 /* Update LE passive scan if enabled */ 5679 hci_update_passive_scan_sync(hdev); 5680 5681 /* Pause scan changes again. */ 5682 hdev->scanning_paused = true; 5683 5684 hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE; 5685 5686 return 0; 5687 } 5688 5689 /* This function resumes discovery */ 5690 static int hci_resume_discovery_sync(struct hci_dev *hdev) 5691 { 5692 int err; 5693 5694 /* If discovery was not paused there is nothing to do */ 5695 if (!hdev->discovery_paused) 5696 return 0; 5697 5698 hdev->discovery_paused = false; 5699 5700 hci_discovery_set_state(hdev, DISCOVERY_STARTING); 5701 5702 err = hci_start_discovery_sync(hdev); 5703 5704 hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED : 5705 DISCOVERY_FINDING); 5706 5707 return err; 5708 } 5709 5710 static void hci_resume_monitor_sync(struct hci_dev *hdev) 5711 { 5712 switch (hci_get_adv_monitor_offload_ext(hdev)) { 5713 case HCI_ADV_MONITOR_EXT_MSFT: 5714 msft_resume_sync(hdev); 5715 break; 5716 default: 5717 return; 5718 } 5719 } 5720 5721 /* This function resumes scanning and resets the paused flag */ 5722 static int hci_resume_scan_sync(struct hci_dev *hdev) 5723 { 5724 if (!hdev->scanning_paused) 5725 return 0; 5726 5727 hdev->scanning_paused = false; 5728 5729 hci_update_scan_sync(hdev); 5730 5731 /* Reset passive scanning to normal */ 5732 hci_update_passive_scan_sync(hdev); 5733 5734 return 0; 5735 } 5736 5737 /* This function performs the HCI resume procedures in the following order: 5738 * 5739 * Restore event mask 5740 * Clear event filter 5741 * Update passive scanning (normal duty cycle) 5742 * Resume Directed Advertising/Advertising 5743 * Resume discovery (active scanning/inquiry) 5744 */ 5745 int hci_resume_sync(struct hci_dev *hdev) 5746 { 5747 /* If not marked as suspended there is nothing to do */ 5748 if (!hdev->suspended) 5749 return 0; 5750 5751 hdev->suspended = false; 5752 5753 /* Restore event mask */ 5754 hci_set_event_mask_sync(hdev); 5755 5756 /* Clear any event filters and restore scan state */ 5757 hci_clear_event_filter_sync(hdev); 5758 5759 /* Resume scanning */ 5760 hci_resume_scan_sync(hdev); 5761 5762 /* Resume monitor filters */ 5763 hci_resume_monitor_sync(hdev); 5764 5765 /* Resume other advertisements */ 5766 hci_resume_advertising_sync(hdev); 5767 5768 /* Resume discovery */ 5769 hci_resume_discovery_sync(hdev); 5770 5771 return 0; 5772 } 5773 5774 static bool conn_use_rpa(struct hci_conn *conn) 5775 { 5776 struct hci_dev *hdev = conn->hdev; 5777 5778 return hci_dev_test_flag(hdev, HCI_PRIVACY); 5779 } 5780 5781 static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev, 5782 struct hci_conn *conn) 5783 { 5784 struct hci_cp_le_set_ext_adv_params cp; 5785 int err; 5786 bdaddr_t random_addr; 5787 u8 own_addr_type; 5788 5789 err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn), 5790 &own_addr_type); 5791 if (err) 5792 return err; 5793 5794 /* Set require_privacy to false so that the remote device has a 5795 * chance of identifying us.
5796 */ 5797 err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL, 5798 &own_addr_type, &random_addr); 5799 if (err) 5800 return err; 5801 5802 memset(&cp, 0, sizeof(cp)); 5803 5804 cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND); 5805 cp.own_addr_type = own_addr_type; 5806 cp.channel_map = hdev->le_adv_channel_map; 5807 cp.tx_power = HCI_TX_POWER_INVALID; 5808 cp.primary_phy = HCI_ADV_PHY_1M; 5809 cp.secondary_phy = HCI_ADV_PHY_1M; 5810 cp.handle = 0x00; /* Use instance 0 for directed adv */ 5811 cp.own_addr_type = own_addr_type; 5812 cp.peer_addr_type = conn->dst_type; 5813 bacpy(&cp.peer_addr, &conn->dst); 5814 5815 /* As per Core Spec 5.2 Vol 2, Part E, Sec 7.8.53, the 5816 * advertising_event_property LE_LEGACY_ADV_DIRECT_IND 5817 * does not support advertising data. When the advertising set already 5818 * contains some, the controller shall return the error code 'Invalid 5819 * HCI Command Parameters' (0x12). 5820 * So it is required to remove the adv set for handle 0x00, since we use 5821 * instance 0 for directed adv. 5822 */ 5823 err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL); 5824 if (err) 5825 return err; 5826 5827 err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS, 5828 sizeof(cp), &cp, HCI_CMD_TIMEOUT); 5829 if (err) 5830 return err; 5831 5832 /* Check if the random address needs to be updated */ 5833 if (own_addr_type == ADDR_LE_DEV_RANDOM && 5834 bacmp(&random_addr, BDADDR_ANY) && 5835 bacmp(&random_addr, &hdev->random_addr)) { 5836 err = hci_set_adv_set_random_addr_sync(hdev, 0x00, 5837 &random_addr); 5838 if (err) 5839 return err; 5840 } 5841 5842 return hci_enable_ext_advertising_sync(hdev, 0x00); 5843 } 5844 5845 static int hci_le_directed_advertising_sync(struct hci_dev *hdev, 5846 struct hci_conn *conn) 5847 { 5848 struct hci_cp_le_set_adv_param cp; 5849 u8 status; 5850 u8 own_addr_type; 5851 u8 enable; 5852 5853 if (ext_adv_capable(hdev)) 5854 return hci_le_ext_directed_advertising_sync(hdev, conn); 5855 5856 /* Clear the HCI_LE_ADV bit temporarily so that the 5857 * hci_update_random_address knows that it's safe to go ahead 5858 * and write a new random address. The flag will be set back on 5859 * as soon as the SET_ADV_ENABLE HCI command completes. 5860 */ 5861 hci_dev_clear_flag(hdev, HCI_LE_ADV); 5862 5863 /* Set require_privacy to false so that the remote device has a 5864 * chance of identifying us. 5865 */ 5866 status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn), 5867 &own_addr_type); 5868 if (status) 5869 return status; 5870 5871 memset(&cp, 0, sizeof(cp)); 5872 5873 /* Some controllers might reject the command if intervals are not 5874 * within range for undirected advertising. 5875 * BCM20702A0 is known to be affected by this.
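* Hence the fixed 0x0020 (20 ms) interval used below even though this is directed advertising.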
static int hci_le_directed_advertising_sync(struct hci_dev *hdev,
					    struct hci_conn *conn)
{
	struct hci_cp_le_set_adv_param cp;
	u8 status;
	u8 own_addr_type;
	u8 enable;

	if (ext_adv_capable(hdev))
		return hci_le_ext_directed_advertising_sync(hdev, conn);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address_sync() call below knows that it's safe
	 * to go ahead and write a new random address. The flag will be set
	 * back on as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
						&own_addr_type);
	if (status)
		return status;

	memset(&cp, 0, sizeof(cp));

	/* Some controllers might reject the command if the intervals are not
	 * within range for undirected advertising.
	 * BCM20702A0 is known to be affected by this.
	 */
	cp.min_interval = cpu_to_le16(0x0020);
	cp.max_interval = cpu_to_le16(0x0020);

	cp.type = LE_ADV_DIRECT_IND;
	cp.own_address_type = own_addr_type;
	cp.direct_addr_type = conn->dst_type;
	bacpy(&cp.direct_addr, &conn->dst);
	cp.channel_map = hdev->le_adv_channel_map;

	status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM,
				       sizeof(cp), &cp, HCI_CMD_TIMEOUT);
	if (status)
		return status;

	enable = 0x01;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,
				     sizeof(enable), &enable, HCI_CMD_TIMEOUT);
}

static void set_ext_conn_params(struct hci_conn *conn,
				struct hci_cp_le_ext_conn_param *p)
{
	struct hci_dev *hdev = conn->hdev;

	memset(p, 0, sizeof(*p));

	p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
	p->scan_window = cpu_to_le16(hdev->le_scan_window_connect);
	p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	p->conn_latency = cpu_to_le16(conn->le_conn_latency);
	p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	p->min_ce_len = cpu_to_le16(0x0000);
	p->max_ce_len = cpu_to_le16(0x0000);
}

static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
				       struct hci_conn *conn, u8 own_addr_type)
{
	struct hci_cp_le_ext_create_conn *cp;
	struct hci_cp_le_ext_conn_param *p;
	u8 data[sizeof(*cp) + sizeof(*p) * 3];
	u32 plen;

	cp = (void *)data;
	p = (void *)cp->data;

	memset(cp, 0, sizeof(*cp));

	bacpy(&cp->peer_addr, &conn->dst);
	cp->peer_addr_type = conn->dst_type;
	cp->own_addr_type = own_addr_type;

	plen = sizeof(*cp);

	if (scan_1m(hdev)) {
		cp->phys |= LE_SCAN_PHY_1M;
		set_ext_conn_params(conn, p);

		p++;
		plen += sizeof(*p);
	}

	if (scan_2m(hdev)) {
		cp->phys |= LE_SCAN_PHY_2M;
		set_ext_conn_params(conn, p);

		p++;
		plen += sizeof(*p);
	}

	if (scan_coded(hdev)) {
		cp->phys |= LE_SCAN_PHY_CODED;
		set_ext_conn_params(conn, p);

		plen += sizeof(*p);
	}

	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN,
					plen, data,
					HCI_EV_LE_ENHANCED_CONN_COMPLETE,
					conn->conn_timeout, NULL);
}
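/* Editor's note: a worked example of the variable-length parameter block
 * built above. If, say, only the 1M and Coded PHYs are enabled for scanning,
 * cp->phys ends up as LE_SCAN_PHY_1M | LE_SCAN_PHY_CODED and exactly two
 * hci_cp_le_ext_conn_param entries follow the header (one for 1M, then one
 * for Coded), giving plen = sizeof(*cp) + 2 * sizeof(*p). The data[] buffer
 * is sized for the worst case of all three PHYs being enabled.
 */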
int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
{
	struct hci_cp_le_create_conn cp;
	struct hci_conn_params *params;
	u8 own_addr_type;
	int err;

	/* If requested to connect as peripheral use directed advertising */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* If we're active scanning and simultaneous roles is not
		 * enabled, simply reject the attempt.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
		    hdev->le_scan_type == LE_SCAN_ACTIVE &&
		    !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) {
			hci_conn_del(conn);
			return -EBUSY;
		}

		/* Pause advertising while doing directed advertising. */
		hci_pause_advertising_sync(hdev);

		err = hci_le_directed_advertising_sync(hdev, conn);
		goto done;
	}

	/* Disable advertising if simultaneous roles is not in use. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
		hci_pause_advertising_sync(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		conn->le_conn_min_interval = params->conn_min_interval;
		conn->le_conn_max_interval = params->conn_max_interval;
		conn->le_conn_latency = params->conn_latency;
		conn->le_supv_timeout = params->supervision_timeout;
	} else {
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
		conn->le_conn_latency = hdev->le_conn_latency;
		conn->le_supv_timeout = hdev->le_supv_timeout;
	}

	/* If the controller is scanning, we stop it since some controllers
	 * are not able to scan and connect at the same time. Also set the
	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
	 * handler for scan disabling knows to set the correct discovery
	 * state.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
		hci_scan_disable_sync(hdev);
		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
	}

	/* Update random address, but set require_privacy to false so
	 * that we never connect with a non-resolvable address.
	 */
	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
					     &own_addr_type);
	if (err)
		goto done;

	if (use_ext_conn(hdev)) {
		err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
		goto done;
	}

	memset(&cp, 0, sizeof(cp));

	cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
	cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);

	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.own_address_type = own_addr_type;
	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261:
	 *
	 * If this event is unmasked and the HCI_LE_Connection_Complete event
	 * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is
	 * sent when a new connection has been created.
	 */
	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
				       sizeof(cp), &cp,
				       use_enhanced_conn_complete(hdev) ?
				       HCI_EV_LE_ENHANCED_CONN_COMPLETE :
				       HCI_EV_LE_CONN_COMPLETE,
				       conn->conn_timeout, NULL);

done:
	/* Re-enable advertising after the connection attempt is finished. */
	hci_resume_advertising_sync(hdev);
	return err;
}
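/* Editor's note: the completion event chosen in hci_le_create_conn_sync()
 * follows the spec excerpt quoted above. A minimal, hypothetical helper
 * expressing the same selection (not present in the original source) could
 * look like:
 *
 *	static u8 le_conn_complete_event(struct hci_dev *hdev)
 *	{
 *		return use_enhanced_conn_complete(hdev) ?
 *		       HCI_EV_LE_ENHANCED_CONN_COMPLETE :
 *		       HCI_EV_LE_CONN_COMPLETE;
 *	}
 *
 * Waiting on the wrong event would make the request run into
 * conn->conn_timeout instead of completing on the connection event.
 */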
int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle)
{
	struct hci_cp_le_remove_cig cp;

	memset(&cp, 0, sizeof(cp));
	cp.cig_id = handle;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp),
				     &cp, HCI_CMD_TIMEOUT);
}

int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle)
{
	struct hci_cp_le_big_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = handle;

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle)
{
	struct hci_cp_le_pa_term_sync cp;

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(handle);

	return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
			   bool use_rpa, struct adv_info *adv_instance,
			   u8 *own_addr_type, bdaddr_t *rand_addr)
{
	int err;

	bacpy(rand_addr, BDADDR_ANY);

	/* If privacy is enabled use a resolvable private address. If the
	 * current RPA has expired then generate a new one.
	 */
	if (use_rpa) {
		/* If the controller supports LL Privacy use own address type
		 * 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
		 */
		if (use_ll_privacy(hdev))
			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
		else
			*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (adv_instance) {
			if (adv_rpa_valid(adv_instance))
				return 0;
		} else {
			if (rpa_valid(hdev))
				return 0;
		}

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		bacpy(rand_addr, &hdev->rpa);

		return 0;
	}

	/* In case privacy is required without a resolvable private address,
	 * use a non-resolvable private address. This is useful for
	 * non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		bacpy(rand_addr, &nrpa);

		return 0;
	}

	/* No privacy so use a public address. */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
{
	u8 instance = *(u8 *)data;

	kfree(data);

	return hci_update_adv_data_sync(hdev, instance);
}

int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	u8 *inst_ptr = kmalloc(1, GFP_KERNEL);

	if (!inst_ptr)
		return -ENOMEM;

	*inst_ptr = instance;
	return hci_cmd_sync_queue(hdev, _update_adv_data_sync, inst_ptr, NULL);
}
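/* Editor's note: hci_update_adv_data() above illustrates the general recipe
 * for exposing a one-argument *_sync helper to other contexts: copy the
 * argument into a small heap allocation, queue a worker with
 * hci_cmd_sync_queue(), and kfree() the allocation inside the worker. A
 * hypothetical wrapper for another instance-based helper (assuming one with
 * the same shape, e.g. hci_update_scan_rsp_data_sync()) would follow the
 * same pattern:
 *
 *	static int _update_scan_rsp_sync(struct hci_dev *hdev, void *data)
 *	{
 *		u8 instance = *(u8 *)data;
 *
 *		kfree(data);
 *
 *		return hci_update_scan_rsp_data_sync(hdev, instance);
 *	}
 */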