xref: /linux/net/bluetooth/mgmt.c (revision 0be3ff0c)
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3 
4    Copyright (C) 2010  Nokia Corporation
5    Copyright (C) 2011-2012 Intel Corporation
6 
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10 
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24 
25 /* Bluetooth HCI Management interface */
26 
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
29 
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
35 
36 #include "hci_request.h"
37 #include "smp.h"
38 #include "mgmt_util.h"
39 #include "mgmt_config.h"
40 #include "msft.h"
41 #include "eir.h"
42 #include "aosp.h"
43 
44 #define MGMT_VERSION	1
45 #define MGMT_REVISION	22
46 
/* Opcodes a trusted management socket is allowed to invoke. This table
 * is reported verbatim by MGMT_OP_READ_COMMANDS for trusted sockets.
 */
static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_SET_POWERED,
	MGMT_OP_SET_DISCOVERABLE,
	MGMT_OP_SET_CONNECTABLE,
	MGMT_OP_SET_FAST_CONNECTABLE,
	MGMT_OP_SET_BONDABLE,
	MGMT_OP_SET_LINK_SECURITY,
	MGMT_OP_SET_SSP,
	MGMT_OP_SET_HS,
	MGMT_OP_SET_LE,
	MGMT_OP_SET_DEV_CLASS,
	MGMT_OP_SET_LOCAL_NAME,
	MGMT_OP_ADD_UUID,
	MGMT_OP_REMOVE_UUID,
	MGMT_OP_LOAD_LINK_KEYS,
	MGMT_OP_LOAD_LONG_TERM_KEYS,
	MGMT_OP_DISCONNECT,
	MGMT_OP_GET_CONNECTIONS,
	MGMT_OP_PIN_CODE_REPLY,
	MGMT_OP_PIN_CODE_NEG_REPLY,
	MGMT_OP_SET_IO_CAPABILITY,
	MGMT_OP_PAIR_DEVICE,
	MGMT_OP_CANCEL_PAIR_DEVICE,
	MGMT_OP_UNPAIR_DEVICE,
	MGMT_OP_USER_CONFIRM_REPLY,
	MGMT_OP_USER_CONFIRM_NEG_REPLY,
	MGMT_OP_USER_PASSKEY_REPLY,
	MGMT_OP_USER_PASSKEY_NEG_REPLY,
	MGMT_OP_READ_LOCAL_OOB_DATA,
	MGMT_OP_ADD_REMOTE_OOB_DATA,
	MGMT_OP_REMOVE_REMOTE_OOB_DATA,
	MGMT_OP_START_DISCOVERY,
	MGMT_OP_STOP_DISCOVERY,
	MGMT_OP_CONFIRM_NAME,
	MGMT_OP_BLOCK_DEVICE,
	MGMT_OP_UNBLOCK_DEVICE,
	MGMT_OP_SET_DEVICE_ID,
	MGMT_OP_SET_ADVERTISING,
	MGMT_OP_SET_BREDR,
	MGMT_OP_SET_STATIC_ADDRESS,
	MGMT_OP_SET_SCAN_PARAMS,
	MGMT_OP_SET_SECURE_CONN,
	MGMT_OP_SET_DEBUG_KEYS,
	MGMT_OP_SET_PRIVACY,
	MGMT_OP_LOAD_IRKS,
	MGMT_OP_GET_CONN_INFO,
	MGMT_OP_GET_CLOCK_INFO,
	MGMT_OP_ADD_DEVICE,
	MGMT_OP_REMOVE_DEVICE,
	MGMT_OP_LOAD_CONN_PARAM,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_SET_EXTERNAL_CONFIG,
	MGMT_OP_SET_PUBLIC_ADDRESS,
	MGMT_OP_START_SERVICE_DISCOVERY,
	MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_ADV_FEATURES,
	MGMT_OP_ADD_ADVERTISING,
	MGMT_OP_REMOVE_ADVERTISING,
	MGMT_OP_GET_ADV_SIZE_INFO,
	MGMT_OP_START_LIMITED_DISCOVERY,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_GET_PHY_CONFIGURATION,
	MGMT_OP_SET_PHY_CONFIGURATION,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_SET_EXP_FEATURE,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_SET_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
	MGMT_OP_SET_DEF_RUNTIME_CONFIG,
	MGMT_OP_GET_DEVICE_FLAGS,
	MGMT_OP_SET_DEVICE_FLAGS,
	MGMT_OP_READ_ADV_MONITOR_FEATURES,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR,
	MGMT_OP_REMOVE_ADV_MONITOR,
	MGMT_OP_ADD_EXT_ADV_PARAMS,
	MGMT_OP_ADD_EXT_ADV_DATA,
	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI,
};
133 
/* Events a trusted management socket may receive; reported verbatim by
 * MGMT_OP_READ_COMMANDS for trusted sockets.
 */
static const u16 mgmt_events[] = {
	MGMT_EV_CONTROLLER_ERROR,
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_NEW_LINK_KEY,
	MGMT_EV_NEW_LONG_TERM_KEY,
	MGMT_EV_DEVICE_CONNECTED,
	MGMT_EV_DEVICE_DISCONNECTED,
	MGMT_EV_CONNECT_FAILED,
	MGMT_EV_PIN_CODE_REQUEST,
	MGMT_EV_USER_CONFIRM_REQUEST,
	MGMT_EV_USER_PASSKEY_REQUEST,
	MGMT_EV_AUTH_FAILED,
	MGMT_EV_DEVICE_FOUND,
	MGMT_EV_DISCOVERING,
	MGMT_EV_DEVICE_BLOCKED,
	MGMT_EV_DEVICE_UNBLOCKED,
	MGMT_EV_DEVICE_UNPAIRED,
	MGMT_EV_PASSKEY_NOTIFY,
	MGMT_EV_NEW_IRK,
	MGMT_EV_NEW_CSRK,
	MGMT_EV_DEVICE_ADDED,
	MGMT_EV_DEVICE_REMOVED,
	MGMT_EV_NEW_CONN_PARAM,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_LOCAL_OOB_DATA_UPDATED,
	MGMT_EV_ADVERTISING_ADDED,
	MGMT_EV_ADVERTISING_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_PHY_CONFIGURATION_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
	MGMT_EV_DEVICE_FLAGS_CHANGED,
	MGMT_EV_ADV_MONITOR_ADDED,
	MGMT_EV_ADV_MONITOR_REMOVED,
	MGMT_EV_CONTROLLER_SUSPEND,
	MGMT_EV_CONTROLLER_RESUME,
	MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
	MGMT_EV_ADV_MONITOR_DEVICE_LOST,
};
180 
/* Read-only subset of opcodes permitted for untrusted sockets (those
 * without HCI_SOCK_TRUSTED). None of these mutate controller state.
 */
static const u16 mgmt_untrusted_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
	MGMT_OP_READ_INFO,
	MGMT_OP_READ_UNCONF_INDEX_LIST,
	MGMT_OP_READ_CONFIG_INFO,
	MGMT_OP_READ_EXT_INDEX_LIST,
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_READ_CONTROLLER_CAP,
	MGMT_OP_READ_EXP_FEATURES_INFO,
	MGMT_OP_READ_DEF_SYSTEM_CONFIG,
	MGMT_OP_READ_DEF_RUNTIME_CONFIG,
};
193 
/* Subset of events delivered to untrusted sockets; excludes anything
 * carrying security-sensitive material (keys, pairing, discovery data).
 */
static const u16 mgmt_untrusted_events[] = {
	MGMT_EV_INDEX_ADDED,
	MGMT_EV_INDEX_REMOVED,
	MGMT_EV_NEW_SETTINGS,
	MGMT_EV_CLASS_OF_DEV_CHANGED,
	MGMT_EV_LOCAL_NAME_CHANGED,
	MGMT_EV_UNCONF_INDEX_ADDED,
	MGMT_EV_UNCONF_INDEX_REMOVED,
	MGMT_EV_NEW_CONFIG_OPTIONS,
	MGMT_EV_EXT_INDEX_ADDED,
	MGMT_EV_EXT_INDEX_REMOVED,
	MGMT_EV_EXT_INFO_CHANGED,
	MGMT_EV_EXP_FEATURE_CHANGED,
};
208 
209 #define CACHE_TIMEOUT	msecs_to_jiffies(2 * 1000)
210 
211 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
212 		 "\x00\x00\x00\x00\x00\x00\x00\x00"
213 
/* HCI to MGMT error code conversion table, indexed directly by the HCI
 * status code. HCI codes past the end of the table are mapped to
 * MGMT_STATUS_FAILED by mgmt_status().
 */
static const u8 mgmt_status_table[] = {
	MGMT_STATUS_SUCCESS,
	MGMT_STATUS_UNKNOWN_COMMAND,	/* Unknown Command */
	MGMT_STATUS_NOT_CONNECTED,	/* No Connection */
	MGMT_STATUS_FAILED,		/* Hardware Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Page Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Authentication Failed */
	MGMT_STATUS_AUTH_FAILED,	/* PIN or Key Missing */
	MGMT_STATUS_NO_RESOURCES,	/* Memory Full */
	MGMT_STATUS_TIMEOUT,		/* Connection Timeout */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of Connections */
	MGMT_STATUS_NO_RESOURCES,	/* Max Number of SCO Connections */
	MGMT_STATUS_ALREADY_CONNECTED,	/* ACL Connection Exists */
	MGMT_STATUS_BUSY,		/* Command Disallowed */
	MGMT_STATUS_NO_RESOURCES,	/* Rejected Limited Resources */
	MGMT_STATUS_REJECTED,		/* Rejected Security */
	MGMT_STATUS_REJECTED,		/* Rejected Personal */
	MGMT_STATUS_TIMEOUT,		/* Host Timeout */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Feature */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid Parameters */
	MGMT_STATUS_DISCONNECTED,	/* OE User Ended Connection */
	MGMT_STATUS_NO_RESOURCES,	/* OE Low Resources */
	MGMT_STATUS_DISCONNECTED,	/* OE Power Off */
	MGMT_STATUS_DISCONNECTED,	/* Connection Terminated */
	MGMT_STATUS_BUSY,		/* Repeated Attempts */
	MGMT_STATUS_REJECTED,		/* Pairing Not Allowed */
	MGMT_STATUS_FAILED,		/* Unknown LMP PDU */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported Remote Feature */
	MGMT_STATUS_REJECTED,		/* SCO Offset Rejected */
	MGMT_STATUS_REJECTED,		/* SCO Interval Rejected */
	MGMT_STATUS_REJECTED,		/* Air Mode Rejected */
	MGMT_STATUS_INVALID_PARAMS,	/* Invalid LMP Parameters */
	MGMT_STATUS_FAILED,		/* Unspecified Error */
	MGMT_STATUS_NOT_SUPPORTED,	/* Unsupported LMP Parameter Value */
	MGMT_STATUS_FAILED,		/* Role Change Not Allowed */
	MGMT_STATUS_TIMEOUT,		/* LMP Response Timeout */
	MGMT_STATUS_FAILED,		/* LMP Error Transaction Collision */
	MGMT_STATUS_FAILED,		/* LMP PDU Not Allowed */
	MGMT_STATUS_REJECTED,		/* Encryption Mode Not Accepted */
	MGMT_STATUS_FAILED,		/* Unit Link Key Used */
	MGMT_STATUS_NOT_SUPPORTED,	/* QoS Not Supported */
	MGMT_STATUS_TIMEOUT,		/* Instant Passed */
	MGMT_STATUS_NOT_SUPPORTED,	/* Pairing Not Supported */
	MGMT_STATUS_FAILED,		/* Transaction Collision */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_INVALID_PARAMS,	/* Unacceptable Parameter */
	MGMT_STATUS_REJECTED,		/* QoS Rejected */
	MGMT_STATUS_NOT_SUPPORTED,	/* Classification Not Supported */
	MGMT_STATUS_REJECTED,		/* Insufficient Security */
	MGMT_STATUS_INVALID_PARAMS,	/* Parameter Out Of Range */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_BUSY,		/* Role Switch Pending */
	MGMT_STATUS_FAILED,		/* Reserved for future use */
	MGMT_STATUS_FAILED,		/* Slot Violation */
	MGMT_STATUS_FAILED,		/* Role Switch Failed */
	MGMT_STATUS_INVALID_PARAMS,	/* EIR Too Large */
	MGMT_STATUS_NOT_SUPPORTED,	/* Simple Pairing Not Supported */
	MGMT_STATUS_BUSY,		/* Host Busy Pairing */
	MGMT_STATUS_REJECTED,		/* Rejected, No Suitable Channel */
	MGMT_STATUS_BUSY,		/* Controller Busy */
	MGMT_STATUS_INVALID_PARAMS,	/* Unsuitable Connection Interval */
	MGMT_STATUS_TIMEOUT,		/* Directed Advertising Timeout */
	MGMT_STATUS_AUTH_FAILED,	/* Terminated Due to MIC Failure */
	MGMT_STATUS_CONNECT_FAILED,	/* Connection Establishment Failed */
	MGMT_STATUS_CONNECT_FAILED,	/* MAC Connection Failed */
};
281 
282 static u8 mgmt_errno_status(int err)
283 {
284 	switch (err) {
285 	case 0:
286 		return MGMT_STATUS_SUCCESS;
287 	case -EPERM:
288 		return MGMT_STATUS_REJECTED;
289 	case -EINVAL:
290 		return MGMT_STATUS_INVALID_PARAMS;
291 	case -EOPNOTSUPP:
292 		return MGMT_STATUS_NOT_SUPPORTED;
293 	case -EBUSY:
294 		return MGMT_STATUS_BUSY;
295 	case -ETIMEDOUT:
296 		return MGMT_STATUS_AUTH_FAILED;
297 	case -ENOMEM:
298 		return MGMT_STATUS_NO_RESOURCES;
299 	case -EISCONN:
300 		return MGMT_STATUS_ALREADY_CONNECTED;
301 	case -ENOTCONN:
302 		return MGMT_STATUS_DISCONNECTED;
303 	}
304 
305 	return MGMT_STATUS_FAILED;
306 }
307 
308 static u8 mgmt_status(int err)
309 {
310 	if (err < 0)
311 		return mgmt_errno_status(err);
312 
313 	if (err < ARRAY_SIZE(mgmt_status_table))
314 		return mgmt_status_table[err];
315 
316 	return MGMT_STATUS_FAILED;
317 }
318 
/* Broadcast an index-related event on the control channel to every
 * socket carrying @flag; no socket is skipped.
 */
static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
			    u16 len, int flag)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, NULL);
}
325 
/* Send an event on the control channel limited to sockets with @flag,
 * optionally skipping @skip_sk (typically the command originator).
 */
static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
			      u16 len, int flag, struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       flag, skip_sk);
}
332 
/* Send an event on the control channel to trusted sockets only,
 * optionally skipping @skip_sk.
 */
static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
		      struct sock *skip_sk)
{
	return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
			       HCI_SOCK_TRUSTED, skip_sk);
}
339 
/* skb-based variant of mgmt_event(): forward a pre-built event skb to
 * trusted control-channel sockets, skipping @skip_sk.
 */
static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk)
{
	return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED,
				   skip_sk);
}
345 
346 static u8 le_addr_type(u8 mgmt_addr_type)
347 {
348 	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
349 		return ADDR_LE_DEV_PUBLIC;
350 	else
351 		return ADDR_LE_DEV_RANDOM;
352 }
353 
/* Fill a mgmt_rp_read_version structure (passed as void * so callers
 * can hand in a raw reply buffer) with the interface version/revision.
 */
void mgmt_fill_version_info(void *ver)
{
	struct mgmt_rp_read_version *rp = ver;

	rp->version = MGMT_VERSION;
	rp->revision = cpu_to_le16(MGMT_REVISION);
}
361 
/* MGMT_OP_READ_VERSION handler: reply with the mgmt interface
 * version/revision. Index-less command (MGMT_INDEX_NONE).
 */
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 data_len)
{
	struct mgmt_rp_read_version rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	mgmt_fill_version_info(&rp);

	return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
				 &rp, sizeof(rp));
}
374 
375 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
376 			 u16 data_len)
377 {
378 	struct mgmt_rp_read_commands *rp;
379 	u16 num_commands, num_events;
380 	size_t rp_size;
381 	int i, err;
382 
383 	bt_dev_dbg(hdev, "sock %p", sk);
384 
385 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
386 		num_commands = ARRAY_SIZE(mgmt_commands);
387 		num_events = ARRAY_SIZE(mgmt_events);
388 	} else {
389 		num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
390 		num_events = ARRAY_SIZE(mgmt_untrusted_events);
391 	}
392 
393 	rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
394 
395 	rp = kmalloc(rp_size, GFP_KERNEL);
396 	if (!rp)
397 		return -ENOMEM;
398 
399 	rp->num_commands = cpu_to_le16(num_commands);
400 	rp->num_events = cpu_to_le16(num_events);
401 
402 	if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
403 		__le16 *opcode = rp->opcodes;
404 
405 		for (i = 0; i < num_commands; i++, opcode++)
406 			put_unaligned_le16(mgmt_commands[i], opcode);
407 
408 		for (i = 0; i < num_events; i++, opcode++)
409 			put_unaligned_le16(mgmt_events[i], opcode);
410 	} else {
411 		__le16 *opcode = rp->opcodes;
412 
413 		for (i = 0; i < num_commands; i++, opcode++)
414 			put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
415 
416 		for (i = 0; i < num_events; i++, opcode++)
417 			put_unaligned_le16(mgmt_untrusted_events[i], opcode);
418 	}
419 
420 	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
421 				rp, rp_size);
422 	kfree(rp);
423 
424 	return err;
425 }
426 
/* MGMT_OP_READ_INDEX_LIST handler: report the ids of all configured
 * primary controllers. Unconfigured and raw-only devices are excluded
 * (they are reported via the unconfigured/extended index lists).
 */
static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 data_len)
{
	struct mgmt_rp_read_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count matching controllers to size the reply */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC: hci_dev_list_lock is held, must not sleep */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill in the ids. Extra skip conditions mean the
	 * recount can only be <= the first pass, and the lock is held
	 * throughout, so the buffer cannot be overrun.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	/* Recompute the reply length from the final (possibly smaller)
	 * count before sending.
	 */
	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
				0, rp, rp_len);

	kfree(rp);

	return err;
}
486 
/* MGMT_OP_READ_UNCONF_INDEX_LIST handler: like read_index_list() but
 * reports only primary controllers still flagged HCI_UNCONFIGURED.
 */
static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	struct mgmt_rp_read_unconf_index_list *rp;
	struct hci_dev *d;
	size_t rp_len;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count matching controllers to size the reply */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED))
			count++;
	}

	/* GFP_ATOMIC: hci_dev_list_lock is held, must not sleep */
	rp_len = sizeof(*rp) + (2 * count);
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill ids, skipping transient and userchannel
	 * devices; count can only shrink while the lock is held.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY &&
		    hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
			rp->index[count++] = cpu_to_le16(d->id);
			bt_dev_dbg(hdev, "Added hci%u", d->id);
		}
	}

	rp->num_controllers = cpu_to_le16(count);
	rp_len = sizeof(*rp) + (2 * count);

	read_unlock(&hci_dev_list_lock);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);

	kfree(rp);

	return err;
}
546 
/* MGMT_OP_READ_EXT_INDEX_LIST handler: report all controllers with a
 * per-entry type (0x00 configured primary, 0x01 unconfigured primary,
 * 0x02 AMP) and bus. As a side effect the caller's socket is switched
 * to extended index events only.
 */
static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_rp_read_ext_index_list *rp;
	struct hci_dev *d;
	u16 count;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	read_lock(&hci_dev_list_lock);

	/* First pass: count primary and AMP controllers to size the reply */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
			count++;
	}

	/* GFP_ATOMIC: hci_dev_list_lock is held, must not sleep */
	rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
	if (!rp) {
		read_unlock(&hci_dev_list_lock);
		return -ENOMEM;
	}

	/* Second pass: fill entries; extra skips mean count can only
	 * shrink while the lock is held.
	 */
	count = 0;
	list_for_each_entry(d, &hci_dev_list, list) {
		if (hci_dev_test_flag(d, HCI_SETUP) ||
		    hci_dev_test_flag(d, HCI_CONFIG) ||
		    hci_dev_test_flag(d, HCI_USER_CHANNEL))
			continue;

		/* Devices marked as raw-only are neither configured
		 * nor unconfigured controllers.
		 */
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
			continue;

		if (d->dev_type == HCI_PRIMARY) {
			if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
				rp->entry[count].type = 0x01;
			else
				rp->entry[count].type = 0x00;
		} else if (d->dev_type == HCI_AMP) {
			rp->entry[count].type = 0x02;
		} else {
			continue;
		}

		rp->entry[count].bus = d->bus;
		rp->entry[count++].index = cpu_to_le16(d->id);
		bt_dev_dbg(hdev, "Added hci%u", d->id);
	}

	rp->num_controllers = cpu_to_le16(count);

	read_unlock(&hci_dev_list_lock);

	/* If this command is called at least once, then all the
	 * default index and unconfigured index events are disabled
	 * and from now on only extended index events are used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
				struct_size(rp, entry, count));

	kfree(rp);

	return err;
}
620 
621 static bool is_configured(struct hci_dev *hdev)
622 {
623 	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
624 	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
625 		return false;
626 
627 	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
628 	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
629 	    !bacmp(&hdev->public_addr, BDADDR_ANY))
630 		return false;
631 
632 	return true;
633 }
634 
/* Return the set of MGMT_OPTION_* bits still required before the
 * controller counts as configured (little-endian, for direct use in
 * replies/events). Zero means fully configured.
 */
static __le32 get_missing_options(struct hci_dev *hdev)
{
	u32 options = 0;

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
	    !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
	     test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
	    !bacmp(&hdev->public_addr, BDADDR_ANY))
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	return cpu_to_le32(options);
}
650 
/* Broadcast the current missing-options bitmask to sockets subscribed
 * to option events, skipping @skip (usually the command originator).
 */
static int new_options(struct hci_dev *hdev, struct sock *skip)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
				  sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
}
658 
/* Complete @opcode on @sk with the missing-options bitmask as the
 * reply payload.
 */
static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 options = get_missing_options(hdev);

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
				 sizeof(options));
}
666 
/* MGMT_OP_READ_CONFIG_INFO handler: report manufacturer plus which
 * configuration options the controller supports and which are still
 * missing.
 */
static int read_config_info(struct sock *sk, struct hci_dev *hdev,
			    void *data, u16 data_len)
{
	struct mgmt_rp_read_config_info rp;
	u32 options = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
		options |= MGMT_OPTION_EXTERNAL_CONFIG;

	/* Public address can only be configured if the driver provides
	 * a set_bdaddr callback.
	 */
	if (hdev->set_bdaddr)
		options |= MGMT_OPTION_PUBLIC_ADDRESS;

	rp.supported_options = cpu_to_le32(options);
	rp.missing_options = get_missing_options(hdev);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
				 &rp, sizeof(rp));
}
694 
/* Build the MGMT_PHY_* bitmask of PHYs the controller supports, derived
 * from its BR/EDR LMP features and LE feature bits. Nested checks mirror
 * the feature dependency tree (e.g. EDR 3M slots require EDR 3M).
 */
static u32 get_supported_phys(struct hci_dev *hdev)
{
	u32 supported_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		/* BR 1M 1-slot is mandatory for any BR/EDR controller */
		supported_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->features[0][0] & LMP_3SLOT)
			supported_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->features[0][0] & LMP_5SLOT)
			supported_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			supported_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev))
				supported_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				supported_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev))
					supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		/* LE 1M TX/RX is mandatory for any LE controller */
		supported_phys |= MGMT_PHY_LE_1M_TX;
		supported_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_features[1] & HCI_LE_PHY_2M) {
			supported_phys |= MGMT_PHY_LE_2M_TX;
			supported_phys |= MGMT_PHY_LE_2M_RX;
		}

		if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
			supported_phys |= MGMT_PHY_LE_CODED_TX;
			supported_phys |= MGMT_PHY_LE_CODED_RX;
		}
	}

	return supported_phys;
}
746 
/* Build the MGMT_PHY_* bitmask of currently selected PHYs. For BR/EDR
 * the selection is derived from hdev->pkt_type; note EDR packet-type
 * bits have inverted polarity (bit set = packet type DISABLED), hence
 * the negated tests. For LE the default TX/RX PHY preferences are used.
 */
static u32 get_selected_phys(struct hci_dev *hdev)
{
	u32 selected_phys = 0;

	if (lmp_bredr_capable(hdev)) {
		selected_phys |= MGMT_PHY_BR_1M_1SLOT;

		if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
			selected_phys |= MGMT_PHY_BR_1M_3SLOT;

		if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
			selected_phys |= MGMT_PHY_BR_1M_5SLOT;

		if (lmp_edr_2m_capable(hdev)) {
			/* EDR bits are inverted: set means disabled */
			if (!(hdev->pkt_type & HCI_2DH1))
				selected_phys |= MGMT_PHY_EDR_2M_1SLOT;

			if (lmp_edr_3slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH3))
				selected_phys |= MGMT_PHY_EDR_2M_3SLOT;

			if (lmp_edr_5slot_capable(hdev) &&
			    !(hdev->pkt_type & HCI_2DH5))
				selected_phys |= MGMT_PHY_EDR_2M_5SLOT;

			if (lmp_edr_3m_capable(hdev)) {
				if (!(hdev->pkt_type & HCI_3DH1))
					selected_phys |= MGMT_PHY_EDR_3M_1SLOT;

				if (lmp_edr_3slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH3))
					selected_phys |= MGMT_PHY_EDR_3M_3SLOT;

				if (lmp_edr_5slot_capable(hdev) &&
				    !(hdev->pkt_type & HCI_3DH5))
					selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
			}
		}
	}

	if (lmp_le_capable(hdev)) {
		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
			selected_phys |= MGMT_PHY_LE_1M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
			selected_phys |= MGMT_PHY_LE_2M_RX;

		if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_TX;

		if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
			selected_phys |= MGMT_PHY_LE_CODED_RX;
	}

	return selected_phys;
}
809 
810 static u32 get_configurable_phys(struct hci_dev *hdev)
811 {
812 	return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
813 		~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
814 }
815 
/* Build the MGMT_SETTING_* bitmask of settings this controller can
 * support, based on its BR/EDR/LE capabilities and quirks.
 */
static u32 get_supported_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	/* Settings available on every controller */
	settings |= MGMT_SETTING_POWERED;
	settings |= MGMT_SETTING_BONDABLE;
	settings |= MGMT_SETTING_DEBUG_KEYS;
	settings |= MGMT_SETTING_CONNECTABLE;
	settings |= MGMT_SETTING_DISCOVERABLE;

	if (lmp_bredr_capable(hdev)) {
		/* Fast connectable (page scan tuning) needs >= 1.2 */
		if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
			settings |= MGMT_SETTING_FAST_CONNECTABLE;
		settings |= MGMT_SETTING_BREDR;
		settings |= MGMT_SETTING_LINK_SECURITY;

		if (lmp_ssp_capable(hdev)) {
			settings |= MGMT_SETTING_SSP;
			if (IS_ENABLED(CONFIG_BT_HS))
				settings |= MGMT_SETTING_HS;
		}

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
		settings |= MGMT_SETTING_LE;
		settings |= MGMT_SETTING_SECURE_CONN;
		settings |= MGMT_SETTING_PRIVACY;
		settings |= MGMT_SETTING_STATIC_ADDRESS;
		settings |= MGMT_SETTING_ADVERTISING;
	}

	/* Configuration requires either external config support or a
	 * driver hook to set the public address.
	 */
	if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
	    hdev->set_bdaddr)
		settings |= MGMT_SETTING_CONFIGURATION;

	settings |= MGMT_SETTING_PHY_CONFIGURATION;

	return settings;
}
862 
/* Build the MGMT_SETTING_* bitmask reflecting the controller's current
 * state, derived from its hdev flags.
 */
static u32 get_current_settings(struct hci_dev *hdev)
{
	u32 settings = 0;

	if (hdev_is_powered(hdev))
		settings |= MGMT_SETTING_POWERED;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		settings |= MGMT_SETTING_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
		settings |= MGMT_SETTING_FAST_CONNECTABLE;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		settings |= MGMT_SETTING_DISCOVERABLE;

	if (hci_dev_test_flag(hdev, HCI_BONDABLE))
		settings |= MGMT_SETTING_BONDABLE;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		settings |= MGMT_SETTING_BREDR;

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		settings |= MGMT_SETTING_LE;

	if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
		settings |= MGMT_SETTING_LINK_SECURITY;

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		settings |= MGMT_SETTING_SSP;

	if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
		settings |= MGMT_SETTING_HS;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		settings |= MGMT_SETTING_ADVERTISING;

	if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
		settings |= MGMT_SETTING_SECURE_CONN;

	if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
		settings |= MGMT_SETTING_DEBUG_KEYS;

	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
		settings |= MGMT_SETTING_PRIVACY;

	/* The current setting for static address has two purposes. The
	 * first is to indicate if the static address will be used and
	 * the second is to indicate if it is actually set.
	 *
	 * This means if the static address is not configured, this flag
	 * will never be set. If the address is configured, then if the
	 * address is actually used decides if the flag is set or not.
	 *
	 * For single mode LE only controllers and dual-mode controllers
	 * with BR/EDR disabled, the existence of the static address will
	 * be evaluated.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		if (bacmp(&hdev->static_addr, BDADDR_ANY))
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}
933 
/* Look up a pending mgmt command for @opcode on the control channel;
 * returns NULL if none is outstanding.
 */
static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
{
	return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
}
938 
939 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
940 {
941 	struct mgmt_pending_cmd *cmd;
942 
943 	/* If there's a pending mgmt command the flags will not yet have
944 	 * their final values, so check for this first.
945 	 */
946 	cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
947 	if (cmd) {
948 		struct mgmt_mode *cp = cmd->param;
949 		if (cp->val == 0x01)
950 			return LE_AD_GENERAL;
951 		else if (cp->val == 0x02)
952 			return LE_AD_LIMITED;
953 	} else {
954 		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
955 			return LE_AD_LIMITED;
956 		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
957 			return LE_AD_GENERAL;
958 	}
959 
960 	return 0;
961 }
962 
963 bool mgmt_get_connectable(struct hci_dev *hdev)
964 {
965 	struct mgmt_pending_cmd *cmd;
966 
967 	/* If there's a pending mgmt command the flag will not yet have
968 	 * it's final value, so check for this first.
969 	 */
970 	cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
971 	if (cmd) {
972 		struct mgmt_mode *cp = cmd->param;
973 
974 		return cp->val;
975 	}
976 
977 	return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
978 }
979 
/* hci_cmd_sync work: push the (possibly stale) EIR data and class of
 * device back to the controller after the service cache expires.
 */
static int service_cache_sync(struct hci_dev *hdev, void *data)
{
	hci_update_eir_sync(hdev);
	hci_update_class_sync(hdev);

	return 0;
}
987 
/* Delayed-work handler for hdev->service_cache: when the cache window
 * closes, queue the EIR/class refresh. test_and_clear makes this a
 * no-op if the cache flag was already cleared elsewhere.
 */
static void service_cache_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    service_cache.work);

	if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
		return;

	hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL);
}
998 
999 static int rpa_expired_sync(struct hci_dev *hdev, void *data)
1000 {
1001 	/* The generation of a new RPA and programming it into the
1002 	 * controller happens in the hci_req_enable_advertising()
1003 	 * function.
1004 	 */
1005 	if (ext_adv_capable(hdev))
1006 		return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance);
1007 	else
1008 		return hci_enable_advertising_sync(hdev);
1009 }
1010 
/* Delayed-work handler for hdev->rpa_expired: mark the resolvable
 * private address as stale and, if advertising is active, queue the
 * re-enable that will generate a fresh RPA.
 */
static void rpa_expired(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    rpa_expired.work);

	bt_dev_dbg(hdev, "");

	/* Set the flag first so any advertising restart picks a new RPA */
	hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL);
}
1025 
/* One-time initialization when a controller first comes under mgmt
 * control. The test-and-set on HCI_MGMT keeps this idempotent.
 */
static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
{
	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
		return;

	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);

	/* Non-mgmt controlled devices get this bit set
	 * implicitly so that pairing works for them, however
	 * for mgmt we require user-space to explicitly enable
	 * it
	 */
	hci_dev_clear_flag(hdev, HCI_BONDABLE);
}
1041 
/* Handler for MGMT_OP_READ_INFO: fill a mgmt_rp_read_info reply with
 * the controller's address, version, settings, class and names. The
 * snapshot is taken under hdev->lock so the fields are consistent.
 */
static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 data_len)
{
	struct mgmt_rp_read_info rp;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	memset(&rp, 0, sizeof(rp));

	bacpy(&rp.bdaddr, &hdev->bdaddr);

	rp.version = hdev->hci_ver;
	rp.manufacturer = cpu_to_le16(hdev->manufacturer);

	rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp.current_settings = cpu_to_le32(get_current_settings(hdev));

	memcpy(rp.dev_class, hdev->dev_class, 3);

	memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
	memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
				 sizeof(rp));
}
1071 
/* Build the EIR payload for the extended controller info reply/event:
 * class of device (BR/EDR only), appearance (LE only) and both the
 * complete and short local names. Returns the total EIR length written.
 * The caller must provide a buffer large enough for all four fields.
 */
static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
{
	u16 eir_len = 0;
	size_t name_len;

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
					  hdev->dev_class, 3);

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
					  hdev->appearance);

	name_len = strlen(hdev->dev_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
				  hdev->dev_name, name_len);

	name_len = strlen(hdev->short_name);
	eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
				  hdev->short_name, name_len);

	return eir_len;
}
1095 
/* Handler for MGMT_OP_READ_EXT_INFO: like read_controller_info() but
 * with class, appearance and names packed as EIR data after the fixed
 * reply header.
 */
static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
				    void *data, u16 data_len)
{
	/* 512 bytes on the stack; assumed large enough for the fixed reply
	 * header plus the worst-case EIR data from
	 * append_eir_data_to_buf() — TODO confirm against the name limits.
	 */
	char buf[512];
	struct mgmt_rp_read_ext_info *rp = (void *)buf;
	u16 eir_len;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	bacpy(&rp->bdaddr, &hdev->bdaddr);

	rp->version = hdev->hci_ver;
	rp->manufacturer = cpu_to_le16(hdev->manufacturer);

	rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
	rp->current_settings = cpu_to_le32(get_current_settings(hdev));


	eir_len = append_eir_data_to_buf(hdev, rp->eir);
	rp->eir_len = cpu_to_le16(eir_len);

	hci_dev_unlock(hdev);

	/* If this command is called at least once, then the events
	 * for class of device and local name changes are disabled
	 * and only the new extended controller information event
	 * is used.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
	hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
				 sizeof(*rp) + eir_len);
}
1135 
/* Emit MGMT_EV_EXT_INFO_CHANGED (with freshly built EIR data) to all
 * sockets that opted into extended info events, except @skip.
 */
static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
{
	char buf[512];
	struct mgmt_ev_ext_info_changed *ev = (void *)buf;
	u16 eir_len;

	memset(buf, 0, sizeof(buf));

	eir_len = append_eir_data_to_buf(hdev, ev->eir);
	ev->eir_len = cpu_to_le16(eir_len);

	return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
				  sizeof(*ev) + eir_len,
				  HCI_MGMT_EXT_INFO_EVENTS, skip);
}
1151 
/* Send a successful command-complete response carrying the current
 * settings bitmask (little-endian) for the given opcode.
 */
static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
{
	__le32 settings = cpu_to_le32(get_current_settings(hdev));

	return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
				 sizeof(settings));
}
1159 
1160 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1161 {
1162 	struct mgmt_ev_advertising_added ev;
1163 
1164 	ev.instance = instance;
1165 
1166 	mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1167 }
1168 
1169 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1170 			      u8 instance)
1171 {
1172 	struct mgmt_ev_advertising_removed ev;
1173 
1174 	ev.instance = instance;
1175 
1176 	mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1177 }
1178 
1179 static void cancel_adv_timeout(struct hci_dev *hdev)
1180 {
1181 	if (hdev->adv_instance_timeout) {
1182 		hdev->adv_instance_timeout = 0;
1183 		cancel_delayed_work(&hdev->adv_instance_expire);
1184 	}
1185 }
1186 
/* This function requires the caller holds hdev->lock.
 *
 * Re-populate the pending LE connect/report lists from the stored
 * connection parameters after power on.
 */
static void restart_le_actions(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		/* Needed for AUTO_OFF case where might not "really"
		 * have been powered off.
		 */
		list_del_init(&p->action);

		switch (p->auto_connect) {
		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_add(&p->action, &hdev->pend_le_conns);
			break;
		case HCI_AUTO_CONN_REPORT:
			list_add(&p->action, &hdev->pend_le_reports);
			break;
		default:
			/* Other auto-connect modes need no action entry */
			break;
		}
	}
}
1211 
/* Broadcast MGMT_EV_NEW_SETTINGS with the current settings bitmask to
 * all sockets with setting events enabled, except @skip.
 */
static int new_settings(struct hci_dev *hdev, struct sock *skip)
{
	__le32 ev = cpu_to_le32(get_current_settings(hdev));

	return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
				  sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
}
1219 
/* Completion callback for the Set Powered command queued via
 * hci_cmd_sync_queue(). Responds to the originating socket and, on
 * power on, restarts stored LE actions and passive scanning.
 */
static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev))
		return;

	cp = cmd->param;

	bt_dev_dbg(hdev, "err %d", err);

	if (!err) {
		if (cp->val) {
			/* Power on: re-arm LE auto-connections under lock */
			hci_dev_lock(hdev);
			restart_le_actions(hdev);
			hci_update_passive_scan(hdev);
			hci_dev_unlock(hdev);
		}

		send_settings_rsp(cmd->sk, cmd->opcode, hdev);

		/* Only call new_setting for power on as power off is deferred
		 * to hdev->power_off work which does call hci_dev_do_close.
		 */
		if (cp->val)
			new_settings(hdev, cmd->sk);
	} else {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED,
				mgmt_status(err));
	}

	mgmt_pending_remove(cmd);
}
1255 
/* hci_cmd_sync callback: apply the requested power state. */
static int set_powered_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	BT_DBG("%s", hdev->name);

	return hci_set_powered_sync(hdev, cp->val);
}
1265 
/* Handler for MGMT_OP_SET_POWERED: validate the request, reject it when
 * one is already pending, short-circuit no-op changes, and otherwise
 * queue the power transition with mgmt_set_powered_complete() as the
 * completion callback.
 */
static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Only one Set Powered command may be in flight at a time */
	if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already in effect: reply without HCI traffic */
	if (!!cp->val == hdev_is_powered(hdev)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd,
				 mgmt_set_powered_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1308 
/* Public wrapper: broadcast New Settings to every listener. */
int mgmt_new_settings(struct hci_dev *hdev)
{
	return new_settings(hdev, NULL);
}
1313 
/* Context passed to mgmt_pending_foreach() callbacks (e.g. settings_rsp):
 * collects the first responded-to socket (reference held) so callers can
 * skip it when broadcasting follow-up events.
 */
struct cmd_lookup {
	struct sock *sk;		/* first socket responded to, or NULL */
	struct hci_dev *hdev;		/* controller being processed */
	u8 mgmt_status;			/* status code for status responses */
};
1319 
/* mgmt_pending_foreach() callback: answer each pending command with the
 * current settings, remember the first socket (with a held reference) in
 * the cmd_lookup context, and free the pending entry.
 */
static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	struct cmd_lookup *match = data;

	send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);

	list_del(&cmd->list);

	if (match->sk == NULL) {
		/* Hold a reference; the caller drops it via sock_put() */
		match->sk = cmd->sk;
		sock_hold(match->sk);
	}

	mgmt_pending_free(cmd);
}
1335 
/* mgmt_pending_foreach() callback: fail each pending command with the
 * status pointed to by @data and remove the pending entry.
 */
static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
{
	u8 *status = data;

	mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
	mgmt_pending_remove(cmd);
}
1343 
1344 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1345 {
1346 	if (cmd->cmd_complete) {
1347 		u8 *status = data;
1348 
1349 		cmd->cmd_complete(cmd, *status);
1350 		mgmt_pending_remove(cmd);
1351 
1352 		return;
1353 	}
1354 
1355 	cmd_status_rsp(cmd, data);
1356 }
1357 
/* Generic cmd_complete handler: echo the command's own parameters back
 * as the response payload.
 */
static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, cmd->param_len);
}
1363 
/* cmd_complete handler for address-based commands: respond with just the
 * leading mgmt_addr_info portion of the original parameters.
 */
static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
				 cmd->param, sizeof(struct mgmt_addr_info));
}
1369 
1370 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1371 {
1372 	if (!lmp_bredr_capable(hdev))
1373 		return MGMT_STATUS_NOT_SUPPORTED;
1374 	else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1375 		return MGMT_STATUS_REJECTED;
1376 	else
1377 		return MGMT_STATUS_SUCCESS;
1378 }
1379 
1380 static u8 mgmt_le_support(struct hci_dev *hdev)
1381 {
1382 	if (!lmp_le_capable(hdev))
1383 		return MGMT_STATUS_NOT_SUPPORTED;
1384 	else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1385 		return MGMT_STATUS_REJECTED;
1386 	else
1387 		return MGMT_STATUS_SUCCESS;
1388 }
1389 
/* Completion callback for Set Discoverable: on success arm the
 * discoverable timeout (if configured) and notify listeners; on failure
 * report the error and roll back the limited-discoverable flag.
 */
static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data,
					   int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev))
		return;

	hci_dev_lock(hdev);

	if (err) {
		u8 mgmt_err = mgmt_status(err);
		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		goto done;
	}

	/* Arm the timeout that turns discoverable mode back off */
	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    hdev->discov_timeout > 0) {
		int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
		queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
	}

	send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
	new_settings(hdev, cmd->sk);

done:
	mgmt_pending_remove(cmd);
	hci_dev_unlock(hdev);
}
1423 
/* hci_cmd_sync callback: push the discoverable state (already recorded
 * in the hdev flags by set_discoverable()) to the controller.
 */
static int set_discoverable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_discoverable_sync(hdev);
}
1430 
/* Handler for MGMT_OP_SET_DISCOVERABLE (val: 0x00 off, 0x01 general,
 * 0x02 limited). Validates the value/timeout combination, handles the
 * powered-off and no-change fast paths without HCI traffic, and
 * otherwise records the new flags and queues the HCI update with
 * mgmt_set_discoverable_complete() as completion callback.
 */
static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 len)
{
	struct mgmt_cp_set_discoverable *cp = data;
	struct mgmt_pending_cmd *cmd;
	u16 timeout;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	timeout = __le16_to_cpu(cp->timeout);

	/* Disabling discoverable requires that no timeout is set,
	 * and enabling limited discoverable requires a timeout.
	 */
	if ((cp->val == 0x00 && timeout > 0) ||
	    (cp->val == 0x02 && timeout == 0))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout cannot be armed while powered off */
	if (!hdev_is_powered(hdev) && timeout > 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Discoverable implies connectable */
	if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_REJECTED);
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		/* Setting limited discoverable when powered off is
		 * not a valid operation since it requires a timeout
		 * and so no need to check HCI_LIMITED_DISCOVERABLE.
		 */
		if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
			hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	/* If the current mode is the same, then just update the timeout
	 * value with the new value. And if only the timeout gets updated,
	 * then no need for any HCI transactions.
	 */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev,
						   HCI_LIMITED_DISCOVERABLE)) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = timeout;

		if (cp->val && hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->req_workqueue,
					   &hdev->discov_off, to);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Cancel any potential discoverable timeout that might be
	 * still active and store new timeout value. The arming of
	 * the timeout happens in the complete handler.
	 */
	cancel_delayed_work(&hdev->discov_off);
	hdev->discov_timeout = timeout;

	if (cp->val)
		hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);

	/* Limited discoverable mode */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	else
		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);

	err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd,
				 mgmt_set_discoverable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1563 
1564 static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data,
1565 					  int err)
1566 {
1567 	struct mgmt_pending_cmd *cmd = data;
1568 
1569 	bt_dev_dbg(hdev, "err %d", err);
1570 
1571 	/* Make sure cmd still outstanding. */
1572 	if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev))
1573 		return;
1574 
1575 	hci_dev_lock(hdev);
1576 
1577 	if (err) {
1578 		u8 mgmt_err = mgmt_status(err);
1579 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1580 		goto done;
1581 	}
1582 
1583 	send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1584 	new_settings(hdev, cmd->sk);
1585 
1586 done:
1587 	if (cmd)
1588 		mgmt_pending_remove(cmd);
1589 
1590 	hci_dev_unlock(hdev);
1591 }
1592 
/* Flag-only path for Set Connectable (used when the controller is
 * powered off): update HCI_CONNECTABLE (clearing HCI_DISCOVERABLE when
 * disabling), respond, and broadcast New Settings plus scan updates if
 * anything changed. Caller holds hdev->lock.
 */
static int set_connectable_update_settings(struct hci_dev *hdev,
					   struct sock *sk, u8 val)
{
	bool changed = false;
	int err;

	if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
		changed = true;

	if (val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Not connectable implies not discoverable either */
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
	if (err < 0)
		return err;

	if (changed) {
		hci_req_update_scan(hdev);
		hci_update_passive_scan(hdev);
		return new_settings(hdev, sk);
	}

	return 0;
}
1621 
/* hci_cmd_sync callback: push the connectable state (already recorded
 * in the hdev flags by set_connectable()) to the controller.
 */
static int set_connectable_sync(struct hci_dev *hdev, void *data)
{
	BT_DBG("%s", hdev->name);

	return hci_update_connectable_sync(hdev);
}
1628 
/* Handler for MGMT_OP_SET_CONNECTABLE: validate, take the flag-only
 * path while powered off, reject when a conflicting command is pending,
 * and otherwise update the flags and queue the HCI update with
 * mgmt_set_connectable_complete() as completion callback.
 */
static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = set_connectable_update_settings(hdev, sk, cp->val);
		goto failed;
	}

	if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
	    pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	if (cp->val) {
		hci_dev_set_flag(hdev, HCI_CONNECTABLE);
	} else {
		/* Disabling connectable also ends discoverable mode and
		 * stops its timeout.
		 */
		if (hdev->discov_timeout > 0)
			cancel_delayed_work(&hdev->discov_off);

		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
		hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
	}

	err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd,
				 mgmt_set_connectable_complete);

	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
1688 
/* Handler for MGMT_OP_SET_BONDABLE: purely a flag change (no pending
 * command needed); refreshes discoverable state because the advertising
 * address may depend on bondable mode in limited privacy.
 */
static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
			u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);

	err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
	if (err < 0)
		goto unlock;

	if (changed) {
		/* In limited privacy mode the change of bondable mode
		 * may affect the local advertising address.
		 */
		hci_update_discoverable(hdev);

		err = new_settings(hdev, sk);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
1726 
/* Handler for MGMT_OP_SET_LINK_SECURITY: while powered off only the
 * HCI_LINK_SECURITY flag is toggled; while powered on the setting is
 * applied via HCI Write Authentication Enable, whose command complete
 * handling answers the pending command.
 */
static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
			     u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

		if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
			hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	val = !!cp->val;

	/* Controller already in the requested auth state: just respond */
	if (test_bit(HCI_AUTH, &hdev->flags) == val) {
		err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		/* Falls straight to the unlock path below */
		goto failed;
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1795 
/* Completion callback for Set SSP: reconcile the HCI_SSP_ENABLED (and
 * dependent HCI_HS_ENABLED) flags with the outcome, answer every
 * pending SET_SSP command, broadcast New Settings on change and refresh
 * the EIR data.
 */
static void set_ssp_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 enable = cp->val;
	bool changed;

	/* Make sure cmd still outstanding. */
	if (cmd != pending_find(MGMT_OP_SET_SSP, hdev))
		return;

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* Roll back the optimistic enable done in set_ssp_sync() */
		if (enable && hci_dev_test_and_clear_flag(hdev,
							  HCI_SSP_ENABLED)) {
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
			new_settings(hdev, NULL);
		}

		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
				     &mgmt_err);
		return;
	}

	if (enable) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);

		/* Disabling SSP also disables High Speed */
		if (!changed)
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_HS_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
	}

	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);

	hci_update_eir_sync(hdev);
}
1844 
/* hci_cmd_sync callback for Set SSP: optimistically set HCI_SSP_ENABLED
 * before issuing the HCI command when enabling.
 */
static int set_ssp_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	bool changed = false;
	int err;

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);

	err = hci_write_ssp_mode_sync(hdev, cp->val);

	/* NOTE(review): the flag is cleared when the write *succeeded*
	 * (!err), after which set_ssp_complete() re-runs the test-and-set
	 * so it can observe the change. Confirm this is intentional — a
	 * rollback-on-failure would test 'err' instead.
	 */
	if (!err && changed)
		hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);

	return err;
}
1862 
/* Handler for MGMT_OP_SET_SSP: flag-only path while powered off,
 * otherwise queue the SSP mode write with set_ssp_complete() as the
 * completion callback.
 */
static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SSP_ENABLED);
		} else {
			/* Disabling SSP also disables High Speed */
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SSP_ENABLED);
			if (!changed)
				changed = hci_dev_test_and_clear_flag(hdev,
								      HCI_HS_ENABLED);
			else
				hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	/* Requested state already in effect: reply without HCI traffic */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd,
					 set_ssp_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
1942 
/* Handler for MGMT_OP_SET_HS (High Speed): purely a flag change —
 * requires CONFIG_BT_HS, BR/EDR, SSP capability and SSP enabled.
 * Disabling while powered on is rejected.
 */
static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!IS_ENABLED(CONFIG_BT_HS))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	status = mgmt_bredr_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);

	if (!lmp_ssp_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A pending Set SSP could change the SSP precondition underneath */
	if (pending_find(MGMT_OP_SET_SSP, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (cp->val) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
	} else {
		if (hdev_is_powered(hdev)) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}

		changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2003 
/* Completion callback for Set LE: answer every pending SET_LE command
 * with either the error status or the new settings, then broadcast
 * New Settings (skipping the first responded-to socket).
 */
static void set_le_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	if (status) {
		mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
							&status);
		return;
	}

	mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
2024 
/* hci_cmd_sync callback for Set LE: tear down advertising when
 * disabling, write the LE host support setting, and on enable refresh
 * advertising/scan-response data and passive scanning.
 */
static int set_le_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;
	int err;

	if (!val) {
		/* Disabling LE: stop (and for ext adv, remove) advertising */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_disable_advertising_sync(hdev);

		if (ext_adv_capable(hdev))
			hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk);
	} else {
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	}

	err = hci_write_le_host_supported_sync(hdev, val, 0);

	/* Make sure the controller has a good default for
	 * advertising data. Restrict the update to when LE
	 * has actually been enabled. During power on, the
	 * update in powered_update_hci will take care of it.
	 */
	if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (ext_adv_capable(hdev)) {
			int status;

			status = hci_setup_ext_adv_instance_sync(hdev, 0x00);
			if (!status)
				hci_update_scan_rsp_data_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
		}

		hci_update_passive_scan(hdev);
	}

	return err;
}
2066 
/* Handler for MGMT_OP_SET_LE: validate the request, refuse to switch LE
 * off on LE-only configurations, handle the flag-only fast paths, and
 * otherwise queue the LE host support update with set_le_complete() as
 * completion callback.
 */
static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;
	u8 val, enabled;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Bluetooth single mode LE only controllers or dual-mode
	 * controllers configured as LE only devices, do not allow
	 * switching LE off. These have either LE enabled explicitly
	 * or BR/EDR has been previously switched off.
	 *
	 * When trying to enable an already enabled LE, then gracefully
	 * send a positive response. Trying to disable it however will
	 * result into rejection.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (cp->val == 0x01)
			return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);

		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				       MGMT_STATUS_REJECTED);
	}

	hci_dev_lock(hdev);

	val = !!cp->val;
	enabled = lmp_host_le_capable(hdev);

	/* Disabling LE drops all advertising instances */
	if (!val)
		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);

	if (!hdev_is_powered(hdev) || val == enabled) {
		bool changed = false;

		if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
			hci_dev_change_flag(hdev, HCI_LE_ENABLED);
			changed = true;
		}

		if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
			hci_dev_clear_flag(hdev, HCI_ADVERTISING);
			changed = true;
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
		if (err < 0)
			goto unlock;

		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_le_sync, cmd,
					 set_le_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2158 
2159 /* This is a helper function to test for pending mgmt commands that can
2160  * cause CoD or EIR HCI commands. We can only allow one such pending
2161  * mgmt command at a time since otherwise we cannot easily track what
2162  * the current values are, will be, and based on that calculate if a new
2163  * HCI command needs to be sent and if yes with what value.
2164  */
2165 static bool pending_eir_or_class(struct hci_dev *hdev)
2166 {
2167 	struct mgmt_pending_cmd *cmd;
2168 
2169 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2170 		switch (cmd->opcode) {
2171 		case MGMT_OP_ADD_UUID:
2172 		case MGMT_OP_REMOVE_UUID:
2173 		case MGMT_OP_SET_DEV_CLASS:
2174 		case MGMT_OP_SET_POWERED:
2175 			return true;
2176 		}
2177 	}
2178 
2179 	return false;
2180 }
2181 
/* Bluetooth Base UUID (00000000-0000-1000-8000-00805F9B34FB) stored in
 * little-endian byte order. 16-bit and 32-bit UUIDs only differ from
 * this base in the top four bytes (see get_uuid_size()).
 */
static const u8 bluetooth_base_uuid[] = {
			0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
			0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
2186 
2187 static u8 get_uuid_size(const u8 *uuid)
2188 {
2189 	u32 val;
2190 
2191 	if (memcmp(uuid, bluetooth_base_uuid, 12))
2192 		return 128;
2193 
2194 	val = get_unaligned_le32(&uuid[12]);
2195 	if (val > 0xffff)
2196 		return 32;
2197 
2198 	return 16;
2199 }
2200 
/* Common completion callback for the UUID/class commands queued via
 * hci_cmd_sync_queue(); replies with the current Class of Device and
 * releases the pending command.
 */
static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	/* Class of Device is always 3 bytes */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
			  mgmt_status(err), hdev->dev_class, 3);

	mgmt_pending_free(cmd);
}
2212 
/* cmd_sync work for MGMT_OP_ADD_UUID: refresh Class of Device first,
 * then the EIR data.
 */
static int add_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	return err ? err : hci_update_eir_sync(hdev);
}
2223 
/* MGMT_OP_ADD_UUID handler - register a service UUID and queue the
 * resulting Class of Device and EIR updates. The command completes
 * from mgmt_class_complete().
 */
static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_cp_add_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *uuid;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
	if (!uuid) {
		err = -ENOMEM;
		goto failed;
	}

	memcpy(uuid->uuid, cp->uuid, 16);
	uuid->svc_hint = cp->svc_hint;
	uuid->size = get_uuid_size(cp->uuid);

	list_add_tail(&uuid->list, &hdev->uuids);

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto failed;
	}

	/* Success falls through with err >= 0; the reply is sent later
	 * from mgmt_class_complete().
	 */
failed:
	hci_dev_unlock(hdev);
	return err;
}
2269 
2270 static bool enable_service_cache(struct hci_dev *hdev)
2271 {
2272 	if (!hdev_is_powered(hdev))
2273 		return false;
2274 
2275 	if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2276 		queue_delayed_work(hdev->workqueue, &hdev->service_cache,
2277 				   CACHE_TIMEOUT);
2278 		return true;
2279 	}
2280 
2281 	return false;
2282 }
2283 
/* cmd_sync work for MGMT_OP_REMOVE_UUID: refresh Class of Device, then
 * the EIR data.
 */
static int remove_uuid_sync(struct hci_dev *hdev, void *data)
{
	int err = hci_update_class_sync(hdev);

	if (err)
		return err;

	return hci_update_eir_sync(hdev);
}
2294 
/* MGMT_OP_REMOVE_UUID handler - remove one service UUID, or all of
 * them when the all-zero wildcard UUID is given, then queue the
 * resulting Class of Device and EIR updates.
 */
static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
		       u16 len)
{
	struct mgmt_cp_remove_uuid *cp = data;
	struct mgmt_pending_cmd *cmd;
	struct bt_uuid *match, *tmp;
	/* All-zero UUID acts as a "remove everything" wildcard */
	static const u8 bt_uuid_any[] = {
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	int err, found;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
		hci_uuids_clear(hdev);

		/* If the service cache could be (re)armed, the HCI
		 * update is deferred and the command completes right
		 * away with the current Class of Device.
		 */
		if (enable_service_cache(hdev)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_UUID,
						0, hdev->dev_class, 3);
			goto unlock;
		}

		goto update_class;
	}

	found = 0;

	/* Remove every stored entry matching the given UUID */
	list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
		if (memcmp(match->uuid, cp->uuid, 16) != 0)
			continue;

		list_del(&match->list);
		kfree(match);
		found++;
	}

	if (found == 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

update_class:
	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2362 
2363 static int set_class_sync(struct hci_dev *hdev, void *data)
2364 {
2365 	int err = 0;
2366 
2367 	if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
2368 		cancel_delayed_work_sync(&hdev->service_cache);
2369 		err = hci_update_eir_sync(hdev);
2370 	}
2371 
2372 	if (err)
2373 		return err;
2374 
2375 	return hci_update_class_sync(hdev);
2376 }
2377 
/* MGMT_OP_SET_DEV_CLASS handler - set the major/minor device class and
 * queue the corresponding HCI update when powered.
 */
static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_dev_class *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	/* Only one CoD/EIR affecting command may be in flight at a time */
	if (pending_eir_or_class(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* The lower two bits of minor and the upper three bits of major
	 * are not settable through this command and must be zero.
	 */
	if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	hdev->major_class = cp->major;
	hdev->minor_class = cp->minor;

	/* When powered off only the stored values change; the HCI
	 * update happens on power on.
	 */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
					hdev->dev_class, 3);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, set_class_sync, cmd,
				 mgmt_class_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2429 
/* MGMT_OP_LOAD_LINK_KEYS handler - replace the stored BR/EDR link keys
 * with the supplied list. Validates the whole request before touching
 * any state, then clears and repopulates the key store.
 */
static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_load_link_keys *cp = data;
	/* Upper bound that keeps struct_size() below U16_MAX */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_link_key_info));
	u16 key_count, expected_len;
	bool changed;
	int i;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_bredr_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The payload length must match the declared key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys,
		   key_count);

	/* First pass: reject the whole request if any entry has a
	 * non-BR/EDR address or an out-of-range key type.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LINK_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_link_keys_clear(hdev);

	if (cp->debug_keys)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	if (changed)
		new_settings(hdev, NULL);

	/* Second pass: store the keys, skipping blocked and debug keys */
	for (i = 0; i < key_count; i++) {
		struct mgmt_link_key_info *key = &cp->keys[i];

		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LINKKEY,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		/* Always ignore debug keys and require a new pairing if
		 * the user wants to use them.
		 */
		if (key->type == HCI_LK_DEBUG_COMBINATION)
			continue;

		hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
				 key->type, key->pin_len, NULL);
	}

	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return 0;
}
2518 
2519 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2520 			   u8 addr_type, struct sock *skip_sk)
2521 {
2522 	struct mgmt_ev_device_unpaired ev;
2523 
2524 	bacpy(&ev.addr.bdaddr, bdaddr);
2525 	ev.addr.type = addr_type;
2526 
2527 	return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
2528 			  skip_sk);
2529 }
2530 
/* MGMT_OP_UNPAIR_DEVICE handler - remove pairing data (link key for
 * BR/EDR, SMP LTK/IRK for LE) and optionally terminate an existing
 * connection to the device.
 */
static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_unpair_device *cp = data;
	struct mgmt_rp_unpair_device rp;
	struct hci_conn_params *params;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	u8 addr_type;
	int err;

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	/* disconnect is a boolean parameter */
	if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR) {
		/* If disconnection is requested, then look up the
		 * connection. If the remote device is connected, it
		 * will be later used to terminate the link.
		 *
		 * Setting it to NULL explicitly will cause no
		 * termination of the link.
		 */
		if (cp->disconnect)
			conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
						       &cp->addr.bdaddr);
		else
			conn = NULL;

		err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
		if (err < 0) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_UNPAIR_DEVICE,
						MGMT_STATUS_NOT_PAIRED, &rp,
						sizeof(rp));
			goto unlock;
		}

		goto done;
	}

	/* LE address type */
	addr_type = le_addr_type(cp->addr.type);

	/* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
	err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
					MGMT_STATUS_NOT_PAIRED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* Without an active connection the parameters can be removed
	 * immediately; otherwise removal is deferred (see below).
	 */
	conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
	if (!conn) {
		hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
		goto done;
	}


	/* Defer clearing up the connection parameters until closing to
	 * give a chance of keeping them if a repairing happens.
	 */
	set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Disable auto-connection parameters if present */
	params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
	if (params) {
		if (params->explicit_connect)
			params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		else
			params->auto_connect = HCI_AUTO_CONN_DISABLED;
	}

	/* If disconnection is not requested, then clear the connection
	 * variable so that the link is not terminated.
	 */
	if (!cp->disconnect)
		conn = NULL;

done:
	/* If the connection variable is set, then termination of the
	 * link is requested.
	 */
	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
					&rp, sizeof(rp));
		device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
		goto unlock;
	}

	/* Termination requested: defer the reply until the disconnect
	 * completes via the pending command.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
			       sizeof(*cp));
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	cmd->cmd_complete = addr_cmd_complete;

	err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
2658 
/* MGMT_OP_DISCONNECT handler - terminate the connection to the given
 * address. The reply is deferred until the disconnect completes via
 * the pending command.
 */
static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
		      u16 len)
{
	struct mgmt_cp_disconnect *cp = data;
	struct mgmt_rp_disconnect rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto failed;
	}

	/* Only one disconnect may be in progress at a time */
	if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_BUSY, &rp, sizeof(rp));
		goto failed;
	}

	/* Look up the connection on the transport matching the address
	 * type.
	 */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
					       le_addr_type(cp->addr.type));

	if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = generic_cmd_complete;

	err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2724 
2725 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2726 {
2727 	switch (link_type) {
2728 	case LE_LINK:
2729 		switch (addr_type) {
2730 		case ADDR_LE_DEV_PUBLIC:
2731 			return BDADDR_LE_PUBLIC;
2732 
2733 		default:
2734 			/* Fallback to LE Random address type */
2735 			return BDADDR_LE_RANDOM;
2736 		}
2737 
2738 	default:
2739 		/* Fallback to BR/EDR type */
2740 		return BDADDR_BREDR;
2741 	}
2742 }
2743 
2744 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2745 			   u16 data_len)
2746 {
2747 	struct mgmt_rp_get_connections *rp;
2748 	struct hci_conn *c;
2749 	int err;
2750 	u16 i;
2751 
2752 	bt_dev_dbg(hdev, "sock %p", sk);
2753 
2754 	hci_dev_lock(hdev);
2755 
2756 	if (!hdev_is_powered(hdev)) {
2757 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2758 				      MGMT_STATUS_NOT_POWERED);
2759 		goto unlock;
2760 	}
2761 
2762 	i = 0;
2763 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2764 		if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2765 			i++;
2766 	}
2767 
2768 	rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
2769 	if (!rp) {
2770 		err = -ENOMEM;
2771 		goto unlock;
2772 	}
2773 
2774 	i = 0;
2775 	list_for_each_entry(c, &hdev->conn_hash.list, list) {
2776 		if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2777 			continue;
2778 		bacpy(&rp->addr[i].bdaddr, &c->dst);
2779 		rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2780 		if (c->type == SCO_LINK || c->type == ESCO_LINK)
2781 			continue;
2782 		i++;
2783 	}
2784 
2785 	rp->conn_count = cpu_to_le16(i);
2786 
2787 	/* Recalculate length in case of filtered SCO connections, etc */
2788 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2789 				struct_size(rp, addr, i));
2790 
2791 	kfree(rp);
2792 
2793 unlock:
2794 	hci_dev_unlock(hdev);
2795 	return err;
2796 }
2797 
2798 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2799 				   struct mgmt_cp_pin_code_neg_reply *cp)
2800 {
2801 	struct mgmt_pending_cmd *cmd;
2802 	int err;
2803 
2804 	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2805 			       sizeof(*cp));
2806 	if (!cmd)
2807 		return -ENOMEM;
2808 
2809 	cmd->cmd_complete = addr_cmd_complete;
2810 
2811 	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2812 			   sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2813 	if (err < 0)
2814 		mgmt_pending_remove(cmd);
2815 
2816 	return err;
2817 }
2818 
/* MGMT_OP_PIN_CODE_REPLY handler - forward a user-supplied PIN code to
 * the controller for an ongoing BR/EDR pairing.
 */
static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct hci_conn *conn;
	struct mgmt_cp_pin_code_reply *cp = data;
	struct hci_cp_pin_code_reply reply;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_POWERED);
		goto failed;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
	if (!conn) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
				      MGMT_STATUS_NOT_CONNECTED);
		goto failed;
	}

	/* High security requires a full 16-byte PIN; otherwise reject
	 * the pairing with a negative reply to the controller and an
	 * error status to the caller.
	 */
	if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
		struct mgmt_cp_pin_code_neg_reply ncp;

		memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));

		bt_dev_err(hdev, "PIN code is not 16 bytes long");

		err = send_pin_code_neg_reply(sk, hdev, &ncp);
		if (err >= 0)
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
					      MGMT_STATUS_INVALID_PARAMS);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	cmd->cmd_complete = addr_cmd_complete;

	bacpy(&reply.bdaddr, &cp->addr.bdaddr);
	reply.pin_len = cp->pin_len;
	memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));

	err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
	if (err < 0)
		mgmt_pending_remove(cmd);

failed:
	hci_dev_unlock(hdev);
	return err;
}
2880 
2881 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2882 			     u16 len)
2883 {
2884 	struct mgmt_cp_set_io_capability *cp = data;
2885 
2886 	bt_dev_dbg(hdev, "sock %p", sk);
2887 
2888 	if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2889 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2890 				       MGMT_STATUS_INVALID_PARAMS);
2891 
2892 	hci_dev_lock(hdev);
2893 
2894 	hdev->io_capability = cp->io_capability;
2895 
2896 	bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability);
2897 
2898 	hci_dev_unlock(hdev);
2899 
2900 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
2901 				 NULL, 0);
2902 }
2903 
2904 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2905 {
2906 	struct hci_dev *hdev = conn->hdev;
2907 	struct mgmt_pending_cmd *cmd;
2908 
2909 	list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2910 		if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2911 			continue;
2912 
2913 		if (cmd->user_data != conn)
2914 			continue;
2915 
2916 		return cmd;
2917 	}
2918 
2919 	return NULL;
2920 }
2921 
/* Complete a pending MGMT_OP_PAIR_DEVICE command with the given status
 * and release the connection reference taken when pairing started.
 */
static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
{
	struct mgmt_rp_pair_device rp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	bacpy(&rp.addr.bdaddr, &conn->dst);
	rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);

	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
				status, &rp, sizeof(rp));

	/* So we don't get further callbacks for this connection */
	conn->connect_cfm_cb = NULL;
	conn->security_cfm_cb = NULL;
	conn->disconn_cfm_cb = NULL;

	hci_conn_drop(conn);

	/* The device is paired so there is no need to remove
	 * its connection parameters anymore.
	 */
	clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

	/* Drops the reference taken via hci_conn_get() in pair_device() */
	hci_conn_put(conn);

	return err;
}
2950 
2951 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2952 {
2953 	u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2954 	struct mgmt_pending_cmd *cmd;
2955 
2956 	cmd = find_pairing(conn);
2957 	if (cmd) {
2958 		cmd->cmd_complete(cmd, status);
2959 		mgmt_pending_remove(cmd);
2960 	}
2961 }
2962 
2963 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2964 {
2965 	struct mgmt_pending_cmd *cmd;
2966 
2967 	BT_DBG("status %u", status);
2968 
2969 	cmd = find_pairing(conn);
2970 	if (!cmd) {
2971 		BT_DBG("Unable to find a pending command");
2972 		return;
2973 	}
2974 
2975 	cmd->cmd_complete(cmd, mgmt_status(status));
2976 	mgmt_pending_remove(cmd);
2977 }
2978 
2979 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2980 {
2981 	struct mgmt_pending_cmd *cmd;
2982 
2983 	BT_DBG("status %u", status);
2984 
2985 	if (!status)
2986 		return;
2987 
2988 	cmd = find_pairing(conn);
2989 	if (!cmd) {
2990 		BT_DBG("Unable to find a pending command");
2991 		return;
2992 	}
2993 
2994 	cmd->cmd_complete(cmd, mgmt_status(status));
2995 	mgmt_pending_remove(cmd);
2996 }
2997 
2998 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2999 		       u16 len)
3000 {
3001 	struct mgmt_cp_pair_device *cp = data;
3002 	struct mgmt_rp_pair_device rp;
3003 	struct mgmt_pending_cmd *cmd;
3004 	u8 sec_level, auth_type;
3005 	struct hci_conn *conn;
3006 	int err;
3007 
3008 	bt_dev_dbg(hdev, "sock %p", sk);
3009 
3010 	memset(&rp, 0, sizeof(rp));
3011 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
3012 	rp.addr.type = cp->addr.type;
3013 
3014 	if (!bdaddr_type_is_valid(cp->addr.type))
3015 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3016 					 MGMT_STATUS_INVALID_PARAMS,
3017 					 &rp, sizeof(rp));
3018 
3019 	if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
3020 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3021 					 MGMT_STATUS_INVALID_PARAMS,
3022 					 &rp, sizeof(rp));
3023 
3024 	hci_dev_lock(hdev);
3025 
3026 	if (!hdev_is_powered(hdev)) {
3027 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3028 					MGMT_STATUS_NOT_POWERED, &rp,
3029 					sizeof(rp));
3030 		goto unlock;
3031 	}
3032 
3033 	if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
3034 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3035 					MGMT_STATUS_ALREADY_PAIRED, &rp,
3036 					sizeof(rp));
3037 		goto unlock;
3038 	}
3039 
3040 	sec_level = BT_SECURITY_MEDIUM;
3041 	auth_type = HCI_AT_DEDICATED_BONDING;
3042 
3043 	if (cp->addr.type == BDADDR_BREDR) {
3044 		conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
3045 				       auth_type, CONN_REASON_PAIR_DEVICE);
3046 	} else {
3047 		u8 addr_type = le_addr_type(cp->addr.type);
3048 		struct hci_conn_params *p;
3049 
3050 		/* When pairing a new device, it is expected to remember
3051 		 * this device for future connections. Adding the connection
3052 		 * parameter information ahead of time allows tracking
3053 		 * of the peripheral preferred values and will speed up any
3054 		 * further connection establishment.
3055 		 *
3056 		 * If connection parameters already exist, then they
3057 		 * will be kept and this function does nothing.
3058 		 */
3059 		p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
3060 
3061 		if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
3062 			p->auto_connect = HCI_AUTO_CONN_DISABLED;
3063 
3064 		conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type,
3065 					   sec_level, HCI_LE_CONN_TIMEOUT,
3066 					   CONN_REASON_PAIR_DEVICE);
3067 	}
3068 
3069 	if (IS_ERR(conn)) {
3070 		int status;
3071 
3072 		if (PTR_ERR(conn) == -EBUSY)
3073 			status = MGMT_STATUS_BUSY;
3074 		else if (PTR_ERR(conn) == -EOPNOTSUPP)
3075 			status = MGMT_STATUS_NOT_SUPPORTED;
3076 		else if (PTR_ERR(conn) == -ECONNREFUSED)
3077 			status = MGMT_STATUS_REJECTED;
3078 		else
3079 			status = MGMT_STATUS_CONNECT_FAILED;
3080 
3081 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3082 					status, &rp, sizeof(rp));
3083 		goto unlock;
3084 	}
3085 
3086 	if (conn->connect_cfm_cb) {
3087 		hci_conn_drop(conn);
3088 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
3089 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
3090 		goto unlock;
3091 	}
3092 
3093 	cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
3094 	if (!cmd) {
3095 		err = -ENOMEM;
3096 		hci_conn_drop(conn);
3097 		goto unlock;
3098 	}
3099 
3100 	cmd->cmd_complete = pairing_complete;
3101 
3102 	/* For LE, just connecting isn't a proof that the pairing finished */
3103 	if (cp->addr.type == BDADDR_BREDR) {
3104 		conn->connect_cfm_cb = pairing_complete_cb;
3105 		conn->security_cfm_cb = pairing_complete_cb;
3106 		conn->disconn_cfm_cb = pairing_complete_cb;
3107 	} else {
3108 		conn->connect_cfm_cb = le_pairing_complete_cb;
3109 		conn->security_cfm_cb = le_pairing_complete_cb;
3110 		conn->disconn_cfm_cb = le_pairing_complete_cb;
3111 	}
3112 
3113 	conn->io_capability = cp->io_cap;
3114 	cmd->user_data = hci_conn_get(conn);
3115 
3116 	if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
3117 	    hci_conn_security(conn, sec_level, auth_type, true)) {
3118 		cmd->cmd_complete(cmd, 0);
3119 		mgmt_pending_remove(cmd);
3120 	}
3121 
3122 	err = 0;
3123 
3124 unlock:
3125 	hci_dev_unlock(hdev);
3126 	return err;
3127 }
3128 
/* MGMT_OP_CANCEL_PAIR_DEVICE handler - abort an in-progress pairing,
 * remove any pairing data already created and tear down a link that
 * was only established for the pairing attempt.
 */
static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
			      u16 len)
{
	struct mgmt_addr_info *addr = data;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
	if (!cmd) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	conn = cmd->user_data;

	/* The given address must match the pairing being cancelled */
	if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
	mgmt_pending_remove(cmd);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
				addr, sizeof(*addr));

	/* Since user doesn't want to proceed with the connection, abort any
	 * ongoing pairing and then terminate the link if it was created
	 * because of the pair device action.
	 */
	if (addr->type == BDADDR_BREDR)
		hci_remove_link_key(hdev, &addr->bdaddr);
	else
		smp_cancel_and_remove_pairing(hdev, &addr->bdaddr,
					      le_addr_type(addr->type));

	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE)
		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3185 
/* Common helper for the user pairing response commands (PIN code,
 * user confirm and passkey replies and their negative variants).
 *
 * LE responses are handed to SMP and completed immediately; BR/EDR
 * responses are forwarded to the controller as the given HCI command
 * and tracked as a pending mgmt command.
 */
static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_addr_info *addr, u16 mgmt_op,
			     u16 hci_op, __le32 passkey)
{
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_POWERED, addr,
					sizeof(*addr));
		goto done;
	}

	if (addr->type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
	else
		conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
					       le_addr_type(addr->type));

	if (!conn) {
		err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
					MGMT_STATUS_NOT_CONNECTED, addr,
					sizeof(*addr));
		goto done;
	}

	/* LE pairing responses are handled entirely by SMP */
	if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
		err = smp_user_confirm_reply(conn, mgmt_op, passkey);
		if (!err)
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_SUCCESS, addr,
						sizeof(*addr));
		else
			err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
						MGMT_STATUS_FAILED, addr,
						sizeof(*addr));

		goto done;
	}

	cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
	if (!cmd) {
		err = -ENOMEM;
		goto done;
	}

	cmd->cmd_complete = addr_cmd_complete;

	/* Continue with pairing via HCI */
	if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
		struct hci_cp_user_passkey_reply cp;

		bacpy(&cp.bdaddr, &addr->bdaddr);
		cp.passkey = passkey;
		err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
	} else
		err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
				   &addr->bdaddr);

	if (err < 0)
		mgmt_pending_remove(cmd);

done:
	hci_dev_unlock(hdev);
	return err;
}
3256 
3257 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3258 			      void *data, u16 len)
3259 {
3260 	struct mgmt_cp_pin_code_neg_reply *cp = data;
3261 
3262 	bt_dev_dbg(hdev, "sock %p", sk);
3263 
3264 	return user_pairing_resp(sk, hdev, &cp->addr,
3265 				MGMT_OP_PIN_CODE_NEG_REPLY,
3266 				HCI_OP_PIN_CODE_NEG_REPLY, 0);
3267 }
3268 
3269 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3270 			      u16 len)
3271 {
3272 	struct mgmt_cp_user_confirm_reply *cp = data;
3273 
3274 	bt_dev_dbg(hdev, "sock %p", sk);
3275 
3276 	if (len != sizeof(*cp))
3277 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3278 				       MGMT_STATUS_INVALID_PARAMS);
3279 
3280 	return user_pairing_resp(sk, hdev, &cp->addr,
3281 				 MGMT_OP_USER_CONFIRM_REPLY,
3282 				 HCI_OP_USER_CONFIRM_REPLY, 0);
3283 }
3284 
3285 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3286 				  void *data, u16 len)
3287 {
3288 	struct mgmt_cp_user_confirm_neg_reply *cp = data;
3289 
3290 	bt_dev_dbg(hdev, "sock %p", sk);
3291 
3292 	return user_pairing_resp(sk, hdev, &cp->addr,
3293 				 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3294 				 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3295 }
3296 
3297 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3298 			      u16 len)
3299 {
3300 	struct mgmt_cp_user_passkey_reply *cp = data;
3301 
3302 	bt_dev_dbg(hdev, "sock %p", sk);
3303 
3304 	return user_pairing_resp(sk, hdev, &cp->addr,
3305 				 MGMT_OP_USER_PASSKEY_REPLY,
3306 				 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3307 }
3308 
3309 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3310 				  void *data, u16 len)
3311 {
3312 	struct mgmt_cp_user_passkey_neg_reply *cp = data;
3313 
3314 	bt_dev_dbg(hdev, "sock %p", sk);
3315 
3316 	return user_pairing_resp(sk, hdev, &cp->addr,
3317 				 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3318 				 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3319 }
3320 
3321 static int adv_expire_sync(struct hci_dev *hdev, u32 flags)
3322 {
3323 	struct adv_info *adv_instance;
3324 
3325 	adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3326 	if (!adv_instance)
3327 		return 0;
3328 
3329 	/* stop if current instance doesn't need to be changed */
3330 	if (!(adv_instance->flags & flags))
3331 		return 0;
3332 
3333 	cancel_adv_timeout(hdev);
3334 
3335 	adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3336 	if (!adv_instance)
3337 		return 0;
3338 
3339 	hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true);
3340 
3341 	return 0;
3342 }
3343 
3344 static int name_changed_sync(struct hci_dev *hdev, void *data)
3345 {
3346 	return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3347 }
3348 
/* Completion callback for the set_name_sync work queued from
 * set_local_name().  Relays the result to the mgmt socket that issued
 * MGMT_OP_SET_LOCAL_NAME and frees the pending command.
 */
static void set_name_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_set_local_name *cp = cmd->param;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	/* Bail out if this command is no longer the pending one (it may
	 * have been cancelled or completed elsewhere).
	 */
	if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev))
		return;

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
				  cp, sizeof(*cp));

		/* When advertising is active the name may be part of the
		 * advertised data and needs refreshing (name_changed_sync).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
			hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL);
	}

	mgmt_pending_remove(cmd);
}
3373 
3374 static int set_name_sync(struct hci_dev *hdev, void *data)
3375 {
3376 	if (lmp_bredr_capable(hdev)) {
3377 		hci_update_name_sync(hdev);
3378 		hci_update_eir_sync(hdev);
3379 	}
3380 
3381 	/* The name is stored in the scan response data and so
3382 	 * no need to update the advertising data here.
3383 	 */
3384 	if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3385 		hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance);
3386 
3387 	return 0;
3388 }
3389 
/* Handle MGMT_OP_SET_LOCAL_NAME.  Updates the device and short names,
 * replying immediately when nothing changed or the controller is
 * powered off, and otherwise queueing set_name_sync to push the new
 * name to the controller.
 */
static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_set_local_name *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* If the old values are the same as the new ones just return a
	 * direct command complete event.
	 */
	if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
	    !memcmp(hdev->short_name, cp->short_name,
		    sizeof(hdev->short_name))) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		goto failed;
	}

	memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));

	/* While powered off no HCI traffic is needed: store the name and
	 * notify listeners directly.
	 */
	if (!hdev_is_powered(hdev)) {
		memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
					data, len);
		if (err < 0)
			goto failed;

		err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
					 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
		ext_info_changed(hdev, sk);

		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_name_sync, cmd,
					 set_name_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);

		goto failed;
	}

	memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));

/* NOTE: despite its label, this is the common exit used by success
 * paths as well.
 */
failed:
	hci_dev_unlock(hdev);
	return err;
}
3452 
3453 static int appearance_changed_sync(struct hci_dev *hdev, void *data)
3454 {
3455 	return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE);
3456 }
3457 
3458 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3459 			  u16 len)
3460 {
3461 	struct mgmt_cp_set_appearance *cp = data;
3462 	u16 appearance;
3463 	int err;
3464 
3465 	bt_dev_dbg(hdev, "sock %p", sk);
3466 
3467 	if (!lmp_le_capable(hdev))
3468 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3469 				       MGMT_STATUS_NOT_SUPPORTED);
3470 
3471 	appearance = le16_to_cpu(cp->appearance);
3472 
3473 	hci_dev_lock(hdev);
3474 
3475 	if (hdev->appearance != appearance) {
3476 		hdev->appearance = appearance;
3477 
3478 		if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3479 			hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL,
3480 					   NULL);
3481 
3482 		ext_info_changed(hdev, sk);
3483 	}
3484 
3485 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3486 				0);
3487 
3488 	hci_dev_unlock(hdev);
3489 
3490 	return err;
3491 }
3492 
3493 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3494 				 void *data, u16 len)
3495 {
3496 	struct mgmt_rp_get_phy_configuration rp;
3497 
3498 	bt_dev_dbg(hdev, "sock %p", sk);
3499 
3500 	hci_dev_lock(hdev);
3501 
3502 	memset(&rp, 0, sizeof(rp));
3503 
3504 	rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3505 	rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3506 	rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3507 
3508 	hci_dev_unlock(hdev);
3509 
3510 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3511 				 &rp, sizeof(rp));
3512 }
3513 
3514 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3515 {
3516 	struct mgmt_ev_phy_configuration_changed ev;
3517 
3518 	memset(&ev, 0, sizeof(ev));
3519 
3520 	ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3521 
3522 	return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3523 			  sizeof(ev), skip);
3524 }
3525 
/* Completion callback for set_default_phy_sync.  The HCI response skb
 * (or an error pointer) is stored in cmd->skb by the sync callback;
 * derive the final status from it and reply to the originating socket.
 */
static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* Skip if this command is no longer the pending one */
	if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev))
		return;

	/* The queue callback succeeded; inspect the HCI response itself.
	 * cmd->skb may be NULL, an ERR_PTR or a real skb whose first
	 * byte is the HCI status.
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id,
				MGMT_OP_SET_PHY_CONFIGURATION, status);
	} else {
		mgmt_cmd_complete(cmd->sk, hdev->id,
				  MGMT_OP_SET_PHY_CONFIGURATION, 0,
				  NULL, 0);

		/* Inform all other mgmt sockets about the new selection */
		mgmt_phy_configuration_changed(hdev, cmd->sk);
	}

	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_remove(cmd);
}
3562 
3563 static int set_default_phy_sync(struct hci_dev *hdev, void *data)
3564 {
3565 	struct mgmt_pending_cmd *cmd = data;
3566 	struct mgmt_cp_set_phy_configuration *cp = cmd->param;
3567 	struct hci_cp_le_set_default_phy cp_phy;
3568 	u32 selected_phys = __le32_to_cpu(cp->selected_phys);
3569 
3570 	memset(&cp_phy, 0, sizeof(cp_phy));
3571 
3572 	if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3573 		cp_phy.all_phys |= 0x01;
3574 
3575 	if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3576 		cp_phy.all_phys |= 0x02;
3577 
3578 	if (selected_phys & MGMT_PHY_LE_1M_TX)
3579 		cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3580 
3581 	if (selected_phys & MGMT_PHY_LE_2M_TX)
3582 		cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3583 
3584 	if (selected_phys & MGMT_PHY_LE_CODED_TX)
3585 		cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3586 
3587 	if (selected_phys & MGMT_PHY_LE_1M_RX)
3588 		cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3589 
3590 	if (selected_phys & MGMT_PHY_LE_2M_RX)
3591 		cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3592 
3593 	if (selected_phys & MGMT_PHY_LE_CODED_RX)
3594 		cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3595 
3596 	cmd->skb =  __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY,
3597 				   sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT);
3598 
3599 	return 0;
3600 }
3601 
/* Handle MGMT_OP_SET_PHY_CONFIGURATION.  Validates the requested PHY
 * selection, applies the BR/EDR part directly through hdev->pkt_type
 * and queues an HCI command (set_default_phy_sync) for the LE part.
 */
static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct mgmt_cp_set_phy_configuration *cp = data;
	struct mgmt_pending_cmd *cmd;
	u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
	u16 pkt_type = (HCI_DH1 | HCI_DM1);
	bool changed = false;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	configurable_phys = get_configurable_phys(hdev);
	supported_phys = get_supported_phys(hdev);
	selected_phys = __le32_to_cpu(cp->selected_phys);

	/* Selecting a PHY the controller does not support is invalid */
	if (selected_phys & ~supported_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* PHYs that are supported but not configurable must always
	 * remain selected.
	 */
	unconfigure_phys = supported_phys & ~configurable_phys;

	if ((selected_phys & unconfigure_phys) != unconfigure_phys)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_PHY_CONFIGURATION,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Nothing to do when the selection is unchanged */
	if (selected_phys == get_selected_phys(hdev))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_SET_PHY_CONFIGURATION,
					 0, NULL, 0);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* Only one PHY configuration change may be in flight at a time */
	if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Translate the BR/EDR PHY selection into ACL packet types.
	 * Note that for the 2M/3M EDR packet types the HCI bits mean
	 * "shall NOT be used", hence the inverted logic below.
	 */
	if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
		pkt_type |= (HCI_DH3 | HCI_DM3);
	else
		pkt_type &= ~(HCI_DH3 | HCI_DM3);

	if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
		pkt_type |= (HCI_DH5 | HCI_DM5);
	else
		pkt_type &= ~(HCI_DH5 | HCI_DM5);

	if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
		pkt_type &= ~HCI_2DH1;
	else
		pkt_type |= HCI_2DH1;

	if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
		pkt_type &= ~HCI_2DH3;
	else
		pkt_type |= HCI_2DH3;

	if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
		pkt_type &= ~HCI_2DH5;
	else
		pkt_type |= HCI_2DH5;

	if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
		pkt_type &= ~HCI_3DH1;
	else
		pkt_type |= HCI_3DH1;

	if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
		pkt_type &= ~HCI_3DH3;
	else
		pkt_type |= HCI_3DH3;

	if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
		pkt_type &= ~HCI_3DH5;
	else
		pkt_type |= HCI_3DH5;

	if (pkt_type != hdev->pkt_type) {
		hdev->pkt_type = pkt_type;
		changed = true;
	}

	/* If only the BR/EDR part changed there is no HCI command to
	 * send; report success (and a changed event if needed) now.
	 */
	if ((selected_phys & MGMT_PHY_LE_MASK) ==
	    (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
		if (changed)
			mgmt_phy_configuration_changed(hdev, sk);

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_SET_PHY_CONFIGURATION,
					0, NULL, 0);

		goto unlock;
	}

	/* The LE part requires a controller round-trip: queue the HCI
	 * command and complete from set_default_phy_complete.
	 */
	cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd,
					 set_default_phy_complete);

	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_PHY_CONFIGURATION,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
3730 
3731 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3732 			    u16 len)
3733 {
3734 	int err = MGMT_STATUS_SUCCESS;
3735 	struct mgmt_cp_set_blocked_keys *keys = data;
3736 	const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3737 				   sizeof(struct mgmt_blocked_key_info));
3738 	u16 key_count, expected_len;
3739 	int i;
3740 
3741 	bt_dev_dbg(hdev, "sock %p", sk);
3742 
3743 	key_count = __le16_to_cpu(keys->key_count);
3744 	if (key_count > max_key_count) {
3745 		bt_dev_err(hdev, "too big key_count value %u", key_count);
3746 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3747 				       MGMT_STATUS_INVALID_PARAMS);
3748 	}
3749 
3750 	expected_len = struct_size(keys, keys, key_count);
3751 	if (expected_len != len) {
3752 		bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3753 			   expected_len, len);
3754 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3755 				       MGMT_STATUS_INVALID_PARAMS);
3756 	}
3757 
3758 	hci_dev_lock(hdev);
3759 
3760 	hci_blocked_keys_clear(hdev);
3761 
3762 	for (i = 0; i < keys->key_count; ++i) {
3763 		struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3764 
3765 		if (!b) {
3766 			err = MGMT_STATUS_NO_RESOURCES;
3767 			break;
3768 		}
3769 
3770 		b->type = keys->keys[i].type;
3771 		memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3772 		list_add_rcu(&b->list, &hdev->blocked_keys);
3773 	}
3774 	hci_dev_unlock(hdev);
3775 
3776 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3777 				err, NULL, 0);
3778 }
3779 
/* Handle MGMT_OP_SET_WIDEBAND_SPEECH.  Toggles the wideband speech
 * setting for drivers that declare support via the quirk; while the
 * controller is powered the current value cannot be changed.
 */
static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only drivers that set the quirk support wideband speech */
	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* Only boolean on/off is valid */
	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Flipping the setting is rejected while powered; re-submitting
	 * the current value is still allowed.
	 */
	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	/* The test-and-set/clear helpers report whether the flag
	 * actually flipped.
	 */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						   HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	/* Broadcast the new settings to other mgmt sockets */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
3828 
/* Handle MGMT_OP_READ_CONTROLLER_CAP: reply with a list of EIR-style
 * capability entries (security flags, encryption key sizes and the LE
 * TX power range when available).
 */
static int read_controller_cap(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	/* 20 bytes cover the response header plus the at most four
	 * entries appended below (3 + 4 + 4 + 4 bytes of EIR data).
	 */
	char buf[20];
	struct mgmt_rp_read_controller_cap *rp = (void *)buf;
	u16 cap_len = 0;
	u8 flags = 0;
	u8 tx_power_range[2];

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

	hci_dev_lock(hdev);

	/* When the Read Simple Pairing Options command is supported, then
	 * the remote public key validation is supported.
	 *
	 * Alternatively, when Microsoft extensions are available, they can
	 * indicate support for public key validation as well.
	 */
	if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev))
		flags |= 0x01;	/* Remote public key validation (BR/EDR) */

	flags |= 0x02;		/* Remote public key validation (LE) */

	/* When the Read Encryption Key Size command is supported, then the
	 * encryption key size is enforced.
	 */
	if (hdev->commands[20] & 0x10)
		flags |= 0x04;	/* Encryption key size enforcement (BR/EDR) */

	flags |= 0x08;		/* Encryption key size enforcement (LE) */

	cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS,
				  &flags, 1);

	/* When the Read Simple Pairing Options command is supported, then
	 * also max encryption key size information is provided.
	 */
	if (hdev->commands[41] & 0x08)
		cap_len = eir_append_le16(rp->cap, cap_len,
					  MGMT_CAP_MAX_ENC_KEY_SIZE,
					  hdev->max_enc_key_size);

	cap_len = eir_append_le16(rp->cap, cap_len,
				  MGMT_CAP_SMP_MAX_ENC_KEY_SIZE,
				  SMP_MAX_ENC_KEY_SIZE);

	/* Append the min/max LE tx power parameters if we were able to fetch
	 * it from the controller
	 */
	if (hdev->commands[38] & 0x80) {
		memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1);
		memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1);
		cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR,
					  tx_power_range, 2);
	}

	rp->cap_len = cpu_to_le16(cap_len);

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0,
				 rp, sizeof(*rp) + cap_len);
}
3895 
/* UUIDs of the experimental features exposed through
 * MGMT_OP_READ_EXP_FEATURES_INFO.  Note that the byte arrays hold the
 * UUIDs in reversed byte order compared to their string form above
 * each array.
 */
#ifdef CONFIG_BT_FEATURE_DEBUG
/* d4992530-b9ec-469f-ab01-6c481c47da1c */
static const u8 debug_uuid[16] = {
	0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab,
	0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4,
};
#endif

/* 330859bc-7506-492d-9370-9a6f0614037f */
static const u8 quality_report_uuid[16] = {
	0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93,
	0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33,
};

/* a6695ace-ee7f-4fb9-881a-5fac66c629af */
static const u8 offload_codecs_uuid[16] = {
	0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88,
	0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6,
};

/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */
static const u8 le_simultaneous_roles_uuid[16] = {
	0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92,
	0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67,
};

/* 15c0a148-c273-11ea-b3de-0242ac130004 */
static const u8 rpa_resolution_uuid[16] = {
	0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3,
	0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15,
};
3927 
/* Handle MGMT_OP_READ_EXP_FEATURES_INFO.  May be called with or
 * without a controller index (hdev == NULL for the non-controller
 * index); each available experimental feature is reported as a
 * UUID/flags pair where flag bit 0 indicates the feature is enabled.
 */
static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,
				  void *data, u16 data_len)
{
	char buf[102];   /* Enough space for 5 features: 2 + 20 * 5 */
	struct mgmt_rp_read_exp_features_info *rp = (void *)buf;
	u16 idx = 0;
	u32 flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&buf, 0, sizeof(buf));

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* Debug feature: only reported on the non-controller index */
	if (!hdev) {
		flags = bt_dbg_get() ? BIT(0) : 0;

		memcpy(rp->features[idx].uuid, debug_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}
#endif

	/* LE simultaneous central/peripheral roles */
	if (hdev && hci_dev_le_state_simultaneous(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* LL privacy (RPA resolution); BIT(1) flags that enabling it
	 * changes the supported settings.
	 */
	if (hdev && ll_privacy_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY))
			flags = BIT(0) | BIT(1);
		else
			flags = BIT(1);

		memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* Quality report, either via AOSP extensions or a driver hook */
	if (hdev && (aosp_has_quality_report(hdev) ||
		     hdev->set_quality_report)) {
		if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, quality_report_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	/* Codec offload, available when the driver provides a data
	 * path id callback.
	 */
	if (hdev && hdev->get_data_path_id) {
		if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED))
			flags = BIT(0);
		else
			flags = 0;

		memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16);
		rp->features[idx].flags = cpu_to_le32(flags);
		idx++;
	}

	rp->feature_count = cpu_to_le16(idx);

	/* After reading the experimental features information, enable
	 * the events to update client on any future change.
	 */
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_READ_EXP_FEATURES_INFO,
				 0, rp, sizeof(*rp) + (20 * idx));
}
4006 
4007 static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev,
4008 					  struct sock *skip)
4009 {
4010 	struct mgmt_ev_exp_feature_changed ev;
4011 
4012 	memset(&ev, 0, sizeof(ev));
4013 	memcpy(ev.uuid, rpa_resolution_uuid, 16);
4014 	ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1));
4015 
4016 	if (enabled && privacy_mode_capable(hdev))
4017 		set_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
4018 	else
4019 		clear_bit(HCI_CONN_FLAG_DEVICE_PRIVACY, hdev->conn_flags);
4020 
4021 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4022 				  &ev, sizeof(ev),
4023 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4024 
4025 }
4026 
4027 static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid,
4028 			       bool enabled, struct sock *skip)
4029 {
4030 	struct mgmt_ev_exp_feature_changed ev;
4031 
4032 	memset(&ev, 0, sizeof(ev));
4033 	memcpy(ev.uuid, uuid, 16);
4034 	ev.flags = cpu_to_le32(enabled ? BIT(0) : 0);
4035 
4036 	return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev,
4037 				  &ev, sizeof(ev),
4038 				  HCI_MGMT_EXP_FEATURE_EVENTS, skip);
4039 }
4040 
/* Initializer for an experimental feature descriptor, pairing a
 * feature UUID with the handler that sets it.
 */
#define EXP_FEAT(_uuid, _set_func)	\
{					\
	.uuid = _uuid,			\
	.set_func = _set_func,		\
}
4046 
/* The zero key uuid is special. Multiple exp features are set through it. */
static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev,
			     struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	/* The reply always carries the zero UUID with no flags set */
	memset(rp.uuid, 0, 16);
	rp.flags = cpu_to_le32(0);

#ifdef CONFIG_BT_FEATURE_DEBUG
	/* On the non-controller index this disables the debug feature */
	if (!hdev) {
		bool changed = bt_dbg_get();

		bt_dbg_set(false);

		if (changed)
			exp_feature_changed(NULL, ZERO_KEY, false, sk);
	}
#endif

	/* On a controller index this disables LL privacy, which is only
	 * possible while the controller is powered off.
	 */
	if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) {
		bool changed;

		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_ENABLE_LL_PRIVACY);
		if (changed)
			exp_feature_changed(hdev, rpa_resolution_uuid, false,
					    sk);
	}

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
				 MGMT_OP_SET_EXP_FEATURE, 0,
				 &rp, sizeof(rp));
}
4083 
#ifdef CONFIG_BT_FEATURE_DEBUG
/* Toggle the Bluetooth debug experimental feature.  Only valid on the
 * non-controller index with a single boolean parameter octet.
 */
static int set_debug_func(struct sock *sk, struct hci_dev *hdev,
			  struct mgmt_cp_set_exp_feature *cp, u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;

	bool val, changed;
	int err;

	/* Command requires to use the non-controller index */
	if (hdev)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	val = !!cp->param[0];
	/* changed is true when the new value differs from the old one */
	changed = val ? !bt_dbg_get() : bt_dbg_get();
	bt_dbg_set(val);

	memcpy(rp.uuid, debug_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);

	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
				MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Notify other subscribed sockets about the change */
	if (changed)
		exp_feature_changed(hdev, debug_uuid, val, sk);

	return err;
}
#endif
4130 
4131 static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,
4132 				   struct mgmt_cp_set_exp_feature *cp,
4133 				   u16 data_len)
4134 {
4135 	struct mgmt_rp_set_exp_feature rp;
4136 	bool val, changed;
4137 	int err;
4138 	u32 flags;
4139 
4140 	/* Command requires to use the controller index */
4141 	if (!hdev)
4142 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4143 				       MGMT_OP_SET_EXP_FEATURE,
4144 				       MGMT_STATUS_INVALID_INDEX);
4145 
4146 	/* Changes can only be made when controller is powered down */
4147 	if (hdev_is_powered(hdev))
4148 		return mgmt_cmd_status(sk, hdev->id,
4149 				       MGMT_OP_SET_EXP_FEATURE,
4150 				       MGMT_STATUS_REJECTED);
4151 
4152 	/* Parameters are limited to a single octet */
4153 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4154 		return mgmt_cmd_status(sk, hdev->id,
4155 				       MGMT_OP_SET_EXP_FEATURE,
4156 				       MGMT_STATUS_INVALID_PARAMS);
4157 
4158 	/* Only boolean on/off is supported */
4159 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4160 		return mgmt_cmd_status(sk, hdev->id,
4161 				       MGMT_OP_SET_EXP_FEATURE,
4162 				       MGMT_STATUS_INVALID_PARAMS);
4163 
4164 	val = !!cp->param[0];
4165 
4166 	if (val) {
4167 		changed = !hci_dev_test_and_set_flag(hdev,
4168 						     HCI_ENABLE_LL_PRIVACY);
4169 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4170 
4171 		/* Enable LL privacy + supported settings changed */
4172 		flags = BIT(0) | BIT(1);
4173 	} else {
4174 		changed = hci_dev_test_and_clear_flag(hdev,
4175 						      HCI_ENABLE_LL_PRIVACY);
4176 
4177 		/* Disable LL privacy + supported settings changed */
4178 		flags = BIT(1);
4179 	}
4180 
4181 	memcpy(rp.uuid, rpa_resolution_uuid, 16);
4182 	rp.flags = cpu_to_le32(flags);
4183 
4184 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4185 
4186 	err = mgmt_cmd_complete(sk, hdev->id,
4187 				MGMT_OP_SET_EXP_FEATURE, 0,
4188 				&rp, sizeof(rp));
4189 
4190 	if (changed)
4191 		exp_ll_privacy_feature_changed(val, hdev, sk);
4192 
4193 	return err;
4194 }
4195 
/* Toggle the quality report experimental feature.  The change is
 * applied through the driver's set_quality_report hook when available,
 * otherwise through the AOSP vendor extension.
 */
static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev,
				   struct mgmt_cp_set_exp_feature *cp,
				   u16 data_len)
{
	struct mgmt_rp_set_exp_feature rp;
	bool val, changed;
	int err;

	/* Command requires to use a valid controller index */
	if (!hdev)
		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_INDEX);

	/* Parameters are limited to a single octet */
	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Only boolean on/off is supported */
	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_EXP_FEATURE,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Serialize against other synchronous HCI request processing */
	hci_req_sync_lock(hdev);

	val = !!cp->param[0];
	changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT));

	/* Either a driver hook or the AOSP extension must be present */
	if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_EXP_FEATURE,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock_quality_report;
	}

	if (changed) {
		/* Prefer the driver hook over the AOSP extension */
		if (hdev->set_quality_report)
			err = hdev->set_quality_report(hdev, val);
		else
			err = aosp_set_quality_report(hdev, val);

		if (err) {
			err = mgmt_cmd_status(sk, hdev->id,
					      MGMT_OP_SET_EXP_FEATURE,
					      MGMT_STATUS_FAILED);
			goto unlock_quality_report;
		}

		if (val)
			hci_dev_set_flag(hdev, HCI_QUALITY_REPORT);
		else
			hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT);
	}

	bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed);

	memcpy(rp.uuid, quality_report_uuid, 16);
	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0,
				&rp, sizeof(rp));

	/* Notify other subscribed sockets about the change */
	if (changed)
		exp_feature_changed(hdev, quality_report_uuid, val, sk);

unlock_quality_report:
	hci_req_sync_unlock(hdev);
	return err;
}
4269 
4270 static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev,
4271 				  struct mgmt_cp_set_exp_feature *cp,
4272 				  u16 data_len)
4273 {
4274 	bool val, changed;
4275 	int err;
4276 	struct mgmt_rp_set_exp_feature rp;
4277 
4278 	/* Command requires to use a valid controller index */
4279 	if (!hdev)
4280 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4281 				       MGMT_OP_SET_EXP_FEATURE,
4282 				       MGMT_STATUS_INVALID_INDEX);
4283 
4284 	/* Parameters are limited to a single octet */
4285 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4286 		return mgmt_cmd_status(sk, hdev->id,
4287 				       MGMT_OP_SET_EXP_FEATURE,
4288 				       MGMT_STATUS_INVALID_PARAMS);
4289 
4290 	/* Only boolean on/off is supported */
4291 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4292 		return mgmt_cmd_status(sk, hdev->id,
4293 				       MGMT_OP_SET_EXP_FEATURE,
4294 				       MGMT_STATUS_INVALID_PARAMS);
4295 
4296 	val = !!cp->param[0];
4297 	changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED));
4298 
4299 	if (!hdev->get_data_path_id) {
4300 		return mgmt_cmd_status(sk, hdev->id,
4301 				       MGMT_OP_SET_EXP_FEATURE,
4302 				       MGMT_STATUS_NOT_SUPPORTED);
4303 	}
4304 
4305 	if (changed) {
4306 		if (val)
4307 			hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4308 		else
4309 			hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED);
4310 	}
4311 
4312 	bt_dev_info(hdev, "offload codecs enable %d changed %d",
4313 		    val, changed);
4314 
4315 	memcpy(rp.uuid, offload_codecs_uuid, 16);
4316 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4317 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4318 	err = mgmt_cmd_complete(sk, hdev->id,
4319 				MGMT_OP_SET_EXP_FEATURE, 0,
4320 				&rp, sizeof(rp));
4321 
4322 	if (changed)
4323 		exp_feature_changed(hdev, offload_codecs_uuid, val, sk);
4324 
4325 	return err;
4326 }
4327 
4328 static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev,
4329 					  struct mgmt_cp_set_exp_feature *cp,
4330 					  u16 data_len)
4331 {
4332 	bool val, changed;
4333 	int err;
4334 	struct mgmt_rp_set_exp_feature rp;
4335 
4336 	/* Command requires to use a valid controller index */
4337 	if (!hdev)
4338 		return mgmt_cmd_status(sk, MGMT_INDEX_NONE,
4339 				       MGMT_OP_SET_EXP_FEATURE,
4340 				       MGMT_STATUS_INVALID_INDEX);
4341 
4342 	/* Parameters are limited to a single octet */
4343 	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1)
4344 		return mgmt_cmd_status(sk, hdev->id,
4345 				       MGMT_OP_SET_EXP_FEATURE,
4346 				       MGMT_STATUS_INVALID_PARAMS);
4347 
4348 	/* Only boolean on/off is supported */
4349 	if (cp->param[0] != 0x00 && cp->param[0] != 0x01)
4350 		return mgmt_cmd_status(sk, hdev->id,
4351 				       MGMT_OP_SET_EXP_FEATURE,
4352 				       MGMT_STATUS_INVALID_PARAMS);
4353 
4354 	val = !!cp->param[0];
4355 	changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES));
4356 
4357 	if (!hci_dev_le_state_simultaneous(hdev)) {
4358 		return mgmt_cmd_status(sk, hdev->id,
4359 				       MGMT_OP_SET_EXP_FEATURE,
4360 				       MGMT_STATUS_NOT_SUPPORTED);
4361 	}
4362 
4363 	if (changed) {
4364 		if (val)
4365 			hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4366 		else
4367 			hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES);
4368 	}
4369 
4370 	bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d",
4371 		    val, changed);
4372 
4373 	memcpy(rp.uuid, le_simultaneous_roles_uuid, 16);
4374 	rp.flags = cpu_to_le32(val ? BIT(0) : 0);
4375 	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS);
4376 	err = mgmt_cmd_complete(sk, hdev->id,
4377 				MGMT_OP_SET_EXP_FEATURE, 0,
4378 				&rp, sizeof(rp));
4379 
4380 	if (changed)
4381 		exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk);
4382 
4383 	return err;
4384 }
4385 
/* Dispatch table mapping an experimental-feature UUID to its setter.
 * set_exp_feature() walks this list and invokes the first entry whose
 * UUID matches the one supplied in the command parameters.
 */
static const struct mgmt_exp_feature {
	const u8 *uuid;
	int (*set_func)(struct sock *sk, struct hci_dev *hdev,
			struct mgmt_cp_set_exp_feature *cp, u16 data_len);
} exp_features[] = {
	/* All-zero UUID: global "disable all experimental features" entry */
	EXP_FEAT(ZERO_KEY, set_zero_key_func),
#ifdef CONFIG_BT_FEATURE_DEBUG
	EXP_FEAT(debug_uuid, set_debug_func),
#endif
	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),
	EXP_FEAT(quality_report_uuid, set_quality_report_func),
	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func),
	EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func),

	/* end with a null feature */
	EXP_FEAT(NULL, NULL)
};
4403 
4404 static int set_exp_feature(struct sock *sk, struct hci_dev *hdev,
4405 			   void *data, u16 data_len)
4406 {
4407 	struct mgmt_cp_set_exp_feature *cp = data;
4408 	size_t i = 0;
4409 
4410 	bt_dev_dbg(hdev, "sock %p", sk);
4411 
4412 	for (i = 0; exp_features[i].uuid; i++) {
4413 		if (!memcmp(cp->uuid, exp_features[i].uuid, 16))
4414 			return exp_features[i].set_func(sk, hdev, cp, data_len);
4415 	}
4416 
4417 	return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE,
4418 			       MGMT_OP_SET_EXP_FEATURE,
4419 			       MGMT_STATUS_NOT_SUPPORTED);
4420 }
4421 
4422 static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4423 			    u16 data_len)
4424 {
4425 	struct mgmt_cp_get_device_flags *cp = data;
4426 	struct mgmt_rp_get_device_flags rp;
4427 	struct bdaddr_list_with_flags *br_params;
4428 	struct hci_conn_params *params;
4429 	u32 supported_flags;
4430 	u32 current_flags = 0;
4431 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4432 
4433 	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
4434 		   &cp->addr.bdaddr, cp->addr.type);
4435 
4436 	hci_dev_lock(hdev);
4437 
4438 	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
4439 			__HCI_CONN_NUM_FLAGS);
4440 
4441 	memset(&rp, 0, sizeof(rp));
4442 
4443 	if (cp->addr.type == BDADDR_BREDR) {
4444 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4445 							      &cp->addr.bdaddr,
4446 							      cp->addr.type);
4447 		if (!br_params)
4448 			goto done;
4449 
4450 		bitmap_to_arr32(&current_flags, br_params->flags,
4451 				__HCI_CONN_NUM_FLAGS);
4452 	} else {
4453 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4454 						le_addr_type(cp->addr.type));
4455 
4456 		if (!params)
4457 			goto done;
4458 
4459 		bitmap_to_arr32(&current_flags, params->flags,
4460 				__HCI_CONN_NUM_FLAGS);
4461 	}
4462 
4463 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
4464 	rp.addr.type = cp->addr.type;
4465 	rp.supported_flags = cpu_to_le32(supported_flags);
4466 	rp.current_flags = cpu_to_le32(current_flags);
4467 
4468 	status = MGMT_STATUS_SUCCESS;
4469 
4470 done:
4471 	hci_dev_unlock(hdev);
4472 
4473 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
4474 				&rp, sizeof(rp));
4475 }
4476 
4477 static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
4478 				 bdaddr_t *bdaddr, u8 bdaddr_type,
4479 				 u32 supported_flags, u32 current_flags)
4480 {
4481 	struct mgmt_ev_device_flags_changed ev;
4482 
4483 	bacpy(&ev.addr.bdaddr, bdaddr);
4484 	ev.addr.type = bdaddr_type;
4485 	ev.supported_flags = cpu_to_le32(supported_flags);
4486 	ev.current_flags = cpu_to_le32(current_flags);
4487 
4488 	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
4489 }
4490 
4491 static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
4492 			    u16 len)
4493 {
4494 	struct mgmt_cp_set_device_flags *cp = data;
4495 	struct bdaddr_list_with_flags *br_params;
4496 	struct hci_conn_params *params;
4497 	u8 status = MGMT_STATUS_INVALID_PARAMS;
4498 	u32 supported_flags;
4499 	u32 current_flags = __le32_to_cpu(cp->current_flags);
4500 
4501 	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
4502 		   &cp->addr.bdaddr, cp->addr.type,
4503 		   __le32_to_cpu(current_flags));
4504 
4505 	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
4506 			__HCI_CONN_NUM_FLAGS);
4507 
4508 	if ((supported_flags | current_flags) != supported_flags) {
4509 		bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)",
4510 			    current_flags, supported_flags);
4511 		goto done;
4512 	}
4513 
4514 	hci_dev_lock(hdev);
4515 
4516 	if (cp->addr.type == BDADDR_BREDR) {
4517 		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
4518 							      &cp->addr.bdaddr,
4519 							      cp->addr.type);
4520 
4521 		if (br_params) {
4522 			bitmap_from_u64(br_params->flags, current_flags);
4523 			status = MGMT_STATUS_SUCCESS;
4524 		} else {
4525 			bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)",
4526 				    &cp->addr.bdaddr, cp->addr.type);
4527 		}
4528 	} else {
4529 		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
4530 						le_addr_type(cp->addr.type));
4531 		if (params) {
4532 			bitmap_from_u64(params->flags, current_flags);
4533 			status = MGMT_STATUS_SUCCESS;
4534 
4535 			/* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY
4536 			 * has been set.
4537 			 */
4538 			if (test_bit(HCI_CONN_FLAG_DEVICE_PRIVACY,
4539 				     params->flags))
4540 				hci_update_passive_scan(hdev);
4541 		} else {
4542 			bt_dev_warn(hdev, "No such LE device %pMR (0x%x)",
4543 				    &cp->addr.bdaddr,
4544 				    le_addr_type(cp->addr.type));
4545 		}
4546 	}
4547 
4548 	hci_dev_unlock(hdev);
4549 
4550 done:
4551 	if (status == MGMT_STATUS_SUCCESS)
4552 		device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type,
4553 				     supported_flags, current_flags);
4554 
4555 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status,
4556 				 &cp->addr, sizeof(cp->addr));
4557 }
4558 
4559 static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev,
4560 				   u16 handle)
4561 {
4562 	struct mgmt_ev_adv_monitor_added ev;
4563 
4564 	ev.monitor_handle = cpu_to_le16(handle);
4565 
4566 	mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk);
4567 }
4568 
4569 void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle)
4570 {
4571 	struct mgmt_ev_adv_monitor_removed ev;
4572 	struct mgmt_pending_cmd *cmd;
4573 	struct sock *sk_skip = NULL;
4574 	struct mgmt_cp_remove_adv_monitor *cp;
4575 
4576 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4577 	if (cmd) {
4578 		cp = cmd->param;
4579 
4580 		if (cp->monitor_handle)
4581 			sk_skip = cmd->sk;
4582 	}
4583 
4584 	ev.monitor_handle = cpu_to_le16(handle);
4585 
4586 	mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip);
4587 }
4588 
/* MGMT_OP_READ_ADV_MONITOR_FEATURES handler: report the supported and
 * enabled monitor features plus the handles of all registered monitors
 * in a variable-length reply.
 */
static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev,
				 void *data, u16 len)
{
	struct adv_monitor *monitor = NULL;
	struct mgmt_rp_read_adv_monitor_features *rp = NULL;
	int handle, err;
	size_t rp_size = 0;
	__u32 supported = 0;
	__u32 enabled = 0;
	__u16 num_handles = 0;
	__u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES];

	BT_DBG("request for %s", hdev->name);

	hci_dev_lock(hdev);

	if (msft_monitor_supported(hdev))
		supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS;

	/* NOTE(review): no bound check against
	 * HCI_MAX_ADV_MONITOR_NUM_HANDLES here; presumably registration
	 * already caps how many entries the idr can hold — verify.
	 */
	idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
		handles[num_handles++] = monitor->handle;

	hci_dev_unlock(hdev);

	/* Reply is the fixed header plus one u16 per registered handle */
	rp_size = sizeof(*rp) + (num_handles * sizeof(u16));
	rp = kmalloc(rp_size, GFP_KERNEL);
	if (!rp)
		return -ENOMEM;

	/* All supported features are currently enabled */
	enabled = supported;

	rp->supported_features = cpu_to_le32(supported);
	rp->enabled_features = cpu_to_le32(enabled);
	rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES);
	rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS;
	rp->num_handles = cpu_to_le16(num_handles);
	if (num_handles)
		memcpy(&rp->handles, &handles, (num_handles * sizeof(u16)));

	err = mgmt_cmd_complete(sk, hdev->id,
				MGMT_OP_READ_ADV_MONITOR_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_size);

	kfree(rp);

	return err;
}
4637 
4638 int mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, u8 status)
4639 {
4640 	struct mgmt_rp_add_adv_patterns_monitor rp;
4641 	struct mgmt_pending_cmd *cmd;
4642 	struct adv_monitor *monitor;
4643 	int err = 0;
4644 
4645 	hci_dev_lock(hdev);
4646 
4647 	cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev);
4648 	if (!cmd) {
4649 		cmd = pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev);
4650 		if (!cmd)
4651 			goto done;
4652 	}
4653 
4654 	monitor = cmd->user_data;
4655 	rp.monitor_handle = cpu_to_le16(monitor->handle);
4656 
4657 	if (!status) {
4658 		mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle);
4659 		hdev->adv_monitors_cnt++;
4660 		if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED)
4661 			monitor->state = ADV_MONITOR_STATE_REGISTERED;
4662 		hci_update_passive_scan(hdev);
4663 	}
4664 
4665 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4666 				mgmt_status(status), &rp, sizeof(rp));
4667 	mgmt_pending_remove(cmd);
4668 
4669 done:
4670 	hci_dev_unlock(hdev);
4671 	bt_dev_dbg(hdev, "add monitor %d complete, status %u",
4672 		   rp.monitor_handle, status);
4673 
4674 	return err;
4675 }
4676 
/* Common tail of the ADD_ADV_PATTERNS_MONITOR(_RSSI) handlers.
 *
 * Takes ownership of @m: every failure path releases it via
 * hci_free_adv_monitor(); on success it is either registered
 * immediately or left attached to the pending command until
 * mgmt_add_adv_patterns_monitor_complete() runs.  @status carries a
 * preliminary error from the caller's parsing step (0 on success).
 */
static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
				      struct adv_monitor *m, u8 status,
				      void *data, u16 len, u16 op)
{
	struct mgmt_rp_add_adv_patterns_monitor rp;
	struct mgmt_pending_cmd *cmd;
	int err;
	bool pending;

	hci_dev_lock(hdev);

	/* Caller already failed while building the monitor */
	if (status)
		goto unlock;

	/* Serialize against other monitor and LE state operations */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	cmd->user_data = m;
	/* pending == true means the controller round-trip will complete
	 * the command asynchronously; err != 0 means the add failed now.
	 */
	pending = hci_add_adv_monitor(hdev, m, &err);
	if (err) {
		if (err == -ENOSPC || err == -ENOMEM)
			status = MGMT_STATUS_NO_RESOURCES;
		else if (err == -EINVAL)
			status = MGMT_STATUS_INVALID_PARAMS;
		else
			status = MGMT_STATUS_FAILED;

		mgmt_pending_remove(cmd);
		goto unlock;
	}

	/* Monitor was registered without needing a controller round-trip */
	if (!pending) {
		mgmt_pending_remove(cmd);
		rp.monitor_handle = cpu_to_le16(m->handle);
		mgmt_adv_monitor_added(sk, hdev, m->handle);
		m->state = ADV_MONITOR_STATE_REGISTERED;
		hdev->adv_monitors_cnt++;

		hci_dev_unlock(hdev);
		return mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);

	return 0;

unlock:
	hci_free_adv_monitor(hdev, m);
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, op, status);
}
4740 
4741 static void parse_adv_monitor_rssi(struct adv_monitor *m,
4742 				   struct mgmt_adv_rssi_thresholds *rssi)
4743 {
4744 	if (rssi) {
4745 		m->rssi.low_threshold = rssi->low_threshold;
4746 		m->rssi.low_threshold_timeout =
4747 		    __le16_to_cpu(rssi->low_threshold_timeout);
4748 		m->rssi.high_threshold = rssi->high_threshold;
4749 		m->rssi.high_threshold_timeout =
4750 		    __le16_to_cpu(rssi->high_threshold_timeout);
4751 		m->rssi.sampling_period = rssi->sampling_period;
4752 	} else {
4753 		/* Default values. These numbers are the least constricting
4754 		 * parameters for MSFT API to work, so it behaves as if there
4755 		 * are no rssi parameter to consider. May need to be changed
4756 		 * if other API are to be supported.
4757 		 */
4758 		m->rssi.low_threshold = -127;
4759 		m->rssi.low_threshold_timeout = 60;
4760 		m->rssi.high_threshold = -127;
4761 		m->rssi.high_threshold_timeout = 0;
4762 		m->rssi.sampling_period = 0;
4763 	}
4764 }
4765 
4766 static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count,
4767 				    struct mgmt_adv_pattern *patterns)
4768 {
4769 	u8 offset = 0, length = 0;
4770 	struct adv_pattern *p = NULL;
4771 	int i;
4772 
4773 	for (i = 0; i < pattern_count; i++) {
4774 		offset = patterns[i].offset;
4775 		length = patterns[i].length;
4776 		if (offset >= HCI_MAX_AD_LENGTH ||
4777 		    length > HCI_MAX_AD_LENGTH ||
4778 		    (offset + length) > HCI_MAX_AD_LENGTH)
4779 			return MGMT_STATUS_INVALID_PARAMS;
4780 
4781 		p = kmalloc(sizeof(*p), GFP_KERNEL);
4782 		if (!p)
4783 			return MGMT_STATUS_NO_RESOURCES;
4784 
4785 		p->ad_type = patterns[i].ad_type;
4786 		p->offset = patterns[i].offset;
4787 		p->length = patterns[i].length;
4788 		memcpy(p->value, patterns[i].value, p->length);
4789 
4790 		INIT_LIST_HEAD(&p->list);
4791 		list_add(&p->list, &m->patterns);
4792 	}
4793 
4794 	return MGMT_STATUS_SUCCESS;
4795 }
4796 
4797 static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev,
4798 				    void *data, u16 len)
4799 {
4800 	struct mgmt_cp_add_adv_patterns_monitor *cp = data;
4801 	struct adv_monitor *m = NULL;
4802 	u8 status = MGMT_STATUS_SUCCESS;
4803 	size_t expected_size = sizeof(*cp);
4804 
4805 	BT_DBG("request for %s", hdev->name);
4806 
4807 	if (len <= sizeof(*cp)) {
4808 		status = MGMT_STATUS_INVALID_PARAMS;
4809 		goto done;
4810 	}
4811 
4812 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4813 	if (len != expected_size) {
4814 		status = MGMT_STATUS_INVALID_PARAMS;
4815 		goto done;
4816 	}
4817 
4818 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4819 	if (!m) {
4820 		status = MGMT_STATUS_NO_RESOURCES;
4821 		goto done;
4822 	}
4823 
4824 	INIT_LIST_HEAD(&m->patterns);
4825 
4826 	parse_adv_monitor_rssi(m, NULL);
4827 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4828 
4829 done:
4830 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4831 					  MGMT_OP_ADD_ADV_PATTERNS_MONITOR);
4832 }
4833 
4834 static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev,
4835 					 void *data, u16 len)
4836 {
4837 	struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data;
4838 	struct adv_monitor *m = NULL;
4839 	u8 status = MGMT_STATUS_SUCCESS;
4840 	size_t expected_size = sizeof(*cp);
4841 
4842 	BT_DBG("request for %s", hdev->name);
4843 
4844 	if (len <= sizeof(*cp)) {
4845 		status = MGMT_STATUS_INVALID_PARAMS;
4846 		goto done;
4847 	}
4848 
4849 	expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern);
4850 	if (len != expected_size) {
4851 		status = MGMT_STATUS_INVALID_PARAMS;
4852 		goto done;
4853 	}
4854 
4855 	m = kzalloc(sizeof(*m), GFP_KERNEL);
4856 	if (!m) {
4857 		status = MGMT_STATUS_NO_RESOURCES;
4858 		goto done;
4859 	}
4860 
4861 	INIT_LIST_HEAD(&m->patterns);
4862 
4863 	parse_adv_monitor_rssi(m, &cp->rssi);
4864 	status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns);
4865 
4866 done:
4867 	return __add_adv_patterns_monitor(sk, hdev, m, status, data, len,
4868 					 MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI);
4869 }
4870 
4871 int mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, u8 status)
4872 {
4873 	struct mgmt_rp_remove_adv_monitor rp;
4874 	struct mgmt_cp_remove_adv_monitor *cp;
4875 	struct mgmt_pending_cmd *cmd;
4876 	int err = 0;
4877 
4878 	hci_dev_lock(hdev);
4879 
4880 	cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev);
4881 	if (!cmd)
4882 		goto done;
4883 
4884 	cp = cmd->param;
4885 	rp.monitor_handle = cp->monitor_handle;
4886 
4887 	if (!status)
4888 		hci_update_passive_scan(hdev);
4889 
4890 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
4891 				mgmt_status(status), &rp, sizeof(rp));
4892 	mgmt_pending_remove(cmd);
4893 
4894 done:
4895 	hci_dev_unlock(hdev);
4896 	bt_dev_dbg(hdev, "remove monitor %d complete, status %u",
4897 		   rp.monitor_handle, status);
4898 
4899 	return err;
4900 }
4901 
/* MGMT_OP_REMOVE_ADV_MONITOR handler: remove a single monitor (non-zero
 * handle) or all monitors (handle 0).  Either completes synchronously
 * or defers to mgmt_remove_adv_monitor_complete() once the controller
 * answers.
 */
static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_remove_adv_monitor *cp = data;
	struct mgmt_rp_remove_adv_monitor rp;
	struct mgmt_pending_cmd *cmd;
	u16 handle = __le16_to_cpu(cp->monitor_handle);
	int err, status;
	bool pending;

	BT_DBG("request for %s", hdev->name);
	rp.monitor_handle = cp->monitor_handle;

	hci_dev_lock(hdev);

	/* Serialize against other monitor and LE state operations */
	if (pending_find(MGMT_OP_SET_LE, hdev) ||
	    pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) ||
	    pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) {
		status = MGMT_STATUS_BUSY;
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len);
	if (!cmd) {
		status = MGMT_STATUS_NO_RESOURCES;
		goto unlock;
	}

	/* Handle 0 is a wildcard removing every registered monitor */
	if (handle)
		pending = hci_remove_single_adv_monitor(hdev, handle, &err);
	else
		pending = hci_remove_all_adv_monitor(hdev, &err);

	if (err) {
		mgmt_pending_remove(cmd);

		/* -ENOENT: no monitor with the requested handle exists */
		if (err == -ENOENT)
			status = MGMT_STATUS_INVALID_INDEX;
		else
			status = MGMT_STATUS_FAILED;

		goto unlock;
	}

	/* monitor can be removed without forwarding request to controller */
	if (!pending) {
		mgmt_pending_remove(cmd);
		hci_dev_unlock(hdev);

		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_REMOVE_ADV_MONITOR,
					 MGMT_STATUS_SUCCESS,
					 &rp, sizeof(rp));
	}

	hci_dev_unlock(hdev);
	return 0;

unlock:
	hci_dev_unlock(hdev);
	return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR,
			       status);
}
4966 
/* Completion callback for MGMT_OP_READ_LOCAL_OOB_DATA: translate the
 * HCI reply carried in cmd->skb into a mgmt response.  Only the P-192
 * hash/randomizer are returned when BR/EDR Secure Connections is
 * disabled; the extended P-192 + P-256 set is returned otherwise.
 */
static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_rp_read_local_oob_data mgmt_rp;
	size_t rp_size = sizeof(mgmt_rp);
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);

	/* If the request itself succeeded, derive a status from the skb
	 * (missing, error-valued, or carrying an HCI status byte).
	 */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %d", status);

	if (status) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status);
		goto remove;
	}

	memset(&mgmt_rp, 0, sizeof(mgmt_rp));

	if (!bredr_sc_enabled(hdev)) {
		struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

		/* Guard against a truncated controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
		memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));

		/* Trim the unused P-256 fields from the reply size */
		rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
	} else {
		struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

		/* Guard against a truncated controller reply */
		if (skb->len < sizeof(*rp)) {
			mgmt_cmd_status(cmd->sk, hdev->id,
					MGMT_OP_READ_LOCAL_OOB_DATA,
					MGMT_STATUS_FAILED);
			goto remove;
		}

		memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
		memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));

		memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
		memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
	}

	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
			  MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);

remove:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	mgmt_pending_free(cmd);
}
5033 
5034 static int read_local_oob_data_sync(struct hci_dev *hdev, void *data)
5035 {
5036 	struct mgmt_pending_cmd *cmd = data;
5037 
5038 	if (bredr_sc_enabled(hdev))
5039 		cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk);
5040 	else
5041 		cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk);
5042 
5043 	if (IS_ERR(cmd->skb))
5044 		return PTR_ERR(cmd->skb);
5045 	else
5046 		return 0;
5047 }
5048 
/* MGMT_OP_READ_LOCAL_OOB_DATA handler: queue an asynchronous HCI read
 * of the local OOB data; read_local_oob_data_complete() sends the
 * reply.  Requires a powered, SSP-capable controller.
 */
static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 data_len)
{
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_POWERED);
		goto unlock;
	}

	if (!lmp_ssp_capable(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_NOT_SUPPORTED);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
					 read_local_oob_data_complete);

	/* On allocation or queueing failure, report FAILED and drop the
	 * pending command (if one was created).
	 */
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
				      MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5090 
/* MGMT_OP_ADD_REMOTE_OOB_DATA handler: store out-of-band pairing data
 * for a remote device.  Accepts the legacy payload (P-192 only) or the
 * extended payload (P-192 + P-256), distinguished by length; all-zero
 * key material disables the corresponding key set.
 */
static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_addr_info *addr = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(addr->type))
		return mgmt_cmd_complete(sk, hdev->id,
					 MGMT_OP_ADD_REMOTE_OOB_DATA,
					 MGMT_STATUS_INVALID_PARAMS,
					 addr, sizeof(*addr));

	hci_dev_lock(hdev);

	/* The payload length selects the legacy or extended variant */
	if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_data *cp = data;
		u8 status;

		/* Legacy variant is limited to BR/EDR addresses */
		if (cp->addr.type != BDADDR_BREDR) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_REMOTE_OOB_DATA,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, cp->hash,
					      cp->rand, NULL, NULL);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA, status,
					&cp->addr, sizeof(cp->addr));
	} else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
		struct mgmt_cp_add_remote_oob_ext_data *cp = data;
		u8 *rand192, *hash192, *rand256, *hash256;
		u8 status;

		if (bdaddr_type_is_le(cp->addr.type)) {
			/* Enforce zero-valued 192-bit parameters as
			 * long as legacy SMP OOB isn't implemented.
			 */
			if (memcmp(cp->rand192, ZERO_KEY, 16) ||
			    memcmp(cp->hash192, ZERO_KEY, 16)) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_ADD_REMOTE_OOB_DATA,
							MGMT_STATUS_INVALID_PARAMS,
							addr, sizeof(*addr));
				goto unlock;
			}

			rand192 = NULL;
			hash192 = NULL;
		} else {
			/* In case one of the P-192 values is set to zero,
			 * then just disable OOB data for P-192.
			 */
			if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
			    !memcmp(cp->hash192, ZERO_KEY, 16)) {
				rand192 = NULL;
				hash192 = NULL;
			} else {
				rand192 = cp->rand192;
				hash192 = cp->hash192;
			}
		}

		/* In case one of the P-256 values is set to zero, then just
		 * disable OOB data for P-256.
		 */
		if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
		    !memcmp(cp->hash256, ZERO_KEY, 16)) {
			rand256 = NULL;
			hash256 = NULL;
		} else {
			rand256 = cp->rand256;
			hash256 = cp->hash256;
		}

		err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
					      cp->addr.type, hash192, rand192,
					      hash256, rand256);
		if (err < 0)
			status = MGMT_STATUS_FAILED;
		else
			status = MGMT_STATUS_SUCCESS;

		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_ADD_REMOTE_OOB_DATA,
					status, &cp->addr, sizeof(cp->addr));
	} else {
		bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
			   len);
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5198 
5199 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
5200 				  void *data, u16 len)
5201 {
5202 	struct mgmt_cp_remove_remote_oob_data *cp = data;
5203 	u8 status;
5204 	int err;
5205 
5206 	bt_dev_dbg(hdev, "sock %p", sk);
5207 
5208 	if (cp->addr.type != BDADDR_BREDR)
5209 		return mgmt_cmd_complete(sk, hdev->id,
5210 					 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5211 					 MGMT_STATUS_INVALID_PARAMS,
5212 					 &cp->addr, sizeof(cp->addr));
5213 
5214 	hci_dev_lock(hdev);
5215 
5216 	if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5217 		hci_remote_oob_data_clear(hdev);
5218 		status = MGMT_STATUS_SUCCESS;
5219 		goto done;
5220 	}
5221 
5222 	err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
5223 	if (err < 0)
5224 		status = MGMT_STATUS_INVALID_PARAMS;
5225 	else
5226 		status = MGMT_STATUS_SUCCESS;
5227 
5228 done:
5229 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
5230 				status, &cp->addr, sizeof(cp->addr));
5231 
5232 	hci_dev_unlock(hdev);
5233 	return err;
5234 }
5235 
5236 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
5237 {
5238 	struct mgmt_pending_cmd *cmd;
5239 
5240 	bt_dev_dbg(hdev, "status %u", status);
5241 
5242 	hci_dev_lock(hdev);
5243 
5244 	cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
5245 	if (!cmd)
5246 		cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
5247 
5248 	if (!cmd)
5249 		cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
5250 
5251 	if (cmd) {
5252 		cmd->cmd_complete(cmd, mgmt_status(status));
5253 		mgmt_pending_remove(cmd);
5254 	}
5255 
5256 	hci_dev_unlock(hdev);
5257 }
5258 
5259 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
5260 				    uint8_t *mgmt_status)
5261 {
5262 	switch (type) {
5263 	case DISCOV_TYPE_LE:
5264 		*mgmt_status = mgmt_le_support(hdev);
5265 		if (*mgmt_status)
5266 			return false;
5267 		break;
5268 	case DISCOV_TYPE_INTERLEAVED:
5269 		*mgmt_status = mgmt_le_support(hdev);
5270 		if (*mgmt_status)
5271 			return false;
5272 		fallthrough;
5273 	case DISCOV_TYPE_BREDR:
5274 		*mgmt_status = mgmt_bredr_support(hdev);
5275 		if (*mgmt_status)
5276 			return false;
5277 		break;
5278 	default:
5279 		*mgmt_status = MGMT_STATUS_INVALID_PARAMS;
5280 		return false;
5281 	}
5282 
5283 	return true;
5284 }
5285 
5286 static void start_discovery_complete(struct hci_dev *hdev, void *data, int err)
5287 {
5288 	struct mgmt_pending_cmd *cmd = data;
5289 
5290 	if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) &&
5291 	    cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) &&
5292 	    cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev))
5293 		return;
5294 
5295 	bt_dev_dbg(hdev, "err %d", err);
5296 
5297 	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
5298 			  cmd->param, 1);
5299 	mgmt_pending_remove(cmd);
5300 
5301 	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED:
5302 				DISCOVERY_FINDING);
5303 }
5304 
/* hci_cmd_sync work callback: kick off the actual discovery procedure */
static int start_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_start_discovery_sync(hdev);
}
5309 
/* Shared implementation of the START_DISCOVERY and
 * START_LIMITED_DISCOVERY mgmt commands: validate controller and
 * discovery state, record the requested parameters and queue the
 * asynchronous discovery work.
 */
static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
				    u16 op, void *data, u16 len)
{
	struct mgmt_cp_start_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, op,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery may run at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id, op, status,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.type = cp->type;
	hdev->discovery.report_invalid_rssi = false;
	if (op == MGMT_OP_START_LIMITED_DISCOVERY)
		hdev->discovery.limited = true;
	else
		hdev->discovery.limited = false;

	cmd = mgmt_pending_add(sk, op, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* start_discovery_complete() replies once the work has run */
	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5380 
/* Start Discovery command handler */
static int start_discovery(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
					data, len);
}
5387 
/* Start Limited Discovery command handler */
static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	return start_discovery_internal(sk, hdev,
					MGMT_OP_START_LIMITED_DISCOVERY,
					data, len);
}
5395 
/* Start Service Discovery command handler: like Start Discovery but with
 * an RSSI threshold and an optional list of service UUIDs used for result
 * filtering. The UUID list is variable-length, so the total command length
 * is validated against the advertised uuid_count.
 */
static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 len)
{
	struct mgmt_cp_start_service_discovery *cp = data;
	struct mgmt_pending_cmd *cmd;
	/* Upper bound that keeps sizeof(*cp) + uuid_count * 16 within u16 */
	const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
	u16 uuid_count, expected_len;
	u8 status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_NOT_POWERED,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Only one discovery session may run at a time */
	if (hdev->discovery.state != DISCOVERY_STOPPED ||
	    hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_BUSY, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	uuid_count = __le16_to_cpu(cp->uuid_count);
	if (uuid_count > max_uuid_count) {
		bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
			   uuid_count);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	/* The command length must match exactly: header plus 16 bytes
	 * per advertised UUID.
	 */
	expected_len = sizeof(*cp) + uuid_count * 16;
	if (expected_len != len) {
		bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
			   expected_len, len);
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS, &cp->type,
					sizeof(cp->type));
		goto failed;
	}

	if (!discovery_type_is_valid(hdev, cp->type, &status)) {
		err = mgmt_cmd_complete(sk, hdev->id,
					MGMT_OP_START_SERVICE_DISCOVERY,
					status, &cp->type, sizeof(cp->type));
		goto failed;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
			       hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
	hci_discovery_filter_clear(hdev);

	hdev->discovery.result_filtering = true;
	hdev->discovery.type = cp->type;
	hdev->discovery.rssi = cp->rssi;
	hdev->discovery.uuid_count = uuid_count;

	if (uuid_count > 0) {
		/* Keep a private copy of the UUID filter list; freed again
		 * by hci_discovery_filter_clear().
		 */
		hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
						GFP_KERNEL);
		if (!hdev->discovery.uuids) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_START_SERVICE_DISCOVERY,
						MGMT_STATUS_FAILED,
						&cp->type, sizeof(cp->type));
			mgmt_pending_remove(cmd);
			goto failed;
		}
	}

	/* Response to the caller is deferred to start_discovery_complete */
	err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd,
				 start_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto failed;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STARTING);

failed:
	hci_dev_unlock(hdev);
	return err;
}
5507 
5508 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
5509 {
5510 	struct mgmt_pending_cmd *cmd;
5511 
5512 	bt_dev_dbg(hdev, "status %u", status);
5513 
5514 	hci_dev_lock(hdev);
5515 
5516 	cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
5517 	if (cmd) {
5518 		cmd->cmd_complete(cmd, mgmt_status(status));
5519 		mgmt_pending_remove(cmd);
5520 	}
5521 
5522 	hci_dev_unlock(hdev);
5523 }
5524 
/* Completion callback for a queued stop-discovery request: respond to
 * the pending Stop Discovery command and reset the discovery state.
 */
static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	/* Bail out if the command is no longer pending */
	if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev))
		return;

	bt_dev_dbg(hdev, "err %d", err);

	/* Respond with the first parameter byte (the discovery type) */
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err),
			  cmd->param, 1);
	mgmt_pending_remove(cmd);

	if (!err)
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
5541 
/* hci_cmd_sync callback that performs the actual discovery stop */
static int stop_discovery_sync(struct hci_dev *hdev, void *data)
{
	return hci_stop_discovery_sync(hdev);
}
5546 
/* Stop Discovery command handler: validates that a discovery of the
 * requested type is active and queues the stop on the hci_sync machinery.
 */
static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
			  u16 len)
{
	struct mgmt_cp_stop_discovery *mgmt_cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (!hci_discovery_active(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_REJECTED, &mgmt_cp->type,
					sizeof(mgmt_cp->type));
		goto unlock;
	}

	/* The requested type must match the running discovery type */
	if (hdev->discovery.type != mgmt_cp->type) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
					MGMT_STATUS_INVALID_PARAMS,
					&mgmt_cp->type, sizeof(mgmt_cp->type));
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Response to the caller is deferred to stop_discovery_complete */
	err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd,
				 stop_discovery_complete);
	if (err < 0) {
		mgmt_pending_remove(cmd);
		goto unlock;
	}

	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5591 
5592 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
5593 			u16 len)
5594 {
5595 	struct mgmt_cp_confirm_name *cp = data;
5596 	struct inquiry_entry *e;
5597 	int err;
5598 
5599 	bt_dev_dbg(hdev, "sock %p", sk);
5600 
5601 	hci_dev_lock(hdev);
5602 
5603 	if (!hci_discovery_active(hdev)) {
5604 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5605 					MGMT_STATUS_FAILED, &cp->addr,
5606 					sizeof(cp->addr));
5607 		goto failed;
5608 	}
5609 
5610 	e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
5611 	if (!e) {
5612 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
5613 					MGMT_STATUS_INVALID_PARAMS, &cp->addr,
5614 					sizeof(cp->addr));
5615 		goto failed;
5616 	}
5617 
5618 	if (cp->name_known) {
5619 		e->name_state = NAME_KNOWN;
5620 		list_del(&e->list);
5621 	} else {
5622 		e->name_state = NAME_NEEDED;
5623 		hci_inquiry_cache_update_resolve(hdev, e);
5624 	}
5625 
5626 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
5627 				&cp->addr, sizeof(cp->addr));
5628 
5629 failed:
5630 	hci_dev_unlock(hdev);
5631 	return err;
5632 }
5633 
5634 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
5635 			u16 len)
5636 {
5637 	struct mgmt_cp_block_device *cp = data;
5638 	u8 status;
5639 	int err;
5640 
5641 	bt_dev_dbg(hdev, "sock %p", sk);
5642 
5643 	if (!bdaddr_type_is_valid(cp->addr.type))
5644 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
5645 					 MGMT_STATUS_INVALID_PARAMS,
5646 					 &cp->addr, sizeof(cp->addr));
5647 
5648 	hci_dev_lock(hdev);
5649 
5650 	err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr,
5651 				  cp->addr.type);
5652 	if (err < 0) {
5653 		status = MGMT_STATUS_FAILED;
5654 		goto done;
5655 	}
5656 
5657 	mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5658 		   sk);
5659 	status = MGMT_STATUS_SUCCESS;
5660 
5661 done:
5662 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
5663 				&cp->addr, sizeof(cp->addr));
5664 
5665 	hci_dev_unlock(hdev);
5666 
5667 	return err;
5668 }
5669 
5670 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
5671 			  u16 len)
5672 {
5673 	struct mgmt_cp_unblock_device *cp = data;
5674 	u8 status;
5675 	int err;
5676 
5677 	bt_dev_dbg(hdev, "sock %p", sk);
5678 
5679 	if (!bdaddr_type_is_valid(cp->addr.type))
5680 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
5681 					 MGMT_STATUS_INVALID_PARAMS,
5682 					 &cp->addr, sizeof(cp->addr));
5683 
5684 	hci_dev_lock(hdev);
5685 
5686 	err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr,
5687 				  cp->addr.type);
5688 	if (err < 0) {
5689 		status = MGMT_STATUS_INVALID_PARAMS;
5690 		goto done;
5691 	}
5692 
5693 	mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
5694 		   sk);
5695 	status = MGMT_STATUS_SUCCESS;
5696 
5697 done:
5698 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
5699 				&cp->addr, sizeof(cp->addr));
5700 
5701 	hci_dev_unlock(hdev);
5702 
5703 	return err;
5704 }
5705 
/* hci_cmd_sync callback: regenerate the EIR data after the Device ID
 * values have been updated.
 */
static int set_device_id_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_eir_sync(hdev);
}
5710 
/* Set Device ID command: store the Device ID values (source, vendor,
 * product, version) and queue an EIR update to publish them.
 */
static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_set_device_id *cp = data;
	int err;
	__u16 source;

	bt_dev_dbg(hdev, "sock %p", sk);

	source = __le16_to_cpu(cp->source);

	/* Only source values 0x0000-0x0002 are accepted */
	if (source > 0x0002)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	hdev->devid_source = source;
	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
	hdev->devid_product = __le16_to_cpu(cp->product);
	hdev->devid_version = __le16_to_cpu(cp->version);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
				NULL, 0);

	/* EIR refresh happens asynchronously; its result is not reported
	 * back to the caller.
	 */
	hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL);

	hci_dev_unlock(hdev);

	return err;
}
5742 
/* Log the outcome of re-enabling instance advertising */
static void enable_advertising_instance(struct hci_dev *hdev, int err)
{
	if (!err)
		bt_dev_dbg(hdev, "status %d", err);
	else
		bt_dev_err(hdev, "failed to re-configure advertising %d", err);
}
5750 
/* Completion callback for the Set Advertising command: sync the
 * HCI_ADVERTISING flag with the controller state, answer all pending
 * Set Advertising commands and, if applicable, re-enable instance
 * advertising.
 */
static void set_advertising_complete(struct hci_dev *hdev, void *data, int err)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 instance;
	struct adv_info *adv_instance;
	u8 status = mgmt_status(err);

	if (status) {
		/* On failure answer every pending command with the error */
		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
				     cmd_status_rsp, &status);
		return;
	}

	/* Mirror the controller's LE advertising state in the mgmt flag */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		hci_dev_set_flag(hdev, HCI_ADVERTISING);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);

	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
			     &match);

	new_settings(hdev, match.sk);

	/* settings_rsp takes a reference on the socket it stores in match */
	if (match.sk)
		sock_put(match.sk);

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return;

	/* Fall back to the first instance when none is currently active */
	instance = hdev->cur_adv_instance;
	if (!instance) {
		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
							struct adv_info, list);
		if (!adv_instance)
			return;

		instance = adv_instance->instance;
	}

	err = hci_schedule_adv_instance_sync(hdev, instance, true);

	enable_advertising_instance(hdev, err);
}
5798 
/* hci_cmd_sync callback for Set Advertising: apply the requested
 * advertising mode (0x00 off, 0x01 on, 0x02 on and connectable) to the
 * controller.
 */
static int set_adv_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Only 0x02 requests connectable advertising */
	if (cp->val == 0x02)
		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
	else
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);

	cancel_adv_timeout(hdev);

	if (val) {
		/* Switch to instance "0" for the Set Advertising setting.
		 * We cannot use update_[adv|scan_rsp]_data() here as the
		 * HCI_ADVERTISING flag is not yet set.
		 */
		hdev->cur_adv_instance = 0x00;

		if (ext_adv_capable(hdev)) {
			hci_start_ext_adv_sync(hdev, 0x00);
		} else {
			hci_update_adv_data_sync(hdev, 0x00);
			hci_update_scan_rsp_data_sync(hdev, 0x00);
			hci_enable_advertising_sync(hdev);
		}
	} else {
		hci_disable_advertising_sync(hdev);
	}

	return 0;
}
5832 
/* Set Advertising command handler: 0x00 disables, 0x01 enables and 0x02
 * enables connectable advertising. Either toggles the flags directly and
 * responds, or queues the change on the hci_sync machinery.
 */
static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val, status;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       status);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Advertising may be temporarily paused (e.g. by other machinery);
	 * don't change it while paused.
	 */
	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;

	/* The following conditions are ones which mean that we should
	 * not do any HCI communication but directly send a mgmt
	 * response to user space (after toggling the flag if
	 * necessary).
	 */
	if (!hdev_is_powered(hdev) ||
	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
	    hci_conn_num(hdev, LE_LINK) > 0 ||
	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
		bool changed;

		if (cp->val) {
			hdev->cur_adv_instance = 0x00;
			changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
			else
				hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
			hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
		if (err < 0)
			goto unlock;

		/* Only broadcast New Settings when something changed */
		if (changed)
			err = new_settings(hdev, sk);

		goto unlock;
	}

	/* Refuse while another advertising/LE change is still in flight */
	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
	    pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd,
					 set_advertising_complete);

	if (err < 0 && cmd)
		mgmt_pending_remove(cmd);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
5916 
5917 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
5918 			      void *data, u16 len)
5919 {
5920 	struct mgmt_cp_set_static_address *cp = data;
5921 	int err;
5922 
5923 	bt_dev_dbg(hdev, "sock %p", sk);
5924 
5925 	if (!lmp_le_capable(hdev))
5926 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5927 				       MGMT_STATUS_NOT_SUPPORTED);
5928 
5929 	if (hdev_is_powered(hdev))
5930 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
5931 				       MGMT_STATUS_REJECTED);
5932 
5933 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
5934 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
5935 			return mgmt_cmd_status(sk, hdev->id,
5936 					       MGMT_OP_SET_STATIC_ADDRESS,
5937 					       MGMT_STATUS_INVALID_PARAMS);
5938 
5939 		/* Two most significant bits shall be set */
5940 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
5941 			return mgmt_cmd_status(sk, hdev->id,
5942 					       MGMT_OP_SET_STATIC_ADDRESS,
5943 					       MGMT_STATUS_INVALID_PARAMS);
5944 	}
5945 
5946 	hci_dev_lock(hdev);
5947 
5948 	bacpy(&hdev->static_addr, &cp->bdaddr);
5949 
5950 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
5951 	if (err < 0)
5952 		goto unlock;
5953 
5954 	err = new_settings(hdev, sk);
5955 
5956 unlock:
5957 	hci_dev_unlock(hdev);
5958 	return err;
5959 }
5960 
5961 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
5962 			   void *data, u16 len)
5963 {
5964 	struct mgmt_cp_set_scan_params *cp = data;
5965 	__u16 interval, window;
5966 	int err;
5967 
5968 	bt_dev_dbg(hdev, "sock %p", sk);
5969 
5970 	if (!lmp_le_capable(hdev))
5971 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5972 				       MGMT_STATUS_NOT_SUPPORTED);
5973 
5974 	interval = __le16_to_cpu(cp->interval);
5975 
5976 	if (interval < 0x0004 || interval > 0x4000)
5977 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5978 				       MGMT_STATUS_INVALID_PARAMS);
5979 
5980 	window = __le16_to_cpu(cp->window);
5981 
5982 	if (window < 0x0004 || window > 0x4000)
5983 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5984 				       MGMT_STATUS_INVALID_PARAMS);
5985 
5986 	if (window > interval)
5987 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
5988 				       MGMT_STATUS_INVALID_PARAMS);
5989 
5990 	hci_dev_lock(hdev);
5991 
5992 	hdev->le_scan_interval = interval;
5993 	hdev->le_scan_window = window;
5994 
5995 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
5996 				NULL, 0);
5997 
5998 	/* If background scan is running, restart it so new parameters are
5999 	 * loaded.
6000 	 */
6001 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
6002 	    hdev->discovery.state == DISCOVERY_STOPPED)
6003 		hci_update_passive_scan(hdev);
6004 
6005 	hci_dev_unlock(hdev);
6006 
6007 	return err;
6008 }
6009 
/* Completion callback for Set Fast Connectable: update the flag to match
 * the outcome and respond to the command's originator.
 */
static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				mgmt_status(err));
	} else {
		struct mgmt_mode *cp = cmd->param;

		/* Sync the flag with the value that was actually written */
		if (cp->val)
			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
		else
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);

		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, cmd->sk);
	}

	/* cmd was created with mgmt_pending_new, so free (not remove) it */
	mgmt_pending_free(cmd);
}
6033 
/* hci_cmd_sync callback that writes the requested fast connectable mode */
static int write_fast_connectable_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;

	return hci_write_fast_connectable_sync(hdev, cp->val);
}
6041 
/* Set Fast Connectable command handler: toggles BR/EDR fast connectable
 * mode, either by flipping the flag directly (powered off) or by queuing
 * the page scan parameter change on the hci_sync machinery.
 */
static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
				void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Requires BR/EDR and at least Bluetooth 1.2 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
	    hdev->hci_ver < BLUETOOTH_VER_1_2)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_FAST_CONNECTABLE,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Nothing to do if the requested value is already set */
	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		goto unlock;
	}

	/* While powered off only the flag needs to change */
	if (!hdev_is_powered(hdev)) {
		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
		new_settings(hdev, sk);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data,
			       len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd,
					 fast_connectable_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
				MGMT_STATUS_FAILED);

		if (cmd)
			mgmt_pending_free(cmd);
	}

unlock:
	hci_dev_unlock(hdev);

	return err;
}
6097 
/* Completion callback for Set BR/EDR: roll back the flag on failure or
 * report the new settings on success.
 */
static void set_bredr_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		/* We need to restore the flag if related HCI commands
		 * failed.
		 */
		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
	} else {
		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
		new_settings(hdev, cmd->sk);
	}

	/* cmd was created with mgmt_pending_new, so free (not remove) it */
	mgmt_pending_free(cmd);
}
6120 
6121 static int set_bredr_sync(struct hci_dev *hdev, void *data)
6122 {
6123 	int status;
6124 
6125 	status = hci_write_fast_connectable_sync(hdev, false);
6126 
6127 	if (!status)
6128 		status = hci_update_scan_sync(hdev);
6129 
6130 	/* Since only the advertising data flags will change, there
6131 	 * is no need to update the scan response data.
6132 	 */
6133 	if (!status)
6134 		status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
6135 
6136 	return status;
6137 }
6138 
/* Set BR/EDR command handler: enable or disable the BR/EDR transport on
 * a dual-mode controller. Disabling is only possible while powered off;
 * re-enabling is rejected for configurations that would be invalid.
 */
static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* Only meaningful on dual-mode (BR/EDR + LE) controllers */
	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Nothing to do if the requested value is already set */
	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		goto unlock;
	}

	/* While powered off only flags need to change; BR/EDR-dependent
	 * settings are cleared when the transport is disabled.
	 */
	if (!hdev_is_powered(hdev)) {
		if (!cp->val) {
			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
		}

		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);

		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
		if (err < 0)
			goto unlock;

		err = new_settings(hdev, sk);
		goto unlock;
	}

	/* Reject disabling when powered on */
	if (!cp->val) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	} else {
		/* When configuring a dual-mode controller to operate
		 * with LE only and using a static address, then switching
		 * BR/EDR back on is not allowed.
		 *
		 * Dual-mode controllers shall operate with the public
		 * address as its identity address for BR/EDR and LE. So
		 * reject the attempt to create an invalid configuration.
		 *
		 * The same restrictions applies when secure connections
		 * has been enabled. For BR/EDR this is a controller feature
		 * while for LE it is a host stack feature. This means that
		 * switching BR/EDR back on when secure connections has been
		 * enabled is not a supported transaction.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
					      MGMT_STATUS_REJECTED);
			goto unlock;
		}
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd,
					 set_bredr_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);

		goto unlock;
	}

	/* We need to flip the bit already here so that
	 * hci_req_update_adv_data generates the correct flags.
	 */
	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6239 
/* Completion callback for Set Secure Connections: update the SC flags to
 * reflect the requested mode and respond to the command's originator.
 */
static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp;

	bt_dev_dbg(hdev, "err %d", err);

	if (err) {
		u8 mgmt_err = mgmt_status(err);

		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
		goto done;
	}

	cp = cmd->param;

	/* 0x00 = off, 0x01 = SC enabled, 0x02 = SC-only mode */
	switch (cp->val) {
	case 0x00:
		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x01:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		break;
	case 0x02:
		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		hci_dev_set_flag(hdev, HCI_SC_ONLY);
		break;
	}

	send_settings_rsp(cmd->sk, cmd->opcode, hdev);
	new_settings(hdev, cmd->sk);

done:
	/* cmd was created with mgmt_pending_new, so free (not remove) it */
	mgmt_pending_free(cmd);
}
6277 
/* hci_cmd_sync callback that writes the requested SC support value */
static int set_secure_conn_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_mode *cp = cmd->param;
	u8 val = !!cp->val;

	/* Force write of val */
	hci_dev_set_flag(hdev, HCI_SC_ENABLED);

	return hci_write_sc_support_sync(hdev, val);
}
6289 
/* Set Secure Connections command handler: 0x00 disables SC, 0x01 enables
 * it and 0x02 enables SC-only mode. Either toggles the flags directly or
 * queues the controller update on the hci_sync machinery.
 */
static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	struct mgmt_pending_cmd *cmd;
	u8 val;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_NOT_SUPPORTED);

	/* With BR/EDR enabled on an SC capable controller, SSP must be
	 * enabled before SC can be turned on.
	 */
	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	    lmp_sc_capable(hdev) &&
	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_REJECTED);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* When no controller write is required, just toggle the flags and
	 * respond directly.
	 */
	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		bool changed;

		if (cp->val) {
			changed = !hci_dev_test_and_set_flag(hdev,
							     HCI_SC_ENABLED);
			if (cp->val == 0x02)
				hci_dev_set_flag(hdev, HCI_SC_ONLY);
			else
				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		} else {
			changed = hci_dev_test_and_clear_flag(hdev,
							      HCI_SC_ENABLED);
			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
		}

		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		if (err < 0)
			goto failed;

		if (changed)
			err = new_settings(hdev, sk);

		goto failed;
	}

	val = !!cp->val;

	/* Nothing to do if both flags already match the request */
	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
		goto failed;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd,
					 set_secure_conn_complete);

	if (err < 0) {
		mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
				MGMT_STATUS_FAILED);
		if (cmd)
			mgmt_pending_free(cmd);
	}

failed:
	hci_dev_unlock(hdev);
	return err;
}
6370 
/* Set Debug Keys command handler: 0x00 disables, 0x01 keeps debug link
 * keys and 0x02 additionally enables SSP debug mode in the controller.
 */
static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
			  void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	bool changed, use_changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* Both 0x01 and 0x02 mean debug keys shall be kept */
	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_KEEP_DEBUG_KEYS);

	/* Only 0x02 additionally enables use of debug keys */
	if (cp->val == 0x02)
		use_changed = !hci_dev_test_and_set_flag(hdev,
							 HCI_USE_DEBUG_KEYS);
	else
		use_changed = hci_dev_test_and_clear_flag(hdev,
							  HCI_USE_DEBUG_KEYS);

	/* Propagate the debug mode to the controller when it is powered
	 * and SSP is enabled.
	 */
	if (hdev_is_powered(hdev) && use_changed &&
	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
			     sizeof(mode), &mode);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6417 
/* Set Privacy (MGMT_OP_SET_PRIVACY)
 *
 * cp->privacy: 0x00 disables privacy, 0x01 enables privacy and 0x02
 * enables limited privacy (HCI_LIMITED_PRIVACY). cp->irk carries the
 * local Identity Resolving Key. The command is only accepted while
 * the controller is powered off.
 */
static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		       u16 len)
{
	struct mgmt_cp_set_privacy *cp = cp_data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_INVALID_PARAMS);

	/* Privacy may only be reconfigured while powered off */
	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* If user space supports this command it is also expected to
	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
	 */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	if (cp->privacy) {
		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
		/* Store the new IRK and flag the current RPA as expired */
		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, true);
		if (cp->privacy == 0x02)
			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
		else
			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	} else {
		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
		/* Wipe the stored IRK when privacy gets disabled */
		memset(hdev->irk, 0, sizeof(hdev->irk));
		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
		hci_adv_instances_set_rpa_expired(hdev, false);
		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
	}

	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
	if (err < 0)
		goto unlock;

	/* Notify other mgmt users only when the setting actually changed */
	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6474 
6475 static bool irk_is_valid(struct mgmt_irk_info *irk)
6476 {
6477 	switch (irk->addr.type) {
6478 	case BDADDR_LE_PUBLIC:
6479 		return true;
6480 
6481 	case BDADDR_LE_RANDOM:
6482 		/* Two most significant bits shall be set */
6483 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6484 			return false;
6485 		return true;
6486 	}
6487 
6488 	return false;
6489 }
6490 
/* Load IRKs (MGMT_OP_LOAD_IRKS)
 *
 * Replaces the complete set of stored Identity Resolving Keys with
 * the list supplied by user space. An empty list simply clears all
 * stored IRKs.
 */
static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
		     u16 len)
{
	struct mgmt_cp_load_irks *cp = cp_data;
	/* Upper bound guaranteeing struct_size() below fits in the u16
	 * message length.
	 */
	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_irk_info));
	u16 irk_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_NOT_SUPPORTED);

	irk_count = __le16_to_cpu(cp->irk_count);
	if (irk_count > max_irk_count) {
		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
			   irk_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the advertised key count exactly */
	expected_len = struct_size(cp, irks, irk_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "irk_count %u", irk_count);

	/* Validate every entry up front so the existing IRKs are only
	 * dropped once the whole request is known to be well-formed.
	 */
	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *key = &cp->irks[i];

		if (!irk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_IRKS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_irks_clear(hdev);

	for (i = 0; i < irk_count; i++) {
		struct mgmt_irk_info *irk = &cp->irks[i];

		/* Administratively blocked keys are skipped with a warning
		 * rather than failing the whole command.
		 */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_IRK,
				       irk->val)) {
			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
				    &irk->addr.bdaddr);
			continue;
		}

		hci_add_irk(hdev, &irk->addr.bdaddr,
			    le_addr_type(irk->addr.type), irk->val,
			    BDADDR_ANY);
	}

	/* User space providing IRKs implies it can handle RPAs as well */
	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6561 
6562 static bool ltk_is_valid(struct mgmt_ltk_info *key)
6563 {
6564 	if (key->initiator != 0x00 && key->initiator != 0x01)
6565 		return false;
6566 
6567 	switch (key->addr.type) {
6568 	case BDADDR_LE_PUBLIC:
6569 		return true;
6570 
6571 	case BDADDR_LE_RANDOM:
6572 		/* Two most significant bits shall be set */
6573 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
6574 			return false;
6575 		return true;
6576 	}
6577 
6578 	return false;
6579 }
6580 
/* Load Long Term Keys (MGMT_OP_LOAD_LONG_TERM_KEYS)
 *
 * Replaces the complete set of stored LTKs with the list supplied by
 * user space. Blocked keys, keys of unknown type and P-256 debug keys
 * are silently skipped.
 */
static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
			       void *cp_data, u16 len)
{
	struct mgmt_cp_load_long_term_keys *cp = cp_data;
	/* Upper bound guaranteeing struct_size() below fits in the u16
	 * message length.
	 */
	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
				   sizeof(struct mgmt_ltk_info));
	u16 key_count, expected_len;
	int i, err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_NOT_SUPPORTED);

	key_count = __le16_to_cpu(cp->key_count);
	if (key_count > max_key_count) {
		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
			   key_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the advertised key count exactly */
	expected_len = struct_size(cp, keys, key_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "key_count %u", key_count);

	/* Validate every entry up front so the existing LTKs are only
	 * dropped once the whole request is known to be well-formed.
	 */
	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];

		if (!ltk_is_valid(key))
			return mgmt_cmd_status(sk, hdev->id,
					       MGMT_OP_LOAD_LONG_TERM_KEYS,
					       MGMT_STATUS_INVALID_PARAMS);
	}

	hci_dev_lock(hdev);

	hci_smp_ltks_clear(hdev);

	for (i = 0; i < key_count; i++) {
		struct mgmt_ltk_info *key = &cp->keys[i];
		u8 type, authenticated;

		/* Administratively blocked keys are skipped with a warning */
		if (hci_is_blocked_key(hdev,
				       HCI_BLOCKED_KEY_TYPE_LTK,
				       key->val)) {
			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
				    &key->addr.bdaddr);
			continue;
		}

		switch (key->type) {
		case MGMT_LTK_UNAUTHENTICATED:
			authenticated = 0x00;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_AUTHENTICATED:
			authenticated = 0x01;
			type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER;
			break;
		case MGMT_LTK_P256_UNAUTH:
			authenticated = 0x00;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_AUTH:
			authenticated = 0x01;
			type = SMP_LTK_P256;
			break;
		case MGMT_LTK_P256_DEBUG:
			authenticated = 0x00;
			type = SMP_LTK_P256_DEBUG;
			/* The fallthrough into default: below means P-256
			 * debug keys hit the continue and are deliberately
			 * never stored.
			 */
			fallthrough;
		default:
			continue;
		}

		hci_add_ltk(hdev, &key->addr.bdaddr,
			    le_addr_type(key->addr.type), type, authenticated,
			    key->val, key->enc_size, key->ediv, key->rand);
	}

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
			   NULL, 0);

	hci_dev_unlock(hdev);

	return err;
}
6676 
/* Completion callback for get_conn_info_sync(): builds the Get Conn
 * Info reply from the values cached in the hci_conn and releases the
 * connection reference taken by get_conn_info().
 */
static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct hci_conn *conn = cmd->user_data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct mgmt_rp_get_conn_info rp;
	u8 status;

	bt_dev_dbg(hdev, "err %d", err);

	memcpy(&rp.addr, &cp->addr.bdaddr, sizeof(rp.addr));

	status = mgmt_status(err);
	if (status == MGMT_STATUS_SUCCESS) {
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;
	} else {
		/* Report explicit invalid markers on failure */
		rp.rssi = HCI_RSSI_INVALID;
		rp.tx_power = HCI_TX_POWER_INVALID;
		rp.max_tx_power = HCI_TX_POWER_INVALID;
	}

	mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status,
			  &rp, sizeof(rp));

	/* conn may already have been released by get_conn_info_sync() */
	if (conn) {
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

	mgmt_pending_free(cmd);
}
6710 
/* Runs from the cmd_sync context: refreshes RSSI and TX power values
 * for the connection referenced by a pending Get Conn Info command.
 */
static int get_conn_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_conn_info *cp = cmd->param;
	struct hci_conn *conn;
	int err;
	__le16   handle;

	/* Make sure we are still connected */
	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn != cmd->user_data || conn->state != BT_CONNECTED) {
		/* Release the reference taken by get_conn_info() */
		if (cmd->user_data) {
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
		/* NOTE(review): this returns a positive MGMT status rather
		 * than a negative errno - confirm get_conn_info_complete()
		 * translates it as intended via mgmt_status().
		 */
		return MGMT_STATUS_NOT_CONNECTED;
	}

	handle = cpu_to_le16(conn->handle);

	/* Refresh RSSI each time */
	err = hci_read_rssi_sync(hdev, handle);

	/* For LE links TX power does not change thus we don't need to
	 * query for it once value is known.
	 */
	if (!err && (!bdaddr_type_is_le(cp->addr.type) ||
		     conn->tx_power == HCI_TX_POWER_INVALID))
		err = hci_read_tx_power_sync(hdev, handle, 0x00);

	/* Max TX power needs to be read only once per connection */
	if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID)
		err = hci_read_tx_power_sync(hdev, handle, 0x01);

	return err;
}
6753 
/* Get Connection Information (MGMT_OP_GET_CONN_INFO)
 *
 * Returns RSSI and TX power for an active connection. Cached values
 * are returned while they are still fresh; otherwise a sync request
 * is queued to refresh them from the controller.
 */
static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
			 u16 len)
{
	struct mgmt_cp_get_conn_info *cp = data;
	struct mgmt_rp_get_conn_info rp;
	struct hci_conn *conn;
	unsigned long conn_info_age;
	int err = 0;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	if (!bdaddr_type_is_valid(cp->addr.type))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	if (cp->addr.type == BDADDR_BREDR)
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
	else
		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);

	if (!conn || conn->state != BT_CONNECTED) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_NOT_CONNECTED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* To avoid client trying to guess when to poll again for information we
	 * calculate conn info age as random value between min/max set in hdev.
	 */
	conn_info_age = hdev->conn_info_min_age +
			prandom_u32_max(hdev->conn_info_max_age -
					hdev->conn_info_min_age);

	/* Query controller to refresh cached values if they are too old or were
	 * never read.
	 */
	if (time_after(jiffies, conn->conn_info_timestamp +
		       msecs_to_jiffies(conn_info_age)) ||
	    !conn->conn_info_timestamp) {
		struct mgmt_pending_cmd *cmd;

		cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data,
				       len);
		if (!cmd)
			err = -ENOMEM;
		else
			err = hci_cmd_sync_queue(hdev, get_conn_info_sync,
						 cmd, get_conn_info_complete);

		if (err < 0) {
			mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					  MGMT_STATUS_FAILED, &rp, sizeof(rp));

			if (cmd)
				mgmt_pending_free(cmd);

			goto unlock;
		}

		/* Pin the connection until get_conn_info_complete() runs */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);

		conn->conn_info_timestamp = jiffies;
	} else {
		/* Cache is valid, just reply with values cached in hci_conn */
		rp.rssi = conn->rssi;
		rp.tx_power = conn->tx_power;
		rp.max_tx_power = conn->max_tx_power;

		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
6847 
/* Completion callback for get_clock_info_sync(): reports local and
 * (when a connection was given) piconet clock values and releases the
 * connection reference taken by get_clock_info().
 */
static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct mgmt_rp_get_clock_info rp;
	struct hci_conn *conn = cmd->user_data;
	u8 status = mgmt_status(err);

	bt_dev_dbg(hdev, "err %d", err);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* On failure the reply carries only the (zeroed) address fields */
	if (err)
		goto complete;

	rp.local_clock = cpu_to_le32(hdev->clock);

	if (conn) {
		rp.piconet_clock = cpu_to_le32(conn->clock);
		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
		/* Release the reference taken by get_clock_info() */
		hci_conn_drop(conn);
		hci_conn_put(conn);
	}

complete:
	mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
			  sizeof(rp));

	mgmt_pending_free(cmd);
}
6880 
/* Runs from the cmd_sync context: reads the local clock and, when the
 * pending command holds a connection reference, also the piconet
 * clock of that connection.
 */
static int get_clock_info_sync(struct hci_dev *hdev, void *data)
{
	struct mgmt_pending_cmd *cmd = data;
	struct mgmt_cp_get_clock_info *cp = cmd->param;
	struct hci_cp_read_clock hci_cp;
	struct hci_conn *conn = cmd->user_data;
	int err;

	/* handle == 0 and which == 0x00 read the local clock */
	memset(&hci_cp, 0, sizeof(hci_cp));
	err = hci_read_clock_sync(hdev, &hci_cp);

	if (conn) {
		/* Make sure connection still exists */
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);

		if (conn && conn == cmd->user_data &&
		    conn->state == BT_CONNECTED) {
			hci_cp.handle = cpu_to_le16(conn->handle);
			hci_cp.which = 0x01; /* Piconet clock */
			err = hci_read_clock_sync(hdev, &hci_cp);
		} else if (cmd->user_data) {
			/* Connection went away: drop the reference taken by
			 * get_clock_info().
			 */
			hci_conn_drop(cmd->user_data);
			hci_conn_put(cmd->user_data);
			cmd->user_data = NULL;
		}
	}

	return err;
}
6911 
/* Get Clock Information (MGMT_OP_GET_CLOCK_INFO)
 *
 * Reads the local clock and, for a non-zero BR/EDR address, also the
 * piconet clock of the matching connection. The actual HCI traffic is
 * queued via get_clock_info_sync().
 */
static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
								u16 len)
{
	struct mgmt_cp_get_clock_info *cp = data;
	struct mgmt_rp_get_clock_info rp;
	struct mgmt_pending_cmd *cmd;
	struct hci_conn *conn;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	memset(&rp, 0, sizeof(rp));
	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
	rp.addr.type = cp->addr.type;

	/* Clock information is only available for BR/EDR */
	if (cp->addr.type != BDADDR_BREDR)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					 MGMT_STATUS_INVALID_PARAMS,
					 &rp, sizeof(rp));

	hci_dev_lock(hdev);

	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_NOT_POWERED, &rp,
					sizeof(rp));
		goto unlock;
	}

	/* A non-zero address selects a specific connection; BDADDR_ANY
	 * requests just the local clock.
	 */
	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
					       &cp->addr.bdaddr);
		if (!conn || conn->state != BT_CONNECTED) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_GET_CLOCK_INFO,
						MGMT_STATUS_NOT_CONNECTED,
						&rp, sizeof(rp));
			goto unlock;
		}
	} else {
		conn = NULL;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
	if (!cmd)
		err = -ENOMEM;
	else
		err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd,
					 get_clock_info_complete);

	if (err < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
					MGMT_STATUS_FAILED, &rp, sizeof(rp));

		if (cmd)
			mgmt_pending_free(cmd);

	} else if (conn) {
		/* Pin the connection until get_clock_info_complete() runs */
		hci_conn_hold(conn);
		cmd->user_data = hci_conn_get(conn);
	}


unlock:
	hci_dev_unlock(hdev);
	return err;
}
6979 
6980 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
6981 {
6982 	struct hci_conn *conn;
6983 
6984 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
6985 	if (!conn)
6986 		return false;
6987 
6988 	if (conn->dst_type != type)
6989 		return false;
6990 
6991 	if (conn->state != BT_CONNECTED)
6992 		return false;
6993 
6994 	return true;
6995 }
6996 
/* This function requires the caller holds hdev->lock.
 *
 * Creates (or looks up) the connection parameters for the given
 * identity address and re-queues the entry onto the action list that
 * matches the new auto_connect policy. Returns 0 on success or -EIO
 * when no parameter entry could be obtained.
 */
static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
			       u8 addr_type, u8 auto_connect)
{
	struct hci_conn_params *params;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* Nothing to do when the policy is unchanged */
	if (params->auto_connect == auto_connect)
		return 0;

	/* Take the entry off whichever action list it currently sits on */
	list_del_init(&params->action);

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		/* If auto connect is being disabled when we're trying to
		 * connect to device, keep connecting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	case HCI_AUTO_CONN_REPORT:
		/* An in-progress explicit connect takes precedence over
		 * mere reporting.
		 */
		if (params->explicit_connect)
			list_add(&params->action, &hdev->pend_le_conns);
		else
			list_add(&params->action, &hdev->pend_le_reports);
		break;
	case HCI_AUTO_CONN_DIRECT:
	case HCI_AUTO_CONN_ALWAYS:
		/* Only queue a pending connection when there isn't an
		 * active one already.
		 */
		if (!is_connected(hdev, addr, addr_type))
			list_add(&params->action, &hdev->pend_le_conns);
		break;
	}

	params->auto_connect = auto_connect;

	bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u",
		   addr, addr_type, auto_connect);

	return 0;
}
7041 
7042 static void device_added(struct sock *sk, struct hci_dev *hdev,
7043 			 bdaddr_t *bdaddr, u8 type, u8 action)
7044 {
7045 	struct mgmt_ev_device_added ev;
7046 
7047 	bacpy(&ev.addr.bdaddr, bdaddr);
7048 	ev.addr.type = type;
7049 	ev.action = action;
7050 
7051 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
7052 }
7053 
/* cmd_sync callback for Add Device: refresh the passive scan state
 * after the accept list / connection parameters changed.
 */
static int add_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7058 
/* Add Device (MGMT_OP_ADD_DEVICE)
 *
 * cp->action: 0x00 background scan for device (LE only), 0x01 allow
 * incoming connection, 0x02 auto-connect remote device (LE only).
 * BR/EDR addresses go onto the accept list; LE addresses get
 * connection parameters with the matching auto-connect policy.
 */
static int add_device(struct sock *sk, struct hci_dev *hdev,
		      void *data, u16 len)
{
	struct mgmt_cp_add_device *cp = data;
	u8 auto_conn, addr_type;
	struct hci_conn_params *params;
	int err;
	u32 current_flags = 0;
	u32 supported_flags;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!bdaddr_type_is_valid(cp->addr.type) ||
	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					 MGMT_STATUS_INVALID_PARAMS,
					 &cp->addr, sizeof(cp->addr));

	hci_dev_lock(hdev);

	if (cp->addr.type == BDADDR_BREDR) {
		/* Only incoming connections action is supported for now */
		if (cp->action != 0x01) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_ADD_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		err = hci_bdaddr_list_add_with_flags(&hdev->accept_list,
						     &cp->addr.bdaddr,
						     cp->addr.type, 0);
		if (err)
			goto unlock;

		hci_req_update_scan(hdev);

		goto added;
	}

	addr_type = le_addr_type(cp->addr.type);

	/* Map the mgmt action onto the internal auto-connect policy */
	if (cp->action == 0x02)
		auto_conn = HCI_AUTO_CONN_ALWAYS;
	else if (cp->action == 0x01)
		auto_conn = HCI_AUTO_CONN_DIRECT;
	else
		auto_conn = HCI_AUTO_CONN_REPORT;

	/* Kernel internally uses conn_params with resolvable private
	 * address, but Add Device allows only identity addresses.
	 * Make sure it is enforced before calling
	 * hci_conn_params_lookup.
	 */
	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_INVALID_PARAMS,
					&cp->addr, sizeof(cp->addr));
		goto unlock;
	}

	/* If the connection parameters don't exist for this device,
	 * they will be created and configured with defaults.
	 */
	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
				auto_conn) < 0) {
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
					MGMT_STATUS_FAILED, &cp->addr,
					sizeof(cp->addr));
		goto unlock;
	} else {
		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (params)
			bitmap_to_arr32(&current_flags, params->flags,
					__HCI_CONN_NUM_FLAGS);
	}

	err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL);
	if (err < 0)
		goto unlock;

added:
	/* Notify listeners about the new entry and its flags */
	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
	bitmap_to_arr32(&supported_flags, hdev->conn_flags,
			__HCI_CONN_NUM_FLAGS);
	device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type,
			     supported_flags, current_flags);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7162 
7163 static void device_removed(struct sock *sk, struct hci_dev *hdev,
7164 			   bdaddr_t *bdaddr, u8 type)
7165 {
7166 	struct mgmt_ev_device_removed ev;
7167 
7168 	bacpy(&ev.addr.bdaddr, bdaddr);
7169 	ev.addr.type = type;
7170 
7171 	mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
7172 }
7173 
/* cmd_sync callback for Remove Device: refresh the passive scan state
 * after the accept list / connection parameters changed.
 */
static int remove_device_sync(struct hci_dev *hdev, void *data)
{
	return hci_update_passive_scan_sync(hdev);
}
7178 
/* Remove Device (MGMT_OP_REMOVE_DEVICE)
 *
 * A specific address removes that single device (BR/EDR accept list
 * entry or LE connection parameters). BDADDR_ANY with address type
 * 0x00 removes all accept list entries and all non-disabled LE
 * connection parameters.
 */
static int remove_device(struct sock *sk, struct hci_dev *hdev,
			 void *data, u16 len)
{
	struct mgmt_cp_remove_device *cp = data;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
		struct hci_conn_params *params;
		u8 addr_type;

		if (!bdaddr_type_is_valid(cp->addr.type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		if (cp->addr.type == BDADDR_BREDR) {
			err = hci_bdaddr_list_del(&hdev->accept_list,
						  &cp->addr.bdaddr,
						  cp->addr.type);
			if (err) {
				err = mgmt_cmd_complete(sk, hdev->id,
							MGMT_OP_REMOVE_DEVICE,
							MGMT_STATUS_INVALID_PARAMS,
							&cp->addr,
							sizeof(cp->addr));
				goto unlock;
			}

			hci_req_update_scan(hdev);

			device_removed(sk, hdev, &cp->addr.bdaddr,
				       cp->addr.type);
			goto complete;
		}

		addr_type = le_addr_type(cp->addr.type);

		/* Kernel internally uses conn_params with resolvable private
		 * address, but Remove Device allows only identity addresses.
		 * Make sure it is enforced before calling
		 * hci_conn_params_lookup.
		 */
		if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
						addr_type);
		if (!params) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		/* Entries that were not created via Add Device cannot be
		 * removed through it either.
		 */
		if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
		    params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_del(&params->action);
		list_del(&params->list);
		kfree(params);

		device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
	} else {
		struct hci_conn_params *p, *tmp;
		struct bdaddr_list *b, *btmp;

		/* BDADDR_ANY requires address type 0x00 (BR/EDR) */
		if (cp->addr.type) {
			err = mgmt_cmd_complete(sk, hdev->id,
						MGMT_OP_REMOVE_DEVICE,
						MGMT_STATUS_INVALID_PARAMS,
						&cp->addr, sizeof(cp->addr));
			goto unlock;
		}

		list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
			device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
			list_del(&b->list);
			kfree(b);
		}

		hci_req_update_scan(hdev);

		list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
			if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
				continue;
			device_removed(sk, hdev, &p->addr, p->addr_type);
			/* Keep entries with an in-progress explicit connect,
			 * downgrading them to explicit-only.
			 */
			if (p->explicit_connect) {
				p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
				continue;
			}
			list_del(&p->action);
			list_del(&p->list);
			kfree(p);
		}

		bt_dev_dbg(hdev, "All LE connection parameters were removed");
	}

	hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);

complete:
	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
				MGMT_STATUS_SUCCESS, &cp->addr,
				sizeof(cp->addr));
unlock:
	hci_dev_unlock(hdev);
	return err;
}
7306 
/* Load Connection Parameters (MGMT_OP_LOAD_CONN_PARAM)
 *
 * Clears all disabled connection parameter entries and loads the
 * supplied list. Individual entries with invalid address types or
 * out-of-range parameters are skipped rather than failing the whole
 * command.
 */
static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
			   u16 len)
{
	struct mgmt_cp_load_conn_param *cp = data;
	/* Upper bound guaranteeing struct_size() below fits in the u16
	 * message length.
	 */
	const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
				     sizeof(struct mgmt_conn_param));
	u16 param_count, expected_len;
	int i;

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_NOT_SUPPORTED);

	param_count = __le16_to_cpu(cp->param_count);
	if (param_count > max_param_count) {
		bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
			   param_count);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	/* The command length must match the advertised count exactly */
	expected_len = struct_size(cp, params, param_count);
	if (expected_len != len) {
		bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
			   expected_len, len);
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
				       MGMT_STATUS_INVALID_PARAMS);
	}

	bt_dev_dbg(hdev, "param_count %u", param_count);

	hci_dev_lock(hdev);

	hci_conn_params_clear_disabled(hdev);

	for (i = 0; i < param_count; i++) {
		struct mgmt_conn_param *param = &cp->params[i];
		struct hci_conn_params *hci_param;
		u16 min, max, latency, timeout;
		u8 addr_type;

		bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
			   param->addr.type);

		/* Only LE address types are meaningful here */
		if (param->addr.type == BDADDR_LE_PUBLIC) {
			addr_type = ADDR_LE_DEV_PUBLIC;
		} else if (param->addr.type == BDADDR_LE_RANDOM) {
			addr_type = ADDR_LE_DEV_RANDOM;
		} else {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		min = le16_to_cpu(param->min_interval);
		max = le16_to_cpu(param->max_interval);
		latency = le16_to_cpu(param->latency);
		timeout = le16_to_cpu(param->timeout);

		bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
			   min, max, latency, timeout);

		if (hci_check_conn_params(min, max, latency, timeout) < 0) {
			bt_dev_err(hdev, "ignoring invalid connection parameters");
			continue;
		}

		hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
						addr_type);
		if (!hci_param) {
			bt_dev_err(hdev, "failed to add connection parameters");
			continue;
		}

		hci_param->conn_min_interval = min;
		hci_param->conn_max_interval = max;
		hci_param->conn_latency = latency;
		hci_param->supervision_timeout = timeout;
	}

	hci_dev_unlock(hdev);

	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
				 NULL, 0);
}
7391 
7392 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
7393 			       void *data, u16 len)
7394 {
7395 	struct mgmt_cp_set_external_config *cp = data;
7396 	bool changed;
7397 	int err;
7398 
7399 	bt_dev_dbg(hdev, "sock %p", sk);
7400 
7401 	if (hdev_is_powered(hdev))
7402 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7403 				       MGMT_STATUS_REJECTED);
7404 
7405 	if (cp->config != 0x00 && cp->config != 0x01)
7406 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7407 				         MGMT_STATUS_INVALID_PARAMS);
7408 
7409 	if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
7410 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
7411 				       MGMT_STATUS_NOT_SUPPORTED);
7412 
7413 	hci_dev_lock(hdev);
7414 
7415 	if (cp->config)
7416 		changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
7417 	else
7418 		changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
7419 
7420 	err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
7421 	if (err < 0)
7422 		goto unlock;
7423 
7424 	if (!changed)
7425 		goto unlock;
7426 
7427 	err = new_options(hdev, sk);
7428 
7429 	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
7430 		mgmt_index_removed(hdev);
7431 
7432 		if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
7433 			hci_dev_set_flag(hdev, HCI_CONFIG);
7434 			hci_dev_set_flag(hdev, HCI_AUTO_OFF);
7435 
7436 			queue_work(hdev->req_workqueue, &hdev->power_on);
7437 		} else {
7438 			set_bit(HCI_RAW, &hdev->flags);
7439 			mgmt_index_added(hdev);
7440 		}
7441 	}
7442 
7443 unlock:
7444 	hci_dev_unlock(hdev);
7445 	return err;
7446 }
7447 
/* Set Public Address (MGMT_OP_SET_PUBLIC_ADDRESS)
 *
 * Stores a public address for a controller whose driver provides a
 * set_bdaddr callback. Only valid while powered off; if the change
 * completes the configuration, the controller is moved to the
 * configured index list and powered on for setup.
 */
static int set_public_address(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 len)
{
	struct mgmt_cp_set_public_address *cp = data;
	bool changed;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (hdev_is_powered(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_REJECTED);

	/* BDADDR_ANY is not a usable public address */
	if (!bacmp(&cp->bdaddr, BDADDR_ANY))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The driver must be able to program the address */
	if (!hdev->set_bdaddr)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
				       MGMT_STATUS_NOT_SUPPORTED);

	hci_dev_lock(hdev);

	changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
	bacpy(&hdev->public_addr, &cp->bdaddr);

	err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
	if (err < 0)
		goto unlock;

	if (!changed)
		goto unlock;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		err = new_options(hdev, sk);

	if (is_configured(hdev)) {
		mgmt_index_removed(hdev);

		hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);

		hci_dev_set_flag(hdev, HCI_CONFIG);
		hci_dev_set_flag(hdev, HCI_AUTO_OFF);

		queue_work(hdev->req_workqueue, &hdev->power_on);
	}

unlock:
	hci_dev_unlock(hdev);
	return err;
}
7499 
/* Completion callback for the HCI Read Local OOB (Extended) Data request
 * queued by read_local_ssp_oob_req(). Translates the controller reply in
 * cmd->skb into a MGMT_OP_READ_LOCAL_OOB_EXT_DATA response and, on
 * success, broadcasts the refreshed OOB data to listening mgmt sockets.
 */
static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
					     int err)
{
	const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
	struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
	u8 *h192, *r192, *h256, *r256;
	struct mgmt_pending_cmd *cmd = data;
	struct sk_buff *skb = cmd->skb;
	u8 status = mgmt_status(err);
	u16 eir_len;

	/* Bail out if the command was already taken off the pending list */
	if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev))
		return;

	/* No transport error: derive the status from the reply itself */
	if (!status) {
		if (!skb)
			status = MGMT_STATUS_FAILED;
		else if (IS_ERR(skb))
			status = mgmt_status(PTR_ERR(skb));
		else
			status = mgmt_status(skb->data[0]);
	}

	bt_dev_dbg(hdev, "status %u", status);

	mgmt_cp = cmd->param;

	if (status) {
		status = mgmt_status(status);
		eir_len = 0;

		h192 = NULL;
		r192 = NULL;
		h256 = NULL;
		r256 = NULL;
	} else if (!bredr_sc_enabled(hdev)) {
		/* Legacy (non Secure Connections) reply: P-192 values only */
		struct hci_rp_read_local_oob_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			/* Class of Device (5) + hash (18) + rand (18) */
			eir_len = 5 + 18 + 18;
			h192 = rp->hash;
			r192 = rp->rand;
			h256 = NULL;
			r256 = NULL;
		}
	} else {
		/* Extended reply carrying both P-192 and P-256 values */
		struct hci_rp_read_local_oob_ext_data *rp;

		if (skb->len != sizeof(*rp)) {
			status = MGMT_STATUS_FAILED;
			eir_len = 0;
		} else {
			status = MGMT_STATUS_SUCCESS;
			rp = (void *)skb->data;

			if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
				/* SC-only mode exposes the P-256 values alone */
				eir_len = 5 + 18 + 18;
				h192 = NULL;
				r192 = NULL;
			} else {
				eir_len = 5 + 18 + 18 + 18 + 18;
				h192 = rp->hash192;
				r192 = rp->rand192;
			}

			h256 = rp->hash256;
			r256 = rp->rand256;
		}
	}

	mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
	if (!mgmt_rp)
		goto done;

	if (eir_len == 0)
		goto send_rsp;

	eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
				  hdev->dev_class, 3);

	if (h192 && r192) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C192, h192, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R192, r192, 16);
	}

	if (h256 && r256) {
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_HASH_C256, h256, 16);
		eir_len = eir_append_data(mgmt_rp->eir, eir_len,
					  EIR_SSP_RAND_R256, r256, 16);
	}

send_rsp:
	mgmt_rp->type = mgmt_cp->type;
	mgmt_rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(cmd->sk, hdev->id,
				MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
				mgmt_rp, sizeof(*mgmt_rp) + eir_len);
	if (err < 0 || status)
		goto done;

	/* Subscribe the requester to future OOB data update events too */
	hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
done:
	if (skb && !IS_ERR(skb))
		kfree_skb(skb);

	kfree(mgmt_rp);
	mgmt_pending_remove(cmd);
}
7622 
7623 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
7624 				  struct mgmt_cp_read_local_oob_ext_data *cp)
7625 {
7626 	struct mgmt_pending_cmd *cmd;
7627 	int err;
7628 
7629 	cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
7630 			       cp, sizeof(*cp));
7631 	if (!cmd)
7632 		return -ENOMEM;
7633 
7634 	err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd,
7635 				 read_local_oob_ext_data_complete);
7636 
7637 	if (err < 0) {
7638 		mgmt_pending_remove(cmd);
7639 		return err;
7640 	}
7641 
7642 	return 0;
7643 }
7644 
/* MGMT_OP_READ_LOCAL_OOB_EXT_DATA handler.
 *
 * Builds out-of-band pairing data for either BR/EDR or LE depending on
 * cp->type. The BR/EDR path with SSP enabled defers to the controller
 * via read_local_ssp_oob_req(); all other paths assemble the EIR data
 * inline and reply immediately. On success the data is also broadcast
 * to sockets subscribed to OOB data events.
 */
static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
				   void *data, u16 data_len)
{
	struct mgmt_cp_read_local_oob_ext_data *cp = data;
	struct mgmt_rp_read_local_oob_ext_data *rp;
	size_t rp_len;
	u16 eir_len;
	u8 status, flags, role, addr[7], hash[16], rand[16];
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	/* First pass: determine status and worst-case EIR size so the
	 * response buffer can be allocated up front.
	 */
	if (hdev_is_powered(hdev)) {
		switch (cp->type) {
		case BIT(BDADDR_BREDR):
			status = mgmt_bredr_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 5;
			break;
		case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
			status = mgmt_le_support(hdev);
			if (status)
				eir_len = 0;
			else
				eir_len = 9 + 3 + 18 + 18 + 3;
			break;
		default:
			status = MGMT_STATUS_INVALID_PARAMS;
			eir_len = 0;
			break;
		}
	} else {
		status = MGMT_STATUS_NOT_POWERED;
		eir_len = 0;
	}

	rp_len = sizeof(*rp) + eir_len;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp)
		return -ENOMEM;

	if (!status && !lmp_ssp_capable(hdev)) {
		status = MGMT_STATUS_NOT_SUPPORTED;
		eir_len = 0;
	}

	if (status)
		goto complete;

	hci_dev_lock(hdev);

	/* Second pass: fill in the actual EIR data */
	eir_len = 0;
	switch (cp->type) {
	case BIT(BDADDR_BREDR):
		if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
			/* Defer to the controller; the reply is sent from
			 * read_local_oob_ext_data_complete().
			 */
			err = read_local_ssp_oob_req(hdev, sk, cp);
			hci_dev_unlock(hdev);
			if (!err)
				goto done;

			status = MGMT_STATUS_FAILED;
			goto complete;
		} else {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_CLASS_OF_DEV,
						  hdev->dev_class, 3);
		}
		break;
	case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
		    smp_generate_oob(hdev, hash, rand) < 0) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_FAILED;
			goto complete;
		}

		/* This should return the active RPA, but since the RPA
		 * is only programmed on demand, it is really hard to fill
		 * this in at the moment. For now disallow retrieving
		 * local out-of-band data when privacy is in use.
		 *
		 * Returning the identity address will not help here since
		 * pairing happens before the identity resolving key is
		 * known and thus the connection establishment happens
		 * based on the RPA and not the identity address.
		 */
		if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
			hci_dev_unlock(hdev);
			status = MGMT_STATUS_REJECTED;
			goto complete;
		}

		/* addr[6] flags the address type: 0x01 random (static),
		 * 0x00 public.
		 */
		if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
		   !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
		   (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
		    bacmp(&hdev->static_addr, BDADDR_ANY))) {
			memcpy(addr, &hdev->static_addr, 6);
			addr[6] = 0x01;
		} else {
			memcpy(addr, &hdev->bdaddr, 6);
			addr[6] = 0x00;
		}

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
					  addr, sizeof(addr));

		/* role 0x02: peripheral preferred, 0x01: central preferred */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = 0x02;
		else
			role = 0x01;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
					  &role, sizeof(role));

		if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_CONFIRM,
						  hash, sizeof(hash));

			eir_len = eir_append_data(rp->eir, eir_len,
						  EIR_LE_SC_RANDOM,
						  rand, sizeof(rand));
		}

		flags = mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
					  &flags, sizeof(flags));
		break;
	}

	hci_dev_unlock(hdev);

	/* Subscribe the requester to future OOB data update events too */
	hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);

	status = MGMT_STATUS_SUCCESS;

complete:
	rp->type = cp->type;
	rp->eir_len = cpu_to_le16(eir_len);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
				status, rp, sizeof(*rp) + eir_len);
	if (err < 0 || status)
		goto done;

	err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
				 rp, sizeof(*rp) + eir_len,
				 HCI_MGMT_OOB_DATA_EVENTS, sk);

done:
	kfree(rp);

	return err;
}
7805 
7806 static u32 get_supported_adv_flags(struct hci_dev *hdev)
7807 {
7808 	u32 flags = 0;
7809 
7810 	flags |= MGMT_ADV_FLAG_CONNECTABLE;
7811 	flags |= MGMT_ADV_FLAG_DISCOV;
7812 	flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
7813 	flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
7814 	flags |= MGMT_ADV_FLAG_APPEARANCE;
7815 	flags |= MGMT_ADV_FLAG_LOCAL_NAME;
7816 	flags |= MGMT_ADV_PARAM_DURATION;
7817 	flags |= MGMT_ADV_PARAM_TIMEOUT;
7818 	flags |= MGMT_ADV_PARAM_INTERVALS;
7819 	flags |= MGMT_ADV_PARAM_TX_POWER;
7820 	flags |= MGMT_ADV_PARAM_SCAN_RSP;
7821 
7822 	/* In extended adv TX_POWER returned from Set Adv Param
7823 	 * will be always valid.
7824 	 */
7825 	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
7826 	    ext_adv_capable(hdev))
7827 		flags |= MGMT_ADV_FLAG_TX_POWER;
7828 
7829 	if (ext_adv_capable(hdev)) {
7830 		flags |= MGMT_ADV_FLAG_SEC_1M;
7831 		flags |= MGMT_ADV_FLAG_HW_OFFLOAD;
7832 		flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER;
7833 
7834 		if (hdev->le_features[1] & HCI_LE_PHY_2M)
7835 			flags |= MGMT_ADV_FLAG_SEC_2M;
7836 
7837 		if (hdev->le_features[1] & HCI_LE_PHY_CODED)
7838 			flags |= MGMT_ADV_FLAG_SEC_CODED;
7839 	}
7840 
7841 	return flags;
7842 }
7843 
/* MGMT_OP_READ_ADV_FEATURES handler.
 *
 * Reports the supported advertising flags, data size limits, instance
 * limits and the list of currently registered instance identifiers.
 */
static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
			     void *data, u16 data_len)
{
	struct mgmt_rp_read_adv_features *rp;
	size_t rp_len;
	int err;
	struct adv_info *adv_instance;
	u32 supported_flags;
	u8 *instance;

	bt_dev_dbg(hdev, "sock %p", sk);

	if (!lmp_le_capable(hdev))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				       MGMT_STATUS_REJECTED);

	hci_dev_lock(hdev);

	/* One trailing byte per registered instance identifier */
	rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
	rp = kmalloc(rp_len, GFP_ATOMIC);
	if (!rp) {
		hci_dev_unlock(hdev);
		return -ENOMEM;
	}

	supported_flags = get_supported_adv_flags(hdev);

	rp->supported_flags = cpu_to_le32(supported_flags);
	rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
	rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
	rp->max_instances = hdev->le_num_of_adv_sets;
	rp->num_instances = hdev->adv_instance_cnt;

	/* Copy out the instance identifiers while still under the lock */
	instance = rp->instance;
	list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
		*instance = adv_instance->instance;
		instance++;
	}

	hci_dev_unlock(hdev);

	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
				MGMT_STATUS_SUCCESS, rp, rp_len);

	kfree(rp);

	return err;
}
7892 
/* Number of bytes an EIR Local Name field for the current device name
 * would occupy, including the length/type header, by rendering it into
 * a scratch buffer.
 */
static u8 calculate_name_len(struct hci_dev *hdev)
{
	u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];

	return eir_append_local_name(hdev, buf, 0);
}
7899 
7900 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
7901 			   bool is_adv_data)
7902 {
7903 	u8 max_len = HCI_MAX_AD_LENGTH;
7904 
7905 	if (is_adv_data) {
7906 		if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
7907 				 MGMT_ADV_FLAG_LIMITED_DISCOV |
7908 				 MGMT_ADV_FLAG_MANAGED_FLAGS))
7909 			max_len -= 3;
7910 
7911 		if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
7912 			max_len -= 3;
7913 	} else {
7914 		if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
7915 			max_len -= calculate_name_len(hdev);
7916 
7917 		if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
7918 			max_len -= 4;
7919 	}
7920 
7921 	return max_len;
7922 }
7923 
7924 static bool flags_managed(u32 adv_flags)
7925 {
7926 	return adv_flags & (MGMT_ADV_FLAG_DISCOV |
7927 			    MGMT_ADV_FLAG_LIMITED_DISCOV |
7928 			    MGMT_ADV_FLAG_MANAGED_FLAGS);
7929 }
7930 
7931 static bool tx_power_managed(u32 adv_flags)
7932 {
7933 	return adv_flags & MGMT_ADV_FLAG_TX_POWER;
7934 }
7935 
7936 static bool name_managed(u32 adv_flags)
7937 {
7938 	return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
7939 }
7940 
7941 static bool appearance_managed(u32 adv_flags)
7942 {
7943 	return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
7944 }
7945 
7946 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
7947 			      u8 len, bool is_adv_data)
7948 {
7949 	int i, cur_len;
7950 	u8 max_len;
7951 
7952 	max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
7953 
7954 	if (len > max_len)
7955 		return false;
7956 
7957 	/* Make sure that the data is correctly formatted. */
7958 	for (i = 0; i < len; i += (cur_len + 1)) {
7959 		cur_len = data[i];
7960 
7961 		if (!cur_len)
7962 			continue;
7963 
7964 		if (data[i + 1] == EIR_FLAGS &&
7965 		    (!is_adv_data || flags_managed(adv_flags)))
7966 			return false;
7967 
7968 		if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
7969 			return false;
7970 
7971 		if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
7972 			return false;
7973 
7974 		if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
7975 			return false;
7976 
7977 		if (data[i + 1] == EIR_APPEARANCE &&
7978 		    appearance_managed(adv_flags))
7979 			return false;
7980 
7981 		/* If the current field length would exceed the total data
7982 		 * length, then it's invalid.
7983 		 */
7984 		if (i + cur_len >= len)
7985 			return false;
7986 	}
7987 
7988 	return true;
7989 }
7990 
7991 static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags)
7992 {
7993 	u32 supported_flags, phy_flags;
7994 
7995 	/* The current implementation only supports a subset of the specified
7996 	 * flags. Also need to check mutual exclusiveness of sec flags.
7997 	 */
7998 	supported_flags = get_supported_adv_flags(hdev);
7999 	phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK;
8000 	if (adv_flags & ~supported_flags ||
8001 	    ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
8002 		return false;
8003 
8004 	return true;
8005 }
8006 
8007 static bool adv_busy(struct hci_dev *hdev)
8008 {
8009 	return pending_find(MGMT_OP_SET_LE, hdev);
8010 }
8011 
8012 static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance,
8013 			     int err)
8014 {
8015 	struct adv_info *adv, *n;
8016 
8017 	bt_dev_dbg(hdev, "err %d", err);
8018 
8019 	hci_dev_lock(hdev);
8020 
8021 	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
8022 		u8 instance;
8023 
8024 		if (!adv->pending)
8025 			continue;
8026 
8027 		if (!err) {
8028 			adv->pending = false;
8029 			continue;
8030 		}
8031 
8032 		instance = adv->instance;
8033 
8034 		if (hdev->cur_adv_instance == instance)
8035 			cancel_adv_timeout(hdev);
8036 
8037 		hci_remove_adv_instance(hdev, instance);
8038 		mgmt_advertising_removed(sk, hdev, instance);
8039 	}
8040 
8041 	hci_dev_unlock(hdev);
8042 }
8043 
8044 static void add_advertising_complete(struct hci_dev *hdev, void *data, int err)
8045 {
8046 	struct mgmt_pending_cmd *cmd = data;
8047 	struct mgmt_cp_add_advertising *cp = cmd->param;
8048 	struct mgmt_rp_add_advertising rp;
8049 
8050 	memset(&rp, 0, sizeof(rp));
8051 
8052 	rp.instance = cp->instance;
8053 
8054 	if (err)
8055 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8056 				mgmt_status(err));
8057 	else
8058 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8059 				  mgmt_status(err), &rp, sizeof(rp));
8060 
8061 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8062 
8063 	mgmt_pending_free(cmd);
8064 }
8065 
8066 static int add_advertising_sync(struct hci_dev *hdev, void *data)
8067 {
8068 	struct mgmt_pending_cmd *cmd = data;
8069 	struct mgmt_cp_add_advertising *cp = cmd->param;
8070 
8071 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8072 }
8073 
/* MGMT_OP_ADD_ADVERTISING handler.
 *
 * Validates and registers an advertising instance (parameters, adv data
 * and scan response in a single call) and schedules it when possible.
 * The reply is sent immediately when no controller interaction is
 * needed, otherwise from add_advertising_complete() once the queued
 * work finishes.
 */
static int add_advertising(struct sock *sk, struct hci_dev *hdev,
			   void *data, u16 data_len)
{
	struct mgmt_cp_add_advertising *cp = data;
	struct mgmt_rp_add_advertising rp;
	u32 flags;
	u8 status;
	u16 timeout, duration;
	unsigned int prev_instance_cnt;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	int err;
	struct mgmt_pending_cmd *cmd;

	bt_dev_dbg(hdev, "sock %p", sk);

	status = mgmt_le_support(hdev);
	if (status)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       status);

	/* Instance identifiers are 1-based */
	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	/* The variable-length payload must match the declared lengths */
	if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	flags = __le32_to_cpu(cp->flags);
	timeout = __le16_to_cpu(cp->timeout);
	duration = __le16_to_cpu(cp->duration);

	if (!requested_adv_flags_are_valid(hdev, flags))
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	/* A timeout only makes sense on a powered controller */
	if (timeout && !hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
			       cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	prev_instance_cnt = hdev->adv_instance_cnt;

	err = hci_add_adv_instance(hdev, cp->instance, flags,
				   cp->adv_data_len, cp->data,
				   cp->scan_rsp_len,
				   cp->data + cp->adv_data_len,
				   timeout, duration,
				   HCI_ADV_TX_POWER_NO_PREFERENCE,
				   hdev->le_adv_min_interval,
				   hdev->le_adv_max_interval);
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		goto unlock;
	}

	/* Only trigger an advertising added event if a new instance was
	 * actually added.
	 */
	if (hdev->adv_instance_cnt > prev_instance_cnt)
		mgmt_advertising_added(sk, hdev, cp->instance);

	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed then
		 * cancel the current advertising and schedule the next
		 * instance. If there is only one instance then the overridden
		 * advertising data will be visible right away.
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or the device isn't powered or
	 * there is no instance to be advertised then we have no HCI
	 * communication to make. Simply return.
	 */
	if (!hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !schedule_instance) {
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	/* We're good to go, update advertising data, parameters, and start
	 * advertising.
	 */
	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	/* Queue the instance chosen for scheduling, which may differ from
	 * the instance that was just registered.
	 */
	cp->instance = schedule_instance;

	err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd,
				 add_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8208 
8209 static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data,
8210 					int err)
8211 {
8212 	struct mgmt_pending_cmd *cmd = data;
8213 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8214 	struct mgmt_rp_add_ext_adv_params rp;
8215 	struct adv_info *adv;
8216 	u32 flags;
8217 
8218 	BT_DBG("%s", hdev->name);
8219 
8220 	hci_dev_lock(hdev);
8221 
8222 	adv = hci_find_adv_instance(hdev, cp->instance);
8223 	if (!adv)
8224 		goto unlock;
8225 
8226 	rp.instance = cp->instance;
8227 	rp.tx_power = adv->tx_power;
8228 
8229 	/* While we're at it, inform userspace of the available space for this
8230 	 * advertisement, given the flags that will be used.
8231 	 */
8232 	flags = __le32_to_cpu(cp->flags);
8233 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8234 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8235 
8236 	if (err) {
8237 		/* If this advertisement was previously advertising and we
8238 		 * failed to update it, we signal that it has been removed and
8239 		 * delete its structure
8240 		 */
8241 		if (!adv->pending)
8242 			mgmt_advertising_removed(cmd->sk, hdev, cp->instance);
8243 
8244 		hci_remove_adv_instance(hdev, cp->instance);
8245 
8246 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8247 				mgmt_status(err));
8248 	} else {
8249 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8250 				  mgmt_status(err), &rp, sizeof(rp));
8251 	}
8252 
8253 unlock:
8254 	if (cmd)
8255 		mgmt_pending_free(cmd);
8256 
8257 	hci_dev_unlock(hdev);
8258 }
8259 
8260 static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data)
8261 {
8262 	struct mgmt_pending_cmd *cmd = data;
8263 	struct mgmt_cp_add_ext_adv_params *cp = cmd->param;
8264 
8265 	return hci_setup_ext_adv_instance_sync(hdev, cp->instance);
8266 }
8267 
8268 static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,
8269 			      void *data, u16 data_len)
8270 {
8271 	struct mgmt_cp_add_ext_adv_params *cp = data;
8272 	struct mgmt_rp_add_ext_adv_params rp;
8273 	struct mgmt_pending_cmd *cmd = NULL;
8274 	u32 flags, min_interval, max_interval;
8275 	u16 timeout, duration;
8276 	u8 status;
8277 	s8 tx_power;
8278 	int err;
8279 
8280 	BT_DBG("%s", hdev->name);
8281 
8282 	status = mgmt_le_support(hdev);
8283 	if (status)
8284 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8285 				       status);
8286 
8287 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8288 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8289 				       MGMT_STATUS_INVALID_PARAMS);
8290 
8291 	/* The purpose of breaking add_advertising into two separate MGMT calls
8292 	 * for params and data is to allow more parameters to be added to this
8293 	 * structure in the future. For this reason, we verify that we have the
8294 	 * bare minimum structure we know of when the interface was defined. Any
8295 	 * extra parameters we don't know about will be ignored in this request.
8296 	 */
8297 	if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE)
8298 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
8299 				       MGMT_STATUS_INVALID_PARAMS);
8300 
8301 	flags = __le32_to_cpu(cp->flags);
8302 
8303 	if (!requested_adv_flags_are_valid(hdev, flags))
8304 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8305 				       MGMT_STATUS_INVALID_PARAMS);
8306 
8307 	hci_dev_lock(hdev);
8308 
8309 	/* In new interface, we require that we are powered to register */
8310 	if (!hdev_is_powered(hdev)) {
8311 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8312 				      MGMT_STATUS_REJECTED);
8313 		goto unlock;
8314 	}
8315 
8316 	if (adv_busy(hdev)) {
8317 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8318 				      MGMT_STATUS_BUSY);
8319 		goto unlock;
8320 	}
8321 
8322 	/* Parse defined parameters from request, use defaults otherwise */
8323 	timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ?
8324 		  __le16_to_cpu(cp->timeout) : 0;
8325 
8326 	duration = (flags & MGMT_ADV_PARAM_DURATION) ?
8327 		   __le16_to_cpu(cp->duration) :
8328 		   hdev->def_multi_adv_rotation_duration;
8329 
8330 	min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8331 		       __le32_to_cpu(cp->min_interval) :
8332 		       hdev->le_adv_min_interval;
8333 
8334 	max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ?
8335 		       __le32_to_cpu(cp->max_interval) :
8336 		       hdev->le_adv_max_interval;
8337 
8338 	tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ?
8339 		   cp->tx_power :
8340 		   HCI_ADV_TX_POWER_NO_PREFERENCE;
8341 
8342 	/* Create advertising instance with no advertising or response data */
8343 	err = hci_add_adv_instance(hdev, cp->instance, flags,
8344 				   0, NULL, 0, NULL, timeout, duration,
8345 				   tx_power, min_interval, max_interval);
8346 
8347 	if (err < 0) {
8348 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS,
8349 				      MGMT_STATUS_FAILED);
8350 		goto unlock;
8351 	}
8352 
8353 	/* Submit request for advertising params if ext adv available */
8354 	if (ext_adv_capable(hdev)) {
8355 		cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev,
8356 				       data, data_len);
8357 		if (!cmd) {
8358 			err = -ENOMEM;
8359 			hci_remove_adv_instance(hdev, cp->instance);
8360 			goto unlock;
8361 		}
8362 
8363 		err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd,
8364 					 add_ext_adv_params_complete);
8365 		if (err < 0)
8366 			mgmt_pending_free(cmd);
8367 	} else {
8368 		rp.instance = cp->instance;
8369 		rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
8370 		rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8371 		rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8372 		err = mgmt_cmd_complete(sk, hdev->id,
8373 					MGMT_OP_ADD_EXT_ADV_PARAMS,
8374 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8375 	}
8376 
8377 unlock:
8378 	hci_dev_unlock(hdev);
8379 
8380 	return err;
8381 }
8382 
8383 static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err)
8384 {
8385 	struct mgmt_pending_cmd *cmd = data;
8386 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8387 	struct mgmt_rp_add_advertising rp;
8388 
8389 	add_adv_complete(hdev, cmd->sk, cp->instance, err);
8390 
8391 	memset(&rp, 0, sizeof(rp));
8392 
8393 	rp.instance = cp->instance;
8394 
8395 	if (err)
8396 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8397 				mgmt_status(err));
8398 	else
8399 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8400 				  mgmt_status(err), &rp, sizeof(rp));
8401 
8402 	mgmt_pending_free(cmd);
8403 }
8404 
8405 static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data)
8406 {
8407 	struct mgmt_pending_cmd *cmd = data;
8408 	struct mgmt_cp_add_ext_adv_data *cp = cmd->param;
8409 	int err;
8410 
8411 	if (ext_adv_capable(hdev)) {
8412 		err = hci_update_adv_data_sync(hdev, cp->instance);
8413 		if (err)
8414 			return err;
8415 
8416 		err = hci_update_scan_rsp_data_sync(hdev, cp->instance);
8417 		if (err)
8418 			return err;
8419 
8420 		return hci_enable_ext_advertising_sync(hdev, cp->instance);
8421 	}
8422 
8423 	return hci_schedule_adv_instance_sync(hdev, cp->instance, true);
8424 }
8425 
/* MGMT_OP_ADD_EXT_ADV_DATA handler.
 *
 * Second half of the two-call extended advertising registration: stores
 * the advertising and scan response data for an instance previously
 * created by MGMT_OP_ADD_EXT_ADV_PARAMS and schedules it. A validation
 * or queueing failure removes the instance again (clear_new_instance).
 */
static int add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data,
			    u16 data_len)
{
	struct mgmt_cp_add_ext_adv_data *cp = data;
	struct mgmt_rp_add_ext_adv_data rp;
	u8 schedule_instance = 0;
	struct adv_info *next_instance;
	struct adv_info *adv_instance;
	int err = 0;
	struct mgmt_pending_cmd *cmd;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* The instance must have been created by a prior params call */
	adv_instance = hci_find_adv_instance(hdev, cp->instance);

	if (!adv_instance) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* In new interface, we require that we are powered to register */
	if (!hdev_is_powered(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_REJECTED);
		goto clear_new_instance;
	}

	if (adv_busy(hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_BUSY);
		goto clear_new_instance;
	}

	/* Validate new data */
	if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data,
			       cp->adv_data_len, true) ||
	    !tlv_data_is_valid(hdev, adv_instance->flags, cp->data +
			       cp->adv_data_len, cp->scan_rsp_len, false)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
				      MGMT_STATUS_INVALID_PARAMS);
		goto clear_new_instance;
	}

	/* Set the data in the advertising instance */
	hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len,
				  cp->data, cp->scan_rsp_len,
				  cp->data + cp->adv_data_len);

	/* If using software rotation, determine next instance to use */
	if (hdev->cur_adv_instance == cp->instance) {
		/* If the currently advertised instance is being changed
		 * then cancel the current advertising and schedule the
		 * next instance. If there is only one instance then the
		 * overridden advertising data will be visible right
		 * away
		 */
		cancel_adv_timeout(hdev);

		next_instance = hci_get_next_instance(hdev, cp->instance);
		if (next_instance)
			schedule_instance = next_instance->instance;
	} else if (!hdev->adv_instance_timeout) {
		/* Immediately advertise the new instance if no other
		 * instance is currently being advertised.
		 */
		schedule_instance = cp->instance;
	}

	/* If the HCI_ADVERTISING flag is set or there is no instance to
	 * be advertised then we have no HCI communication to make.
	 * Simply return.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) {
		if (adv_instance->pending) {
			mgmt_advertising_added(sk, hdev, cp->instance);
			adv_instance->pending = false;
		}
		rp.instance = cp->instance;
		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA,
					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto clear_new_instance;
	}

	err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd,
				 add_ext_adv_data_complete);
	if (err < 0) {
		mgmt_pending_free(cmd);
		goto clear_new_instance;
	}

	/* We were successful in updating data, so trigger advertising_added
	 * event if this is an instance that wasn't previously advertising. If
	 * a failure occurs in the requests we initiated, we will remove the
	 * instance again in add_advertising_complete
	 */
	if (adv_instance->pending)
		mgmt_advertising_added(sk, hdev, cp->instance);

	goto unlock;

clear_new_instance:
	hci_remove_adv_instance(hdev, cp->instance);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8544 
8545 static void remove_advertising_complete(struct hci_dev *hdev, void *data,
8546 					int err)
8547 {
8548 	struct mgmt_pending_cmd *cmd = data;
8549 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8550 	struct mgmt_rp_remove_advertising rp;
8551 
8552 	bt_dev_dbg(hdev, "err %d", err);
8553 
8554 	memset(&rp, 0, sizeof(rp));
8555 	rp.instance = cp->instance;
8556 
8557 	if (err)
8558 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
8559 				mgmt_status(err));
8560 	else
8561 		mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
8562 				  MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8563 
8564 	mgmt_pending_free(cmd);
8565 }
8566 
8567 static int remove_advertising_sync(struct hci_dev *hdev, void *data)
8568 {
8569 	struct mgmt_pending_cmd *cmd = data;
8570 	struct mgmt_cp_remove_advertising *cp = cmd->param;
8571 	int err;
8572 
8573 	err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true);
8574 	if (err)
8575 		return err;
8576 
8577 	if (list_empty(&hdev->adv_instances))
8578 		err = hci_disable_advertising_sync(hdev);
8579 
8580 	return err;
8581 }
8582 
/* Handle MGMT_OP_REMOVE_ADVERTISING.
 *
 * cp->instance selects one advertising instance; the value 0 means
 * "all instances". The HCI work is queued via hci_cmd_sync_queue()
 * and the reply is sent from remove_advertising_complete().
 */
static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
			      void *data, u16 data_len)
{
	struct mgmt_cp_remove_advertising *cp = data;
	struct mgmt_pending_cmd *cmd;
	int err;

	bt_dev_dbg(hdev, "sock %p", sk);

	hci_dev_lock(hdev);

	/* A non-zero instance must refer to an existing instance. */
	if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	/* Don't race with an in-flight Set LE command. */
	if (pending_find(MGMT_OP_SET_LE, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	/* Nothing to remove if no instances are registered at all. */
	if (list_empty(&hdev->adv_instances)) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
				      MGMT_STATUS_INVALID_PARAMS);
		goto unlock;
	}

	cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
			       data_len);
	if (!cmd) {
		err = -ENOMEM;
		goto unlock;
	}

	err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd,
				 remove_advertising_complete);
	if (err < 0)
		mgmt_pending_free(cmd);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
8630 
8631 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
8632 			     void *data, u16 data_len)
8633 {
8634 	struct mgmt_cp_get_adv_size_info *cp = data;
8635 	struct mgmt_rp_get_adv_size_info rp;
8636 	u32 flags, supported_flags;
8637 
8638 	bt_dev_dbg(hdev, "sock %p", sk);
8639 
8640 	if (!lmp_le_capable(hdev))
8641 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8642 				       MGMT_STATUS_REJECTED);
8643 
8644 	if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets)
8645 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8646 				       MGMT_STATUS_INVALID_PARAMS);
8647 
8648 	flags = __le32_to_cpu(cp->flags);
8649 
8650 	/* The current implementation only supports a subset of the specified
8651 	 * flags.
8652 	 */
8653 	supported_flags = get_supported_adv_flags(hdev);
8654 	if (flags & ~supported_flags)
8655 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8656 				       MGMT_STATUS_INVALID_PARAMS);
8657 
8658 	rp.instance = cp->instance;
8659 	rp.flags = cp->flags;
8660 	rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
8661 	rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
8662 
8663 	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
8664 				 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
8665 }
8666 
/* Table of mgmt command handlers, indexed by mgmt opcode. Each entry
 * carries the handler, the expected parameter size (a minimum when
 * HCI_MGMT_VAR_LEN is set) and optional HCI_MGMT_* flags controlling
 * whether a controller index is required (NO_HDEV / HDEV_OPTIONAL),
 * whether unconfigured controllers may be targeted (UNCONFIGURED) and
 * whether untrusted sockets may issue the command (UNTRUSTED).
 */
static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ NULL }, /* 0x0000 (no command) */
	{ read_version,            MGMT_READ_VERSION_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_commands,           MGMT_READ_COMMANDS_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_index_list,         MGMT_READ_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_controller_info,    MGMT_READ_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_powered,             MGMT_SETTING_SIZE },
	{ set_discoverable,        MGMT_SET_DISCOVERABLE_SIZE },
	{ set_connectable,         MGMT_SETTING_SIZE },
	{ set_fast_connectable,    MGMT_SETTING_SIZE },
	{ set_bondable,            MGMT_SETTING_SIZE },
	{ set_link_security,       MGMT_SETTING_SIZE },
	{ set_ssp,                 MGMT_SETTING_SIZE },
	{ set_hs,                  MGMT_SETTING_SIZE },
	{ set_le,                  MGMT_SETTING_SIZE },
	{ set_dev_class,           MGMT_SET_DEV_CLASS_SIZE },
	{ set_local_name,          MGMT_SET_LOCAL_NAME_SIZE },
	{ add_uuid,                MGMT_ADD_UUID_SIZE },
	{ remove_uuid,             MGMT_REMOVE_UUID_SIZE },
	{ load_link_keys,          MGMT_LOAD_LINK_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ load_long_term_keys,     MGMT_LOAD_LONG_TERM_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ disconnect,              MGMT_DISCONNECT_SIZE },
	{ get_connections,         MGMT_GET_CONNECTIONS_SIZE },
	{ pin_code_reply,          MGMT_PIN_CODE_REPLY_SIZE },
	{ pin_code_neg_reply,      MGMT_PIN_CODE_NEG_REPLY_SIZE },
	{ set_io_capability,       MGMT_SET_IO_CAPABILITY_SIZE },
	{ pair_device,             MGMT_PAIR_DEVICE_SIZE },
	{ cancel_pair_device,      MGMT_CANCEL_PAIR_DEVICE_SIZE },
	{ unpair_device,           MGMT_UNPAIR_DEVICE_SIZE },
	{ user_confirm_reply,      MGMT_USER_CONFIRM_REPLY_SIZE },
	{ user_confirm_neg_reply,  MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
	{ user_passkey_reply,      MGMT_USER_PASSKEY_REPLY_SIZE },
	{ user_passkey_neg_reply,  MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
	{ read_local_oob_data,     MGMT_READ_LOCAL_OOB_DATA_SIZE },
	{ add_remote_oob_data,     MGMT_ADD_REMOTE_OOB_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_remote_oob_data,  MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
	{ start_discovery,         MGMT_START_DISCOVERY_SIZE },
	{ stop_discovery,          MGMT_STOP_DISCOVERY_SIZE },
	{ confirm_name,            MGMT_CONFIRM_NAME_SIZE },
	{ block_device,            MGMT_BLOCK_DEVICE_SIZE },
	{ unblock_device,          MGMT_UNBLOCK_DEVICE_SIZE },
	{ set_device_id,           MGMT_SET_DEVICE_ID_SIZE },
	{ set_advertising,         MGMT_SETTING_SIZE },
	{ set_bredr,               MGMT_SETTING_SIZE },
	{ set_static_address,      MGMT_SET_STATIC_ADDRESS_SIZE },
	{ set_scan_params,         MGMT_SET_SCAN_PARAMS_SIZE },
	{ set_secure_conn,         MGMT_SETTING_SIZE },
	{ set_debug_keys,          MGMT_SETTING_SIZE },
	{ set_privacy,             MGMT_SET_PRIVACY_SIZE },
	{ load_irks,               MGMT_LOAD_IRKS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_conn_info,           MGMT_GET_CONN_INFO_SIZE },
	{ get_clock_info,          MGMT_GET_CLOCK_INFO_SIZE },
	{ add_device,              MGMT_ADD_DEVICE_SIZE },
	{ remove_device,           MGMT_REMOVE_DEVICE_SIZE },
	{ load_conn_param,         MGMT_LOAD_CONN_PARAM_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_unconf_index_list,  MGMT_READ_UNCONF_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_config_info,        MGMT_READ_CONFIG_INFO_SIZE,
						HCI_MGMT_UNCONFIGURED |
						HCI_MGMT_UNTRUSTED },
	{ set_external_config,     MGMT_SET_EXTERNAL_CONFIG_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ set_public_address,      MGMT_SET_PUBLIC_ADDRESS_SIZE,
						HCI_MGMT_UNCONFIGURED },
	{ start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
	{ read_ext_index_list,     MGMT_READ_EXT_INDEX_LIST_SIZE,
						HCI_MGMT_NO_HDEV |
						HCI_MGMT_UNTRUSTED },
	{ read_adv_features,       MGMT_READ_ADV_FEATURES_SIZE },
	{ add_advertising,	   MGMT_ADD_ADVERTISING_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_advertising,	   MGMT_REMOVE_ADVERTISING_SIZE },
	{ get_adv_size_info,       MGMT_GET_ADV_SIZE_INFO_SIZE },
	{ start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
	{ read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_appearance,	   MGMT_SET_APPEARANCE_SIZE },
	{ get_phy_configuration,   MGMT_GET_PHY_CONFIGURATION_SIZE },
	{ set_phy_configuration,   MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys,	   MGMT_OP_SET_BLOCKED_KEYS_SIZE,
						HCI_MGMT_VAR_LEN },
	{ set_wideband_speech,	   MGMT_SETTING_SIZE },
	{ read_controller_cap,     MGMT_READ_CONTROLLER_CAP_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ read_exp_features_info,  MGMT_READ_EXP_FEATURES_INFO_SIZE,
						HCI_MGMT_UNTRUSTED |
						HCI_MGMT_HDEV_OPTIONAL },
	{ set_exp_feature,         MGMT_SET_EXP_FEATURE_SIZE,
						HCI_MGMT_VAR_LEN |
						HCI_MGMT_HDEV_OPTIONAL },
	{ read_def_system_config,  MGMT_READ_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_system_config,   MGMT_SET_DEF_SYSTEM_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_UNTRUSTED },
	{ set_def_runtime_config,  MGMT_SET_DEF_RUNTIME_CONFIG_SIZE,
						HCI_MGMT_VAR_LEN },
	{ get_device_flags,        MGMT_GET_DEVICE_FLAGS_SIZE },
	{ set_device_flags,        MGMT_SET_DEVICE_FLAGS_SIZE },
	{ read_adv_mon_features,   MGMT_READ_ADV_MONITOR_FEATURES_SIZE },
	{ add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE,
						HCI_MGMT_VAR_LEN },
	{ remove_adv_monitor,      MGMT_REMOVE_ADV_MONITOR_SIZE },
	{ add_ext_adv_params,      MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,
						HCI_MGMT_VAR_LEN },
	{ add_adv_patterns_monitor_rssi,
				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE,
						HCI_MGMT_VAR_LEN },
};
8794 
/* Announce a newly registered controller. Legacy (Unconfigured) Index
 * Added events go to sockets using the older event filters, while
 * Extended Index Added additionally carries the controller type and
 * bus for newer clients.
 */
void mgmt_index_added(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;

	/* Raw devices are not exposed over the management interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;	/* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;	/* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;	/* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8826 
/* Announce removal of a controller. For primary controllers all
 * still-pending commands are first failed with INVALID_INDEX; then
 * legacy and extended Index Removed events are emitted.
 */
void mgmt_index_removed(struct hci_dev *hdev)
{
	struct mgmt_ev_ext_index ev;
	u8 status = MGMT_STATUS_INVALID_INDEX;

	/* Raw devices are not exposed over the management interface. */
	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return;

	switch (hdev->dev_type) {
	case HCI_PRIMARY:
		/* Opcode 0 matches every pending command. */
		mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
			mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
					 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
			ev.type = 0x01;	/* unconfigured controller */
		} else {
			mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
					 HCI_MGMT_INDEX_EVENTS);
			ev.type = 0x00;	/* configured controller */
		}
		break;
	case HCI_AMP:
		ev.type = 0x02;	/* AMP controller */
		break;
	default:
		return;
	}

	ev.bus = hdev->bus;

	mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
			 HCI_MGMT_EXT_INDEX_EVENTS);
}
8861 
/* Called once a power-on attempt has finished with result err.
 * Responds to pending Set Powered commands and emits New Settings.
 */
void mgmt_power_on(struct hci_dev *hdev, int err)
{
	struct cmd_lookup match = { NULL, hdev };

	bt_dev_dbg(hdev, "err %d", err);

	hci_dev_lock(hdev);

	if (!err) {
		/* On success re-arm stored LE actions and passive scan. */
		restart_le_actions(hdev);
		hci_update_passive_scan(hdev);
	}

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	new_settings(hdev, match.sk);

	/* settings_rsp took a reference on the first socket it saw. */
	if (match.sk)
		sock_put(match.sk);

	hci_dev_unlock(hdev);
}
8884 
/* Finish a power-off at the mgmt level: answer all pending commands,
 * clear the published class of device and emit New Settings.
 */
void __mgmt_power_off(struct hci_dev *hdev)
{
	struct cmd_lookup match = { NULL, hdev };
	u8 status, zero_cod[] = { 0, 0, 0 };

	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);

	/* If the power off is because of hdev unregistration let
	 * use the appropriate INVALID_INDEX status. Otherwise use
	 * NOT_POWERED. We cover both scenarios here since later in
	 * mgmt_index_removed() any hci_conn callbacks will have already
	 * been triggered, potentially causing misleading DISCONNECTED
	 * status responses.
	 */
	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
		status = MGMT_STATUS_INVALID_INDEX;
	else
		status = MGMT_STATUS_NOT_POWERED;

	/* Opcode 0 matches every remaining pending command. */
	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);

	/* If a class of device was set, report it going back to zero. */
	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
				   zero_cod, sizeof(zero_cod),
				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
8918 
8919 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
8920 {
8921 	struct mgmt_pending_cmd *cmd;
8922 	u8 status;
8923 
8924 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
8925 	if (!cmd)
8926 		return;
8927 
8928 	if (err == -ERFKILL)
8929 		status = MGMT_STATUS_RFKILLED;
8930 	else
8931 		status = MGMT_STATUS_FAILED;
8932 
8933 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
8934 
8935 	mgmt_pending_remove(cmd);
8936 }
8937 
8938 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
8939 		       bool persistent)
8940 {
8941 	struct mgmt_ev_new_link_key ev;
8942 
8943 	memset(&ev, 0, sizeof(ev));
8944 
8945 	ev.store_hint = persistent;
8946 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
8947 	ev.key.addr.type = BDADDR_BREDR;
8948 	ev.key.type = key->type;
8949 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
8950 	ev.key.pin_len = key->pin_len;
8951 
8952 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
8953 }
8954 
8955 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
8956 {
8957 	switch (ltk->type) {
8958 	case SMP_LTK:
8959 	case SMP_LTK_RESPONDER:
8960 		if (ltk->authenticated)
8961 			return MGMT_LTK_AUTHENTICATED;
8962 		return MGMT_LTK_UNAUTHENTICATED;
8963 	case SMP_LTK_P256:
8964 		if (ltk->authenticated)
8965 			return MGMT_LTK_P256_AUTH;
8966 		return MGMT_LTK_P256_UNAUTH;
8967 	case SMP_LTK_P256_DEBUG:
8968 		return MGMT_LTK_P256_DEBUG;
8969 	}
8970 
8971 	return MGMT_LTK_UNAUTHENTICATED;
8972 }
8973 
/* Emit a New Long Term Key event so userspace can persist the key
 * when store_hint allows it.
 */
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
{
	struct mgmt_ev_new_long_term_key ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store long term keys. Their addresses will change the
	 * next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the long term key is stored. If the remote
	 * identity is known, the long term keys are internally
	 * mapped to the identity address. So allow static random
	 * and public addresses here.
	 */
	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
	ev.key.type = mgmt_ltk_type(key);
	ev.key.enc_size = key->enc_size;
	ev.key.ediv = key->ediv;
	ev.key.rand = key->rand;

	/* SMP_LTK marks the key distributed by the initiator. */
	if (key->type == SMP_LTK)
		ev.key.initiator = 1;

	/* Make sure we copy only the significant bytes based on the
	 * encryption key size, and set the rest of the value to zeroes.
	 */
	memcpy(ev.key.val, key->val, key->enc_size);
	memset(ev.key.val + key->enc_size, 0,
	       sizeof(ev.key.val) - key->enc_size);

	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}
9016 
9017 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
9018 {
9019 	struct mgmt_ev_new_irk ev;
9020 
9021 	memset(&ev, 0, sizeof(ev));
9022 
9023 	ev.store_hint = persistent;
9024 
9025 	bacpy(&ev.rpa, &irk->rpa);
9026 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
9027 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
9028 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
9029 
9030 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
9031 }
9032 
/* Emit a New Signature Resolving Key event so userspace can persist
 * the CSRK when store_hint allows it.
 */
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
		   bool persistent)
{
	struct mgmt_ev_new_csrk ev;

	memset(&ev, 0, sizeof(ev));

	/* Devices using resolvable or non-resolvable random addresses
	 * without providing an identity resolving key don't require
	 * to store signature resolving keys. Their addresses will change
	 * the next time around.
	 *
	 * Only when a remote device provides an identity address
	 * make sure the signature resolving key is stored. So allow
	 * static random and public addresses here.
	 */
	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
		ev.store_hint = 0x00;
	else
		ev.store_hint = persistent;

	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
	ev.key.type = csrk->type;
	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));

	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
}
9062 
9063 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
9064 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
9065 			 u16 max_interval, u16 latency, u16 timeout)
9066 {
9067 	struct mgmt_ev_new_conn_param ev;
9068 
9069 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
9070 		return;
9071 
9072 	memset(&ev, 0, sizeof(ev));
9073 	bacpy(&ev.addr.bdaddr, bdaddr);
9074 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
9075 	ev.store_hint = store_hint;
9076 	ev.min_interval = cpu_to_le16(min_interval);
9077 	ev.max_interval = cpu_to_le16(max_interval);
9078 	ev.latency = cpu_to_le16(latency);
9079 	ev.timeout = cpu_to_le16(timeout);
9080 
9081 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
9082 }
9083 
9084 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
9085 			   u8 *name, u8 name_len)
9086 {
9087 	struct sk_buff *skb;
9088 	struct mgmt_ev_device_connected *ev;
9089 	u16 eir_len = 0;
9090 	u32 flags = 0;
9091 
9092 	/* allocate buff for LE or BR/EDR adv */
9093 	if (conn->le_adv_data_len > 0)
9094 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9095 				     sizeof(*ev) + conn->le_adv_data_len);
9096 	else
9097 		skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED,
9098 				     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) +
9099 				     eir_precalc_len(sizeof(conn->dev_class)));
9100 
9101 	ev = skb_put(skb, sizeof(*ev));
9102 	bacpy(&ev->addr.bdaddr, &conn->dst);
9103 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
9104 
9105 	if (conn->out)
9106 		flags |= MGMT_DEV_FOUND_INITIATED_CONN;
9107 
9108 	ev->flags = __cpu_to_le32(flags);
9109 
9110 	/* We must ensure that the EIR Data fields are ordered and
9111 	 * unique. Keep it simple for now and avoid the problem by not
9112 	 * adding any BR/EDR data to the LE adv.
9113 	 */
9114 	if (conn->le_adv_data_len > 0) {
9115 		skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len);
9116 		eir_len = conn->le_adv_data_len;
9117 	} else {
9118 		if (name)
9119 			eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9120 
9121 		if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class)))
9122 			eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV,
9123 						    conn->dev_class, sizeof(conn->dev_class));
9124 	}
9125 
9126 	ev->eir_len = cpu_to_le16(eir_len);
9127 
9128 	mgmt_event_skb(skb, NULL);
9129 }
9130 
9131 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
9132 {
9133 	struct sock **sk = data;
9134 
9135 	cmd->cmd_complete(cmd, 0);
9136 
9137 	*sk = cmd->sk;
9138 	sock_hold(*sk);
9139 
9140 	mgmt_pending_remove(cmd);
9141 }
9142 
9143 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
9144 {
9145 	struct hci_dev *hdev = data;
9146 	struct mgmt_cp_unpair_device *cp = cmd->param;
9147 
9148 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
9149 
9150 	cmd->cmd_complete(cmd, 0);
9151 	mgmt_pending_remove(cmd);
9152 }
9153 
9154 bool mgmt_powering_down(struct hci_dev *hdev)
9155 {
9156 	struct mgmt_pending_cmd *cmd;
9157 	struct mgmt_mode *cp;
9158 
9159 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
9160 	if (!cmd)
9161 		return false;
9162 
9163 	cp = cmd->param;
9164 	if (!cp->val)
9165 		return true;
9166 
9167 	return false;
9168 }
9169 
/* Emit a Device Disconnected event and complete any pending
 * Disconnect or Unpair Device commands affected by it.
 */
void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
			      u8 link_type, u8 addr_type, u8 reason,
			      bool mgmt_connected)
{
	struct mgmt_ev_device_disconnected ev;
	struct sock *sk = NULL;

	/* The connection is still in hci_conn_hash so test for 1
	 * instead of 0 to know if this is the last one.
	 */
	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
		cancel_delayed_work(&hdev->power_off);
		queue_work(hdev->req_workqueue, &hdev->power_off.work);
	}

	/* Only report connections userspace was told about. */
	if (!mgmt_connected)
		return;

	if (link_type != ACL_LINK && link_type != LE_LINK)
		return;

	/* disconnect_rsp hands back a referenced pointer to the issuing
	 * socket so it can be skipped when broadcasting the event below.
	 */
	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);

	bacpy(&ev.addr.bdaddr, bdaddr);
	ev.addr.type = link_to_bdaddr(link_type, addr_type);
	ev.reason = reason;

	/* Report disconnects due to suspend */
	if (hdev->suspended)
		ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND;

	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);

	if (sk)
		sock_put(sk);

	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
			     hdev);
}
9209 
9210 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
9211 			    u8 link_type, u8 addr_type, u8 status)
9212 {
9213 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
9214 	struct mgmt_cp_disconnect *cp;
9215 	struct mgmt_pending_cmd *cmd;
9216 
9217 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
9218 			     hdev);
9219 
9220 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
9221 	if (!cmd)
9222 		return;
9223 
9224 	cp = cmd->param;
9225 
9226 	if (bacmp(bdaddr, &cp->addr.bdaddr))
9227 		return;
9228 
9229 	if (cp->addr.type != bdaddr_type)
9230 		return;
9231 
9232 	cmd->cmd_complete(cmd, mgmt_status(status));
9233 	mgmt_pending_remove(cmd);
9234 }
9235 
9236 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9237 			 u8 addr_type, u8 status)
9238 {
9239 	struct mgmt_ev_connect_failed ev;
9240 
9241 	/* The connection is still in hci_conn_hash so test for 1
9242 	 * instead of 0 to know if this is the last one.
9243 	 */
9244 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
9245 		cancel_delayed_work(&hdev->power_off);
9246 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
9247 	}
9248 
9249 	bacpy(&ev.addr.bdaddr, bdaddr);
9250 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9251 	ev.status = mgmt_status(status);
9252 
9253 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
9254 }
9255 
9256 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
9257 {
9258 	struct mgmt_ev_pin_code_request ev;
9259 
9260 	bacpy(&ev.addr.bdaddr, bdaddr);
9261 	ev.addr.type = BDADDR_BREDR;
9262 	ev.secure = secure;
9263 
9264 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
9265 }
9266 
9267 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9268 				  u8 status)
9269 {
9270 	struct mgmt_pending_cmd *cmd;
9271 
9272 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
9273 	if (!cmd)
9274 		return;
9275 
9276 	cmd->cmd_complete(cmd, mgmt_status(status));
9277 	mgmt_pending_remove(cmd);
9278 }
9279 
9280 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9281 				      u8 status)
9282 {
9283 	struct mgmt_pending_cmd *cmd;
9284 
9285 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
9286 	if (!cmd)
9287 		return;
9288 
9289 	cmd->cmd_complete(cmd, mgmt_status(status));
9290 	mgmt_pending_remove(cmd);
9291 }
9292 
9293 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9294 			      u8 link_type, u8 addr_type, u32 value,
9295 			      u8 confirm_hint)
9296 {
9297 	struct mgmt_ev_user_confirm_request ev;
9298 
9299 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9300 
9301 	bacpy(&ev.addr.bdaddr, bdaddr);
9302 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9303 	ev.confirm_hint = confirm_hint;
9304 	ev.value = cpu_to_le32(value);
9305 
9306 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
9307 			  NULL);
9308 }
9309 
9310 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
9311 			      u8 link_type, u8 addr_type)
9312 {
9313 	struct mgmt_ev_user_passkey_request ev;
9314 
9315 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9316 
9317 	bacpy(&ev.addr.bdaddr, bdaddr);
9318 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9319 
9320 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
9321 			  NULL);
9322 }
9323 
9324 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
9325 				      u8 link_type, u8 addr_type, u8 status,
9326 				      u8 opcode)
9327 {
9328 	struct mgmt_pending_cmd *cmd;
9329 
9330 	cmd = pending_find(opcode, hdev);
9331 	if (!cmd)
9332 		return -ENOENT;
9333 
9334 	cmd->cmd_complete(cmd, mgmt_status(status));
9335 	mgmt_pending_remove(cmd);
9336 
9337 	return 0;
9338 }
9339 
/* Thin wrappers completing the pending user-pairing response command
 * for each of the four confirm/passkey reply opcodes.
 */
int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_CONFIRM_REPLY);
}

int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
}

int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status, MGMT_OP_USER_PASSKEY_REPLY);
}

int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
					 u8 link_type, u8 addr_type, u8 status)
{
	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
					  status,
					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
}
9369 
9370 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
9371 			     u8 link_type, u8 addr_type, u32 passkey,
9372 			     u8 entered)
9373 {
9374 	struct mgmt_ev_passkey_notify ev;
9375 
9376 	bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr);
9377 
9378 	bacpy(&ev.addr.bdaddr, bdaddr);
9379 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
9380 	ev.passkey = __cpu_to_le32(passkey);
9381 	ev.entered = entered;
9382 
9383 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
9384 }
9385 
/* Emit an Auth Failed event for the connection and, when a pairing
 * command is pending for it, complete that command with the same
 * status.
 */
void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
{
	struct mgmt_ev_auth_failed ev;
	struct mgmt_pending_cmd *cmd;
	u8 status = mgmt_status(hci_status);

	bacpy(&ev.addr.bdaddr, &conn->dst);
	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
	ev.status = status;

	cmd = find_pairing(conn);

	/* Skip the event on the socket that issued the pairing command;
	 * it gets the status through the command response instead.
	 */
	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
		    cmd ? cmd->sk : NULL);

	if (cmd) {
		cmd->cmd_complete(cmd, status);
		mgmt_pending_remove(cmd);
	}
}
9406 
/* Result handler for Set Link Security (HCI authentication enable).
 * On error pending commands receive a status response; on success the
 * HCI_LINK_SECURITY flag is synced with the controller's HCI_AUTH
 * state and New Settings is emitted if it actually changed.
 */
void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
{
	struct cmd_lookup match = { NULL, hdev };
	bool changed;

	if (status) {
		u8 mgmt_err = mgmt_status(status);
		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
				     cmd_status_rsp, &mgmt_err);
		return;
	}

	/* Mirror the controller state into the mgmt flag and remember
	 * whether that was an actual change.
	 */
	if (test_bit(HCI_AUTH, &hdev->flags))
		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
	else
		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);

	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
			     &match);

	if (changed)
		new_settings(hdev, match.sk);

	if (match.sk)
		sock_put(match.sk);
}
9433 
9434 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
9435 {
9436 	struct cmd_lookup *match = data;
9437 
9438 	if (match->sk == NULL) {
9439 		match->sk = cmd->sk;
9440 		sock_hold(match->sk);
9441 	}
9442 }
9443 
/* Completion of a class-of-device update triggered by Set Device
 * Class, Add UUID or Remove UUID; broadcasts the new class on success.
 */
void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
				    u8 status)
{
	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };

	/* Find the socket that issued whichever command caused this. */
	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);

	if (!status) {
		/* Class of Device is always 3 bytes. */
		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
		ext_info_changed(hdev, NULL);
	}

	if (match.sk)
		sock_put(match.sk);
}
9462 
9463 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
9464 {
9465 	struct mgmt_cp_set_local_name ev;
9466 	struct mgmt_pending_cmd *cmd;
9467 
9468 	if (status)
9469 		return;
9470 
9471 	memset(&ev, 0, sizeof(ev));
9472 	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
9473 	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
9474 
9475 	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
9476 	if (!cmd) {
9477 		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
9478 
9479 		/* If this is a HCI command related to powering on the
9480 		 * HCI dev don't send any mgmt signals.
9481 		 */
9482 		if (pending_find(MGMT_OP_SET_POWERED, hdev))
9483 			return;
9484 	}
9485 
9486 	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
9487 			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
9488 	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
9489 }
9490 
9491 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
9492 {
9493 	int i;
9494 
9495 	for (i = 0; i < uuid_count; i++) {
9496 		if (!memcmp(uuid, uuids[i], 16))
9497 			return true;
9498 	}
9499 
9500 	return false;
9501 }
9502 
9503 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
9504 {
9505 	u16 parsed = 0;
9506 
9507 	while (parsed < eir_len) {
9508 		u8 field_len = eir[0];
9509 		u8 uuid[16];
9510 		int i;
9511 
9512 		if (field_len == 0)
9513 			break;
9514 
9515 		if (eir_len - parsed < field_len + 1)
9516 			break;
9517 
9518 		switch (eir[1]) {
9519 		case EIR_UUID16_ALL:
9520 		case EIR_UUID16_SOME:
9521 			for (i = 0; i + 3 <= field_len; i += 2) {
9522 				memcpy(uuid, bluetooth_base_uuid, 16);
9523 				uuid[13] = eir[i + 3];
9524 				uuid[12] = eir[i + 2];
9525 				if (has_uuid(uuid, uuid_count, uuids))
9526 					return true;
9527 			}
9528 			break;
9529 		case EIR_UUID32_ALL:
9530 		case EIR_UUID32_SOME:
9531 			for (i = 0; i + 5 <= field_len; i += 4) {
9532 				memcpy(uuid, bluetooth_base_uuid, 16);
9533 				uuid[15] = eir[i + 5];
9534 				uuid[14] = eir[i + 4];
9535 				uuid[13] = eir[i + 3];
9536 				uuid[12] = eir[i + 2];
9537 				if (has_uuid(uuid, uuid_count, uuids))
9538 					return true;
9539 			}
9540 			break;
9541 		case EIR_UUID128_ALL:
9542 		case EIR_UUID128_SOME:
9543 			for (i = 0; i + 17 <= field_len; i += 16) {
9544 				memcpy(uuid, eir + i + 2, 16);
9545 				if (has_uuid(uuid, uuid_count, uuids))
9546 					return true;
9547 			}
9548 			break;
9549 		}
9550 
9551 		parsed += field_len + 1;
9552 		eir += field_len + 1;
9553 	}
9554 
9555 	return false;
9556 }
9557 
9558 static void restart_le_scan(struct hci_dev *hdev)
9559 {
9560 	/* If controller is not scanning we are done. */
9561 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
9562 		return;
9563 
9564 	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
9565 		       hdev->discovery.scan_start +
9566 		       hdev->discovery.scan_duration))
9567 		return;
9568 
9569 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
9570 			   DISCOV_LE_RESTART_DELAY);
9571 }
9572 
9573 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
9574 			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
9575 {
9576 	/* If a RSSI threshold has been specified, and
9577 	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
9578 	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
9579 	 * is set, let it through for further processing, as we might need to
9580 	 * restart the scan.
9581 	 *
9582 	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
9583 	 * the results are also dropped.
9584 	 */
9585 	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9586 	    (rssi == HCI_RSSI_INVALID ||
9587 	    (rssi < hdev->discovery.rssi &&
9588 	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
9589 		return  false;
9590 
9591 	if (hdev->discovery.uuid_count != 0) {
9592 		/* If a list of UUIDs is provided in filter, results with no
9593 		 * matching UUID should be dropped.
9594 		 */
9595 		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
9596 				   hdev->discovery.uuids) &&
9597 		    !eir_has_uuids(scan_rsp, scan_rsp_len,
9598 				   hdev->discovery.uuid_count,
9599 				   hdev->discovery.uuids))
9600 			return false;
9601 	}
9602 
9603 	/* If duplicate filtering does not report RSSI changes, then restart
9604 	 * scanning to ensure updated result with updated RSSI values.
9605 	 */
9606 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
9607 		restart_le_scan(hdev);
9608 
9609 		/* Validate RSSI value against the RSSI threshold once more. */
9610 		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
9611 		    rssi < hdev->discovery.rssi)
9612 			return false;
9613 	}
9614 
9615 	return true;
9616 }
9617 
9618 void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,
9619 				  bdaddr_t *bdaddr, u8 addr_type)
9620 {
9621 	struct mgmt_ev_adv_monitor_device_lost ev;
9622 
9623 	ev.monitor_handle = cpu_to_le16(handle);
9624 	bacpy(&ev.addr.bdaddr, bdaddr);
9625 	ev.addr.type = addr_type;
9626 
9627 	mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev),
9628 		   NULL);
9629 }
9630 
9631 static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev,
9632 					       struct sk_buff *skb,
9633 					       struct sock *skip_sk,
9634 					       u16 handle)
9635 {
9636 	struct sk_buff *advmon_skb;
9637 	size_t advmon_skb_len;
9638 	__le16 *monitor_handle;
9639 
9640 	if (!skb)
9641 		return;
9642 
9643 	advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) -
9644 			  sizeof(struct mgmt_ev_device_found)) + skb->len;
9645 	advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND,
9646 				    advmon_skb_len);
9647 	if (!advmon_skb)
9648 		return;
9649 
9650 	/* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except
9651 	 * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and
9652 	 * store monitor_handle of the matched monitor.
9653 	 */
9654 	monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle));
9655 	*monitor_handle = cpu_to_le16(handle);
9656 	skb_put_data(advmon_skb, skb->data, skb->len);
9657 
9658 	mgmt_event_skb(advmon_skb, skip_sk);
9659 }
9660 
/* Deliver an advertisement report to management sockets, as DEVICE_FOUND
 * and/or per-monitor ADV_MONITOR_DEVICE_FOUND events. Consumes @skb: it
 * is either handed off to mgmt_event_skb() or freed here.
 */
static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, bool report_device,
					  struct sk_buff *skb,
					  struct sock *skip_sk)
{
	struct monitored_device *dev, *tmp;
	bool matched = false;
	bool notified = false;

	/* We have received the Advertisement Report because:
	 * 1. the kernel has initiated active discovery
	 * 2. if not, we have pend_le_reports > 0 in which case we are doing
	 *    passive scanning
	 * 3. if none of the above is true, we have one or more active
	 *    Advertisement Monitor
	 *
	 * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND
	 * and report ONLY one advertisement per device for the matched Monitor
	 * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 *
	 * For case 3, since we are not active scanning and all advertisements
	 * received are due to a matched Advertisement Monitor, report all
	 * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event.
	 */
	if (report_device && !hdev->advmon_pend_notify) {
		mgmt_event_skb(skb, skip_sk);
		return;
	}

	/* Cleared here and re-set below if any monitored device still has a
	 * notification outstanding after this report.
	 */
	hdev->advmon_pend_notify = false;

	list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) {
		if (!bacmp(&dev->bdaddr, bdaddr)) {
			matched = true;

			/* Notify at most once per monitored device. */
			if (!dev->notified) {
				mgmt_send_adv_monitor_device_found(hdev, skb,
								   skip_sk,
								   dev->handle);
				notified = true;
				dev->notified = true;
			}
		}

		if (!dev->notified)
			hdev->advmon_pend_notify = true;
	}

	if (!report_device &&
	    ((matched && !notified) || !msft_monitor_supported(hdev))) {
		/* Handle 0 indicates that we are not active scanning and this
		 * is a subsequent advertisement report for an already matched
		 * Advertisement Monitor or the controller offloading support
		 * is not available.
		 */
		mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0);
	}

	/* The caller's skb must either be sent or freed exactly once. */
	if (report_device)
		mgmt_event_skb(skb, skip_sk);
	else
		kfree_skb(skb);
}
9724 
/* Build and deliver a MGMT_EV_DEVICE_FOUND event for a discovered device,
 * applying discovery filters (service filter, limited discoverable) before
 * any allocation. The assembled skb is handed to
 * mgmt_adv_monitor_device_found(), which sends or frees it.
 */
void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
{
	struct sk_buff *skb;
	struct mgmt_ev_device_found *ev;
	bool report_device = hci_discovery_active(hdev);

	/* Don't send events for a non-kernel initiated discovery. With
	 * LE one exception is if we have pend_le_reports > 0 in which
	 * case we're doing passive scanning and want these events.
	 */
	if (!hci_discovery_active(hdev)) {
		if (link_type == ACL_LINK)
			return;
		if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports))
			report_device = true;
		else if (!hci_is_adv_monitoring(hdev))
			return;
	}

	if (hdev->discovery.result_filtering) {
		/* We are using service discovery */
		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
				     scan_rsp_len))
			return;
	}

	if (hdev->discovery.limited) {
		/* Check for limited discoverable bit */
		if (dev_class) {
			if (!(dev_class[1] & 0x20))
				return;
		} else {
			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
			if (!flags || !(flags[0] & LE_AD_LIMITED))
				return;
		}
	}

	/* Allocate skb. The 5 extra bytes are for the potential CoD field */
	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
			     sizeof(*ev) + eir_len + scan_rsp_len + 5);
	if (!skb)
		return;

	ev = skb_put(skb, sizeof(*ev));

	/* In case of device discovery with BR/EDR devices (pre 1.2), the
	 * RSSI value was reported as 0 when not available. This behavior
	 * is kept when using device discovery. This is required for full
	 * backwards compatibility with the API.
	 *
	 * However when using service discovery, the value 127 will be
	 * returned when the RSSI is not available.
	 */
	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
	    link_type == ACL_LINK)
		rssi = 0;

	bacpy(&ev->addr.bdaddr, bdaddr);
	ev->addr.type = link_to_bdaddr(link_type, addr_type);
	ev->rssi = rssi;
	ev->flags = cpu_to_le32(flags);

	if (eir_len > 0)
		/* Copy EIR or advertising data into event */
		skb_put_data(skb, eir, eir_len);

	/* Synthesize a Class of Device EIR field when the caller supplied a
	 * device class but the EIR data does not already contain one.
	 */
	if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) {
		u8 eir_cod[5];

		eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV,
					   dev_class, 3);
		skb_put_data(skb, eir_cod, sizeof(eir_cod));
	}

	if (scan_rsp_len > 0)
		/* Append scan response data to event */
		skb_put_data(skb, scan_rsp, scan_rsp_len);

	/* eir_len covers EIR (plus any synthesized CoD) and scan response. */
	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);

	mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL);
}
9810 
9811 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
9812 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
9813 {
9814 	struct sk_buff *skb;
9815 	struct mgmt_ev_device_found *ev;
9816 	u16 eir_len = 0;
9817 	u32 flags = 0;
9818 
9819 	skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND,
9820 			     sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0));
9821 
9822 	ev = skb_put(skb, sizeof(*ev));
9823 	bacpy(&ev->addr.bdaddr, bdaddr);
9824 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
9825 	ev->rssi = rssi;
9826 
9827 	if (name)
9828 		eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len);
9829 	else
9830 		flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED;
9831 
9832 	ev->eir_len = cpu_to_le16(eir_len);
9833 	ev->flags = cpu_to_le32(flags);
9834 
9835 	mgmt_event_skb(skb, NULL);
9836 }
9837 
9838 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
9839 {
9840 	struct mgmt_ev_discovering ev;
9841 
9842 	bt_dev_dbg(hdev, "discovering %u", discovering);
9843 
9844 	memset(&ev, 0, sizeof(ev));
9845 	ev.type = hdev->discovery.type;
9846 	ev.discovering = discovering;
9847 
9848 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
9849 }
9850 
9851 void mgmt_suspending(struct hci_dev *hdev, u8 state)
9852 {
9853 	struct mgmt_ev_controller_suspend ev;
9854 
9855 	ev.suspend_state = state;
9856 	mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL);
9857 }
9858 
9859 void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr,
9860 		   u8 addr_type)
9861 {
9862 	struct mgmt_ev_controller_resume ev;
9863 
9864 	ev.wake_reason = reason;
9865 	if (bdaddr) {
9866 		bacpy(&ev.addr.bdaddr, bdaddr);
9867 		ev.addr.type = addr_type;
9868 	} else {
9869 		memset(&ev.addr, 0, sizeof(ev.addr));
9870 	}
9871 
9872 	mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL);
9873 }
9874 
/* Management channel descriptor: binds HCI_CHANNEL_CONTROL sockets to the
 * mgmt_handlers command table, with per-hdev setup via mgmt_init_hdev.
 */
static struct hci_mgmt_chan chan = {
	.channel	= HCI_CHANNEL_CONTROL,
	.handler_count	= ARRAY_SIZE(mgmt_handlers),
	.handlers	= mgmt_handlers,
	.hdev_init	= mgmt_init_hdev,
};
9881 
/* Register the management control channel with the HCI socket layer.
 * Returns the result of hci_mgmt_chan_register().
 */
int mgmt_init(void)
{
	return hci_mgmt_chan_register(&chan);
}
9886 
/* Unregister the management control channel on module exit. */
void mgmt_exit(void)
{
	hci_mgmt_chan_unregister(&chan);
}
9891