2 BlueZ - Bluetooth protocol stack for Linux
4 Copyright (C) 2010 Nokia Corporation
5 Copyright (C) 2011-2012 Intel Corporation
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI Management interface */
27 #include <linux/module.h>
28 #include <asm/unaligned.h>
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/hci_sock.h>
33 #include <net/bluetooth/l2cap.h>
34 #include <net/bluetooth/mgmt.h>
36 #include "hci_request.h"
38 #include "mgmt_util.h"
40 #define MGMT_VERSION 1
41 #define MGMT_REVISION 15
43 static const u16 mgmt_commands[] = {
44 MGMT_OP_READ_INDEX_LIST,
47 MGMT_OP_SET_DISCOVERABLE,
48 MGMT_OP_SET_CONNECTABLE,
49 MGMT_OP_SET_FAST_CONNECTABLE,
51 MGMT_OP_SET_LINK_SECURITY,
55 MGMT_OP_SET_DEV_CLASS,
56 MGMT_OP_SET_LOCAL_NAME,
59 MGMT_OP_LOAD_LINK_KEYS,
60 MGMT_OP_LOAD_LONG_TERM_KEYS,
62 MGMT_OP_GET_CONNECTIONS,
63 MGMT_OP_PIN_CODE_REPLY,
64 MGMT_OP_PIN_CODE_NEG_REPLY,
65 MGMT_OP_SET_IO_CAPABILITY,
67 MGMT_OP_CANCEL_PAIR_DEVICE,
68 MGMT_OP_UNPAIR_DEVICE,
69 MGMT_OP_USER_CONFIRM_REPLY,
70 MGMT_OP_USER_CONFIRM_NEG_REPLY,
71 MGMT_OP_USER_PASSKEY_REPLY,
72 MGMT_OP_USER_PASSKEY_NEG_REPLY,
73 MGMT_OP_READ_LOCAL_OOB_DATA,
74 MGMT_OP_ADD_REMOTE_OOB_DATA,
75 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
76 MGMT_OP_START_DISCOVERY,
77 MGMT_OP_STOP_DISCOVERY,
80 MGMT_OP_UNBLOCK_DEVICE,
81 MGMT_OP_SET_DEVICE_ID,
82 MGMT_OP_SET_ADVERTISING,
84 MGMT_OP_SET_STATIC_ADDRESS,
85 MGMT_OP_SET_SCAN_PARAMS,
86 MGMT_OP_SET_SECURE_CONN,
87 MGMT_OP_SET_DEBUG_KEYS,
90 MGMT_OP_GET_CONN_INFO,
91 MGMT_OP_GET_CLOCK_INFO,
93 MGMT_OP_REMOVE_DEVICE,
94 MGMT_OP_LOAD_CONN_PARAM,
95 MGMT_OP_READ_UNCONF_INDEX_LIST,
96 MGMT_OP_READ_CONFIG_INFO,
97 MGMT_OP_SET_EXTERNAL_CONFIG,
98 MGMT_OP_SET_PUBLIC_ADDRESS,
99 MGMT_OP_START_SERVICE_DISCOVERY,
100 MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
101 MGMT_OP_READ_EXT_INDEX_LIST,
102 MGMT_OP_READ_ADV_FEATURES,
103 MGMT_OP_ADD_ADVERTISING,
104 MGMT_OP_REMOVE_ADVERTISING,
105 MGMT_OP_GET_ADV_SIZE_INFO,
106 MGMT_OP_START_LIMITED_DISCOVERY,
107 MGMT_OP_READ_EXT_INFO,
108 MGMT_OP_SET_APPEARANCE,
109 MGMT_OP_SET_BLOCKED_KEYS,
112 static const u16 mgmt_events[] = {
113 MGMT_EV_CONTROLLER_ERROR,
115 MGMT_EV_INDEX_REMOVED,
116 MGMT_EV_NEW_SETTINGS,
117 MGMT_EV_CLASS_OF_DEV_CHANGED,
118 MGMT_EV_LOCAL_NAME_CHANGED,
119 MGMT_EV_NEW_LINK_KEY,
120 MGMT_EV_NEW_LONG_TERM_KEY,
121 MGMT_EV_DEVICE_CONNECTED,
122 MGMT_EV_DEVICE_DISCONNECTED,
123 MGMT_EV_CONNECT_FAILED,
124 MGMT_EV_PIN_CODE_REQUEST,
125 MGMT_EV_USER_CONFIRM_REQUEST,
126 MGMT_EV_USER_PASSKEY_REQUEST,
128 MGMT_EV_DEVICE_FOUND,
130 MGMT_EV_DEVICE_BLOCKED,
131 MGMT_EV_DEVICE_UNBLOCKED,
132 MGMT_EV_DEVICE_UNPAIRED,
133 MGMT_EV_PASSKEY_NOTIFY,
136 MGMT_EV_DEVICE_ADDED,
137 MGMT_EV_DEVICE_REMOVED,
138 MGMT_EV_NEW_CONN_PARAM,
139 MGMT_EV_UNCONF_INDEX_ADDED,
140 MGMT_EV_UNCONF_INDEX_REMOVED,
141 MGMT_EV_NEW_CONFIG_OPTIONS,
142 MGMT_EV_EXT_INDEX_ADDED,
143 MGMT_EV_EXT_INDEX_REMOVED,
144 MGMT_EV_LOCAL_OOB_DATA_UPDATED,
145 MGMT_EV_ADVERTISING_ADDED,
146 MGMT_EV_ADVERTISING_REMOVED,
147 MGMT_EV_EXT_INFO_CHANGED,
150 static const u16 mgmt_untrusted_commands[] = {
151 MGMT_OP_READ_INDEX_LIST,
153 MGMT_OP_READ_UNCONF_INDEX_LIST,
154 MGMT_OP_READ_CONFIG_INFO,
155 MGMT_OP_READ_EXT_INDEX_LIST,
156 MGMT_OP_READ_EXT_INFO,
159 static const u16 mgmt_untrusted_events[] = {
161 MGMT_EV_INDEX_REMOVED,
162 MGMT_EV_NEW_SETTINGS,
163 MGMT_EV_CLASS_OF_DEV_CHANGED,
164 MGMT_EV_LOCAL_NAME_CHANGED,
165 MGMT_EV_UNCONF_INDEX_ADDED,
166 MGMT_EV_UNCONF_INDEX_REMOVED,
167 MGMT_EV_NEW_CONFIG_OPTIONS,
168 MGMT_EV_EXT_INDEX_ADDED,
169 MGMT_EV_EXT_INDEX_REMOVED,
170 MGMT_EV_EXT_INFO_CHANGED,
173 #define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000)
175 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
176 "\x00\x00\x00\x00\x00\x00\x00\x00"
178 /* HCI to MGMT error code conversion table */
179 static u8 mgmt_status_table[] = {
181 MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */
182 MGMT_STATUS_NOT_CONNECTED, /* No Connection */
183 MGMT_STATUS_FAILED, /* Hardware Failure */
184 MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */
185 MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */
186 MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */
187 MGMT_STATUS_NO_RESOURCES, /* Memory Full */
188 MGMT_STATUS_TIMEOUT, /* Connection Timeout */
189 MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */
190 MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */
191 MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */
192 MGMT_STATUS_BUSY, /* Command Disallowed */
193 MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */
194 MGMT_STATUS_REJECTED, /* Rejected Security */
195 MGMT_STATUS_REJECTED, /* Rejected Personal */
196 MGMT_STATUS_TIMEOUT, /* Host Timeout */
197 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */
198 MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */
199 MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */
200 MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */
201 MGMT_STATUS_DISCONNECTED, /* OE Power Off */
202 MGMT_STATUS_DISCONNECTED, /* Connection Terminated */
203 MGMT_STATUS_BUSY, /* Repeated Attempts */
204 MGMT_STATUS_REJECTED, /* Pairing Not Allowed */
205 MGMT_STATUS_FAILED, /* Unknown LMP PDU */
206 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */
207 MGMT_STATUS_REJECTED, /* SCO Offset Rejected */
208 MGMT_STATUS_REJECTED, /* SCO Interval Rejected */
209 MGMT_STATUS_REJECTED, /* Air Mode Rejected */
210 MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */
211 MGMT_STATUS_FAILED, /* Unspecified Error */
212 MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */
213 MGMT_STATUS_FAILED, /* Role Change Not Allowed */
214 MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */
215 MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */
216 MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */
217 MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */
218 MGMT_STATUS_FAILED, /* Unit Link Key Used */
219 MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */
220 MGMT_STATUS_TIMEOUT, /* Instant Passed */
221 MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */
222 MGMT_STATUS_FAILED, /* Transaction Collision */
223 MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */
224 MGMT_STATUS_REJECTED, /* QoS Rejected */
225 MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */
226 MGMT_STATUS_REJECTED, /* Insufficient Security */
227 MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */
228 MGMT_STATUS_BUSY, /* Role Switch Pending */
229 MGMT_STATUS_FAILED, /* Slot Violation */
230 MGMT_STATUS_FAILED, /* Role Switch Failed */
231 MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */
232 MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */
233 MGMT_STATUS_BUSY, /* Host Busy Pairing */
234 MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */
235 MGMT_STATUS_BUSY, /* Controller Busy */
236 MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */
237 MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */
238 MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */
239 MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */
240 MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */
243 static u8 mgmt_status(u8 hci_status)
245 if (hci_status < ARRAY_SIZE(mgmt_status_table))
246 return mgmt_status_table[hci_status];
248 return MGMT_STATUS_FAILED;
251 static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data,
254 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
258 static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data,
259 u16 len, int flag, struct sock *skip_sk)
261 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
265 static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
266 struct sock *skip_sk)
268 return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len,
269 HCI_SOCK_TRUSTED, skip_sk);
272 static u8 le_addr_type(u8 mgmt_addr_type)
274 if (mgmt_addr_type == BDADDR_LE_PUBLIC)
275 return ADDR_LE_DEV_PUBLIC;
277 return ADDR_LE_DEV_RANDOM;
280 void mgmt_fill_version_info(void *ver)
282 struct mgmt_rp_read_version *rp = ver;
284 rp->version = MGMT_VERSION;
285 rp->revision = cpu_to_le16(MGMT_REVISION);
288 static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
291 struct mgmt_rp_read_version rp;
293 BT_DBG("sock %p", sk);
295 mgmt_fill_version_info(&rp);
297 return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0,
301 static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data,
304 struct mgmt_rp_read_commands *rp;
305 u16 num_commands, num_events;
309 BT_DBG("sock %p", sk);
311 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
312 num_commands = ARRAY_SIZE(mgmt_commands);
313 num_events = ARRAY_SIZE(mgmt_events);
315 num_commands = ARRAY_SIZE(mgmt_untrusted_commands);
316 num_events = ARRAY_SIZE(mgmt_untrusted_events);
319 rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16));
321 rp = kmalloc(rp_size, GFP_KERNEL);
325 rp->num_commands = cpu_to_le16(num_commands);
326 rp->num_events = cpu_to_le16(num_events);
328 if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) {
329 __le16 *opcode = rp->opcodes;
331 for (i = 0; i < num_commands; i++, opcode++)
332 put_unaligned_le16(mgmt_commands[i], opcode);
334 for (i = 0; i < num_events; i++, opcode++)
335 put_unaligned_le16(mgmt_events[i], opcode);
337 __le16 *opcode = rp->opcodes;
339 for (i = 0; i < num_commands; i++, opcode++)
340 put_unaligned_le16(mgmt_untrusted_commands[i], opcode);
342 for (i = 0; i < num_events; i++, opcode++)
343 put_unaligned_le16(mgmt_untrusted_events[i], opcode);
346 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0,
353 static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data,
356 struct mgmt_rp_read_index_list *rp;
362 BT_DBG("sock %p", sk);
364 read_lock(&hci_dev_list_lock);
367 list_for_each_entry(d, &hci_dev_list, list) {
368 if (d->dev_type == HCI_PRIMARY &&
369 !hci_dev_test_flag(d, HCI_UNCONFIGURED))
373 rp_len = sizeof(*rp) + (2 * count);
374 rp = kmalloc(rp_len, GFP_ATOMIC);
376 read_unlock(&hci_dev_list_lock);
381 list_for_each_entry(d, &hci_dev_list, list) {
382 if (hci_dev_test_flag(d, HCI_SETUP) ||
383 hci_dev_test_flag(d, HCI_CONFIG) ||
384 hci_dev_test_flag(d, HCI_USER_CHANNEL))
387 /* Devices marked as raw-only are neither configured
388 * nor unconfigured controllers.
390 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
393 if (d->dev_type == HCI_PRIMARY &&
394 !hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
395 rp->index[count++] = cpu_to_le16(d->id);
396 BT_DBG("Added hci%u", d->id);
400 rp->num_controllers = cpu_to_le16(count);
401 rp_len = sizeof(*rp) + (2 * count);
403 read_unlock(&hci_dev_list_lock);
405 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST,
413 static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev,
414 void *data, u16 data_len)
416 struct mgmt_rp_read_unconf_index_list *rp;
422 BT_DBG("sock %p", sk);
424 read_lock(&hci_dev_list_lock);
427 list_for_each_entry(d, &hci_dev_list, list) {
428 if (d->dev_type == HCI_PRIMARY &&
429 hci_dev_test_flag(d, HCI_UNCONFIGURED))
433 rp_len = sizeof(*rp) + (2 * count);
434 rp = kmalloc(rp_len, GFP_ATOMIC);
436 read_unlock(&hci_dev_list_lock);
441 list_for_each_entry(d, &hci_dev_list, list) {
442 if (hci_dev_test_flag(d, HCI_SETUP) ||
443 hci_dev_test_flag(d, HCI_CONFIG) ||
444 hci_dev_test_flag(d, HCI_USER_CHANNEL))
447 /* Devices marked as raw-only are neither configured
448 * nor unconfigured controllers.
450 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
453 if (d->dev_type == HCI_PRIMARY &&
454 hci_dev_test_flag(d, HCI_UNCONFIGURED)) {
455 rp->index[count++] = cpu_to_le16(d->id);
456 BT_DBG("Added hci%u", d->id);
460 rp->num_controllers = cpu_to_le16(count);
461 rp_len = sizeof(*rp) + (2 * count);
463 read_unlock(&hci_dev_list_lock);
465 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
466 MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len);
473 static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev,
474 void *data, u16 data_len)
476 struct mgmt_rp_read_ext_index_list *rp;
481 BT_DBG("sock %p", sk);
483 read_lock(&hci_dev_list_lock);
486 list_for_each_entry(d, &hci_dev_list, list) {
487 if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP)
491 rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC);
493 read_unlock(&hci_dev_list_lock);
498 list_for_each_entry(d, &hci_dev_list, list) {
499 if (hci_dev_test_flag(d, HCI_SETUP) ||
500 hci_dev_test_flag(d, HCI_CONFIG) ||
501 hci_dev_test_flag(d, HCI_USER_CHANNEL))
504 /* Devices marked as raw-only are neither configured
505 * nor unconfigured controllers.
507 if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks))
510 if (d->dev_type == HCI_PRIMARY) {
511 if (hci_dev_test_flag(d, HCI_UNCONFIGURED))
512 rp->entry[count].type = 0x01;
514 rp->entry[count].type = 0x00;
515 } else if (d->dev_type == HCI_AMP) {
516 rp->entry[count].type = 0x02;
521 rp->entry[count].bus = d->bus;
522 rp->entry[count++].index = cpu_to_le16(d->id);
523 BT_DBG("Added hci%u", d->id);
526 rp->num_controllers = cpu_to_le16(count);
528 read_unlock(&hci_dev_list_lock);
530 /* If this command is called at least once, then all the
531 * default index and unconfigured index events are disabled
532 * and from now on only extended index events are used.
534 hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS);
535 hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS);
536 hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
538 err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE,
539 MGMT_OP_READ_EXT_INDEX_LIST, 0, rp,
540 struct_size(rp, entry, count));
547 static bool is_configured(struct hci_dev *hdev)
549 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
550 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
553 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
554 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
555 !bacmp(&hdev->public_addr, BDADDR_ANY))
561 static __le32 get_missing_options(struct hci_dev *hdev)
565 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) &&
566 !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED))
567 options |= MGMT_OPTION_EXTERNAL_CONFIG;
569 if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) ||
570 test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) &&
571 !bacmp(&hdev->public_addr, BDADDR_ANY))
572 options |= MGMT_OPTION_PUBLIC_ADDRESS;
574 return cpu_to_le32(options);
577 static int new_options(struct hci_dev *hdev, struct sock *skip)
579 __le32 options = get_missing_options(hdev);
581 return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options,
582 sizeof(options), HCI_MGMT_OPTION_EVENTS, skip);
585 static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
587 __le32 options = get_missing_options(hdev);
589 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options,
593 static int read_config_info(struct sock *sk, struct hci_dev *hdev,
594 void *data, u16 data_len)
596 struct mgmt_rp_read_config_info rp;
599 BT_DBG("sock %p %s", sk, hdev->name);
603 memset(&rp, 0, sizeof(rp));
604 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
606 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
607 options |= MGMT_OPTION_EXTERNAL_CONFIG;
609 if (hdev->set_bdaddr)
610 options |= MGMT_OPTION_PUBLIC_ADDRESS;
612 rp.supported_options = cpu_to_le32(options);
613 rp.missing_options = get_missing_options(hdev);
615 hci_dev_unlock(hdev);
617 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0,
621 static u32 get_supported_phys(struct hci_dev *hdev)
623 u32 supported_phys = 0;
625 if (lmp_bredr_capable(hdev)) {
626 supported_phys |= MGMT_PHY_BR_1M_1SLOT;
628 if (hdev->features[0][0] & LMP_3SLOT)
629 supported_phys |= MGMT_PHY_BR_1M_3SLOT;
631 if (hdev->features[0][0] & LMP_5SLOT)
632 supported_phys |= MGMT_PHY_BR_1M_5SLOT;
634 if (lmp_edr_2m_capable(hdev)) {
635 supported_phys |= MGMT_PHY_EDR_2M_1SLOT;
637 if (lmp_edr_3slot_capable(hdev))
638 supported_phys |= MGMT_PHY_EDR_2M_3SLOT;
640 if (lmp_edr_5slot_capable(hdev))
641 supported_phys |= MGMT_PHY_EDR_2M_5SLOT;
643 if (lmp_edr_3m_capable(hdev)) {
644 supported_phys |= MGMT_PHY_EDR_3M_1SLOT;
646 if (lmp_edr_3slot_capable(hdev))
647 supported_phys |= MGMT_PHY_EDR_3M_3SLOT;
649 if (lmp_edr_5slot_capable(hdev))
650 supported_phys |= MGMT_PHY_EDR_3M_5SLOT;
655 if (lmp_le_capable(hdev)) {
656 supported_phys |= MGMT_PHY_LE_1M_TX;
657 supported_phys |= MGMT_PHY_LE_1M_RX;
659 if (hdev->le_features[1] & HCI_LE_PHY_2M) {
660 supported_phys |= MGMT_PHY_LE_2M_TX;
661 supported_phys |= MGMT_PHY_LE_2M_RX;
664 if (hdev->le_features[1] & HCI_LE_PHY_CODED) {
665 supported_phys |= MGMT_PHY_LE_CODED_TX;
666 supported_phys |= MGMT_PHY_LE_CODED_RX;
670 return supported_phys;
673 static u32 get_selected_phys(struct hci_dev *hdev)
675 u32 selected_phys = 0;
677 if (lmp_bredr_capable(hdev)) {
678 selected_phys |= MGMT_PHY_BR_1M_1SLOT;
680 if (hdev->pkt_type & (HCI_DM3 | HCI_DH3))
681 selected_phys |= MGMT_PHY_BR_1M_3SLOT;
683 if (hdev->pkt_type & (HCI_DM5 | HCI_DH5))
684 selected_phys |= MGMT_PHY_BR_1M_5SLOT;
686 if (lmp_edr_2m_capable(hdev)) {
687 if (!(hdev->pkt_type & HCI_2DH1))
688 selected_phys |= MGMT_PHY_EDR_2M_1SLOT;
690 if (lmp_edr_3slot_capable(hdev) &&
691 !(hdev->pkt_type & HCI_2DH3))
692 selected_phys |= MGMT_PHY_EDR_2M_3SLOT;
694 if (lmp_edr_5slot_capable(hdev) &&
695 !(hdev->pkt_type & HCI_2DH5))
696 selected_phys |= MGMT_PHY_EDR_2M_5SLOT;
698 if (lmp_edr_3m_capable(hdev)) {
699 if (!(hdev->pkt_type & HCI_3DH1))
700 selected_phys |= MGMT_PHY_EDR_3M_1SLOT;
702 if (lmp_edr_3slot_capable(hdev) &&
703 !(hdev->pkt_type & HCI_3DH3))
704 selected_phys |= MGMT_PHY_EDR_3M_3SLOT;
706 if (lmp_edr_5slot_capable(hdev) &&
707 !(hdev->pkt_type & HCI_3DH5))
708 selected_phys |= MGMT_PHY_EDR_3M_5SLOT;
713 if (lmp_le_capable(hdev)) {
714 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M)
715 selected_phys |= MGMT_PHY_LE_1M_TX;
717 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M)
718 selected_phys |= MGMT_PHY_LE_1M_RX;
720 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M)
721 selected_phys |= MGMT_PHY_LE_2M_TX;
723 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M)
724 selected_phys |= MGMT_PHY_LE_2M_RX;
726 if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED)
727 selected_phys |= MGMT_PHY_LE_CODED_TX;
729 if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED)
730 selected_phys |= MGMT_PHY_LE_CODED_RX;
733 return selected_phys;
736 static u32 get_configurable_phys(struct hci_dev *hdev)
738 return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT &
739 ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX);
742 static u32 get_supported_settings(struct hci_dev *hdev)
746 settings |= MGMT_SETTING_POWERED;
747 settings |= MGMT_SETTING_BONDABLE;
748 settings |= MGMT_SETTING_DEBUG_KEYS;
749 settings |= MGMT_SETTING_CONNECTABLE;
750 settings |= MGMT_SETTING_DISCOVERABLE;
752 if (lmp_bredr_capable(hdev)) {
753 if (hdev->hci_ver >= BLUETOOTH_VER_1_2)
754 settings |= MGMT_SETTING_FAST_CONNECTABLE;
755 settings |= MGMT_SETTING_BREDR;
756 settings |= MGMT_SETTING_LINK_SECURITY;
758 if (lmp_ssp_capable(hdev)) {
759 settings |= MGMT_SETTING_SSP;
760 settings |= MGMT_SETTING_HS;
763 if (lmp_sc_capable(hdev))
764 settings |= MGMT_SETTING_SECURE_CONN;
767 if (lmp_le_capable(hdev)) {
768 settings |= MGMT_SETTING_LE;
769 settings |= MGMT_SETTING_ADVERTISING;
770 settings |= MGMT_SETTING_SECURE_CONN;
771 settings |= MGMT_SETTING_PRIVACY;
772 settings |= MGMT_SETTING_STATIC_ADDRESS;
775 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
777 settings |= MGMT_SETTING_CONFIGURATION;
779 settings |= MGMT_SETTING_PHY_CONFIGURATION;
784 static u32 get_current_settings(struct hci_dev *hdev)
788 if (hdev_is_powered(hdev))
789 settings |= MGMT_SETTING_POWERED;
791 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE))
792 settings |= MGMT_SETTING_CONNECTABLE;
794 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
795 settings |= MGMT_SETTING_FAST_CONNECTABLE;
797 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
798 settings |= MGMT_SETTING_DISCOVERABLE;
800 if (hci_dev_test_flag(hdev, HCI_BONDABLE))
801 settings |= MGMT_SETTING_BONDABLE;
803 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
804 settings |= MGMT_SETTING_BREDR;
806 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
807 settings |= MGMT_SETTING_LE;
809 if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY))
810 settings |= MGMT_SETTING_LINK_SECURITY;
812 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
813 settings |= MGMT_SETTING_SSP;
815 if (hci_dev_test_flag(hdev, HCI_HS_ENABLED))
816 settings |= MGMT_SETTING_HS;
818 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
819 settings |= MGMT_SETTING_ADVERTISING;
821 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED))
822 settings |= MGMT_SETTING_SECURE_CONN;
824 if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS))
825 settings |= MGMT_SETTING_DEBUG_KEYS;
827 if (hci_dev_test_flag(hdev, HCI_PRIVACY))
828 settings |= MGMT_SETTING_PRIVACY;
830 /* The current setting for static address has two purposes. The
831 * first is to indicate if the static address will be used and
832 * the second is to indicate if it is actually set.
834 * This means if the static address is not configured, this flag
835 * will never be set. If the address is configured, then if the
836 * address is actually used decides if the flag is set or not.
838 * For single mode LE only controllers and dual-mode controllers
839 * with BR/EDR disabled, the existence of the static address will
842 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
843 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
844 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
845 if (bacmp(&hdev->static_addr, BDADDR_ANY))
846 settings |= MGMT_SETTING_STATIC_ADDRESS;
852 static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev)
854 return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev);
857 static struct mgmt_pending_cmd *pending_find_data(u16 opcode,
858 struct hci_dev *hdev,
861 return mgmt_pending_find_data(HCI_CHANNEL_CONTROL, opcode, hdev, data);
864 u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev)
866 struct mgmt_pending_cmd *cmd;
868 /* If there's a pending mgmt command the flags will not yet have
869 * their final values, so check for this first.
871 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
873 struct mgmt_mode *cp = cmd->param;
875 return LE_AD_GENERAL;
876 else if (cp->val == 0x02)
877 return LE_AD_LIMITED;
879 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
880 return LE_AD_LIMITED;
881 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
882 return LE_AD_GENERAL;
888 bool mgmt_get_connectable(struct hci_dev *hdev)
890 struct mgmt_pending_cmd *cmd;
892 /* If there's a pending mgmt command the flag will not yet have
893 * it's final value, so check for this first.
895 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
897 struct mgmt_mode *cp = cmd->param;
902 return hci_dev_test_flag(hdev, HCI_CONNECTABLE);
905 static void service_cache_off(struct work_struct *work)
907 struct hci_dev *hdev = container_of(work, struct hci_dev,
909 struct hci_request req;
911 if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE))
914 hci_req_init(&req, hdev);
918 __hci_req_update_eir(&req);
919 __hci_req_update_class(&req);
921 hci_dev_unlock(hdev);
923 hci_req_run(&req, NULL);
926 static void rpa_expired(struct work_struct *work)
928 struct hci_dev *hdev = container_of(work, struct hci_dev,
930 struct hci_request req;
934 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
936 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING))
939 /* The generation of a new RPA and programming it into the
940 * controller happens in the hci_req_enable_advertising()
943 hci_req_init(&req, hdev);
944 if (ext_adv_capable(hdev))
945 __hci_req_start_ext_adv(&req, hdev->cur_adv_instance);
947 __hci_req_enable_advertising(&req);
948 hci_req_run(&req, NULL);
951 static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)
953 if (hci_dev_test_and_set_flag(hdev, HCI_MGMT))
956 INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);
957 INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired);
959 /* Non-mgmt controlled devices get this bit set
960 * implicitly so that pairing works for them, however
961 * for mgmt we require user-space to explicitly enable
964 hci_dev_clear_flag(hdev, HCI_BONDABLE);
967 static int read_controller_info(struct sock *sk, struct hci_dev *hdev,
968 void *data, u16 data_len)
970 struct mgmt_rp_read_info rp;
972 BT_DBG("sock %p %s", sk, hdev->name);
976 memset(&rp, 0, sizeof(rp));
978 bacpy(&rp.bdaddr, &hdev->bdaddr);
980 rp.version = hdev->hci_ver;
981 rp.manufacturer = cpu_to_le16(hdev->manufacturer);
983 rp.supported_settings = cpu_to_le32(get_supported_settings(hdev));
984 rp.current_settings = cpu_to_le32(get_current_settings(hdev));
986 memcpy(rp.dev_class, hdev->dev_class, 3);
988 memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name));
989 memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name));
991 hci_dev_unlock(hdev);
993 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp,
997 static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir)
1002 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1003 eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV,
1004 hdev->dev_class, 3);
1006 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1007 eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE,
1010 name_len = strlen(hdev->dev_name);
1011 eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE,
1012 hdev->dev_name, name_len);
1014 name_len = strlen(hdev->short_name);
1015 eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT,
1016 hdev->short_name, name_len);
1021 static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev,
1022 void *data, u16 data_len)
1025 struct mgmt_rp_read_ext_info *rp = (void *)buf;
1028 BT_DBG("sock %p %s", sk, hdev->name);
1030 memset(&buf, 0, sizeof(buf));
1034 bacpy(&rp->bdaddr, &hdev->bdaddr);
1036 rp->version = hdev->hci_ver;
1037 rp->manufacturer = cpu_to_le16(hdev->manufacturer);
1039 rp->supported_settings = cpu_to_le32(get_supported_settings(hdev));
1040 rp->current_settings = cpu_to_le32(get_current_settings(hdev));
1043 eir_len = append_eir_data_to_buf(hdev, rp->eir);
1044 rp->eir_len = cpu_to_le16(eir_len);
1046 hci_dev_unlock(hdev);
1048 /* If this command is called at least once, then the events
1049 * for class of device and local name changes are disabled
1050 * and only the new extended controller information event
1053 hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS);
1054 hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS);
1055 hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS);
1057 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp,
1058 sizeof(*rp) + eir_len);
1061 static int ext_info_changed(struct hci_dev *hdev, struct sock *skip)
1064 struct mgmt_ev_ext_info_changed *ev = (void *)buf;
1067 memset(buf, 0, sizeof(buf));
1069 eir_len = append_eir_data_to_buf(hdev, ev->eir);
1070 ev->eir_len = cpu_to_le16(eir_len);
1072 return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev,
1073 sizeof(*ev) + eir_len,
1074 HCI_MGMT_EXT_INFO_EVENTS, skip);
1077 static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev)
1079 __le32 settings = cpu_to_le32(get_current_settings(hdev));
1081 return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings,
1085 static void clean_up_hci_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1087 BT_DBG("%s status 0x%02x", hdev->name, status);
1089 if (hci_conn_count(hdev) == 0) {
1090 cancel_delayed_work(&hdev->power_off);
1091 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1095 void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance)
1097 struct mgmt_ev_advertising_added ev;
1099 ev.instance = instance;
1101 mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk);
1104 void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
1107 struct mgmt_ev_advertising_removed ev;
1109 ev.instance = instance;
1111 mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk);
1114 static void cancel_adv_timeout(struct hci_dev *hdev)
1116 if (hdev->adv_instance_timeout) {
1117 hdev->adv_instance_timeout = 0;
1118 cancel_delayed_work(&hdev->adv_instance_expire);
1122 static int clean_up_hci_state(struct hci_dev *hdev)
1124 struct hci_request req;
1125 struct hci_conn *conn;
1126 bool discov_stopped;
1129 hci_req_init(&req, hdev);
1131 if (test_bit(HCI_ISCAN, &hdev->flags) ||
1132 test_bit(HCI_PSCAN, &hdev->flags)) {
1134 hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1137 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, false);
1139 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1140 __hci_req_disable_advertising(&req);
1142 discov_stopped = hci_req_stop_discovery(&req);
1144 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1145 /* 0x15 == Terminated due to Power Off */
1146 __hci_abort_conn(&req, conn, 0x15);
1149 err = hci_req_run(&req, clean_up_hci_complete);
1150 if (!err && discov_stopped)
1151 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
1156 static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data,
1159 struct mgmt_mode *cp = data;
1160 struct mgmt_pending_cmd *cmd;
1163 BT_DBG("request for %s", hdev->name);
1165 if (cp->val != 0x00 && cp->val != 0x01)
1166 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1167 MGMT_STATUS_INVALID_PARAMS);
1171 if (pending_find(MGMT_OP_SET_POWERED, hdev)) {
1172 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED,
1177 if (!!cp->val == hdev_is_powered(hdev)) {
1178 err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev);
1182 cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len);
1189 queue_work(hdev->req_workqueue, &hdev->power_on);
1192 /* Disconnect connections, stop scans, etc */
1193 err = clean_up_hci_state(hdev);
1195 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1196 HCI_POWER_OFF_TIMEOUT);
1198 /* ENODATA means there were no HCI commands queued */
1199 if (err == -ENODATA) {
1200 cancel_delayed_work(&hdev->power_off);
1201 queue_work(hdev->req_workqueue, &hdev->power_off.work);
1207 hci_dev_unlock(hdev);
1211 static int new_settings(struct hci_dev *hdev, struct sock *skip)
1213 __le32 ev = cpu_to_le32(get_current_settings(hdev));
1215 return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev,
1216 sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip);
1219 int mgmt_new_settings(struct hci_dev *hdev)
1221 return new_settings(hdev, NULL);
1226 struct hci_dev *hdev;
1230 static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data)
1232 struct cmd_lookup *match = data;
1234 send_settings_rsp(cmd->sk, cmd->opcode, match->hdev);
1236 list_del(&cmd->list);
1238 if (match->sk == NULL) {
1239 match->sk = cmd->sk;
1240 sock_hold(match->sk);
1243 mgmt_pending_free(cmd);
1246 static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data)
1250 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status);
1251 mgmt_pending_remove(cmd);
1254 static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data)
1256 if (cmd->cmd_complete) {
1259 cmd->cmd_complete(cmd, *status);
1260 mgmt_pending_remove(cmd);
1265 cmd_status_rsp(cmd, data);
/* Generic cmd_complete handler: echo the original command parameters
 * back to the issuing socket in full.
 */
1268 static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1270 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1271 cmd->param, cmd->param_len);
/* cmd_complete handler for address-based commands: reply with only the
 * leading struct mgmt_addr_info portion of the stored parameters.
 */
1274 static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
1276 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
1277 cmd->param, sizeof(struct mgmt_addr_info));
/* Map BR/EDR availability to an MGMT status: NOT_SUPPORTED when the
 * controller lacks BR/EDR, REJECTED when BR/EDR is administratively
 * disabled, SUCCESS otherwise.
 */
1280 static u8 mgmt_bredr_support(struct hci_dev *hdev)
1282 if (!lmp_bredr_capable(hdev))
1283 return MGMT_STATUS_NOT_SUPPORTED;
1284 else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1285 return MGMT_STATUS_REJECTED;
1287 return MGMT_STATUS_SUCCESS;
/* Map LE availability to an MGMT status: NOT_SUPPORTED when the
 * controller lacks LE, REJECTED when LE is administratively disabled,
 * SUCCESS otherwise.
 */
1290 static u8 mgmt_le_support(struct hci_dev *hdev)
1292 if (!lmp_le_capable(hdev))
1293 return MGMT_STATUS_NOT_SUPPORTED;
1294 else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1295 return MGMT_STATUS_REJECTED;
1297 return MGMT_STATUS_SUCCESS;
/* Completion hook for the discoverable update request. On HCI failure,
 * report the mapped error to the issuing socket and clear the
 * limited-discoverable flag; on success, (re)arm the discoverable
 * timeout (if any) and confirm the new settings to the issuer.
 */
1300 void mgmt_set_discoverable_complete(struct hci_dev *hdev, u8 status)
1302 struct mgmt_pending_cmd *cmd;
1304 BT_DBG("status 0x%02x", status);
1308 cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev);
1313 u8 mgmt_err = mgmt_status(status);
1314 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1315 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1319 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1320 hdev->discov_timeout > 0) {
/* discov_timeout is stored in seconds; convert for the workqueue. */
1321 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1322 queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to);
1325 send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1326 new_settings(hdev, cmd->sk);
1329 mgmt_pending_remove(cmd);
1332 hci_dev_unlock(hdev);
/* MGMT_OP_SET_DISCOVERABLE handler. val: 0x00 off, 0x01 general,
 * 0x02 limited discoverable. Disabling requires timeout == 0; limited
 * mode requires timeout > 0. Handles powered-off and no-op cases with
 * flag updates only; otherwise stores the new timeout, updates the
 * discoverable flags and queues the discoverable_update work.
 */
1335 static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
1338 struct mgmt_cp_set_discoverable *cp = data;
1339 struct mgmt_pending_cmd *cmd;
1343 BT_DBG("request for %s", hdev->name);
/* Reject if neither transport is enabled. */
1345 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1346 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1347 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1348 MGMT_STATUS_REJECTED);
1350 if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
1351 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1352 MGMT_STATUS_INVALID_PARAMS);
1354 timeout = __le16_to_cpu(cp->timeout);
1356 /* Disabling discoverable requires that no timeout is set,
1357 * and enabling limited discoverable requires a timeout.
1359 if ((cp->val == 0x00 && timeout > 0) ||
1360 (cp->val == 0x02 && timeout == 0))
1361 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1362 MGMT_STATUS_INVALID_PARAMS);
/* A timeout only makes sense on a powered controller. */
1366 if (!hdev_is_powered(hdev) && timeout > 0) {
1367 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1368 MGMT_STATUS_NOT_POWERED);
/* Only one discoverable/connectable change may be in flight. */
1372 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1373 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1374 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
/* Discoverable implies connectable. */
1379 if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) {
1380 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
1381 MGMT_STATUS_REJECTED);
/* Powered off: just toggle the flag, no HCI traffic needed. */
1385 if (!hdev_is_powered(hdev)) {
1386 bool changed = false;
1388 /* Setting limited discoverable when powered off is
1389 * not a valid operation since it requires a timeout
1390 * and so no need to check HCI_LIMITED_DISCOVERABLE.
1392 if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) {
1393 hci_dev_change_flag(hdev, HCI_DISCOVERABLE);
1397 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1402 err = new_settings(hdev, sk);
1407 /* If the current mode is the same, then just update the timeout
1408 * value with the new value. And if only the timeout gets updated,
1409 * then no need for any HCI transactions.
1411 if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1412 (cp->val == 0x02) == hci_dev_test_flag(hdev,
1413 HCI_LIMITED_DISCOVERABLE)) {
1414 cancel_delayed_work(&hdev->discov_off);
1415 hdev->discov_timeout = timeout;
1417 if (cp->val && hdev->discov_timeout > 0) {
1418 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
1419 queue_delayed_work(hdev->req_workqueue,
1420 &hdev->discov_off, to);
1423 err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev);
1427 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len);
1433 /* Cancel any potential discoverable timeout that might be
1434 * still active and store new timeout value. The arming of
1435 * the timeout happens in the complete handler.
1437 cancel_delayed_work(&hdev->discov_off);
1438 hdev->discov_timeout = timeout;
1441 hci_dev_set_flag(hdev, HCI_DISCOVERABLE);
1443 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1445 /* Limited discoverable mode */
1446 if (cp->val == 0x02)
1447 hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1449 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
/* The actual HCI commands run from the discoverable_update work. */
1451 queue_work(hdev->req_workqueue, &hdev->discoverable_update);
1455 hci_dev_unlock(hdev);
/* Completion hook for the connectable update request: on HCI failure
 * report the mapped error to the issuing socket; on success confirm
 * the settings to the issuer and broadcast new settings to the rest.
 */
1459 void mgmt_set_connectable_complete(struct hci_dev *hdev, u8 status)
1461 struct mgmt_pending_cmd *cmd;
1463 BT_DBG("status 0x%02x", status);
1467 cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev);
1472 u8 mgmt_err = mgmt_status(status);
1473 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
1477 send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev);
1478 new_settings(hdev, cmd->sk);
1481 mgmt_pending_remove(cmd);
1484 hci_dev_unlock(hdev);
/* Apply a connectable-mode change at the flag level only (no pending
 * command; used e.g. when powered off). Disabling connectable also
 * clears HCI_DISCOVERABLE. Replies to @sk and, if anything changed,
 * refreshes scan state and broadcasts the new settings.
 */
1487 static int set_connectable_update_settings(struct hci_dev *hdev,
1488 struct sock *sk, u8 val)
1490 bool changed = false;
1493 if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE))
1497 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
1499 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1500 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1503 err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev);
1508 hci_req_update_scan(hdev);
1509 hci_update_background_scan(hdev);
1510 return new_settings(hdev, sk);
/* MGMT_OP_SET_CONNECTABLE handler. val: 0x00 off, 0x01 on. When the
 * controller is powered off only the flags are updated; otherwise the
 * flags are changed and the connectable_update work is queued to issue
 * the required HCI commands. Disabling also drops discoverable state.
 */
1516 static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data,
1519 struct mgmt_mode *cp = data;
1520 struct mgmt_pending_cmd *cmd;
1523 BT_DBG("request for %s", hdev->name);
1525 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
1526 !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1527 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1528 MGMT_STATUS_REJECTED);
1530 if (cp->val != 0x00 && cp->val != 0x01)
1531 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1532 MGMT_STATUS_INVALID_PARAMS);
1536 if (!hdev_is_powered(hdev)) {
1537 err = set_connectable_update_settings(hdev, sk, cp->val);
/* Only one discoverable/connectable change may be in flight. */
1541 if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) ||
1542 pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) {
1543 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE,
1548 cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len);
1555 hci_dev_set_flag(hdev, HCI_CONNECTABLE);
/* Turning connectable off cancels any discoverable timeout too. */
1557 if (hdev->discov_timeout > 0)
1558 cancel_delayed_work(&hdev->discov_off);
1560 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
1561 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
1562 hci_dev_clear_flag(hdev, HCI_CONNECTABLE);
1565 queue_work(hdev->req_workqueue, &hdev->connectable_update);
1569 hci_dev_unlock(hdev);
/* MGMT_OP_SET_BONDABLE handler: toggle the HCI_BONDABLE flag (no HCI
 * traffic needed) and broadcast new settings if it changed. May also
 * queue a discoverable update in limited-privacy mode (see below).
 */
1573 static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data,
1576 struct mgmt_mode *cp = data;
1580 BT_DBG("request for %s", hdev->name);
1582 if (cp->val != 0x00 && cp->val != 0x01)
1583 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE,
1584 MGMT_STATUS_INVALID_PARAMS);
1589 changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE);
1591 changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE);
1593 err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev);
1598 /* In limited privacy mode the change of bondable mode
1599 * may affect the local advertising address.
1601 if (hdev_is_powered(hdev) &&
1602 hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
1603 hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
1604 hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1605 queue_work(hdev->req_workqueue,
1606 &hdev->discoverable_update);
1608 err = new_settings(hdev, sk);
1612 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LINK_SECURITY handler (BR/EDR authentication). Powered
 * off: just flip the HCI_LINK_SECURITY flag. Powered on: if the HCI
 * auth state already matches, reply directly; otherwise queue a
 * pending command and send HCI_OP_WRITE_AUTH_ENABLE.
 */
1616 static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data,
1619 struct mgmt_mode *cp = data;
1620 struct mgmt_pending_cmd *cmd;
1624 BT_DBG("request for %s", hdev->name);
1626 status = mgmt_bredr_support(hdev);
1628 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1631 if (cp->val != 0x00 && cp->val != 0x01)
1632 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
1633 MGMT_STATUS_INVALID_PARAMS);
1637 if (!hdev_is_powered(hdev)) {
1638 bool changed = false;
1640 if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) {
1641 hci_dev_change_flag(hdev, HCI_LINK_SECURITY);
1645 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1650 err = new_settings(hdev, sk);
1655 if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) {
1656 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY,
/* Already in the requested auth state: no HCI command needed. */
1663 if (test_bit(HCI_AUTH, &hdev->flags) == val) {
1664 err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev);
1668 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len);
1674 err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val);
1676 mgmt_pending_remove(cmd);
1681 hci_dev_unlock(hdev);
/* MGMT_OP_SET_SSP handler (Secure Simple Pairing). Powered off: flag
 * update only (disabling SSP also disables HS). Powered on: queue a
 * pending command and send HCI_OP_WRITE_SSP_MODE, first turning off
 * SSP debug mode if it was active and SSP is being disabled.
 */
1685 static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1687 struct mgmt_mode *cp = data;
1688 struct mgmt_pending_cmd *cmd;
1692 BT_DBG("request for %s", hdev->name);
1694 status = mgmt_bredr_support(hdev);
1696 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status);
1698 if (!lmp_ssp_capable(hdev))
1699 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1700 MGMT_STATUS_NOT_SUPPORTED);
1702 if (cp->val != 0x00 && cp->val != 0x01)
1703 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1704 MGMT_STATUS_INVALID_PARAMS);
1708 if (!hdev_is_powered(hdev)) {
1712 changed = !hci_dev_test_and_set_flag(hdev,
1715 changed = hci_dev_test_and_clear_flag(hdev,
1718 changed = hci_dev_test_and_clear_flag(hdev,
/* High Speed depends on SSP; disabling SSP disables HS too. */
1721 hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
1724 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1729 err = new_settings(hdev, sk);
1734 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1735 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP,
1740 if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
1741 err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev);
1745 cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len);
/* cp->val is 0x00 here, so this also clears SSP debug mode. */
1751 if (!cp->val && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
1752 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
1753 sizeof(cp->val), &cp->val);
1755 err = hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &cp->val);
1757 mgmt_pending_remove(cmd);
1762 hci_dev_unlock(hdev);
/* MGMT_OP_SET_HS handler (High Speed / AMP). Pure flag change — no HCI
 * traffic. Requires SSP to be supported and enabled; disabling HS while
 * powered is rejected.
 */
1766 static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1768 struct mgmt_mode *cp = data;
1773 BT_DBG("request for %s", hdev->name);
1775 status = mgmt_bredr_support(hdev);
1777 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status);
1779 if (!lmp_ssp_capable(hdev))
1780 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1781 MGMT_STATUS_NOT_SUPPORTED);
1783 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
1784 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1785 MGMT_STATUS_REJECTED);
1787 if (cp->val != 0x00 && cp->val != 0x01)
1788 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1789 MGMT_STATUS_INVALID_PARAMS);
/* An in-flight SSP change could race with the HS flag; reject. */
1793 if (pending_find(MGMT_OP_SET_SSP, hdev)) {
1794 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1800 changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED);
1802 if (hdev_is_powered(hdev)) {
1803 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS,
1804 MGMT_STATUS_REJECTED);
1808 changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED);
1811 err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev);
1816 err = new_settings(hdev, sk);
1819 hci_dev_unlock(hdev);
/* HCI request completion for SET_LE. On error, fail all pending SET_LE
 * commands; on success, complete them, broadcast new settings and — if
 * LE ended up enabled — refresh the default advertising/scan-response
 * data and the background scan.
 */
1823 static void le_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1825 struct cmd_lookup match = { NULL, hdev };
1830 u8 mgmt_err = mgmt_status(status);
1832 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp,
1837 mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match);
/* match.sk was filled (and held) by settings_rsp; skip it here. */
1839 new_settings(hdev, match.sk);
1844 /* Make sure the controller has a good default for
1845 * advertising data. Restrict the update to when LE
1846 * has actually been enabled. During power on, the
1847 * update in powered_update_hci will take care of it.
1849 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1850 struct hci_request req;
1851 hci_req_init(&req, hdev);
1852 if (ext_adv_capable(hdev)) {
1855 err = __hci_req_setup_ext_adv_instance(&req, 0x00);
1857 __hci_req_update_scan_rsp_data(&req, 0x00);
1859 __hci_req_update_adv_data(&req, 0x00);
1860 __hci_req_update_scan_rsp_data(&req, 0x00);
1862 hci_req_run(&req, NULL);
1863 hci_update_background_scan(hdev);
1867 hci_dev_unlock(hdev);
/* MGMT_OP_SET_LE handler. LE-only controllers cannot switch LE off.
 * When powered off or when the host-LE state already matches, only the
 * flags are updated; otherwise a pending command is queued and an HCI
 * request (disable advertising if needed + Write LE Host Supported)
 * is issued, completing via le_enable_complete().
 */
1870 static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
1872 struct mgmt_mode *cp = data;
1873 struct hci_cp_write_le_host_supported hci_cp;
1874 struct mgmt_pending_cmd *cmd;
1875 struct hci_request req;
1879 BT_DBG("request for %s", hdev->name);
1881 if (!lmp_le_capable(hdev))
1882 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1883 MGMT_STATUS_NOT_SUPPORTED);
1885 if (cp->val != 0x00 && cp->val != 0x01)
1886 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1887 MGMT_STATUS_INVALID_PARAMS);
1889 /* Bluetooth single mode LE only controllers or dual-mode
1890 * controllers configured as LE only devices, do not allow
1891 * switching LE off. These have either LE enabled explicitly
1892 * or BR/EDR has been previously switched off.
1894 * When trying to enable an already enabled LE, then gracefully
1895 * send a positive response. Trying to disable it however will
1896 * result into rejection.
1898 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1899 if (cp->val == 0x01)
1900 return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1902 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1903 MGMT_STATUS_REJECTED);
1909 enabled = lmp_host_le_capable(hdev);
/* Disabling LE drops all advertising instances. */
1912 hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true);
1914 if (!hdev_is_powered(hdev) || val == enabled) {
1915 bool changed = false;
1917 if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1918 hci_dev_change_flag(hdev, HCI_LE_ENABLED);
1922 if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
1923 hci_dev_clear_flag(hdev, HCI_ADVERTISING);
1927 err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev);
1932 err = new_settings(hdev, sk);
1937 if (pending_find(MGMT_OP_SET_LE, hdev) ||
1938 pending_find(MGMT_OP_SET_ADVERTISING, hdev)) {
1939 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE,
1944 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len);
1950 hci_req_init(&req, hdev);
1952 memset(&hci_cp, 0, sizeof(hci_cp));
/* Simultaneous LE+BR/EDR is deprecated; never advertise it. */
1956 hci_cp.simul = 0x00;
1958 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
1959 __hci_req_disable_advertising(&req);
1961 if (ext_adv_capable(hdev))
1962 __hci_req_clear_ext_adv_sets(&req);
1965 hci_req_add(&req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(hci_cp),
1968 err = hci_req_run(&req, le_enable_complete);
1970 mgmt_pending_remove(cmd);
1973 hci_dev_unlock(hdev);
1977 /* This is a helper function to test for pending mgmt commands that can
1978 * cause CoD or EIR HCI commands. We can only allow one such pending
1979 * mgmt command at a time since otherwise we cannot easily track what
1980 * the current values are, will be, and based on that calculate if a new
1981 * HCI command needs to be sent and if yes with what value.
1983 static bool pending_eir_or_class(struct hci_dev *hdev)
1985 struct mgmt_pending_cmd *cmd;
/* Scan the pending list for any opcode that may touch CoD/EIR. */
1987 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
1988 switch (cmd->opcode) {
1989 case MGMT_OP_ADD_UUID:
1990 case MGMT_OP_REMOVE_UUID:
1991 case MGMT_OP_SET_DEV_CLASS:
1992 case MGMT_OP_SET_POWERED:
/* Bluetooth Base UUID in little-endian byte order; 16/32-bit UUIDs
 * share these trailing bytes (compared in get_uuid_size()).
 */
2000 static const u8 bluetooth_base_uuid[] = {
2001 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80,
2002 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* Return the effective bit size of a 128-bit UUID: UUIDs not derived
 * from the Bluetooth Base UUID are full 128-bit; otherwise the size is
 * decided from the 32-bit value at offset 12.
 */
2005 static u8 get_uuid_size(const u8 *uuid)
2009 if (memcmp(uuid, bluetooth_base_uuid, 12))
2012 val = get_unaligned_le32(&uuid[12]);
/* Complete a pending class/EIR-affecting command (@mgmt_op), replying
 * with the mapped status and the current 3-byte device class.
 */
2019 static void mgmt_class_complete(struct hci_dev *hdev, u16 mgmt_op, u8 status)
2021 struct mgmt_pending_cmd *cmd;
2025 cmd = pending_find(mgmt_op, hdev);
2029 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
2030 mgmt_status(status), hdev->dev_class, 3);
2032 mgmt_pending_remove(cmd);
2035 hci_dev_unlock(hdev);
/* HCI request completion for ADD_UUID: finish the pending command. */
2038 static void add_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2040 BT_DBG("status 0x%02x", status);
2042 mgmt_class_complete(hdev, MGMT_OP_ADD_UUID, status);
/* MGMT_OP_ADD_UUID handler: append the UUID to hdev->uuids and issue
 * class + EIR updates. -ENODATA from hci_req_run() means no HCI command
 * was queued, so the request completes immediately; otherwise a pending
 * command is added and finished by add_uuid_complete().
 */
2045 static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
2047 struct mgmt_cp_add_uuid *cp = data;
2048 struct mgmt_pending_cmd *cmd;
2049 struct hci_request req;
2050 struct bt_uuid *uuid;
2053 BT_DBG("request for %s", hdev->name);
/* Only one CoD/EIR-affecting command may be pending at a time. */
2057 if (pending_eir_or_class(hdev)) {
2058 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID,
2063 uuid = kmalloc(sizeof(*uuid), GFP_KERNEL);
2069 memcpy(uuid->uuid, cp->uuid, 16);
2070 uuid->svc_hint = cp->svc_hint;
2071 uuid->size = get_uuid_size(cp->uuid);
2073 list_add_tail(&uuid->list, &hdev->uuids);
2075 hci_req_init(&req, hdev);
2077 __hci_req_update_class(&req);
2078 __hci_req_update_eir(&req);
2080 err = hci_req_run(&req, add_uuid_complete);
2082 if (err != -ENODATA)
/* No HCI traffic needed: reply with the current device class now. */
2085 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_UUID, 0,
2086 hdev->dev_class, 3);
2090 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_UUID, hdev, data, len);
2099 hci_dev_unlock(hdev);
/* Arm the service-cache machinery (powered controllers only): sets
 * HCI_SERVICE_CACHE and schedules the delayed flush work. Returns
 * whether the cache state is in effect.
 */
2103 static bool enable_service_cache(struct hci_dev *hdev)
2105 if (!hdev_is_powered(hdev))
2108 if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) {
2109 queue_delayed_work(hdev->workqueue, &hdev->service_cache,
/* HCI request completion for REMOVE_UUID: finish the pending command. */
2117 static void remove_uuid_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2119 BT_DBG("status 0x%02x", status);
2121 mgmt_class_complete(hdev, MGMT_OP_REMOVE_UUID, status);
/* MGMT_OP_REMOVE_UUID handler. An all-zero UUID clears the whole list
 * (possibly deferring via the service cache); otherwise every matching
 * entry is removed. Class + EIR updates are then issued, completing
 * immediately on -ENODATA or via remove_uuid_complete().
 */
2124 static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data,
2127 struct mgmt_cp_remove_uuid *cp = data;
2128 struct mgmt_pending_cmd *cmd;
2129 struct bt_uuid *match, *tmp;
2130 u8 bt_uuid_any[] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
2131 struct hci_request req;
2134 BT_DBG("request for %s", hdev->name);
2138 if (pending_eir_or_class(hdev)) {
2139 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
/* Wildcard (all-zero) UUID: drop every registered UUID. */
2144 if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) {
2145 hci_uuids_clear(hdev);
2147 if (enable_service_cache(hdev)) {
2148 err = mgmt_cmd_complete(sk, hdev->id,
2149 MGMT_OP_REMOVE_UUID,
2150 0, hdev->dev_class, 3);
2159 list_for_each_entry_safe(match, tmp, &hdev->uuids, list) {
2160 if (memcmp(match->uuid, cp->uuid, 16) != 0)
2163 list_del(&match->list);
/* Nothing matched: the UUID was never registered. */
2169 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID,
2170 MGMT_STATUS_INVALID_PARAMS);
2175 hci_req_init(&req, hdev);
2177 __hci_req_update_class(&req);
2178 __hci_req_update_eir(&req);
2180 err = hci_req_run(&req, remove_uuid_complete);
2182 if (err != -ENODATA)
2185 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_UUID, 0,
2186 hdev->dev_class, 3);
2190 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_UUID, hdev, data, len);
2199 hci_dev_unlock(hdev);
/* HCI request completion for SET_DEV_CLASS: finish the pending command. */
2203 static void set_class_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2205 BT_DBG("status 0x%02x", status);
2207 mgmt_class_complete(hdev, MGMT_OP_SET_DEV_CLASS, status);
/* MGMT_OP_SET_DEV_CLASS handler: store the new major/minor class and,
 * when powered, issue class (and, if the service cache was active, EIR)
 * updates. Completes immediately when powered off or when hci_req_run()
 * returns -ENODATA; otherwise via set_class_complete().
 */
2210 static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data,
2213 struct mgmt_cp_set_dev_class *cp = data;
2214 struct mgmt_pending_cmd *cmd;
2215 struct hci_request req;
2218 BT_DBG("request for %s", hdev->name);
2220 if (!lmp_bredr_capable(hdev))
2221 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2222 MGMT_STATUS_NOT_SUPPORTED);
2226 if (pending_eir_or_class(hdev)) {
2227 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
/* Low bits of minor and high bits of major are reserved. */
2232 if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) {
2233 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS,
2234 MGMT_STATUS_INVALID_PARAMS);
2238 hdev->major_class = cp->major;
2239 hdev->minor_class = cp->minor;
2241 if (!hdev_is_powered(hdev)) {
2242 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2243 hdev->dev_class, 3);
2247 hci_req_init(&req, hdev);
2249 if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) {
/* Drop the lock while waiting for the cache flush work to stop. */
2250 hci_dev_unlock(hdev);
2251 cancel_delayed_work_sync(&hdev->service_cache);
2253 __hci_req_update_eir(&req);
2256 __hci_req_update_class(&req);
2258 err = hci_req_run(&req, set_class_complete);
2260 if (err != -ENODATA)
2263 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0,
2264 hdev->dev_class, 3);
2268 cmd = mgmt_pending_add(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len);
2277 hci_dev_unlock(hdev);
/* MGMT_OP_LOAD_LINK_KEYS handler: validate the variable-length key list
 * (count bound, exact payload length, address/key-type ranges), replace
 * the stored BR/EDR link keys, update HCI_KEEP_DEBUG_KEYS, and skip
 * blocked keys and debug-combination keys while loading.
 */
2281 static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data,
2284 struct mgmt_cp_load_link_keys *cp = data;
/* Upper bound so expected_len below cannot overflow u16. */
2285 const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
2286 sizeof(struct mgmt_link_key_info));
2287 u16 key_count, expected_len;
2291 BT_DBG("request for %s", hdev->name);
2293 if (!lmp_bredr_capable(hdev))
2294 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2295 MGMT_STATUS_NOT_SUPPORTED);
2297 key_count = __le16_to_cpu(cp->key_count);
2298 if (key_count > max_key_count) {
2299 bt_dev_err(hdev, "load_link_keys: too big key_count value %u",
2301 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2302 MGMT_STATUS_INVALID_PARAMS);
/* The payload must be exactly header + key_count entries. */
2305 expected_len = struct_size(cp, keys, key_count);
2306 if (expected_len != len) {
2307 bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes",
2309 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2310 MGMT_STATUS_INVALID_PARAMS);
2313 if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01)
2314 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS,
2315 MGMT_STATUS_INVALID_PARAMS);
2317 BT_DBG("%s debug_keys %u key_count %u", hdev->name, cp->debug_keys,
/* First pass: validate every entry before touching stored keys. */
2320 for (i = 0; i < key_count; i++) {
2321 struct mgmt_link_key_info *key = &cp->keys[i];
2323 if (key->addr.type != BDADDR_BREDR || key->type > 0x08)
2324 return mgmt_cmd_status(sk, hdev->id,
2325 MGMT_OP_LOAD_LINK_KEYS,
2326 MGMT_STATUS_INVALID_PARAMS);
/* Loading replaces the whole key store. */
2331 hci_link_keys_clear(hdev);
2334 changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
2336 changed = hci_dev_test_and_clear_flag(hdev,
2337 HCI_KEEP_DEBUG_KEYS);
2340 new_settings(hdev, NULL);
2342 for (i = 0; i < key_count; i++) {
2343 struct mgmt_link_key_info *key = &cp->keys[i];
2345 if (hci_is_blocked_key(hdev,
2346 HCI_BLOCKED_KEY_TYPE_LINKKEY,
2348 bt_dev_warn(hdev, "Skipping blocked link key for %pMR",
2353 /* Always ignore debug keys and require a new pairing if
2354 * the user wants to use them.
2356 if (key->type == HCI_LK_DEBUG_COMBINATION)
2359 hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val,
2360 key->type, key->pin_len, NULL);
2363 mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0);
2365 hci_dev_unlock(hdev);
/* Emit MGMT_EV_DEVICE_UNPAIRED for @bdaddr/@addr_type to all sockets
 * except @skip_sk (typically the one that requested the unpair).
 */
2370 static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr,
2371 u8 addr_type, struct sock *skip_sk)
2373 struct mgmt_ev_device_unpaired ev;
2375 bacpy(&ev.addr.bdaddr, bdaddr);
2376 ev.addr.type = addr_type;
2378 return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev),
/* MGMT_OP_UNPAIR_DEVICE handler: remove stored keys (link key for
 * BR/EDR; SMP pairing data for LE) and optionally terminate an active
 * connection. If a disconnect is needed, a pending command is queued
 * and completed when the link goes down; otherwise the reply and
 * DEVICE_UNPAIRED event are sent immediately.
 */
2382 static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2385 struct mgmt_cp_unpair_device *cp = data;
2386 struct mgmt_rp_unpair_device rp;
2387 struct hci_conn_params *params;
2388 struct mgmt_pending_cmd *cmd;
2389 struct hci_conn *conn;
2393 memset(&rp, 0, sizeof(rp));
2394 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2395 rp.addr.type = cp->addr.type;
2397 if (!bdaddr_type_is_valid(cp->addr.type))
2398 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2399 MGMT_STATUS_INVALID_PARAMS,
2402 if (cp->disconnect != 0x00 && cp->disconnect != 0x01)
2403 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2404 MGMT_STATUS_INVALID_PARAMS,
2409 if (!hdev_is_powered(hdev)) {
2410 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2411 MGMT_STATUS_NOT_POWERED, &rp,
2416 if (cp->addr.type == BDADDR_BREDR) {
2417 /* If disconnection is requested, then look up the
2418 * connection. If the remote device is connected, it
2419 * will be later used to terminate the link.
2421 * Setting it to NULL explicitly will cause no
2422 * termination of the link.
2425 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2430 err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
2432 err = mgmt_cmd_complete(sk, hdev->id,
2433 MGMT_OP_UNPAIR_DEVICE,
2434 MGMT_STATUS_NOT_PAIRED, &rp,
2442 /* LE address type */
2443 addr_type = le_addr_type(cp->addr.type);
2445 /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. */
2446 err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type);
2448 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
2449 MGMT_STATUS_NOT_PAIRED, &rp,
2454 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
2456 hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
2461 /* Defer clearing up the connection parameters until closing to
2462 * give a chance of keeping them if a repairing happens.
2464 set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
2466 /* Disable auto-connection parameters if present */
2467 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
2469 if (params->explicit_connect)
2470 params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2472 params->auto_connect = HCI_AUTO_CONN_DISABLED;
2475 /* If disconnection is not requested, then clear the connection
2476 * variable so that the link is not terminated.
2478 if (!cp->disconnect)
2482 /* If the connection variable is set, then termination of the
2483 * link is requested.
2486 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0,
2488 device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk);
2492 cmd = mgmt_pending_add(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp,
2499 cmd->cmd_complete = addr_cmd_complete;
2501 err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
2503 mgmt_pending_remove(cmd);
2506 hci_dev_unlock(hdev);
/* MGMT_OP_DISCONNECT handler: locate the BR/EDR or LE connection for
 * the given address, queue a pending command and request a remote-user
 * terminated disconnect; the command completes when the link drops.
 */
2510 static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
2513 struct mgmt_cp_disconnect *cp = data;
2514 struct mgmt_rp_disconnect rp;
2515 struct mgmt_pending_cmd *cmd;
2516 struct hci_conn *conn;
2521 memset(&rp, 0, sizeof(rp));
2522 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2523 rp.addr.type = cp->addr.type;
2525 if (!bdaddr_type_is_valid(cp->addr.type))
2526 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2527 MGMT_STATUS_INVALID_PARAMS,
2532 if (!test_bit(HCI_UP, &hdev->flags)) {
2533 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2534 MGMT_STATUS_NOT_POWERED, &rp,
/* Only one disconnect may be pending per controller. */
2539 if (pending_find(MGMT_OP_DISCONNECT, hdev)) {
2540 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2541 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2545 if (cp->addr.type == BDADDR_BREDR)
2546 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
2549 conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
2550 le_addr_type(cp->addr.type));
2552 if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
2553 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
2554 MGMT_STATUS_NOT_CONNECTED, &rp,
2559 cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len);
2565 cmd->cmd_complete = generic_cmd_complete;
2567 err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM);
2569 mgmt_pending_remove(cmd);
2572 hci_dev_unlock(hdev);
/* Translate an HCI link type + address type into the corresponding
 * mgmt BDADDR_* constant, defaulting to BR/EDR for unknown link types.
 */
2576 static u8 link_to_bdaddr(u8 link_type, u8 addr_type)
2578 switch (link_type) {
2580 switch (addr_type) {
2581 case ADDR_LE_DEV_PUBLIC:
2582 return BDADDR_LE_PUBLIC;
2585 /* Fallback to LE Random address type */
2586 return BDADDR_LE_RANDOM;
2590 /* Fallback to BR/EDR type */
2591 return BDADDR_BREDR;
/* MGMT_OP_GET_CONNECTIONS handler: report the address and type of every
 * mgmt-visible connection, skipping SCO/eSCO links. The response is
 * sized by a first counting pass and trimmed again after the copy pass
 * in case entries were filtered.
 */
2595 static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data,
2598 struct mgmt_rp_get_connections *rp;
2607 if (!hdev_is_powered(hdev)) {
2608 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS,
2609 MGMT_STATUS_NOT_POWERED);
/* First pass: count connections to size the response. */
2614 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2615 if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2619 rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL);
/* Second pass: fill in addresses, skipping SCO/eSCO links. */
2626 list_for_each_entry(c, &hdev->conn_hash.list, list) {
2627 if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags))
2629 bacpy(&rp->addr[i].bdaddr, &c->dst);
2630 rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type);
2631 if (c->type == SCO_LINK || c->type == ESCO_LINK)
2636 rp->conn_count = cpu_to_le16(i);
2638 /* Recalculate length in case of filtered SCO connections, etc */
2639 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp,
2640 struct_size(rp, addr, i));
2645 hci_dev_unlock(hdev);
/* Queue a pending PIN_CODE_NEG_REPLY command and send the matching
 * HCI negative reply for the given address.
 */
2649 static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
2650 struct mgmt_cp_pin_code_neg_reply *cp)
2652 struct mgmt_pending_cmd *cmd;
2655 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp,
2660 cmd->cmd_complete = addr_cmd_complete;
2662 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2663 sizeof(cp->addr.bdaddr), &cp->addr.bdaddr);
2665 mgmt_pending_remove(cmd);
/* MGMT_OP_PIN_CODE_REPLY handler: forward the user-supplied PIN to the
 * controller for the matching ACL connection. A high-security pairing
 * with a PIN shorter than 16 bytes is converted into a negative reply.
 */
2670 static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data,
2673 struct hci_conn *conn;
2674 struct mgmt_cp_pin_code_reply *cp = data;
2675 struct hci_cp_pin_code_reply reply;
2676 struct mgmt_pending_cmd *cmd;
2683 if (!hdev_is_powered(hdev)) {
2684 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2685 MGMT_STATUS_NOT_POWERED);
2689 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr);
2691 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2692 MGMT_STATUS_NOT_CONNECTED);
/* High security requires a full 16-byte PIN; otherwise NAK the request. */
2696 if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) {
2697 struct mgmt_cp_pin_code_neg_reply ncp;
2699 memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr));
2701 bt_dev_err(hdev, "PIN code is not 16 bytes long");
2703 err = send_pin_code_neg_reply(sk, hdev, &ncp);
2705 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY,
2706 MGMT_STATUS_INVALID_PARAMS);
2711 cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len);
2717 cmd->cmd_complete = addr_cmd_complete;
2719 bacpy(&reply.bdaddr, &cp->addr.bdaddr);
2720 reply.pin_len = cp->pin_len;
2721 memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code));
2723 err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply);
2725 mgmt_pending_remove(cmd);
2728 hci_dev_unlock(hdev);
/* MGMT_OP_SET_IO_CAPABILITY handler: validate and store the local IO
 * capability used for future pairings. No HCI traffic is generated.
 */
2732 static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data,
2735 struct mgmt_cp_set_io_capability *cp = data;
2739 if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY)
2740 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY,
2741 MGMT_STATUS_INVALID_PARAMS);
2745 hdev->io_capability = cp->io_capability;
2747 BT_DBG("%s IO capability set to 0x%02x", hdev->name,
2748 hdev->io_capability);
2750 hci_dev_unlock(hdev);
2752 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0,
/* Find the pending PAIR_DEVICE command whose user_data is @conn, or
 * NULL if no pairing is in progress for this connection.
 */
2756 static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn)
2758 struct hci_dev *hdev = conn->hdev;
2759 struct mgmt_pending_cmd *cmd;
2761 list_for_each_entry(cmd, &hdev->mgmt_pending, list) {
2762 if (cmd->opcode != MGMT_OP_PAIR_DEVICE)
2765 if (cmd->user_data != conn)
/* cmd_complete handler for PAIR_DEVICE: reply with the peer address,
 * detach all pairing callbacks from the connection, drop the reference
 * taken for the pairing and keep the stored connection parameters.
 */
2774 static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status)
2776 struct mgmt_rp_pair_device rp;
2777 struct hci_conn *conn = cmd->user_data;
2780 bacpy(&rp.addr.bdaddr, &conn->dst);
2781 rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
2783 err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE,
2784 status, &rp, sizeof(rp));
2786 /* So we don't get further callbacks for this connection */
2787 conn->connect_cfm_cb = NULL;
2788 conn->security_cfm_cb = NULL;
2789 conn->disconn_cfm_cb = NULL;
2791 hci_conn_drop(conn);
2793 /* The device is paired so there is no need to remove
2794 * its connection parameters anymore.
2796 clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);
/* Called by SMP when pairing finishes: complete any pending
 * PAIR_DEVICE command for @conn with success or failure.
 */
2803 void mgmt_smp_complete(struct hci_conn *conn, bool complete)
2805 u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED;
2806 struct mgmt_pending_cmd *cmd;
2808 cmd = find_pairing(conn);
2810 cmd->cmd_complete(cmd, status);
2811 mgmt_pending_remove(cmd);
2815 static void pairing_complete_cb(struct hci_conn *conn, u8 status)
2817 struct mgmt_pending_cmd *cmd;
2819 BT_DBG("status %u", status);
2821 cmd = find_pairing(conn);
2823 BT_DBG("Unable to find a pending command");
2827 cmd->cmd_complete(cmd, mgmt_status(status));
2828 mgmt_pending_remove(cmd);
2831 static void le_pairing_complete_cb(struct hci_conn *conn, u8 status)
2833 struct mgmt_pending_cmd *cmd;
2835 BT_DBG("status %u", status);
2840 cmd = find_pairing(conn);
2842 BT_DBG("Unable to find a pending command");
2846 cmd->cmd_complete(cmd, mgmt_status(status));
2847 mgmt_pending_remove(cmd);
2850 static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2853 struct mgmt_cp_pair_device *cp = data;
2854 struct mgmt_rp_pair_device rp;
2855 struct mgmt_pending_cmd *cmd;
2856 u8 sec_level, auth_type;
2857 struct hci_conn *conn;
2862 memset(&rp, 0, sizeof(rp));
2863 bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
2864 rp.addr.type = cp->addr.type;
2866 if (!bdaddr_type_is_valid(cp->addr.type))
2867 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2868 MGMT_STATUS_INVALID_PARAMS,
2871 if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY)
2872 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2873 MGMT_STATUS_INVALID_PARAMS,
2878 if (!hdev_is_powered(hdev)) {
2879 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2880 MGMT_STATUS_NOT_POWERED, &rp,
2885 if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) {
2886 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2887 MGMT_STATUS_ALREADY_PAIRED, &rp,
2892 sec_level = BT_SECURITY_MEDIUM;
2893 auth_type = HCI_AT_DEDICATED_BONDING;
2895 if (cp->addr.type == BDADDR_BREDR) {
2896 conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
2899 u8 addr_type = le_addr_type(cp->addr.type);
2900 struct hci_conn_params *p;
2902 /* When pairing a new device, it is expected to remember
2903 * this device for future connections. Adding the connection
2904 * parameter information ahead of time allows tracking
2905 * of the slave preferred values and will speed up any
2906 * further connection establishment.
2908 * If connection parameters already exist, then they
2909 * will be kept and this function does nothing.
2911 p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type);
2913 if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT)
2914 p->auto_connect = HCI_AUTO_CONN_DISABLED;
2916 conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr,
2917 addr_type, sec_level,
2918 HCI_LE_CONN_TIMEOUT);
2924 if (PTR_ERR(conn) == -EBUSY)
2925 status = MGMT_STATUS_BUSY;
2926 else if (PTR_ERR(conn) == -EOPNOTSUPP)
2927 status = MGMT_STATUS_NOT_SUPPORTED;
2928 else if (PTR_ERR(conn) == -ECONNREFUSED)
2929 status = MGMT_STATUS_REJECTED;
2931 status = MGMT_STATUS_CONNECT_FAILED;
2933 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2934 status, &rp, sizeof(rp));
2938 if (conn->connect_cfm_cb) {
2939 hci_conn_drop(conn);
2940 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE,
2941 MGMT_STATUS_BUSY, &rp, sizeof(rp));
2945 cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len);
2948 hci_conn_drop(conn);
2952 cmd->cmd_complete = pairing_complete;
2954 /* For LE, just connecting isn't a proof that the pairing finished */
2955 if (cp->addr.type == BDADDR_BREDR) {
2956 conn->connect_cfm_cb = pairing_complete_cb;
2957 conn->security_cfm_cb = pairing_complete_cb;
2958 conn->disconn_cfm_cb = pairing_complete_cb;
2960 conn->connect_cfm_cb = le_pairing_complete_cb;
2961 conn->security_cfm_cb = le_pairing_complete_cb;
2962 conn->disconn_cfm_cb = le_pairing_complete_cb;
2965 conn->io_capability = cp->io_cap;
2966 cmd->user_data = hci_conn_get(conn);
2968 if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) &&
2969 hci_conn_security(conn, sec_level, auth_type, true)) {
2970 cmd->cmd_complete(cmd, 0);
2971 mgmt_pending_remove(cmd);
2977 hci_dev_unlock(hdev);
2981 static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
2984 struct mgmt_addr_info *addr = data;
2985 struct mgmt_pending_cmd *cmd;
2986 struct hci_conn *conn;
2993 if (!hdev_is_powered(hdev)) {
2994 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
2995 MGMT_STATUS_NOT_POWERED);
2999 cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev);
3001 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3002 MGMT_STATUS_INVALID_PARAMS);
3006 conn = cmd->user_data;
3008 if (bacmp(&addr->bdaddr, &conn->dst) != 0) {
3009 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE,
3010 MGMT_STATUS_INVALID_PARAMS);
3014 cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED);
3015 mgmt_pending_remove(cmd);
3017 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0,
3018 addr, sizeof(*addr));
3020 hci_dev_unlock(hdev);
3024 static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
3025 struct mgmt_addr_info *addr, u16 mgmt_op,
3026 u16 hci_op, __le32 passkey)
3028 struct mgmt_pending_cmd *cmd;
3029 struct hci_conn *conn;
3034 if (!hdev_is_powered(hdev)) {
3035 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3036 MGMT_STATUS_NOT_POWERED, addr,
3041 if (addr->type == BDADDR_BREDR)
3042 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
3044 conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
3045 le_addr_type(addr->type));
3048 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3049 MGMT_STATUS_NOT_CONNECTED, addr,
3054 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
3055 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3057 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3058 MGMT_STATUS_SUCCESS, addr,
3061 err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
3062 MGMT_STATUS_FAILED, addr,
3068 cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr));
3074 cmd->cmd_complete = addr_cmd_complete;
3076 /* Continue with pairing via HCI */
3077 if (hci_op == HCI_OP_USER_PASSKEY_REPLY) {
3078 struct hci_cp_user_passkey_reply cp;
3080 bacpy(&cp.bdaddr, &addr->bdaddr);
3081 cp.passkey = passkey;
3082 err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp);
3084 err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr),
3088 mgmt_pending_remove(cmd);
3091 hci_dev_unlock(hdev);
3095 static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev,
3096 void *data, u16 len)
3098 struct mgmt_cp_pin_code_neg_reply *cp = data;
3102 return user_pairing_resp(sk, hdev, &cp->addr,
3103 MGMT_OP_PIN_CODE_NEG_REPLY,
3104 HCI_OP_PIN_CODE_NEG_REPLY, 0);
3107 static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3110 struct mgmt_cp_user_confirm_reply *cp = data;
3114 if (len != sizeof(*cp))
3115 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY,
3116 MGMT_STATUS_INVALID_PARAMS);
3118 return user_pairing_resp(sk, hdev, &cp->addr,
3119 MGMT_OP_USER_CONFIRM_REPLY,
3120 HCI_OP_USER_CONFIRM_REPLY, 0);
3123 static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev,
3124 void *data, u16 len)
3126 struct mgmt_cp_user_confirm_neg_reply *cp = data;
3130 return user_pairing_resp(sk, hdev, &cp->addr,
3131 MGMT_OP_USER_CONFIRM_NEG_REPLY,
3132 HCI_OP_USER_CONFIRM_NEG_REPLY, 0);
3135 static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data,
3138 struct mgmt_cp_user_passkey_reply *cp = data;
3142 return user_pairing_resp(sk, hdev, &cp->addr,
3143 MGMT_OP_USER_PASSKEY_REPLY,
3144 HCI_OP_USER_PASSKEY_REPLY, cp->passkey);
3147 static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev,
3148 void *data, u16 len)
3150 struct mgmt_cp_user_passkey_neg_reply *cp = data;
3154 return user_pairing_resp(sk, hdev, &cp->addr,
3155 MGMT_OP_USER_PASSKEY_NEG_REPLY,
3156 HCI_OP_USER_PASSKEY_NEG_REPLY, 0);
3159 static void adv_expire(struct hci_dev *hdev, u32 flags)
3161 struct adv_info *adv_instance;
3162 struct hci_request req;
3165 adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance);
3169 /* stop if current instance doesn't need to be changed */
3170 if (!(adv_instance->flags & flags))
3173 cancel_adv_timeout(hdev);
3175 adv_instance = hci_get_next_instance(hdev, adv_instance->instance);
3179 hci_req_init(&req, hdev);
3180 err = __hci_req_schedule_adv_instance(&req, adv_instance->instance,
3185 hci_req_run(&req, NULL);
3188 static void set_name_complete(struct hci_dev *hdev, u8 status, u16 opcode)
3190 struct mgmt_cp_set_local_name *cp;
3191 struct mgmt_pending_cmd *cmd;
3193 BT_DBG("status 0x%02x", status);
3197 cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
3204 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME,
3205 mgmt_status(status));
3207 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3210 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3211 adv_expire(hdev, MGMT_ADV_FLAG_LOCAL_NAME);
3214 mgmt_pending_remove(cmd);
3217 hci_dev_unlock(hdev);
3220 static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data,
3223 struct mgmt_cp_set_local_name *cp = data;
3224 struct mgmt_pending_cmd *cmd;
3225 struct hci_request req;
3232 /* If the old values are the same as the new ones just return a
3233 * direct command complete event.
3235 if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) &&
3236 !memcmp(hdev->short_name, cp->short_name,
3237 sizeof(hdev->short_name))) {
3238 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3243 memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name));
3245 if (!hdev_is_powered(hdev)) {
3246 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3248 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0,
3253 err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data,
3254 len, HCI_MGMT_LOCAL_NAME_EVENTS, sk);
3255 ext_info_changed(hdev, sk);
3260 cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len);
3266 memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name));
3268 hci_req_init(&req, hdev);
3270 if (lmp_bredr_capable(hdev)) {
3271 __hci_req_update_name(&req);
3272 __hci_req_update_eir(&req);
3275 /* The name is stored in the scan response data and so
3276 * no need to udpate the advertising data here.
3278 if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING))
3279 __hci_req_update_scan_rsp_data(&req, hdev->cur_adv_instance);
3281 err = hci_req_run(&req, set_name_complete);
3283 mgmt_pending_remove(cmd);
3286 hci_dev_unlock(hdev);
3290 static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data,
3293 struct mgmt_cp_set_appearance *cp = data;
3299 if (!lmp_le_capable(hdev))
3300 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE,
3301 MGMT_STATUS_NOT_SUPPORTED);
3303 apperance = le16_to_cpu(cp->appearance);
3307 if (hdev->appearance != apperance) {
3308 hdev->appearance = apperance;
3310 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
3311 adv_expire(hdev, MGMT_ADV_FLAG_APPEARANCE);
3313 ext_info_changed(hdev, sk);
3316 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL,
3319 hci_dev_unlock(hdev);
3324 static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3325 void *data, u16 len)
3327 struct mgmt_rp_get_phy_confguration rp;
3329 BT_DBG("sock %p %s", sk, hdev->name);
3333 memset(&rp, 0, sizeof(rp));
3335 rp.supported_phys = cpu_to_le32(get_supported_phys(hdev));
3336 rp.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3337 rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev));
3339 hci_dev_unlock(hdev);
3341 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0,
3345 int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip)
3347 struct mgmt_ev_phy_configuration_changed ev;
3349 memset(&ev, 0, sizeof(ev));
3351 ev.selected_phys = cpu_to_le32(get_selected_phys(hdev));
3353 return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev,
3357 static void set_default_phy_complete(struct hci_dev *hdev, u8 status,
3358 u16 opcode, struct sk_buff *skb)
3360 struct mgmt_pending_cmd *cmd;
3362 BT_DBG("status 0x%02x", status);
3366 cmd = pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev);
3371 mgmt_cmd_status(cmd->sk, hdev->id,
3372 MGMT_OP_SET_PHY_CONFIGURATION,
3373 mgmt_status(status));
3375 mgmt_cmd_complete(cmd->sk, hdev->id,
3376 MGMT_OP_SET_PHY_CONFIGURATION, 0,
3379 mgmt_phy_configuration_changed(hdev, cmd->sk);
3382 mgmt_pending_remove(cmd);
3385 hci_dev_unlock(hdev);
3388 static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev,
3389 void *data, u16 len)
3391 struct mgmt_cp_set_phy_confguration *cp = data;
3392 struct hci_cp_le_set_default_phy cp_phy;
3393 struct mgmt_pending_cmd *cmd;
3394 struct hci_request req;
3395 u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys;
3396 u16 pkt_type = (HCI_DH1 | HCI_DM1);
3397 bool changed = false;
3400 BT_DBG("sock %p %s", sk, hdev->name);
3402 configurable_phys = get_configurable_phys(hdev);
3403 supported_phys = get_supported_phys(hdev);
3404 selected_phys = __le32_to_cpu(cp->selected_phys);
3406 if (selected_phys & ~supported_phys)
3407 return mgmt_cmd_status(sk, hdev->id,
3408 MGMT_OP_SET_PHY_CONFIGURATION,
3409 MGMT_STATUS_INVALID_PARAMS);
3411 unconfigure_phys = supported_phys & ~configurable_phys;
3413 if ((selected_phys & unconfigure_phys) != unconfigure_phys)
3414 return mgmt_cmd_status(sk, hdev->id,
3415 MGMT_OP_SET_PHY_CONFIGURATION,
3416 MGMT_STATUS_INVALID_PARAMS);
3418 if (selected_phys == get_selected_phys(hdev))
3419 return mgmt_cmd_complete(sk, hdev->id,
3420 MGMT_OP_SET_PHY_CONFIGURATION,
3425 if (!hdev_is_powered(hdev)) {
3426 err = mgmt_cmd_status(sk, hdev->id,
3427 MGMT_OP_SET_PHY_CONFIGURATION,
3428 MGMT_STATUS_REJECTED);
3432 if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) {
3433 err = mgmt_cmd_status(sk, hdev->id,
3434 MGMT_OP_SET_PHY_CONFIGURATION,
3439 if (selected_phys & MGMT_PHY_BR_1M_3SLOT)
3440 pkt_type |= (HCI_DH3 | HCI_DM3);
3442 pkt_type &= ~(HCI_DH3 | HCI_DM3);
3444 if (selected_phys & MGMT_PHY_BR_1M_5SLOT)
3445 pkt_type |= (HCI_DH5 | HCI_DM5);
3447 pkt_type &= ~(HCI_DH5 | HCI_DM5);
3449 if (selected_phys & MGMT_PHY_EDR_2M_1SLOT)
3450 pkt_type &= ~HCI_2DH1;
3452 pkt_type |= HCI_2DH1;
3454 if (selected_phys & MGMT_PHY_EDR_2M_3SLOT)
3455 pkt_type &= ~HCI_2DH3;
3457 pkt_type |= HCI_2DH3;
3459 if (selected_phys & MGMT_PHY_EDR_2M_5SLOT)
3460 pkt_type &= ~HCI_2DH5;
3462 pkt_type |= HCI_2DH5;
3464 if (selected_phys & MGMT_PHY_EDR_3M_1SLOT)
3465 pkt_type &= ~HCI_3DH1;
3467 pkt_type |= HCI_3DH1;
3469 if (selected_phys & MGMT_PHY_EDR_3M_3SLOT)
3470 pkt_type &= ~HCI_3DH3;
3472 pkt_type |= HCI_3DH3;
3474 if (selected_phys & MGMT_PHY_EDR_3M_5SLOT)
3475 pkt_type &= ~HCI_3DH5;
3477 pkt_type |= HCI_3DH5;
3479 if (pkt_type != hdev->pkt_type) {
3480 hdev->pkt_type = pkt_type;
3484 if ((selected_phys & MGMT_PHY_LE_MASK) ==
3485 (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) {
3487 mgmt_phy_configuration_changed(hdev, sk);
3489 err = mgmt_cmd_complete(sk, hdev->id,
3490 MGMT_OP_SET_PHY_CONFIGURATION,
3496 cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data,
3503 hci_req_init(&req, hdev);
3505 memset(&cp_phy, 0, sizeof(cp_phy));
3507 if (!(selected_phys & MGMT_PHY_LE_TX_MASK))
3508 cp_phy.all_phys |= 0x01;
3510 if (!(selected_phys & MGMT_PHY_LE_RX_MASK))
3511 cp_phy.all_phys |= 0x02;
3513 if (selected_phys & MGMT_PHY_LE_1M_TX)
3514 cp_phy.tx_phys |= HCI_LE_SET_PHY_1M;
3516 if (selected_phys & MGMT_PHY_LE_2M_TX)
3517 cp_phy.tx_phys |= HCI_LE_SET_PHY_2M;
3519 if (selected_phys & MGMT_PHY_LE_CODED_TX)
3520 cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED;
3522 if (selected_phys & MGMT_PHY_LE_1M_RX)
3523 cp_phy.rx_phys |= HCI_LE_SET_PHY_1M;
3525 if (selected_phys & MGMT_PHY_LE_2M_RX)
3526 cp_phy.rx_phys |= HCI_LE_SET_PHY_2M;
3528 if (selected_phys & MGMT_PHY_LE_CODED_RX)
3529 cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED;
3531 hci_req_add(&req, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp_phy), &cp_phy);
3533 err = hci_req_run_skb(&req, set_default_phy_complete);
3535 mgmt_pending_remove(cmd);
3538 hci_dev_unlock(hdev);
3543 static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
3546 int err = MGMT_STATUS_SUCCESS;
3547 struct mgmt_cp_set_blocked_keys *keys = data;
3548 const u16 max_key_count = ((U16_MAX - sizeof(*keys)) /
3549 sizeof(struct mgmt_blocked_key_info));
3550 u16 key_count, expected_len;
3553 BT_DBG("request for %s", hdev->name);
3555 key_count = __le16_to_cpu(keys->key_count);
3556 if (key_count > max_key_count) {
3557 bt_dev_err(hdev, "too big key_count value %u", key_count);
3558 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3559 MGMT_STATUS_INVALID_PARAMS);
3562 expected_len = struct_size(keys, keys, key_count);
3563 if (expected_len != len) {
3564 bt_dev_err(hdev, "expected %u bytes, got %u bytes",
3566 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3567 MGMT_STATUS_INVALID_PARAMS);
3572 hci_blocked_keys_clear(hdev);
3574 for (i = 0; i < keys->key_count; ++i) {
3575 struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL);
3578 err = MGMT_STATUS_NO_RESOURCES;
3582 b->type = keys->keys[i].type;
3583 memcpy(b->val, keys->keys[i].val, sizeof(b->val));
3584 list_add_rcu(&b->list, &hdev->blocked_keys);
3586 hci_dev_unlock(hdev);
3588 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS,
3592 static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
3593 u16 opcode, struct sk_buff *skb)
3595 struct mgmt_rp_read_local_oob_data mgmt_rp;
3596 size_t rp_size = sizeof(mgmt_rp);
3597 struct mgmt_pending_cmd *cmd;
3599 BT_DBG("%s status %u", hdev->name, status);
3601 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev);
3605 if (status || !skb) {
3606 mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3607 status ? mgmt_status(status) : MGMT_STATUS_FAILED);
3611 memset(&mgmt_rp, 0, sizeof(mgmt_rp));
3613 if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
3614 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
3616 if (skb->len < sizeof(*rp)) {
3617 mgmt_cmd_status(cmd->sk, hdev->id,
3618 MGMT_OP_READ_LOCAL_OOB_DATA,
3619 MGMT_STATUS_FAILED);
3623 memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash));
3624 memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand));
3626 rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256);
3628 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
3630 if (skb->len < sizeof(*rp)) {
3631 mgmt_cmd_status(cmd->sk, hdev->id,
3632 MGMT_OP_READ_LOCAL_OOB_DATA,
3633 MGMT_STATUS_FAILED);
3637 memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192));
3638 memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192));
3640 memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256));
3641 memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256));
3644 mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3645 MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size);
3648 mgmt_pending_remove(cmd);
3651 static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev,
3652 void *data, u16 data_len)
3654 struct mgmt_pending_cmd *cmd;
3655 struct hci_request req;
3658 BT_DBG("%s", hdev->name);
3662 if (!hdev_is_powered(hdev)) {
3663 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3664 MGMT_STATUS_NOT_POWERED);
3668 if (!lmp_ssp_capable(hdev)) {
3669 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3670 MGMT_STATUS_NOT_SUPPORTED);
3674 if (pending_find(MGMT_OP_READ_LOCAL_OOB_DATA, hdev)) {
3675 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA,
3680 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0);
3686 hci_req_init(&req, hdev);
3688 if (bredr_sc_enabled(hdev))
3689 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
3691 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
3693 err = hci_req_run_skb(&req, read_local_oob_data_complete);
3695 mgmt_pending_remove(cmd);
3698 hci_dev_unlock(hdev);
3702 static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3703 void *data, u16 len)
3705 struct mgmt_addr_info *addr = data;
3708 BT_DBG("%s ", hdev->name);
3710 if (!bdaddr_type_is_valid(addr->type))
3711 return mgmt_cmd_complete(sk, hdev->id,
3712 MGMT_OP_ADD_REMOTE_OOB_DATA,
3713 MGMT_STATUS_INVALID_PARAMS,
3714 addr, sizeof(*addr));
3718 if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) {
3719 struct mgmt_cp_add_remote_oob_data *cp = data;
3722 if (cp->addr.type != BDADDR_BREDR) {
3723 err = mgmt_cmd_complete(sk, hdev->id,
3724 MGMT_OP_ADD_REMOTE_OOB_DATA,
3725 MGMT_STATUS_INVALID_PARAMS,
3726 &cp->addr, sizeof(cp->addr));
3730 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3731 cp->addr.type, cp->hash,
3732 cp->rand, NULL, NULL);
3734 status = MGMT_STATUS_FAILED;
3736 status = MGMT_STATUS_SUCCESS;
3738 err = mgmt_cmd_complete(sk, hdev->id,
3739 MGMT_OP_ADD_REMOTE_OOB_DATA, status,
3740 &cp->addr, sizeof(cp->addr));
3741 } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) {
3742 struct mgmt_cp_add_remote_oob_ext_data *cp = data;
3743 u8 *rand192, *hash192, *rand256, *hash256;
3746 if (bdaddr_type_is_le(cp->addr.type)) {
3747 /* Enforce zero-valued 192-bit parameters as
3748 * long as legacy SMP OOB isn't implemented.
3750 if (memcmp(cp->rand192, ZERO_KEY, 16) ||
3751 memcmp(cp->hash192, ZERO_KEY, 16)) {
3752 err = mgmt_cmd_complete(sk, hdev->id,
3753 MGMT_OP_ADD_REMOTE_OOB_DATA,
3754 MGMT_STATUS_INVALID_PARAMS,
3755 addr, sizeof(*addr));
3762 /* In case one of the P-192 values is set to zero,
3763 * then just disable OOB data for P-192.
3765 if (!memcmp(cp->rand192, ZERO_KEY, 16) ||
3766 !memcmp(cp->hash192, ZERO_KEY, 16)) {
3770 rand192 = cp->rand192;
3771 hash192 = cp->hash192;
3775 /* In case one of the P-256 values is set to zero, then just
3776 * disable OOB data for P-256.
3778 if (!memcmp(cp->rand256, ZERO_KEY, 16) ||
3779 !memcmp(cp->hash256, ZERO_KEY, 16)) {
3783 rand256 = cp->rand256;
3784 hash256 = cp->hash256;
3787 err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr,
3788 cp->addr.type, hash192, rand192,
3791 status = MGMT_STATUS_FAILED;
3793 status = MGMT_STATUS_SUCCESS;
3795 err = mgmt_cmd_complete(sk, hdev->id,
3796 MGMT_OP_ADD_REMOTE_OOB_DATA,
3797 status, &cp->addr, sizeof(cp->addr));
3799 bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes",
3801 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA,
3802 MGMT_STATUS_INVALID_PARAMS);
3806 hci_dev_unlock(hdev);
3810 static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev,
3811 void *data, u16 len)
3813 struct mgmt_cp_remove_remote_oob_data *cp = data;
3817 BT_DBG("%s", hdev->name);
3819 if (cp->addr.type != BDADDR_BREDR)
3820 return mgmt_cmd_complete(sk, hdev->id,
3821 MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3822 MGMT_STATUS_INVALID_PARAMS,
3823 &cp->addr, sizeof(cp->addr));
3827 if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
3828 hci_remote_oob_data_clear(hdev);
3829 status = MGMT_STATUS_SUCCESS;
3833 err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type);
3835 status = MGMT_STATUS_INVALID_PARAMS;
3837 status = MGMT_STATUS_SUCCESS;
3840 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA,
3841 status, &cp->addr, sizeof(cp->addr));
3843 hci_dev_unlock(hdev);
3847 void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
3849 struct mgmt_pending_cmd *cmd;
3851 BT_DBG("status %d", status);
3855 cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev);
3857 cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev);
3860 cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev);
3863 cmd->cmd_complete(cmd, mgmt_status(status));
3864 mgmt_pending_remove(cmd);
3867 hci_dev_unlock(hdev);
3870 static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
3871 uint8_t *mgmt_status)
3874 case DISCOV_TYPE_LE:
3875 *mgmt_status = mgmt_le_support(hdev);
3879 case DISCOV_TYPE_INTERLEAVED:
3880 *mgmt_status = mgmt_le_support(hdev);
3883 /* Intentional fall-through */
3884 case DISCOV_TYPE_BREDR:
3885 *mgmt_status = mgmt_bredr_support(hdev);
3890 *mgmt_status = MGMT_STATUS_INVALID_PARAMS;
3897 static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
3898 u16 op, void *data, u16 len)
3900 struct mgmt_cp_start_discovery *cp = data;
3901 struct mgmt_pending_cmd *cmd;
3905 BT_DBG("%s", hdev->name);
3909 if (!hdev_is_powered(hdev)) {
3910 err = mgmt_cmd_complete(sk, hdev->id, op,
3911 MGMT_STATUS_NOT_POWERED,
3912 &cp->type, sizeof(cp->type));
3916 if (hdev->discovery.state != DISCOVERY_STOPPED ||
3917 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
3918 err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
3919 &cp->type, sizeof(cp->type));
3923 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
3924 err = mgmt_cmd_complete(sk, hdev->id, op, status,
3925 &cp->type, sizeof(cp->type));
3929 /* Clear the discovery filter first to free any previously
3930 * allocated memory for the UUID list.
3932 hci_discovery_filter_clear(hdev);
3934 hdev->discovery.type = cp->type;
3935 hdev->discovery.report_invalid_rssi = false;
3936 if (op == MGMT_OP_START_LIMITED_DISCOVERY)
3937 hdev->discovery.limited = true;
3939 hdev->discovery.limited = false;
3941 cmd = mgmt_pending_add(sk, op, hdev, data, len);
3947 cmd->cmd_complete = generic_cmd_complete;
3949 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
3950 queue_work(hdev->req_workqueue, &hdev->discov_update);
3954 hci_dev_unlock(hdev);
3958 static int start_discovery(struct sock *sk, struct hci_dev *hdev,
3959 void *data, u16 len)
3961 return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY,
3965 static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev,
3966 void *data, u16 len)
3968 return start_discovery_internal(sk, hdev,
3969 MGMT_OP_START_LIMITED_DISCOVERY,
3973 static int service_discovery_cmd_complete(struct mgmt_pending_cmd *cmd,
3976 return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status,
3980 static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
3981 void *data, u16 len)
3983 struct mgmt_cp_start_service_discovery *cp = data;
3984 struct mgmt_pending_cmd *cmd;
3985 const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16);
3986 u16 uuid_count, expected_len;
3990 BT_DBG("%s", hdev->name);
3994 if (!hdev_is_powered(hdev)) {
3995 err = mgmt_cmd_complete(sk, hdev->id,
3996 MGMT_OP_START_SERVICE_DISCOVERY,
3997 MGMT_STATUS_NOT_POWERED,
3998 &cp->type, sizeof(cp->type));
4002 if (hdev->discovery.state != DISCOVERY_STOPPED ||
4003 hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) {
4004 err = mgmt_cmd_complete(sk, hdev->id,
4005 MGMT_OP_START_SERVICE_DISCOVERY,
4006 MGMT_STATUS_BUSY, &cp->type,
4011 uuid_count = __le16_to_cpu(cp->uuid_count);
4012 if (uuid_count > max_uuid_count) {
4013 bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
4015 err = mgmt_cmd_complete(sk, hdev->id,
4016 MGMT_OP_START_SERVICE_DISCOVERY,
4017 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4022 expected_len = sizeof(*cp) + uuid_count * 16;
4023 if (expected_len != len) {
4024 bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes",
4026 err = mgmt_cmd_complete(sk, hdev->id,
4027 MGMT_OP_START_SERVICE_DISCOVERY,
4028 MGMT_STATUS_INVALID_PARAMS, &cp->type,
4033 if (!discovery_type_is_valid(hdev, cp->type, &status)) {
4034 err = mgmt_cmd_complete(sk, hdev->id,
4035 MGMT_OP_START_SERVICE_DISCOVERY,
4036 status, &cp->type, sizeof(cp->type));
4040 cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY,
4047 cmd->cmd_complete = service_discovery_cmd_complete;
4049 /* Clear the discovery filter first to free any previously
4050 * allocated memory for the UUID list.
4052 hci_discovery_filter_clear(hdev);
4054 hdev->discovery.result_filtering = true;
4055 hdev->discovery.type = cp->type;
4056 hdev->discovery.rssi = cp->rssi;
4057 hdev->discovery.uuid_count = uuid_count;
4059 if (uuid_count > 0) {
4060 hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16,
4062 if (!hdev->discovery.uuids) {
4063 err = mgmt_cmd_complete(sk, hdev->id,
4064 MGMT_OP_START_SERVICE_DISCOVERY,
4066 &cp->type, sizeof(cp->type));
4067 mgmt_pending_remove(cmd);
4072 hci_discovery_set_state(hdev, DISCOVERY_STARTING);
4073 queue_work(hdev->req_workqueue, &hdev->discov_update);
4077 hci_dev_unlock(hdev);
4081 void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
4083 struct mgmt_pending_cmd *cmd;
4085 BT_DBG("status %d", status);
4089 cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev);
4091 cmd->cmd_complete(cmd, mgmt_status(status));
4092 mgmt_pending_remove(cmd);
4095 hci_dev_unlock(hdev);
4098 static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
4101 struct mgmt_cp_stop_discovery *mgmt_cp = data;
4102 struct mgmt_pending_cmd *cmd;
4105 BT_DBG("%s", hdev->name);
4109 if (!hci_discovery_active(hdev)) {
4110 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4111 MGMT_STATUS_REJECTED, &mgmt_cp->type,
4112 sizeof(mgmt_cp->type));
4116 if (hdev->discovery.type != mgmt_cp->type) {
4117 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
4118 MGMT_STATUS_INVALID_PARAMS,
4119 &mgmt_cp->type, sizeof(mgmt_cp->type));
4123 cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len);
4129 cmd->cmd_complete = generic_cmd_complete;
4131 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
4132 queue_work(hdev->req_workqueue, &hdev->discov_update);
4136 hci_dev_unlock(hdev);
4140 static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data,
4143 struct mgmt_cp_confirm_name *cp = data;
4144 struct inquiry_entry *e;
4147 BT_DBG("%s", hdev->name);
4151 if (!hci_discovery_active(hdev)) {
4152 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4153 MGMT_STATUS_FAILED, &cp->addr,
4158 e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr);
4160 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME,
4161 MGMT_STATUS_INVALID_PARAMS, &cp->addr,
4166 if (cp->name_known) {
4167 e->name_state = NAME_KNOWN;
4170 e->name_state = NAME_NEEDED;
4171 hci_inquiry_cache_update_resolve(hdev, e);
4174 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0,
4175 &cp->addr, sizeof(cp->addr));
4178 hci_dev_unlock(hdev);
4182 static int block_device(struct sock *sk, struct hci_dev *hdev, void *data,
4185 struct mgmt_cp_block_device *cp = data;
4189 BT_DBG("%s", hdev->name);
4191 if (!bdaddr_type_is_valid(cp->addr.type))
4192 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE,
4193 MGMT_STATUS_INVALID_PARAMS,
4194 &cp->addr, sizeof(cp->addr));
4198 err = hci_bdaddr_list_add(&hdev->blacklist, &cp->addr.bdaddr,
4201 status = MGMT_STATUS_FAILED;
4205 mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4207 status = MGMT_STATUS_SUCCESS;
4210 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status,
4211 &cp->addr, sizeof(cp->addr));
4213 hci_dev_unlock(hdev);
4218 static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data,
4221 struct mgmt_cp_unblock_device *cp = data;
4225 BT_DBG("%s", hdev->name);
4227 if (!bdaddr_type_is_valid(cp->addr.type))
4228 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE,
4229 MGMT_STATUS_INVALID_PARAMS,
4230 &cp->addr, sizeof(cp->addr));
4234 err = hci_bdaddr_list_del(&hdev->blacklist, &cp->addr.bdaddr,
4237 status = MGMT_STATUS_INVALID_PARAMS;
4241 mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr),
4243 status = MGMT_STATUS_SUCCESS;
4246 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status,
4247 &cp->addr, sizeof(cp->addr));
4249 hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEVICE_ID: store the Device ID (DI) record
 * (source, vendor, product, version) on hdev and refresh the EIR data
 * so the new DI record is advertised over BR/EDR.
 */
4254 static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data,
4257 	struct mgmt_cp_set_device_id *cp = data;
4258 	struct hci_request req;
4262 	BT_DBG("%s", hdev->name);
4264 	source = __le16_to_cpu(cp->source);
/* Only source values 0x0000-0x0002 are defined; reject the rest. */
4266 	if (source > 0x0002)
4267 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID,
4268 				       MGMT_STATUS_INVALID_PARAMS);
4272 	hdev->devid_source = source;
4273 	hdev->devid_vendor = __le16_to_cpu(cp->vendor);
4274 	hdev->devid_product = __le16_to_cpu(cp->product);
4275 	hdev->devid_version = __le16_to_cpu(cp->version);
4277 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0,
/* Push the updated EIR to the controller; no completion callback. */
4280 	hci_req_init(&req, hdev);
4281 	__hci_req_update_eir(&req);
4282 	hci_req_run(&req, NULL);
4284 	hci_dev_unlock(hdev);
/* HCI request completion callback used when re-enabling instance
 * advertising; only logs the status, no further action needed.
 */
4289 static void enable_advertising_instance(struct hci_dev *hdev, u8 status,
4292 	BT_DBG("status %d", status);
/* Completion handler for the Set Advertising HCI request: on failure
 * report the error to all pending MGMT_OP_SET_ADVERTISING commands; on
 * success sync the HCI_ADVERTISING mgmt flag with the controller's
 * HCI_LE_ADV state, answer pending commands and emit New Settings.
 * Finally, if Set Advertising was turned off while advertising
 * instances exist, re-schedule multi-instance advertising.
 */
4295 static void set_advertising_complete(struct hci_dev *hdev, u8 status,
4298 	struct cmd_lookup match = { NULL, hdev };
4299 	struct hci_request req;
4301 	struct adv_info *adv_instance;
4307 		u8 mgmt_err = mgmt_status(status);
4309 		mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev,
4310 				     cmd_status_rsp, &mgmt_err);
/* Mirror the controller's LE advertising state into the mgmt flag. */
4314 	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
4315 		hci_dev_set_flag(hdev, HCI_ADVERTISING);
4317 		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
4319 	mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp,
4322 	new_settings(hdev, match.sk);
4327 	/* If "Set Advertising" was just disabled and instance advertising was
4328 	 * set up earlier, then re-enable multi-instance advertising.
4330 	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
4331 	    list_empty(&hdev->adv_instances))
4334 	instance = hdev->cur_adv_instance;
/* No current instance selected: fall back to the first configured one. */
4336 		adv_instance = list_first_entry_or_null(&hdev->adv_instances,
4337 							struct adv_info, list);
4341 		instance = adv_instance->instance;
4344 	hci_req_init(&req, hdev);
4346 	err = __hci_req_schedule_adv_instance(&req, instance, true);
4349 		err = hci_req_run(&req, enable_advertising_instance);
4352 		bt_dev_err(hdev, "failed to re-configure advertising");
4355 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_ADVERTISING. val: 0x00 = off, 0x01 = on,
 * 0x02 = on and connectable. When the controller is powered and idle
 * the change is programmed via an HCI request; otherwise only the mgmt
 * flags are toggled and the response is sent directly.
 */
4358 static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
4361 	struct mgmt_mode *cp = data;
4362 	struct mgmt_pending_cmd *cmd;
4363 	struct hci_request req;
4367 	BT_DBG("request for %s", hdev->name);
4369 	status = mgmt_le_support(hdev);
4371 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4374 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4375 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4376 				       MGMT_STATUS_INVALID_PARAMS);
4382 	/* The following conditions are ones which mean that we should
4383 	 * not do any HCI communication but directly send a mgmt
4384 	 * response to user space (after toggling the flag if
/* Skip HCI traffic when: powered off, no effective change, an LE
 * connection exists, or an active LE scan is running.
 */
4387 	if (!hdev_is_powered(hdev) ||
4388 	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
4389 	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) ||
4390 	    hci_conn_num(hdev, LE_LINK) > 0 ||
4391 	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4392 	     hdev->le_scan_type == LE_SCAN_ACTIVE)) {
4396 		hdev->cur_adv_instance = 0x00;
4397 		changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING);
4398 		if (cp->val == 0x02)
4399 			hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4401 		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4403 		changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING);
4404 		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4407 	err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev);
4412 	err = new_settings(hdev, sk);
/* Only one advertising/LE state change may be in flight at a time. */
4417 	if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) ||
4418 	    pending_find(MGMT_OP_SET_LE, hdev)) {
4419 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
4424 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len);
4430 	hci_req_init(&req, hdev);
4432 	if (cp->val == 0x02)
4433 		hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4435 		hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE);
4437 	cancel_adv_timeout(hdev);
4440 	/* Switch to instance "0" for the Set Advertising setting.
4441 	 * We cannot use update_[adv|scan_rsp]_data() here as the
4442 	 * HCI_ADVERTISING flag is not yet set.
4444 	hdev->cur_adv_instance = 0x00;
/* Extended advertising capable controllers use the ext-adv request path. */
4446 	if (ext_adv_capable(hdev)) {
4447 		__hci_req_start_ext_adv(&req, 0x00);
4449 		__hci_req_update_adv_data(&req, 0x00);
4450 		__hci_req_update_scan_rsp_data(&req, 0x00);
4451 		__hci_req_enable_advertising(&req);
4454 		__hci_req_disable_advertising(&req);
4457 	err = hci_req_run(&req, set_advertising_complete);
4459 		mgmt_pending_remove(cmd);
4462 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_STATIC_ADDRESS: configure the LE static
 * random address. Only allowed while powered off; BDADDR_ANY clears
 * the address, otherwise the address must be a valid static address
 * (top two bits set, not BDADDR_NONE).
 */
4466 static int set_static_address(struct sock *sk, struct hci_dev *hdev,
4467 			      void *data, u16 len)
4469 	struct mgmt_cp_set_static_address *cp = data;
4472 	BT_DBG("%s", hdev->name);
4474 	if (!lmp_le_capable(hdev))
4475 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4476 				       MGMT_STATUS_NOT_SUPPORTED);
4478 	if (hdev_is_powered(hdev))
4479 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS,
4480 				       MGMT_STATUS_REJECTED);
4482 	if (bacmp(&cp->bdaddr, BDADDR_ANY)) {
4483 		if (!bacmp(&cp->bdaddr, BDADDR_NONE))
4484 			return mgmt_cmd_status(sk, hdev->id,
4485 					       MGMT_OP_SET_STATIC_ADDRESS,
4486 					       MGMT_STATUS_INVALID_PARAMS);
4488 		/* Two most significant bits shall be set */
4489 		if ((cp->bdaddr.b[5] & 0xc0) != 0xc0)
4490 			return mgmt_cmd_status(sk, hdev->id,
4491 					       MGMT_OP_SET_STATIC_ADDRESS,
4492 					       MGMT_STATUS_INVALID_PARAMS);
4497 	bacpy(&hdev->static_addr, &cp->bdaddr);
4499 	err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev);
4503 	err = new_settings(hdev, sk);
4506 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SCAN_PARAMS: validate and store the LE scan
 * interval and window (each 0x0004-0x4000, window <= interval), then
 * restart background scanning so the new parameters take effect.
 */
4510 static int set_scan_params(struct sock *sk, struct hci_dev *hdev,
4511 			   void *data, u16 len)
4513 	struct mgmt_cp_set_scan_params *cp = data;
4514 	__u16 interval, window;
4517 	BT_DBG("%s", hdev->name);
4519 	if (!lmp_le_capable(hdev))
4520 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4521 				       MGMT_STATUS_NOT_SUPPORTED);
4523 	interval = __le16_to_cpu(cp->interval);
4525 	if (interval < 0x0004 || interval > 0x4000)
4526 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4527 				       MGMT_STATUS_INVALID_PARAMS);
4529 	window = __le16_to_cpu(cp->window);
4531 	if (window < 0x0004 || window > 0x4000)
4532 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4533 				       MGMT_STATUS_INVALID_PARAMS);
/* The scan window must fit inside the scan interval. */
4535 	if (window > interval)
4536 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS,
4537 				       MGMT_STATUS_INVALID_PARAMS);
4541 	hdev->le_scan_interval = interval;
4542 	hdev->le_scan_window = window;
4544 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0,
4547 	/* If background scan is running, restart it so new parameters are
/* Only restart passive scanning when no discovery is in progress. */
4550 	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
4551 	    hdev->discovery.state == DISCOVERY_STOPPED) {
4552 		struct hci_request req;
4554 		hci_req_init(&req, hdev);
4556 		hci_req_add_le_scan_disable(&req);
4557 		hci_req_add_le_passive_scan(&req);
4559 		hci_req_run(&req, NULL);
4562 	hci_dev_unlock(hdev);
/* Completion handler for Set Fast Connectable: on error return the
 * status to the pending command's socket; on success toggle
 * HCI_FAST_CONNECTABLE per the requested mode, answer the command and
 * emit New Settings.
 */
4567 static void fast_connectable_complete(struct hci_dev *hdev, u8 status,
4570 	struct mgmt_pending_cmd *cmd;
4572 	BT_DBG("status 0x%02x", status);
4576 	cmd = pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4581 		mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4582 				mgmt_status(status));
4584 		struct mgmt_mode *cp = cmd->param;
4587 			hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE);
4589 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4591 		send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev);
4592 		new_settings(hdev, cmd->sk);
4595 	mgmt_pending_remove(cmd);
4598 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_FAST_CONNECTABLE: requires BR/EDR enabled
 * and controller version >= 1.2. When powered, writes the fast
 * connectable page-scan parameters via an HCI request; when powered
 * off only the flag is toggled and the response is sent directly.
 */
4601 static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev,
4602 				void *data, u16 len)
4604 	struct mgmt_mode *cp = data;
4605 	struct mgmt_pending_cmd *cmd;
4606 	struct hci_request req;
4609 	BT_DBG("%s", hdev->name);
4611 	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) ||
4612 	    hdev->hci_ver < BLUETOOTH_VER_1_2)
4613 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4614 				       MGMT_STATUS_NOT_SUPPORTED);
4616 	if (cp->val != 0x00 && cp->val != 0x01)
4617 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4618 				       MGMT_STATUS_INVALID_PARAMS);
4622 	if (pending_find(MGMT_OP_SET_FAST_CONNECTABLE, hdev)) {
4623 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
/* No state change needed: answer with current settings. */
4628 	if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) {
4629 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4634 	if (!hdev_is_powered(hdev)) {
4635 		hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE);
4636 		err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE,
4638 		new_settings(hdev, sk);
4642 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev,
4649 	hci_req_init(&req, hdev);
4651 	__hci_req_write_fast_connectable(&req, cp->val);
4653 	err = hci_req_run(&req, fast_connectable_complete);
4655 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE,
4656 				      MGMT_STATUS_FAILED);
4657 		mgmt_pending_remove(cmd);
4661 	hci_dev_unlock(hdev);
/* Completion handler for Set BR/EDR: on failure roll back the
 * optimistically-set HCI_BREDR_ENABLED flag and return the error;
 * on success answer the pending command and emit New Settings.
 */
4666 static void set_bredr_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4668 	struct mgmt_pending_cmd *cmd;
4670 	BT_DBG("status 0x%02x", status);
4674 	cmd = pending_find(MGMT_OP_SET_BREDR, hdev);
4679 		u8 mgmt_err = mgmt_status(status);
4681 		/* We need to restore the flag if related HCI commands
4684 		hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED);
4686 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err);
4688 		send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev);
4689 		new_settings(hdev, cmd->sk);
4692 	mgmt_pending_remove(cmd);
4695 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_BREDR: enable/disable BR/EDR on a dual-mode
 * controller. Requires both BR/EDR and LE support and LE enabled.
 * Disabling while powered is rejected, as is re-enabling when a static
 * address or Secure Connections is configured (see comment below).
 */
4698 static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)
4700 	struct mgmt_mode *cp = data;
4701 	struct mgmt_pending_cmd *cmd;
4702 	struct hci_request req;
4705 	BT_DBG("request for %s", hdev->name);
4707 	if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev))
4708 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4709 				       MGMT_STATUS_NOT_SUPPORTED);
4711 	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4712 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4713 				       MGMT_STATUS_REJECTED);
4715 	if (cp->val != 0x00 && cp->val != 0x01)
4716 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4717 				       MGMT_STATUS_INVALID_PARAMS);
4721 	if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4722 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4726 	if (!hdev_is_powered(hdev)) {
/* When disabling BR/EDR while powered off, also clear every
 * BR/EDR-only setting that would otherwise be left dangling.
 */
4728 			hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
4729 			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
4730 			hci_dev_clear_flag(hdev, HCI_LINK_SECURITY);
4731 			hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE);
4732 			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
4735 		hci_dev_change_flag(hdev, HCI_BREDR_ENABLED);
4737 		err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev);
4741 		err = new_settings(hdev, sk);
4745 	/* Reject disabling when powered on */
4747 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4748 				      MGMT_STATUS_REJECTED);
4751 		/* When configuring a dual-mode controller to operate
4752 		 * with LE only and using a static address, then switching
4753 		 * BR/EDR back on is not allowed.
4755 		 * Dual-mode controllers shall operate with the public
4756 		 * address as its identity address for BR/EDR and LE. So
4757 		 * reject the attempt to create an invalid configuration.
4759 		 * The same restrictions applies when secure connections
4760 		 * has been enabled. For BR/EDR this is a controller feature
4761 		 * while for LE it is a host stack feature. This means that
4762 		 * switching BR/EDR back on when secure connections has been
4763 		 * enabled is not a supported transaction.
4765 		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4766 		    (bacmp(&hdev->static_addr, BDADDR_ANY) ||
4767 		     hci_dev_test_flag(hdev, HCI_SC_ENABLED))) {
4768 			err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4769 					      MGMT_STATUS_REJECTED);
4774 	if (pending_find(MGMT_OP_SET_BREDR, hdev)) {
4775 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR,
4780 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_BREDR, hdev, data, len);
4786 	/* We need to flip the bit already here so that
4787 	 * hci_req_update_adv_data generates the correct flags.
4789 	hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
4791 	hci_req_init(&req, hdev);
4793 	__hci_req_write_fast_connectable(&req, false);
4794 	__hci_req_update_scan(&req);
4796 	/* Since only the advertising data flags will change, there
4797 	 * is no need to update the scan response data.
4799 	__hci_req_update_adv_data(&req, hdev->cur_adv_instance);
4801 	err = hci_req_run(&req, set_bredr_complete);
4803 		mgmt_pending_remove(cmd);
4806 	hci_dev_unlock(hdev);
/* Completion handler for Write Secure Connections Support: map the
 * requested mode onto the HCI_SC_ENABLED / HCI_SC_ONLY flag pair
 * (off / enabled / SC-only), then answer the pending command and
 * emit New Settings.
 */
4810 static void sc_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
4812 	struct mgmt_pending_cmd *cmd;
4813 	struct mgmt_mode *cp;
4815 	BT_DBG("%s status %u", hdev->name, status);
4819 	cmd = pending_find(MGMT_OP_SET_SECURE_CONN, hdev);
4824 		mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
4825 				mgmt_status(status));
/* 0x00: SC fully off. */
4833 		hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
4834 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* 0x01: SC enabled, legacy pairing still allowed. */
4837 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4838 		hci_dev_clear_flag(hdev, HCI_SC_ONLY);
/* 0x02: SC-only mode. */
4841 		hci_dev_set_flag(hdev, HCI_SC_ENABLED);
4842 		hci_dev_set_flag(hdev, HCI_SC_ONLY);
4846 	send_settings_rsp(cmd->sk, MGMT_OP_SET_SECURE_CONN, hdev);
4847 	new_settings(hdev, cmd->sk);
4850 	mgmt_pending_remove(cmd);
4852 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_SECURE_CONN. val: 0x00 = off, 0x01 = on,
 * 0x02 = SC-only. If the controller can't be programmed (powered off,
 * no SC support, or BR/EDR disabled) only the flags are toggled;
 * otherwise HCI_OP_WRITE_SC_SUPPORT is issued via an HCI request.
 */
4855 static int set_secure_conn(struct sock *sk, struct hci_dev *hdev,
4856 			   void *data, u16 len)
4858 	struct mgmt_mode *cp = data;
4859 	struct mgmt_pending_cmd *cmd;
4860 	struct hci_request req;
4864 	BT_DBG("request for %s", hdev->name);
4866 	if (!lmp_sc_capable(hdev) &&
4867 	    !hci_dev_test_flag(hdev, HCI_LE_ENABLED))
4868 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4869 				       MGMT_STATUS_NOT_SUPPORTED);
/* BR/EDR SC requires SSP to be enabled first. */
4871 	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
4872 	    lmp_sc_capable(hdev) &&
4873 	    !hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
4874 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4875 				       MGMT_STATUS_REJECTED);
4877 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4878 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
4879 				       MGMT_STATUS_INVALID_PARAMS);
/* Flag-only path: controller cannot (or need not) be programmed. */
4883 	if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) ||
4884 	    !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
4888 			changed = !hci_dev_test_and_set_flag(hdev,
4890 			if (cp->val == 0x02)
4891 				hci_dev_set_flag(hdev, HCI_SC_ONLY);
4893 				hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4895 			changed = hci_dev_test_and_clear_flag(hdev,
4897 			hci_dev_clear_flag(hdev, HCI_SC_ONLY);
4900 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4905 		err = new_settings(hdev, sk);
4910 	if (pending_find(MGMT_OP_SET_SECURE_CONN, hdev)) {
4911 		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN,
/* Already in the requested state: just reply with current settings. */
4918 	if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
4919 	    (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
4920 		err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev);
4924 	cmd = mgmt_pending_add(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len);
4930 	hci_req_init(&req, hdev);
4931 	hci_req_add(&req, HCI_OP_WRITE_SC_SUPPORT, 1, &val);
4932 	err = hci_req_run(&req, sc_enable_complete);
4934 		mgmt_pending_remove(cmd);
4939 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_DEBUG_KEYS. val: 0x00 = discard debug keys,
 * 0x01 = keep them, 0x02 = keep and actively use debug keys (also
 * toggles the controller's SSP debug mode when powered with SSP on).
 */
4943 static int set_debug_keys(struct sock *sk, struct hci_dev *hdev,
4944 			  void *data, u16 len)
4946 	struct mgmt_mode *cp = data;
4947 	bool changed, use_changed;
4950 	BT_DBG("request for %s", hdev->name);
4952 	if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02)
4953 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS,
4954 				       MGMT_STATUS_INVALID_PARAMS);
4959 		changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS);
4961 		changed = hci_dev_test_and_clear_flag(hdev,
4962 						      HCI_KEEP_DEBUG_KEYS);
4964 	if (cp->val == 0x02)
4965 		use_changed = !hci_dev_test_and_set_flag(hdev,
4966 							 HCI_USE_DEBUG_KEYS);
4968 		use_changed = hci_dev_test_and_clear_flag(hdev,
4969 							  HCI_USE_DEBUG_KEYS);
/* Sync SSP debug mode on the controller when usage actually changed. */
4971 	if (hdev_is_powered(hdev) && use_changed &&
4972 	    hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
4973 		u8 mode = (cp->val == 0x02) ? 0x01 : 0x00;
4974 		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE,
4975 			     sizeof(mode), &mode);
4978 	err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev);
4983 	err = new_settings(hdev, sk);
4986 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_SET_PRIVACY. privacy: 0x00 = off, 0x01 = on,
 * 0x02 = limited privacy. Stores the supplied IRK and arms RPA
 * regeneration; only allowed while the controller is powered off.
 */
4990 static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data,
4993 	struct mgmt_cp_set_privacy *cp = cp_data;
4997 	BT_DBG("request for %s", hdev->name);
4999 	if (!lmp_le_capable(hdev))
5000 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5001 				       MGMT_STATUS_NOT_SUPPORTED);
5003 	if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02)
5004 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5005 				       MGMT_STATUS_INVALID_PARAMS);
5007 	if (hdev_is_powered(hdev))
5008 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY,
5009 				       MGMT_STATUS_REJECTED);
5013 	/* If user space supports this command it is also expected to
5014 	 * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag.
5016 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
/* Enabling: store IRK, force RPA rotation on next use. */
5019 		changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY);
5020 		memcpy(hdev->irk, cp->irk, sizeof(hdev->irk));
5021 		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
5022 		hci_adv_instances_set_rpa_expired(hdev, true);
5023 		if (cp->privacy == 0x02)
5024 			hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY);
5026 			hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
/* Disabling: wipe the IRK and clear all privacy-related flags. */
5028 		changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY);
5029 		memset(hdev->irk, 0, sizeof(hdev->irk));
5030 		hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED);
5031 		hci_adv_instances_set_rpa_expired(hdev, false);
5032 		hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY);
5035 	err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev);
5040 	err = new_settings(hdev, sk);
5043 	hci_dev_unlock(hdev);
/* Validate a single IRK entry from Load IRKs: the address must be an
 * LE identity address (public, or static random with the two most
 * significant bits set).
 */
5047 static bool irk_is_valid(struct mgmt_irk_info *irk)
5049 	switch (irk->addr.type) {
5050 	case BDADDR_LE_PUBLIC:
5053 	case BDADDR_LE_RANDOM:
5054 		/* Two most significant bits shall be set */
5055 		if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_IRKS: validate count/length and every
 * entry, then replace the SMP IRK store with the supplied list
 * (skipping any blocked keys) and enable RPA resolving.
 */
5063 static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,
5066 	struct mgmt_cp_load_irks *cp = cp_data;
/* Upper bound so irk_count can never make expected_len overflow u16. */
5067 	const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) /
5068 				   sizeof(struct mgmt_irk_info));
5069 	u16 irk_count, expected_len;
5072 	BT_DBG("request for %s", hdev->name);
5074 	if (!lmp_le_capable(hdev))
5075 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5076 				       MGMT_STATUS_NOT_SUPPORTED);
5078 	irk_count = __le16_to_cpu(cp->irk_count);
5079 	if (irk_count > max_irk_count) {
5080 		bt_dev_err(hdev, "load_irks: too big irk_count value %u",
5082 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5083 				       MGMT_STATUS_INVALID_PARAMS);
/* The payload length must exactly match the declared entry count. */
5086 	expected_len = struct_size(cp, irks, irk_count);
5087 	if (expected_len != len) {
5088 		bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes",
5090 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS,
5091 				       MGMT_STATUS_INVALID_PARAMS);
5094 	BT_DBG("%s irk_count %u", hdev->name, irk_count);
/* First pass: reject the whole request if any entry is invalid. */
5096 	for (i = 0; i < irk_count; i++) {
5097 		struct mgmt_irk_info *key = &cp->irks[i];
5099 		if (!irk_is_valid(key))
5100 			return mgmt_cmd_status(sk, hdev->id,
5102 					       MGMT_STATUS_INVALID_PARAMS);
5107 	hci_smp_irks_clear(hdev);
/* Second pass: load entries, skipping administratively blocked IRKs. */
5109 	for (i = 0; i < irk_count; i++) {
5110 		struct mgmt_irk_info *irk = &cp->irks[i];
5112 		if (hci_is_blocked_key(hdev,
5113 				       HCI_BLOCKED_KEY_TYPE_IRK,
5115 			bt_dev_warn(hdev, "Skipping blocked IRK for %pMR",
5120 		hci_add_irk(hdev, &irk->addr.bdaddr,
5121 			    le_addr_type(irk->addr.type), irk->val,
5125 	hci_dev_set_flag(hdev, HCI_RPA_RESOLVING);
5127 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0);
5129 	hci_dev_unlock(hdev);
/* Validate a single LTK entry from Load Long Term Keys: master must be
 * 0x00/0x01 and the address must be an LE identity address (public, or
 * static random with the two most significant bits set).
 */
5134 static bool ltk_is_valid(struct mgmt_ltk_info *key)
5136 	if (key->master != 0x00 && key->master != 0x01)
5139 	switch (key->addr.type) {
5140 	case BDADDR_LE_PUBLIC:
5143 	case BDADDR_LE_RANDOM:
5144 		/* Two most significant bits shall be set */
5145 		if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0)
/* Handler for MGMT_OP_LOAD_LONG_TERM_KEYS: validate count/length and
 * every entry, then replace the SMP LTK store with the supplied list,
 * mapping each mgmt key type onto the corresponding SMP key type and
 * authentication level (blocked keys are skipped).
 */
5153 static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
5154 			       void *cp_data, u16 len)
5156 	struct mgmt_cp_load_long_term_keys *cp = cp_data;
/* Upper bound so key_count can never make expected_len overflow u16. */
5157 	const u16 max_key_count = ((U16_MAX - sizeof(*cp)) /
5158 				   sizeof(struct mgmt_ltk_info));
5159 	u16 key_count, expected_len;
5162 	BT_DBG("request for %s", hdev->name);
5164 	if (!lmp_le_capable(hdev))
5165 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5166 				       MGMT_STATUS_NOT_SUPPORTED);
5168 	key_count = __le16_to_cpu(cp->key_count);
5169 	if (key_count > max_key_count) {
5170 		bt_dev_err(hdev, "load_ltks: too big key_count value %u",
5172 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5173 				       MGMT_STATUS_INVALID_PARAMS);
5176 	expected_len = struct_size(cp, keys, key_count);
5177 	if (expected_len != len) {
5178 		bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes",
5180 		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS,
5181 				       MGMT_STATUS_INVALID_PARAMS);
5184 	BT_DBG("%s key_count %u", hdev->name, key_count);
/* First pass: reject the whole request if any entry is invalid. */
5186 	for (i = 0; i < key_count; i++) {
5187 		struct mgmt_ltk_info *key = &cp->keys[i];
5189 		if (!ltk_is_valid(key))
5190 			return mgmt_cmd_status(sk, hdev->id,
5191 					       MGMT_OP_LOAD_LONG_TERM_KEYS,
5192 					       MGMT_STATUS_INVALID_PARAMS);
5197 	hci_smp_ltks_clear(hdev);
5199 	for (i = 0; i < key_count; i++) {
5200 		struct mgmt_ltk_info *key = &cp->keys[i];
5201 		u8 type, authenticated;
5203 		if (hci_is_blocked_key(hdev,
5204 				       HCI_BLOCKED_KEY_TYPE_LTK,
5206 			bt_dev_warn(hdev, "Skipping blocked LTK for %pMR",
/* Map mgmt key type to SMP type + authenticated flag; for legacy
 * (non-P256) keys the master bit selects the key's role variant.
 */
5211 		switch (key->type) {
5212 		case MGMT_LTK_UNAUTHENTICATED:
5213 			authenticated = 0x00;
5214 			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5216 		case MGMT_LTK_AUTHENTICATED:
5217 			authenticated = 0x01;
5218 			type = key->master ? SMP_LTK : SMP_LTK_SLAVE;
5220 		case MGMT_LTK_P256_UNAUTH:
5221 			authenticated = 0x00;
5222 			type = SMP_LTK_P256;
5224 		case MGMT_LTK_P256_AUTH:
5225 			authenticated = 0x01;
5226 			type = SMP_LTK_P256;
5228 		case MGMT_LTK_P256_DEBUG:
5229 			authenticated = 0x00;
5230 			type = SMP_LTK_P256_DEBUG;
5236 		hci_add_ltk(hdev, &key->addr.bdaddr,
5237 			    le_addr_type(key->addr.type), type, authenticated,
5238 			    key->val, key->enc_size, key->ediv, key->rand);
5241 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
5244 	hci_dev_unlock(hdev);
/* cmd_complete callback for Get Connection Information: build the
 * reply from the hci_conn cached values (or INVALID sentinels on
 * failure), send it, and drop the connection reference taken when the
 * pending command was created.
 */
5249 static int conn_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5251 	struct hci_conn *conn = cmd->user_data;
5252 	struct mgmt_rp_get_conn_info rp;
5255 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5257 	if (status == MGMT_STATUS_SUCCESS) {
5258 		rp.rssi = conn->rssi;
5259 		rp.tx_power = conn->tx_power;
5260 		rp.max_tx_power = conn->max_tx_power;
5262 		rp.rssi = HCI_RSSI_INVALID;
5263 		rp.tx_power = HCI_TX_POWER_INVALID;
5264 		rp.max_tx_power = HCI_TX_POWER_INVALID;
5267 	err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO,
5268 				status, &rp, sizeof(rp));
5270 	hci_conn_drop(conn);
/* Completion handler for the Read RSSI / Read TX Power request issued
 * by get_conn_info(): recover the connection handle from the last sent
 * command, locate the matching pending command and complete it.
 */
5276 static void conn_info_refresh_complete(struct hci_dev *hdev, u8 hci_status,
5279 	struct hci_cp_read_rssi *cp;
5280 	struct mgmt_pending_cmd *cmd;
5281 	struct hci_conn *conn;
5285 	BT_DBG("status 0x%02x", hci_status);
5289 	/* Commands sent in request are either Read RSSI or Read Transmit Power
5290 	 * Level so we check which one was last sent to retrieve connection
5291 	 * handle. Both commands have handle as first parameter so it's safe to
5292 	 * cast data on the same command struct.
5294 	 * First command sent is always Read RSSI and we fail only if it fails.
5295 	 * In other case we simply override error to indicate success as we
5296 	 * already remembered if TX power value is actually valid.
5298 	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_RSSI);
5300 		cp = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
5301 		status = MGMT_STATUS_SUCCESS;
5303 		status = mgmt_status(hci_status);
5307 		bt_dev_err(hdev, "invalid sent_cmd in conn_info response");
5311 	handle = __le16_to_cpu(cp->handle);
5312 	conn = hci_conn_hash_lookup_handle(hdev, handle);
5314 		bt_dev_err(hdev, "unknown handle (%d) in conn_info response",
5319 	cmd = pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn);
5323 	cmd->cmd_complete(cmd, status);
5324 	mgmt_pending_remove(cmd);
5327 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CONN_INFO: return RSSI and TX power for a
 * connected device. Cached values from hci_conn are returned directly
 * while fresh; otherwise Read RSSI (and Read TX Power as needed) are
 * queued and the reply is sent from conn_info_refresh_complete().
 */
5330 static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data,
5333 	struct mgmt_cp_get_conn_info *cp = data;
5334 	struct mgmt_rp_get_conn_info rp;
5335 	struct hci_conn *conn;
5336 	unsigned long conn_info_age;
5339 	BT_DBG("%s", hdev->name);
5341 	memset(&rp, 0, sizeof(rp));
5342 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5343 	rp.addr.type = cp->addr.type;
5345 	if (!bdaddr_type_is_valid(cp->addr.type))
5346 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5347 					 MGMT_STATUS_INVALID_PARAMS,
5352 	if (!hdev_is_powered(hdev)) {
5353 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5354 					MGMT_STATUS_NOT_POWERED, &rp,
/* Look up the connection by transport implied by the address type. */
5359 	if (cp->addr.type == BDADDR_BREDR)
5360 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5363 		conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
5365 	if (!conn || conn->state != BT_CONNECTED) {
5366 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5367 					MGMT_STATUS_NOT_CONNECTED, &rp,
5372 	if (pending_find_data(MGMT_OP_GET_CONN_INFO, hdev, conn)) {
5373 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5374 					MGMT_STATUS_BUSY, &rp, sizeof(rp));
5378 	/* To avoid client trying to guess when to poll again for information we
5379 	 * calculate conn info age as random value between min/max set in hdev.
5381 	conn_info_age = hdev->conn_info_min_age +
5382 			prandom_u32_max(hdev->conn_info_max_age -
5383 					hdev->conn_info_min_age);
5385 	/* Query controller to refresh cached values if they are too old or were
5388 	if (time_after(jiffies, conn->conn_info_timestamp +
5389 		       msecs_to_jiffies(conn_info_age)) ||
5390 	    !conn->conn_info_timestamp) {
5391 		struct hci_request req;
5392 		struct hci_cp_read_tx_power req_txp_cp;
5393 		struct hci_cp_read_rssi req_rssi_cp;
5394 		struct mgmt_pending_cmd *cmd;
5396 		hci_req_init(&req, hdev);
5397 		req_rssi_cp.handle = cpu_to_le16(conn->handle);
5398 		hci_req_add(&req, HCI_OP_READ_RSSI, sizeof(req_rssi_cp),
5401 		/* For LE links TX power does not change thus we don't need to
5402 		 * query for it once value is known.
5404 		if (!bdaddr_type_is_le(cp->addr.type) ||
5405 		    conn->tx_power == HCI_TX_POWER_INVALID) {
5406 			req_txp_cp.handle = cpu_to_le16(conn->handle);
/* type 0x00: current TX power level. */
5407 			req_txp_cp.type = 0x00;
5408 			hci_req_add(&req, HCI_OP_READ_TX_POWER,
5409 				    sizeof(req_txp_cp), &req_txp_cp);
5412 		/* Max TX power needs to be read only once per connection */
5413 		if (conn->max_tx_power == HCI_TX_POWER_INVALID) {
5414 			req_txp_cp.handle = cpu_to_le16(conn->handle);
/* type 0x01: maximum TX power level. */
5415 			req_txp_cp.type = 0x01;
5416 			hci_req_add(&req, HCI_OP_READ_TX_POWER,
5417 				    sizeof(req_txp_cp), &req_txp_cp);
5420 		err = hci_req_run(&req, conn_info_refresh_complete);
5424 		cmd = mgmt_pending_add(sk, MGMT_OP_GET_CONN_INFO, hdev,
/* Hold the connection until the pending command completes. */
5431 		hci_conn_hold(conn);
5432 		cmd->user_data = hci_conn_get(conn);
5433 		cmd->cmd_complete = conn_info_cmd_complete;
5435 		conn->conn_info_timestamp = jiffies;
5437 		/* Cache is valid, just reply with values cached in hci_conn */
5438 		rp.rssi = conn->rssi;
5439 		rp.tx_power = conn->tx_power;
5440 		rp.max_tx_power = conn->max_tx_power;
5442 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO,
5443 					MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
5447 	hci_dev_unlock(hdev);
/* cmd_complete callback for Get Clock Information: fill in the local
 * clock (and, when a connection is attached, the piconet clock and
 * accuracy), send the reply and release the held references.
 */
5451 static int clock_info_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status)
5453 	struct hci_conn *conn = cmd->user_data;
5454 	struct mgmt_rp_get_clock_info rp;
5455 	struct hci_dev *hdev;
5458 	memset(&rp, 0, sizeof(rp));
5459 	memcpy(&rp.addr, cmd->param, sizeof(rp.addr));
5464 	hdev = hci_dev_get(cmd->index);
5466 		rp.local_clock = cpu_to_le32(hdev->clock);
5471 		rp.piconet_clock = cpu_to_le32(conn->clock);
5472 		rp.accuracy = cpu_to_le16(conn->clock_accuracy);
5476 	err = mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp,
5480 	hci_conn_drop(conn);
/* Completion handler for the Read Clock request issued by
 * get_clock_info(): recover the connection (when the piconet clock was
 * read, i.e. hci_cp->which is non-zero), find the matching pending
 * command and complete it with the mapped status.
 */
5487 static void get_clock_info_complete(struct hci_dev *hdev, u8 status, u16 opcode)
5489 	struct hci_cp_read_clock *hci_cp;
5490 	struct mgmt_pending_cmd *cmd;
5491 	struct hci_conn *conn;
5493 	BT_DBG("%s status %u", hdev->name, status);
5497 	hci_cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
5501 	if (hci_cp->which) {
5502 		u16 handle = __le16_to_cpu(hci_cp->handle);
5503 		conn = hci_conn_hash_lookup_handle(hdev, handle);
5508 	cmd = pending_find_data(MGMT_OP_GET_CLOCK_INFO, hdev, conn);
5512 	cmd->cmd_complete(cmd, mgmt_status(status));
5513 	mgmt_pending_remove(cmd);
5516 	hci_dev_unlock(hdev);
/* Handler for MGMT_OP_GET_CLOCK_INFO: read the local clock and, when a
 * BR/EDR address is given and connected, the piconet clock. The reply
 * is sent from clock_info_cmd_complete() once the HCI request finishes.
 */
5519 static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data,
5522 	struct mgmt_cp_get_clock_info *cp = data;
5523 	struct mgmt_rp_get_clock_info rp;
5524 	struct hci_cp_read_clock hci_cp;
5525 	struct mgmt_pending_cmd *cmd;
5526 	struct hci_request req;
5527 	struct hci_conn *conn;
5530 	BT_DBG("%s", hdev->name);
5532 	memset(&rp, 0, sizeof(rp));
5533 	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
5534 	rp.addr.type = cp->addr.type;
/* Clock information is a BR/EDR-only concept. */
5536 	if (cp->addr.type != BDADDR_BREDR)
5537 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5538 					 MGMT_STATUS_INVALID_PARAMS,
5543 	if (!hdev_is_powered(hdev)) {
5544 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO,
5545 					MGMT_STATUS_NOT_POWERED, &rp,
/* A non-ANY address requests the piconet clock of that connection. */
5550 	if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5551 		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
5553 		if (!conn || conn->state != BT_CONNECTED) {
5554 			err = mgmt_cmd_complete(sk, hdev->id,
5555 						MGMT_OP_GET_CLOCK_INFO,
5556 						MGMT_STATUS_NOT_CONNECTED,
5564 	cmd = mgmt_pending_add(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len);
5570 	cmd->cmd_complete = clock_info_cmd_complete;
5572 	hci_req_init(&req, hdev);
/* First read: local clock (hci_cp zeroed, which == 0x00). */
5574 	memset(&hci_cp, 0, sizeof(hci_cp));
5575 	hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5578 		hci_conn_hold(conn);
5579 		cmd->user_data = hci_conn_get(conn);
5581 		hci_cp.handle = cpu_to_le16(conn->handle);
5582 		hci_cp.which = 0x01; /* Piconet clock */
5583 		hci_req_add(&req, HCI_OP_READ_CLOCK, sizeof(hci_cp), &hci_cp);
5586 	err = hci_req_run(&req, get_clock_info_complete);
5588 		mgmt_pending_remove(cmd);
5591 	hci_dev_unlock(hdev);
/* Return whether an LE connection to the given address/type exists and
 * is fully established (state BT_CONNECTED).
 */
5595 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
5597 	struct hci_conn *conn;
5599 	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
5603 	if (conn->dst_type != type)
5606 	if (conn->state != BT_CONNECTED)
/* This function requires the caller holds hdev->lock */
/* Set (creating if necessary) the auto-connect policy for an LE
 * connection-parameter entry and move it onto the matching action list
 * (pend_le_conns / pend_le_reports) for the background-scan machinery.
 */
5613 static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr,
5614 			       u8 addr_type, u8 auto_connect)
5616 	struct hci_conn_params *params;
5618 	params = hci_conn_params_add(hdev, addr, addr_type);
/* Nothing to do when the policy is unchanged. */
5622 	if (params->auto_connect == auto_connect)
/* Detach from any previous action list before re-filing the entry. */
5625 	list_del_init(&params->action);
5627 	switch (auto_connect) {
5628 	case HCI_AUTO_CONN_DISABLED:
5629 	case HCI_AUTO_CONN_LINK_LOSS:
5630 		/* If auto connect is being disabled when we're trying to
5631 		 * connect to device, keep connecting.
5633 		if (params->explicit_connect)
5634 			list_add(&params->action, &hdev->pend_le_conns);
5636 	case HCI_AUTO_CONN_REPORT:
5637 		if (params->explicit_connect)
5638 			list_add(&params->action, &hdev->pend_le_conns);
5640 			list_add(&params->action, &hdev->pend_le_reports);
5642 	case HCI_AUTO_CONN_DIRECT:
5643 	case HCI_AUTO_CONN_ALWAYS:
/* Only queue a pending connection if none is established yet. */
5644 		if (!is_connected(hdev, addr, addr_type))
5645 			list_add(&params->action, &hdev->pend_le_conns);
5649 	params->auto_connect = auto_connect;
5651 	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
/* Emit MGMT_EV_DEVICE_ADDED to all mgmt sockets except the
 * originator sk.
 */
5657 static void device_added(struct sock *sk, struct hci_dev *hdev,
5658 			 bdaddr_t *bdaddr, u8 type, u8 action)
5660 	struct mgmt_ev_device_added ev;
5662 	bacpy(&ev.addr.bdaddr, bdaddr);
5663 	ev.addr.type = type;
5666 	mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk);
/* Handler for MGMT_OP_ADD_DEVICE. action: 0x00 = background scan
 * (report only), 0x01 = allow incoming connection / direct connect,
 * 0x02 = auto-connect. BR/EDR addresses go onto the whitelist; LE
 * identity addresses get connection parameters with the corresponding
 * auto-connect policy.
 */
5669 static int add_device(struct sock *sk, struct hci_dev *hdev,
5670 		      void *data, u16 len)
5672 	struct mgmt_cp_add_device *cp = data;
5673 	u8 auto_conn, addr_type;
5676 	BT_DBG("%s", hdev->name);
5678 	if (!bdaddr_type_is_valid(cp->addr.type) ||
5679 	    !bacmp(&cp->addr.bdaddr, BDADDR_ANY))
5680 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5681 					 MGMT_STATUS_INVALID_PARAMS,
5682 					 &cp->addr, sizeof(cp->addr));
5684 	if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02)
5685 		return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5686 					 MGMT_STATUS_INVALID_PARAMS,
5687 					 &cp->addr, sizeof(cp->addr));
5691 	if (cp->addr.type == BDADDR_BREDR) {
5692 		/* Only incoming connections action is supported for now */
5693 		if (cp->action != 0x01) {
5694 			err = mgmt_cmd_complete(sk, hdev->id,
5696 						MGMT_STATUS_INVALID_PARAMS,
5697 						&cp->addr, sizeof(cp->addr));
5701 		err = hci_bdaddr_list_add(&hdev->whitelist, &cp->addr.bdaddr,
/* Whitelist changed: refresh page-scan settings. */
5706 		hci_req_update_scan(hdev);
5711 	addr_type = le_addr_type(cp->addr.type);
/* Map mgmt action onto the LE auto-connect policy. */
5713 	if (cp->action == 0x02)
5714 		auto_conn = HCI_AUTO_CONN_ALWAYS;
5715 	else if (cp->action == 0x01)
5716 		auto_conn = HCI_AUTO_CONN_DIRECT;
5718 		auto_conn = HCI_AUTO_CONN_REPORT;
5720 	/* Kernel internally uses conn_params with resolvable private
5721 	 * address, but Add Device allows only identity addresses.
5722 	 * Make sure it is enforced before calling
5723 	 * hci_conn_params_lookup.
5725 	if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
5726 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5727 					MGMT_STATUS_INVALID_PARAMS,
5728 					&cp->addr, sizeof(cp->addr));
5732 	/* If the connection parameters don't exist for this device,
5733 	 * they will be created and configured with defaults.
5735 	if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type,
5737 		err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5738 					MGMT_STATUS_FAILED, &cp->addr,
5743 	hci_update_background_scan(hdev);
5746 	device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action);
5748 	err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE,
5749 				MGMT_STATUS_SUCCESS, &cp->addr,
5753 	hci_dev_unlock(hdev);
/* Broadcast MGMT_EV_DEVICE_REMOVED for @bdaddr/@type to mgmt sockets,
 * skipping the originating socket @sk.
 */
5757 static void device_removed(struct sock *sk, struct hci_dev *hdev,
5758 bdaddr_t *bdaddr, u8 type)
5760 struct mgmt_ev_device_removed ev;
5762 bacpy(&ev.addr.bdaddr, bdaddr);
5763 ev.addr.type = type;
5765 mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk);
/* MGMT Remove Device command handler.
 *
 * Non-wildcard address: removes a single entry -- from hdev->whitelist for
 * BDADDR_BREDR, or the matching hci_conn_params for LE identity addresses
 * (rejecting entries that only exist as DISABLED/EXPLICIT placeholders).
 * Wildcard address (BDADDR_ANY, type must be 0): clears the whole
 * whitelist and all removable LE conn_params.  Emits DEVICE_REMOVED events
 * and re-evaluates scanning.  NOTE(review): error-path lines and closing
 * braces are elided in this excerpt.
 */
5768 static int remove_device(struct sock *sk, struct hci_dev *hdev,
5769 void *data, u16 len)
5771 struct mgmt_cp_remove_device *cp = data;
5774 BT_DBG("%s", hdev->name);
/* Single-device removal path (address is not the wildcard). */
5778 if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) {
5779 struct hci_conn_params *params;
5782 if (!bdaddr_type_is_valid(cp->addr.type)) {
5783 err = mgmt_cmd_complete(sk, hdev->id,
5784 MGMT_OP_REMOVE_DEVICE,
5785 MGMT_STATUS_INVALID_PARAMS,
5786 &cp->addr, sizeof(cp->addr));
5790 if (cp->addr.type == BDADDR_BREDR) {
5791 err = hci_bdaddr_list_del(&hdev->whitelist,
5795 err = mgmt_cmd_complete(sk, hdev->id,
5796 MGMT_OP_REMOVE_DEVICE,
5797 MGMT_STATUS_INVALID_PARAMS,
5803 hci_req_update_scan(hdev);
5805 device_removed(sk, hdev, &cp->addr.bdaddr,
5810 addr_type = le_addr_type(cp->addr.type);
5812 /* Kernel internally uses conn_params with resolvable private
5813 * address, but Remove Device allows only identity addresses.
5814 * Make sure it is enforced before calling
5815 * hci_conn_params_lookup.
5817 if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
5818 err = mgmt_cmd_complete(sk, hdev->id,
5819 MGMT_OP_REMOVE_DEVICE,
5820 MGMT_STATUS_INVALID_PARAMS,
5821 &cp->addr, sizeof(cp->addr));
5825 params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
5828 err = mgmt_cmd_complete(sk, hdev->id,
5829 MGMT_OP_REMOVE_DEVICE,
5830 MGMT_STATUS_INVALID_PARAMS,
5831 &cp->addr, sizeof(cp->addr));
/* DISABLED/EXPLICIT params were never added via Add Device, so there is
 * nothing for userspace to remove here.
 */
5835 if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
5836 params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
5837 err = mgmt_cmd_complete(sk, hdev->id,
5838 MGMT_OP_REMOVE_DEVICE,
5839 MGMT_STATUS_INVALID_PARAMS,
5840 &cp->addr, sizeof(cp->addr));
/* NOTE(review): "¶ms" below is mojibake of "&params" (HTML entity
 * corruption) -- restore the original bytes when fixing encoding.
 */
5844 list_del(¶ms->action);
5845 list_del(¶ms->list);
5847 hci_update_background_scan(hdev);
5849 device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
/* Wildcard removal path: clear everything. */
5851 struct hci_conn_params *p, *tmp;
5852 struct bdaddr_list *b, *btmp;
/* For BDADDR_ANY the address type must be 0. */
5854 if (cp->addr.type) {
5855 err = mgmt_cmd_complete(sk, hdev->id,
5856 MGMT_OP_REMOVE_DEVICE,
5857 MGMT_STATUS_INVALID_PARAMS,
5858 &cp->addr, sizeof(cp->addr));
5862 list_for_each_entry_safe(b, btmp, &hdev->whitelist, list) {
5863 device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
5868 hci_req_update_scan(hdev);
5870 list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
5871 if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
5873 device_removed(sk, hdev, &p->addr, p->addr_type);
/* Keep params alive (demoted to EXPLICIT) while an explicit connect
 * attempt is still pending.
 */
5874 if (p->explicit_connect) {
5875 p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
5878 list_del(&p->action);
5883 BT_DBG("All LE connection parameters were removed");
5885 hci_update_background_scan(hdev);
5889 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
5890 MGMT_STATUS_SUCCESS, &cp->addr,
5893 hci_dev_unlock(hdev);
/* MGMT Load Connection Parameters command handler.
 *
 * Requires an LE-capable controller.  Bounds-checks the little-endian
 * param_count against the maximum that fits in a u16-sized message, checks
 * the exact expected payload length via struct_size(), clears previously
 * disabled params, then installs each valid entry (public/random LE
 * address, intervals/latency/timeout validated by hci_check_conn_params).
 * Invalid entries are logged and skipped rather than failing the command.
 */
5897 static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
5900 struct mgmt_cp_load_conn_param *cp = data;
/* Upper bound so param_count * entry size can never exceed a u16 length. */
5901 const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
5902 sizeof(struct mgmt_conn_param));
5903 u16 param_count, expected_len;
5906 if (!lmp_le_capable(hdev))
5907 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5908 MGMT_STATUS_NOT_SUPPORTED);
5910 param_count = __le16_to_cpu(cp->param_count);
5911 if (param_count > max_param_count) {
5912 bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
5914 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5915 MGMT_STATUS_INVALID_PARAMS);
/* The payload must be exactly header + param_count entries. */
5918 expected_len = struct_size(cp, params, param_count);
5919 if (expected_len != len) {
5920 bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
5922 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
5923 MGMT_STATUS_INVALID_PARAMS);
5926 BT_DBG("%s param_count %u", hdev->name, param_count);
5930 hci_conn_params_clear_disabled(hdev);
5932 for (i = 0; i < param_count; i++) {
5933 struct mgmt_conn_param *param = &cp->params[i];
5934 struct hci_conn_params *hci_param;
5935 u16 min, max, latency, timeout;
/* NOTE(review): "¶m" below is mojibake of "&param" (HTML entity
 * corruption) -- restore the original bytes when fixing encoding.
 */
5938 BT_DBG("Adding %pMR (type %u)", ¶m->addr.bdaddr,
5941 if (param->addr.type == BDADDR_LE_PUBLIC) {
5942 addr_type = ADDR_LE_DEV_PUBLIC;
5943 } else if (param->addr.type == BDADDR_LE_RANDOM) {
5944 addr_type = ADDR_LE_DEV_RANDOM;
5946 bt_dev_err(hdev, "ignoring invalid connection parameters");
5950 min = le16_to_cpu(param->min_interval);
5951 max = le16_to_cpu(param->max_interval);
5952 latency = le16_to_cpu(param->latency);
5953 timeout = le16_to_cpu(param->timeout);
5955 BT_DBG("min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
5956 min, max, latency, timeout);
5958 if (hci_check_conn_params(min, max, latency, timeout) < 0) {
5959 bt_dev_err(hdev, "ignoring invalid connection parameters");
5963 hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr,
5966 bt_dev_err(hdev, "failed to add connection parameters");
5970 hci_param->conn_min_interval = min;
5971 hci_param->conn_max_interval = max;
5972 hci_param->conn_latency = latency;
5973 hci_param->supervision_timeout = timeout;
5976 hci_dev_unlock(hdev);
5978 return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
/* MGMT Set External Configuration command handler (unconfigured devices).
 *
 * Rejected while powered; cp->config must be 0x00/0x01 and the controller
 * must declare HCI_QUIRK_EXTERNAL_CONFIG.  Toggles HCI_EXT_CONFIGURED and,
 * when the configured state actually changes, re-registers the index
 * (removed + added) and either kicks off power-on autoconfiguration or
 * marks the device HCI_RAW.  NOTE(review): intermediate lines (changed
 * checks, braces) are elided in this excerpt.
 */
5982 static int set_external_config(struct sock *sk, struct hci_dev *hdev,
5983 void *data, u16 len)
5985 struct mgmt_cp_set_external_config *cp = data;
5989 BT_DBG("%s", hdev->name);
5991 if (hdev_is_powered(hdev))
5992 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5993 MGMT_STATUS_REJECTED);
5995 if (cp->config != 0x00 && cp->config != 0x01)
5996 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
5997 MGMT_STATUS_INVALID_PARAMS);
5999 if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
6000 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
6001 MGMT_STATUS_NOT_SUPPORTED);
/* Track whether the flag actually flipped so events are only sent then. */
6006 changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
6008 changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
6010 err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
6017 err = new_options(hdev, sk);
6019 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
6020 mgmt_index_removed(hdev);
6022 if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
6023 hci_dev_set_flag(hdev, HCI_CONFIG);
6024 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6026 queue_work(hdev->req_workqueue, &hdev->power_on);
6028 set_bit(HCI_RAW, &hdev->flags);
6029 mgmt_index_added(hdev);
6034 hci_dev_unlock(hdev);
/* MGMT Set Public Address command handler (unconfigured devices).
 *
 * Rejected while powered; the address must not be BDADDR_ANY and the
 * driver must provide a set_bdaddr callback.  Stores the new public
 * address and, if it changed on an unconfigured controller that is now
 * fully configured, transitions the index to configured state and starts
 * the power-on work.  NOTE(review): some lines (changed checks, braces)
 * are elided in this excerpt.
 */
6038 static int set_public_address(struct sock *sk, struct hci_dev *hdev,
6039 void *data, u16 len)
6041 struct mgmt_cp_set_public_address *cp = data;
6045 BT_DBG("%s", hdev->name);
6047 if (hdev_is_powered(hdev))
6048 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6049 MGMT_STATUS_REJECTED);
6051 if (!bacmp(&cp->bdaddr, BDADDR_ANY))
6052 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6053 MGMT_STATUS_INVALID_PARAMS);
6055 if (!hdev->set_bdaddr)
6056 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
6057 MGMT_STATUS_NOT_SUPPORTED);
6061 changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
6062 bacpy(&hdev->public_addr, &cp->bdaddr);
6064 err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
6071 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
6072 err = new_options(hdev, sk);
6074 if (is_configured(hdev)) {
6075 mgmt_index_removed(hdev);
6077 hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
6079 hci_dev_set_flag(hdev, HCI_CONFIG);
6080 hci_dev_set_flag(hdev, HCI_AUTO_OFF);
6082 queue_work(hdev->req_workqueue, &hdev->power_on);
6086 hci_dev_unlock(hdev);
/* HCI request completion callback for Read Local OOB Extended Data.
 *
 * Matches the pending MGMT_OP_READ_LOCAL_OOB_EXT_DATA command, parses the
 * controller reply (legacy OOB: C192/R192 only; extended OOB: also
 * C256/R256 unless HCI_SC_ONLY restricts to the 256-bit pair), packs the
 * values as EIR fields into an allocated mgmt reply, sends the command
 * complete, and on success broadcasts LOCAL_OOB_DATA_UPDATED to sockets
 * that opted in.  NOTE(review): pointer setup for h192/r192/h256/r256 and
 * cleanup lines are elided in this excerpt.
 */
6090 static void read_local_oob_ext_data_complete(struct hci_dev *hdev, u8 status,
6091 u16 opcode, struct sk_buff *skb)
6093 const struct mgmt_cp_read_local_oob_ext_data *mgmt_cp;
6094 struct mgmt_rp_read_local_oob_ext_data *mgmt_rp;
6095 u8 *h192, *r192, *h256, *r256;
6096 struct mgmt_pending_cmd *cmd;
6100 BT_DBG("%s status %u", hdev->name, status);
6102 cmd = pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev);
6106 mgmt_cp = cmd->param;
6109 status = mgmt_status(status);
/* Legacy (non-SC) controller reply: single 192-bit hash/randomizer. */
6116 } else if (opcode == HCI_OP_READ_LOCAL_OOB_DATA) {
6117 struct hci_rp_read_local_oob_data *rp;
6119 if (skb->len != sizeof(*rp)) {
6120 status = MGMT_STATUS_FAILED;
6123 status = MGMT_STATUS_SUCCESS;
6124 rp = (void *)skb->data;
/* EIR: class-of-dev (5) + hash field (18) + randomizer field (18). */
6126 eir_len = 5 + 18 + 18;
/* Extended reply: both 192-bit and 256-bit pairs available. */
6133 struct hci_rp_read_local_oob_ext_data *rp;
6135 if (skb->len != sizeof(*rp)) {
6136 status = MGMT_STATUS_FAILED;
6139 status = MGMT_STATUS_SUCCESS;
6140 rp = (void *)skb->data;
6142 if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
6143 eir_len = 5 + 18 + 18;
6147 eir_len = 5 + 18 + 18 + 18 + 18;
6157 mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL);
6164 eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV,
6165 hdev->dev_class, 3);
6168 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6169 EIR_SSP_HASH_C192, h192, 16);
6170 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6171 EIR_SSP_RAND_R192, r192, 16);
6175 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6176 EIR_SSP_HASH_C256, h256, 16);
6177 eir_len = eir_append_data(mgmt_rp->eir, eir_len,
6178 EIR_SSP_RAND_R256, r256, 16);
6182 mgmt_rp->type = mgmt_cp->type;
6183 mgmt_rp->eir_len = cpu_to_le16(eir_len);
6185 err = mgmt_cmd_complete(cmd->sk, hdev->id,
6186 MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status,
6187 mgmt_rp, sizeof(*mgmt_rp) + eir_len);
6188 if (err < 0 || status)
/* Only sockets flagged for OOB data events receive the broadcast. */
6191 hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS);
6193 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6194 mgmt_rp, sizeof(*mgmt_rp) + eir_len,
6195 HCI_MGMT_OOB_DATA_EVENTS, cmd->sk);
6198 mgmt_pending_remove(cmd);
/* Queue an HCI request for the local SSP OOB data on behalf of a pending
 * MGMT_OP_READ_LOCAL_OOB_EXT_DATA command: the extended (SC) variant when
 * BR/EDR Secure Connections is enabled, otherwise the legacy one.  The
 * pending command is dropped again if the request fails to run.
 */
6201 static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk,
6202 struct mgmt_cp_read_local_oob_ext_data *cp)
6204 struct mgmt_pending_cmd *cmd;
6205 struct hci_request req;
6208 cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev,
6213 hci_req_init(&req, hdev);
6215 if (bredr_sc_enabled(hdev))
6216 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_EXT_DATA, 0, NULL);
6218 hci_req_add(&req, HCI_OP_READ_LOCAL_OOB_DATA, 0, NULL);
6220 err = hci_req_run_skb(&req, read_local_oob_ext_data_complete);
6222 mgmt_pending_remove(cmd);
/* MGMT Read Local OOB Extended Data command handler.
 *
 * cp->type selects BR/EDR or LE (public|random) OOB data.  For BR/EDR with
 * SSP enabled the work is deferred to read_local_ssp_oob_req(); otherwise
 * only the class of device is returned.  For LE, SC OOB material is
 * generated via smp_generate_oob() when enabled, privacy mode is rejected
 * (the active RPA cannot be reported), and the reply carries address,
 * role, optional confirm/random values and flags as EIR fields.  On
 * success the data is also broadcast as LOCAL_OOB_DATA_UPDATED.
 * NOTE(review): several error-label and cleanup lines are elided in this
 * excerpt.
 */
6229 static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev,
6230 void *data, u16 data_len)
6232 struct mgmt_cp_read_local_oob_ext_data *cp = data;
6233 struct mgmt_rp_read_local_oob_ext_data *rp;
6236 u8 status, flags, role, addr[7], hash[16], rand[16];
6239 BT_DBG("%s", hdev->name);
6241 if (hdev_is_powered(hdev)) {
6243 case BIT(BDADDR_BREDR):
6244 status = mgmt_bredr_support(hdev);
6250 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6251 status = mgmt_le_support(hdev);
/* LE reply: bdaddr (9) + role (3) + confirm (18) + random (18) + flags (3). */
6255 eir_len = 9 + 3 + 18 + 18 + 3;
6258 status = MGMT_STATUS_INVALID_PARAMS;
6263 status = MGMT_STATUS_NOT_POWERED;
6267 rp_len = sizeof(*rp) + eir_len;
6268 rp = kmalloc(rp_len, GFP_ATOMIC);
6279 case BIT(BDADDR_BREDR):
6280 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
/* SSP enabled: fetch OOB data asynchronously from the controller. */
6281 err = read_local_ssp_oob_req(hdev, sk, cp);
6282 hci_dev_unlock(hdev);
6286 status = MGMT_STATUS_FAILED;
6289 eir_len = eir_append_data(rp->eir, eir_len,
6291 hdev->dev_class, 3);
6294 case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)):
6295 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) &&
6296 smp_generate_oob(hdev, hash, rand) < 0) {
6297 hci_dev_unlock(hdev);
6298 status = MGMT_STATUS_FAILED;
6302 /* This should return the active RPA, but since the RPA
6303 * is only programmed on demand, it is really hard to fill
6304 * this in at the moment. For now disallow retrieving
6305 * local out-of-band data when privacy is in use.
6307 * Returning the identity address will not help here since
6308 * pairing happens before the identity resolving key is
6309 * known and thus the connection establishment happens
6310 * based on the RPA and not the identity address.
6312 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
6313 hci_dev_unlock(hdev);
6314 status = MGMT_STATUS_REJECTED;
/* Pick static vs public address the same way advertising does. */
6318 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
6319 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
6320 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
6321 bacmp(&hdev->static_addr, BDADDR_ANY))) {
6322 memcpy(addr, &hdev->static_addr, 6);
6325 memcpy(addr, &hdev->bdaddr, 6);
6329 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR,
6330 addr, sizeof(addr));
6332 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
6337 eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE,
6338 &role, sizeof(role));
6340 if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) {
6341 eir_len = eir_append_data(rp->eir, eir_len,
6343 hash, sizeof(hash));
6345 eir_len = eir_append_data(rp->eir, eir_len,
6347 rand, sizeof(rand));
6350 flags = mgmt_get_adv_discov_flags(hdev);
6352 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
6353 flags |= LE_AD_NO_BREDR;
6355 eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS,
6356 &flags, sizeof(flags));
6360 hci_dev_unlock(hdev);
6362 hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS);
6364 status = MGMT_STATUS_SUCCESS;
6367 rp->type = cp->type;
6368 rp->eir_len = cpu_to_le16(eir_len);
6370 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA,
6371 status, rp, sizeof(*rp) + eir_len);
6372 if (err < 0 || status)
6375 err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev,
6376 rp, sizeof(*rp) + eir_len,
6377 HCI_MGMT_OOB_DATA_EVENTS, sk);
/* Build the bitmask of MGMT_ADV_FLAG_* capabilities this controller
 * supports: the always-available flags, TX power when the controller
 * reports a valid value (or extended advertising, where Set Adv Param
 * always returns it), and the secondary-PHY flags for extended-advertising
 * controllers based on le_features.
 */
6385 static u32 get_supported_adv_flags(struct hci_dev *hdev)
6389 flags |= MGMT_ADV_FLAG_CONNECTABLE;
6390 flags |= MGMT_ADV_FLAG_DISCOV;
6391 flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
6392 flags |= MGMT_ADV_FLAG_MANAGED_FLAGS;
6393 flags |= MGMT_ADV_FLAG_APPEARANCE;
6394 flags |= MGMT_ADV_FLAG_LOCAL_NAME;
6396 /* In extended adv TX_POWER returned from Set Adv Param
6397 * will be always valid.
6399 if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) ||
6400 ext_adv_capable(hdev))
6401 flags |= MGMT_ADV_FLAG_TX_POWER;
6403 if (ext_adv_capable(hdev)) {
6404 flags |= MGMT_ADV_FLAG_SEC_1M;
6406 if (hdev->le_features[1] & HCI_LE_PHY_2M)
6407 flags |= MGMT_ADV_FLAG_SEC_2M;
6409 if (hdev->le_features[1] & HCI_LE_PHY_CODED)
6410 flags |= MGMT_ADV_FLAG_SEC_CODED;
/* MGMT Read Advertising Features command handler.
 *
 * Rejected on non-LE controllers.  Allocates a reply sized for one byte
 * per existing advertising instance, fills in supported flags, data-length
 * limits, max/current instance counts, and the list of instance IDs, then
 * sends the command complete.
 */
6416 static int read_adv_features(struct sock *sk, struct hci_dev *hdev,
6417 void *data, u16 data_len)
6419 struct mgmt_rp_read_adv_features *rp;
6422 struct adv_info *adv_instance;
6423 u32 supported_flags;
6426 BT_DBG("%s", hdev->name);
6428 if (!lmp_le_capable(hdev))
6429 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6430 MGMT_STATUS_REJECTED);
/* One trailing byte per instance for the ID list. */
6434 rp_len = sizeof(*rp) + hdev->adv_instance_cnt;
6435 rp = kmalloc(rp_len, GFP_ATOMIC);
6437 hci_dev_unlock(hdev);
6441 supported_flags = get_supported_adv_flags(hdev);
6443 rp->supported_flags = cpu_to_le32(supported_flags);
6444 rp->max_adv_data_len = HCI_MAX_AD_LENGTH;
6445 rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH;
6446 rp->max_instances = HCI_MAX_ADV_INSTANCES;
6447 rp->num_instances = hdev->adv_instance_cnt;
6449 instance = rp->instance;
6450 list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
6451 *instance = adv_instance->instance;
6455 hci_dev_unlock(hdev);
6457 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES,
6458 MGMT_STATUS_SUCCESS, rp, rp_len);
/* Number of bytes the Local Name EIR field currently occupies, computed by
 * rendering it into a scratch buffer via append_local_name().
 */
6465 static u8 calculate_name_len(struct hci_dev *hdev)
6467 u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3];
6469 return append_local_name(hdev, buf, 0);
/* Maximum user-supplied TLV bytes for adv data or scan response, starting
 * from HCI_MAX_AD_LENGTH and subtracting space reserved for kernel-managed
 * fields (flags, TX power, local name, appearance) selected by adv_flags.
 * NOTE(review): the per-field subtraction amounts and the is_adv_data
 * gating lines are elided in this excerpt.
 */
6472 static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags,
6475 u8 max_len = HCI_MAX_AD_LENGTH;
6478 if (adv_flags & (MGMT_ADV_FLAG_DISCOV |
6479 MGMT_ADV_FLAG_LIMITED_DISCOV |
6480 MGMT_ADV_FLAG_MANAGED_FLAGS))
6483 if (adv_flags & MGMT_ADV_FLAG_TX_POWER)
6486 if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME)
6487 max_len -= calculate_name_len(hdev)
6489 if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE))
/* True when the kernel manages the Flags AD field for this instance
 * (any discoverable or managed-flags advertising flag set).
 */
6496 static bool flags_managed(u32 adv_flags)
6498 return adv_flags & (MGMT_ADV_FLAG_DISCOV |
6499 MGMT_ADV_FLAG_LIMITED_DISCOV |
6500 MGMT_ADV_FLAG_MANAGED_FLAGS);
/* True when the kernel manages the TX Power AD field for this instance. */
6503 static bool tx_power_managed(u32 adv_flags)
6505 return adv_flags & MGMT_ADV_FLAG_TX_POWER;
/* True when the kernel manages the Local Name AD field for this instance. */
6508 static bool name_managed(u32 adv_flags)
6510 return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME;
/* True when the kernel manages the Appearance AD field for this instance. */
6513 static bool appearance_managed(u32 adv_flags)
6515 return adv_flags & MGMT_ADV_FLAG_APPEARANCE;
/* Validate user-supplied advertising / scan-response TLV data: total
 * length within tlv_data_max_len(), no fields the kernel manages itself
 * (Flags, TX Power, names, Appearance, per the adv_flags), and every
 * length-prefixed field staying inside the buffer.  NOTE(review): the
 * length comparison against max_len and the return statements are elided
 * in this excerpt.
 */
6518 static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data,
6519 u8 len, bool is_adv_data)
6524 max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data);
6529 /* Make sure that the data is correctly formatted. */
6530 for (i = 0, cur_len = 0; i < len; i += (cur_len + 1)) {
/* data[i] is the field length, data[i + 1] the AD type. */
6533 if (data[i + 1] == EIR_FLAGS &&
6534 (!is_adv_data || flags_managed(adv_flags)))
6537 if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags))
6540 if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags))
6543 if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags))
6546 if (data[i + 1] == EIR_APPEARANCE &&
6547 appearance_managed(adv_flags))
6550 /* If the current field length would exceed the total data
6551 * length, then it's invalid.
6553 if (i + cur_len >= len)
/* HCI request completion callback for Add Advertising.
 *
 * On failure, removes every still-pending advertising instance (cancelling
 * the adv timeout if the current instance is affected) and notifies
 * listeners; on success the pending flag is simply cleared.  Then answers
 * the pending mgmt command with a status or a command-complete carrying
 * the instance ID.
 */
6560 static void add_advertising_complete(struct hci_dev *hdev, u8 status,
6563 struct mgmt_pending_cmd *cmd;
6564 struct mgmt_cp_add_advertising *cp;
6565 struct mgmt_rp_add_advertising rp;
6566 struct adv_info *adv_instance, *n;
6569 BT_DBG("status %d", status);
6573 cmd = pending_find(MGMT_OP_ADD_ADVERTISING, hdev);
6575 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
6576 if (!adv_instance->pending)
6580 adv_instance->pending = false;
6584 instance = adv_instance->instance;
6586 if (hdev->cur_adv_instance == instance)
6587 cancel_adv_timeout(hdev);
6589 hci_remove_adv_instance(hdev, instance);
/* cmd may be NULL here, hence the conditional socket. */
6590 mgmt_advertising_removed(cmd ? cmd->sk : NULL, hdev, instance);
6597 rp.instance = cp->instance;
6600 mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode,
6601 mgmt_status(status));
6603 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode,
6604 mgmt_status(status), &rp, sizeof(rp));
6606 mgmt_pending_remove(cmd);
6609 hci_dev_unlock(hdev);
/* MGMT Add Advertising command handler.
 *
 * Validates LE support, the instance number (1..HCI_MAX_ADV_INSTANCES),
 * the exact payload length, the flags against get_supported_adv_flags()
 * (at most one secondary-PHY flag), and the adv/scan-rsp TLV data.
 * Registers or updates the instance via hci_add_adv_instance(), emits
 * Advertising Added when a new instance was created, and schedules
 * (re)advertising, answering immediately when no HCI traffic is needed
 * (unpowered, HCI_ADVERTISING set, or nothing to schedule).
 * NOTE(review): some error-label/goto lines are elided in this excerpt.
 */
6612 static int add_advertising(struct sock *sk, struct hci_dev *hdev,
6613 void *data, u16 data_len)
6615 struct mgmt_cp_add_advertising *cp = data;
6616 struct mgmt_rp_add_advertising rp;
6618 u32 supported_flags, phy_flags;
6620 u16 timeout, duration;
6621 unsigned int prev_instance_cnt = hdev->adv_instance_cnt;
6622 u8 schedule_instance = 0;
6623 struct adv_info *next_instance;
6625 struct mgmt_pending_cmd *cmd;
6626 struct hci_request req;
6628 BT_DBG("%s", hdev->name);
6630 status = mgmt_le_support(hdev);
6632 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6635 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6636 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6637 MGMT_STATUS_INVALID_PARAMS);
/* Payload must be exactly header + adv data + scan response data. */
6639 if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len)
6640 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6641 MGMT_STATUS_INVALID_PARAMS);
6643 flags = __le32_to_cpu(cp->flags);
6644 timeout = __le16_to_cpu(cp->timeout);
6645 duration = __le16_to_cpu(cp->duration);
6647 /* The current implementation only supports a subset of the specified
6648 * flags. Also need to check mutual exclusiveness of sec flags.
6650 supported_flags = get_supported_adv_flags(hdev);
6651 phy_flags = flags & MGMT_ADV_FLAG_SEC_MASK;
/* phy_flags ^ (phy_flags & -phy_flags) is non-zero when more than one
 * secondary-PHY bit is set (clears the lowest set bit).
 */
6652 if (flags & ~supported_flags ||
6653 ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags)))))
6654 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6655 MGMT_STATUS_INVALID_PARAMS);
/* A timeout needs the timer machinery of a powered controller. */
6659 if (timeout && !hdev_is_powered(hdev)) {
6660 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6661 MGMT_STATUS_REJECTED);
/* Serialize against other in-flight advertising/LE state changes. */
6665 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6666 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6667 pending_find(MGMT_OP_SET_LE, hdev)) {
6668 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6673 if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) ||
6674 !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len,
6675 cp->scan_rsp_len, false)) {
6676 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6677 MGMT_STATUS_INVALID_PARAMS);
6681 err = hci_add_adv_instance(hdev, cp->instance, flags,
6682 cp->adv_data_len, cp->data,
6684 cp->data + cp->adv_data_len,
6687 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6688 MGMT_STATUS_FAILED);
6692 /* Only trigger an advertising added event if a new instance was
6695 if (hdev->adv_instance_cnt > prev_instance_cnt)
6696 mgmt_advertising_added(sk, hdev, cp->instance);
6698 if (hdev->cur_adv_instance == cp->instance) {
6699 /* If the currently advertised instance is being changed then
6700 * cancel the current advertising and schedule the next
6701 * instance. If there is only one instance then the overridden
6702 * advertising data will be visible right away.
6704 cancel_adv_timeout(hdev);
6706 next_instance = hci_get_next_instance(hdev, cp->instance);
6708 schedule_instance = next_instance->instance;
6709 } else if (!hdev->adv_instance_timeout) {
6710 /* Immediately advertise the new instance if no other
6711 * instance is currently being advertised.
6713 schedule_instance = cp->instance;
6716 /* If the HCI_ADVERTISING flag is set or the device isn't powered or
6717 * there is no instance to be advertised then we have no HCI
6718 * communication to make. Simply return.
6720 if (!hdev_is_powered(hdev) ||
6721 hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
6722 !schedule_instance) {
6723 rp.instance = cp->instance;
6724 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
6725 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6729 /* We're good to go, update advertising data, parameters, and start
6732 cmd = mgmt_pending_add(sk, MGMT_OP_ADD_ADVERTISING, hdev, data,
6739 hci_req_init(&req, hdev);
6741 err = __hci_req_schedule_adv_instance(&req, schedule_instance, true);
6744 err = hci_req_run(&req, add_advertising_complete);
6747 mgmt_pending_remove(cmd);
6750 hci_dev_unlock(hdev);
/* HCI request completion callback for Remove Advertising.  The instance
 * is already gone at this point, so the pending mgmt command is always
 * completed with success regardless of the HCI status (a failure here
 * only means advertising could not be disabled).
 */
6755 static void remove_advertising_complete(struct hci_dev *hdev, u8 status,
6758 struct mgmt_pending_cmd *cmd;
6759 struct mgmt_cp_remove_advertising *cp;
6760 struct mgmt_rp_remove_advertising rp;
6762 BT_DBG("status %d", status);
6766 /* A failure status here only means that we failed to disable
6767 * advertising. Otherwise, the advertising instance has been removed,
6768 * so report success.
6770 cmd = pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev);
6775 rp.instance = cp->instance;
6777 mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, MGMT_STATUS_SUCCESS,
6779 mgmt_pending_remove(cmd);
6782 hci_dev_unlock(hdev);
/* MGMT Remove Advertising command handler.
 *
 * Instance 0 clears all instances; a non-zero instance must exist.
 * Rejected while another Add/Remove Advertising or Set LE operation is
 * pending.  Clears the instance(s) via hci_req_clear_adv_instance(),
 * disables advertising when none remain, and only runs an HCI request
 * when the controller is powered, HCI_ADVERTISING is not set, and
 * commands were actually queued -- otherwise answers immediately.
 * NOTE(review): some error-label/goto lines are elided in this excerpt.
 */
6785 static int remove_advertising(struct sock *sk, struct hci_dev *hdev,
6786 void *data, u16 data_len)
6788 struct mgmt_cp_remove_advertising *cp = data;
6789 struct mgmt_rp_remove_advertising rp;
6790 struct mgmt_pending_cmd *cmd;
6791 struct hci_request req;
6794 BT_DBG("%s", hdev->name);
6798 if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) {
6799 err = mgmt_cmd_status(sk, hdev->id,
6800 MGMT_OP_REMOVE_ADVERTISING,
6801 MGMT_STATUS_INVALID_PARAMS);
6805 if (pending_find(MGMT_OP_ADD_ADVERTISING, hdev) ||
6806 pending_find(MGMT_OP_REMOVE_ADVERTISING, hdev) ||
6807 pending_find(MGMT_OP_SET_LE, hdev)) {
6808 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6813 if (list_empty(&hdev->adv_instances)) {
6814 err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING,
6815 MGMT_STATUS_INVALID_PARAMS);
6819 hci_req_init(&req, hdev);
6821 hci_req_clear_adv_instance(hdev, sk, &req, cp->instance, true);
6823 if (list_empty(&hdev->adv_instances))
6824 __hci_req_disable_advertising(&req);
6826 /* If no HCI commands have been collected so far or the HCI_ADVERTISING
6827 * flag is set or the device isn't powered then we have no HCI
6828 * communication to make. Simply return.
6830 if (skb_queue_empty(&req.cmd_q) ||
6831 !hdev_is_powered(hdev) ||
6832 hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
6833 hci_req_purge(&req);
6834 rp.instance = cp->instance;
6835 err = mgmt_cmd_complete(sk, hdev->id,
6836 MGMT_OP_REMOVE_ADVERTISING,
6837 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
6841 cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data,
6848 err = hci_req_run(&req, remove_advertising_complete);
6850 mgmt_pending_remove(cmd);
6853 hci_dev_unlock(hdev);
/* MGMT Get Advertising Size Information command handler.
 *
 * Pure query: validates LE support, instance number and flags, then
 * reports the maximum adv-data and scan-response lengths that would
 * remain available given the kernel-managed fields implied by the flags.
 */
6858 static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev,
6859 void *data, u16 data_len)
6861 struct mgmt_cp_get_adv_size_info *cp = data;
6862 struct mgmt_rp_get_adv_size_info rp;
6863 u32 flags, supported_flags;
6866 BT_DBG("%s", hdev->name);
6868 if (!lmp_le_capable(hdev))
6869 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6870 MGMT_STATUS_REJECTED);
6872 if (cp->instance < 1 || cp->instance > HCI_MAX_ADV_INSTANCES)
6873 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6874 MGMT_STATUS_INVALID_PARAMS);
6876 flags = __le32_to_cpu(cp->flags);
6878 /* The current implementation only supports a subset of the specified
6881 supported_flags = get_supported_adv_flags(hdev);
6882 if (flags & ~supported_flags)
6883 return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6884 MGMT_STATUS_INVALID_PARAMS);
6886 rp.instance = cp->instance;
6887 rp.flags = cp->flags;
6888 rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true);
6889 rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false);
6891 err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO,
6892 MGMT_STATUS_SUCCESS, &rp, sizeof(rp));
/* Dispatch table for mgmt commands, indexed by MGMT_OP_* opcode.  Each
 * entry gives the handler, the fixed (or minimum, with HCI_MGMT_VAR_LEN)
 * parameter size, and flags: HCI_MGMT_UNTRUSTED (callable without
 * CAP_NET_ADMIN), HCI_MGMT_UNCONFIGURED (allowed on unconfigured
 * controllers), etc.  Entry order must match the opcode numbering.
 */
6897 static const struct hci_mgmt_handler mgmt_handlers[] = {
6898 { NULL }, /* 0x0000 (no command) */
6899 { read_version, MGMT_READ_VERSION_SIZE,
6901 HCI_MGMT_UNTRUSTED },
6902 { read_commands, MGMT_READ_COMMANDS_SIZE,
6904 HCI_MGMT_UNTRUSTED },
6905 { read_index_list, MGMT_READ_INDEX_LIST_SIZE,
6907 HCI_MGMT_UNTRUSTED },
6908 { read_controller_info, MGMT_READ_INFO_SIZE,
6909 HCI_MGMT_UNTRUSTED },
6910 { set_powered, MGMT_SETTING_SIZE },
6911 { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE },
6912 { set_connectable, MGMT_SETTING_SIZE },
6913 { set_fast_connectable, MGMT_SETTING_SIZE },
6914 { set_bondable, MGMT_SETTING_SIZE },
6915 { set_link_security, MGMT_SETTING_SIZE },
6916 { set_ssp, MGMT_SETTING_SIZE },
6917 { set_hs, MGMT_SETTING_SIZE },
6918 { set_le, MGMT_SETTING_SIZE },
6919 { set_dev_class, MGMT_SET_DEV_CLASS_SIZE },
6920 { set_local_name, MGMT_SET_LOCAL_NAME_SIZE },
6921 { add_uuid, MGMT_ADD_UUID_SIZE },
6922 { remove_uuid, MGMT_REMOVE_UUID_SIZE },
6923 { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE,
6925 { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE,
6927 { disconnect, MGMT_DISCONNECT_SIZE },
6928 { get_connections, MGMT_GET_CONNECTIONS_SIZE },
6929 { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE },
6930 { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE },
6931 { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE },
6932 { pair_device, MGMT_PAIR_DEVICE_SIZE },
6933 { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE },
6934 { unpair_device, MGMT_UNPAIR_DEVICE_SIZE },
6935 { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE },
6936 { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE },
6937 { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE },
6938 { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE },
6939 { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE },
6940 { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE,
6942 { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE },
6943 { start_discovery, MGMT_START_DISCOVERY_SIZE },
6944 { stop_discovery, MGMT_STOP_DISCOVERY_SIZE },
6945 { confirm_name, MGMT_CONFIRM_NAME_SIZE },
6946 { block_device, MGMT_BLOCK_DEVICE_SIZE },
6947 { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE },
6948 { set_device_id, MGMT_SET_DEVICE_ID_SIZE },
6949 { set_advertising, MGMT_SETTING_SIZE },
6950 { set_bredr, MGMT_SETTING_SIZE },
6951 { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE },
6952 { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE },
6953 { set_secure_conn, MGMT_SETTING_SIZE },
6954 { set_debug_keys, MGMT_SETTING_SIZE },
6955 { set_privacy, MGMT_SET_PRIVACY_SIZE },
6956 { load_irks, MGMT_LOAD_IRKS_SIZE,
6958 { get_conn_info, MGMT_GET_CONN_INFO_SIZE },
6959 { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE },
6960 { add_device, MGMT_ADD_DEVICE_SIZE },
6961 { remove_device, MGMT_REMOVE_DEVICE_SIZE },
6962 { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE,
6964 { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE,
6966 HCI_MGMT_UNTRUSTED },
6967 { read_config_info, MGMT_READ_CONFIG_INFO_SIZE,
6968 HCI_MGMT_UNCONFIGURED |
6969 HCI_MGMT_UNTRUSTED },
6970 { set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE,
6971 HCI_MGMT_UNCONFIGURED },
6972 { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE,
6973 HCI_MGMT_UNCONFIGURED },
6974 { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE,
6976 { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE },
6977 { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE,
6979 HCI_MGMT_UNTRUSTED },
6980 { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE },
6981 { add_advertising, MGMT_ADD_ADVERTISING_SIZE,
6983 { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE },
6984 { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE },
6985 { start_limited_discovery, MGMT_START_DISCOVERY_SIZE },
6986 { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE,
6987 HCI_MGMT_UNTRUSTED },
6988 { set_appearance, MGMT_SET_APPEARANCE_SIZE },
6989 { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE },
6990 { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
6991 { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
/* Announce a newly registered controller index.  Raw-only devices are
 * never exposed over mgmt.  Primary controllers raise either
 * UNCONF_INDEX_ADDED or INDEX_ADDED depending on HCI_UNCONFIGURED, and an
 * EXT_INDEX_ADDED event (with type/bus) goes to extended-index listeners.
 * NOTE(review): the AMP case and ev field assignments are elided in this
 * excerpt.
 */
6995 void mgmt_index_added(struct hci_dev *hdev)
6997 struct mgmt_ev_ext_index ev;
6999 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7002 switch (hdev->dev_type) {
7004 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7005 mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev,
7006 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7009 mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0,
7010 HCI_MGMT_INDEX_EVENTS);
7023 mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev),
7024 HCI_MGMT_EXT_INDEX_EVENTS);
/* Announce an unregistering controller index.  All pending mgmt commands
 * are completed with MGMT_STATUS_INVALID_INDEX first, then the matching
 * (UNCONF_)INDEX_REMOVED and EXT_INDEX_REMOVED events are emitted,
 * mirroring mgmt_index_added().  NOTE(review): the AMP case and ev field
 * assignments are elided in this excerpt.
 */
7027 void mgmt_index_removed(struct hci_dev *hdev)
7029 struct mgmt_ev_ext_index ev;
7030 u8 status = MGMT_STATUS_INVALID_INDEX;
7032 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
7035 switch (hdev->dev_type) {
/* Fail every outstanding command for this index. */
7037 mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
7039 if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
7040 mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev,
7041 NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS);
7044 mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0,
7045 HCI_MGMT_INDEX_EVENTS);
7058 mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev),
7059 HCI_MGMT_EXT_INDEX_EVENTS);
/* This function requires the caller holds hdev->lock */
/* Re-queue every LE conn_params entry onto the pending-connection or
 * pending-report list according to its auto_connect policy, after first
 * detaching it (needed for the AUTO_OFF case where the controller may
 * never have really powered off).
 */
7063 static void restart_le_actions(struct hci_dev *hdev)
7065 struct hci_conn_params *p;
7067 list_for_each_entry(p, &hdev->le_conn_params, list) {
7068 /* Needed for AUTO_OFF case where might not "really"
7069 * have been powered off.
7071 list_del_init(&p->action);
7073 switch (p->auto_connect) {
7074 case HCI_AUTO_CONN_DIRECT:
7075 case HCI_AUTO_CONN_ALWAYS:
7076 list_add(&p->action, &hdev->pend_le_conns);
7078 case HCI_AUTO_CONN_REPORT:
7079 list_add(&p->action, &hdev->pend_le_reports);
/* Completion handler for powering a controller on.
 * @err: result of the power-on attempt; on success the LE action lists
 * are rebuilt and background scanning restarted, then any pending
 * SET_POWERED commands are answered and new settings broadcast.
 */
7087 void mgmt_power_on(struct hci_dev *hdev, int err)
7089 	struct cmd_lookup match = { NULL, hdev };
7091 	BT_DBG("err %d", err);
7096 		restart_le_actions(hdev);
7097 		hci_update_background_scan(hdev);
7100 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
	/* match.sk was captured by settings_rsp; skip it when broadcasting. */
7102 	new_settings(hdev, match.sk);
7107 	hci_dev_unlock(hdev);
/* Completion handler for powering a controller off: answer pending
 * SET_POWERED commands, fail all other pending commands with an
 * appropriate status, clear the advertised class of device and
 * broadcast the new settings.
 */
7110 void __mgmt_power_off(struct hci_dev *hdev)
7112 	struct cmd_lookup match = { NULL, hdev };
7113 	u8 status, zero_cod[] = { 0, 0, 0 };
7115 	mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match);
7117 	/* If the power off is because of hdev unregistration let
7118 	 * use the appropriate INVALID_INDEX status. Otherwise use
7119 	 * NOT_POWERED. We cover both scenarios here since later in
7120 	 * mgmt_index_removed() any hci_conn callbacks will have already
7121 	 * been triggered, potentially causing misleading DISCONNECTED
7124 	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
7125 		status = MGMT_STATUS_INVALID_INDEX;
7127 		status = MGMT_STATUS_NOT_POWERED;
7129 	mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status);
	/* Only announce a class-of-device change if it was non-zero. */
7131 	if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) {
7132 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev,
7133 				   zero_cod, sizeof(zero_cod),
7134 				   HCI_MGMT_DEV_CLASS_EVENTS, NULL);
7135 		ext_info_changed(hdev, NULL);
7138 	new_settings(hdev, match.sk);
/* Fail a pending SET_POWERED command. RFKILL blocks are reported with
 * their own status so userspace can distinguish them from generic
 * failures.
 */
7144 void mgmt_set_powered_failed(struct hci_dev *hdev, int err)
7146 	struct mgmt_pending_cmd *cmd;
7149 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
7153 	if (err == -ERFKILL)
7154 		status = MGMT_STATUS_RFKILLED;
7156 		status = MGMT_STATUS_FAILED;
7158 	mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status);
7160 	mgmt_pending_remove(cmd);
/* Broadcast a NEW_LINK_KEY event for a BR/EDR link key so userspace can
 * persist it. The store_hint tells userspace whether the key should be
 * stored permanently.
 */
7163 void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key,
7166 	struct mgmt_ev_new_link_key ev;
7168 	memset(&ev, 0, sizeof(ev));
7170 	ev.store_hint = persistent;
7171 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
	/* Link keys only exist for BR/EDR transports. */
7172 	ev.key.addr.type = BDADDR_BREDR;
7173 	ev.key.type = key->type;
7174 	memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE);
7175 	ev.key.pin_len = key->pin_len;
7177 	mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL);
/* Map an SMP long-term-key type plus its authenticated flag onto the
 * corresponding MGMT_LTK_* wire value. Unknown types fall through to
 * MGMT_LTK_UNAUTHENTICATED.
 */
7180 static u8 mgmt_ltk_type(struct smp_ltk *ltk)
7182 	switch (ltk->type) {
7185 		if (ltk->authenticated)
7186 			return MGMT_LTK_AUTHENTICATED;
7187 		return MGMT_LTK_UNAUTHENTICATED;
7189 		if (ltk->authenticated)
7190 			return MGMT_LTK_P256_AUTH;
7191 		return MGMT_LTK_P256_UNAUTH;
7192 	case SMP_LTK_P256_DEBUG:
7193 		return MGMT_LTK_P256_DEBUG;
7196 	return MGMT_LTK_UNAUTHENTICATED;
/* Broadcast a NEW_LONG_TERM_KEY event so userspace can persist an LE
 * long term key. Keys belonging to non-identity random addresses are
 * reported with store_hint 0 since they become useless once the remote
 * address changes.
 */
7199 void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
7201 	struct mgmt_ev_new_long_term_key ev;
7203 	memset(&ev, 0, sizeof(ev));
7205 	/* Devices using resolvable or non-resolvable random addresses
7206 	 * without providing an identity resolving key don't require
7207 	 * to store long term keys. Their addresses will change the
7210 	 * Only when a remote device provides an identity address
7211 	 * make sure the long term key is stored. If the remote
7212 	 * identity is known, the long term keys are internally
7213 	 * mapped to the identity address. So allow static random
7214 	 * and public addresses here.
	/* Top two address bits 11 mark a static random (identity) address. */
7216 	if (key->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7217 	    (key->bdaddr.b[5] & 0xc0) != 0xc0)
7218 		ev.store_hint = 0x00;
7220 		ev.store_hint = persistent;
7222 	bacpy(&ev.key.addr.bdaddr, &key->bdaddr);
7223 	ev.key.addr.type = link_to_bdaddr(LE_LINK, key->bdaddr_type);
7224 	ev.key.type = mgmt_ltk_type(key);
7225 	ev.key.enc_size = key->enc_size;
7226 	ev.key.ediv = key->ediv;
7227 	ev.key.rand = key->rand;
7229 	if (key->type == SMP_LTK)
7232 	/* Make sure we copy only the significant bytes based on the
7233 	 * encryption key size, and set the rest of the value to zeroes.
7235 	memcpy(ev.key.val, key->val, key->enc_size);
7236 	memset(ev.key.val + key->enc_size, 0,
7237 	       sizeof(ev.key.val) - key->enc_size);
7239 	mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a NEW_IRK event carrying the identity resolving key and the
 * resolvable private address it was learned from, so userspace can
 * persist the identity mapping.
 */
7242 void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
7244 	struct mgmt_ev_new_irk ev;
7246 	memset(&ev, 0, sizeof(ev));
7248 	ev.store_hint = persistent;
7250 	bacpy(&ev.rpa, &irk->rpa);
7251 	bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);
7252 	ev.irk.addr.type = link_to_bdaddr(LE_LINK, irk->addr_type);
7253 	memcpy(ev.irk.val, irk->val, sizeof(irk->val));
7255 	mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a NEW_CSRK event for a signature resolving key. As with
 * LTKs, keys tied to non-identity random addresses get store_hint 0.
 */
7258 void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
7261 	struct mgmt_ev_new_csrk ev;
7263 	memset(&ev, 0, sizeof(ev));
7265 	/* Devices using resolvable or non-resolvable random addresses
7266 	 * without providing an identity resolving key don't require
7267 	 * to store signature resolving keys. Their addresses will change
7268 	 * the next time around.
7270 	 * Only when a remote device provides an identity address
7271 	 * make sure the signature resolving key is stored. So allow
7272 	 * static random and public addresses here.
	/* Same static-random-address test as in mgmt_new_ltk(). */
7274 	if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM &&
7275 	    (csrk->bdaddr.b[5] & 0xc0) != 0xc0)
7276 		ev.store_hint = 0x00;
7278 		ev.store_hint = persistent;
7280 	bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr);
7281 	ev.key.addr.type = link_to_bdaddr(LE_LINK, csrk->bdaddr_type);
7282 	ev.key.type = csrk->type;
7283 	memcpy(ev.key.val, csrk->val, sizeof(csrk->val));
7285 	mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a NEW_CONN_PARAM event with updated LE connection
 * parameters for a peer. Non-identity addresses are ignored since the
 * parameters could not be re-associated with the device later.
 * All u16 parameters are converted to little-endian wire order.
 */
7288 void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
7289 			 u8 bdaddr_type, u8 store_hint, u16 min_interval,
7290 			 u16 max_interval, u16 latency, u16 timeout)
7292 	struct mgmt_ev_new_conn_param ev;
7294 	if (!hci_is_identity_address(bdaddr, bdaddr_type))
7297 	memset(&ev, 0, sizeof(ev));
7298 	bacpy(&ev.addr.bdaddr, bdaddr);
7299 	ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type);
7300 	ev.store_hint = store_hint;
7301 	ev.min_interval = cpu_to_le16(min_interval);
7302 	ev.max_interval = cpu_to_le16(max_interval);
7303 	ev.latency = cpu_to_le16(latency);
7304 	ev.timeout = cpu_to_le16(timeout);
7306 	mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a DEVICE_CONNECTED event. The variable-length EIR payload
 * carries either the connection's LE advertising data, or (for BR/EDR)
 * the remote name and class of device.
 */
7309 void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn,
7310 			   u32 flags, u8 *name, u8 name_len)
7313 	struct mgmt_ev_device_connected *ev = (void *) buf;
7316 	bacpy(&ev->addr.bdaddr, &conn->dst);
7317 	ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7319 	ev->flags = __cpu_to_le32(flags);
7321 	/* We must ensure that the EIR Data fields are ordered and
7322 	 * unique. Keep it simple for now and avoid the problem by not
7323 	 * adding any BR/EDR data to the LE adv.
7325 	if (conn->le_adv_data_len > 0) {
7326 		memcpy(&ev->eir[eir_len],
7327 		       conn->le_adv_data, conn->le_adv_data_len);
7328 		eir_len = conn->le_adv_data_len;
		/* BR/EDR path: append remote name, then CoD if non-zero. */
7331 			eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE,
7334 		if (memcmp(conn->dev_class, "\0\0\0", 3) != 0)
7335 			eir_len = eir_append_data(ev->eir, eir_len,
7337 						  conn->dev_class, 3);
7340 	ev->eir_len = cpu_to_le16(eir_len);
7342 	mgmt_event(MGMT_EV_DEVICE_CONNECTED, hdev, buf,
7343 		   sizeof(*ev) + eir_len, NULL);
/* mgmt_pending_foreach() callback: complete a pending DISCONNECT
 * command and hand its socket back to the caller through @data so the
 * subsequent DEVICE_DISCONNECTED event can skip that socket.
 */
7346 static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data)
7348 	struct sock **sk = data;
7350 	cmd->cmd_complete(cmd, 0);
7355 	mgmt_pending_remove(cmd);
/* mgmt_pending_foreach() callback: emit DEVICE_UNPAIRED for a pending
 * UNPAIR_DEVICE command, then complete and remove it.
 */
7358 static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data)
7360 	struct hci_dev *hdev = data;
7361 	struct mgmt_cp_unpair_device *cp = cmd->param;
7363 	device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk);
7365 	cmd->cmd_complete(cmd, 0);
7366 	mgmt_pending_remove(cmd);
/* Return whether a SET_POWERED(off) command is currently pending,
 * i.e. the controller is in the middle of powering down.
 * NOTE(review): the decision logic after pending_find() is not visible
 * here — presumably it inspects cp->val; confirm against full source.
 */
7369 bool mgmt_powering_down(struct hci_dev *hdev)
7371 	struct mgmt_pending_cmd *cmd;
7372 	struct mgmt_mode *cp;
7374 	cmd = pending_find(MGMT_OP_SET_POWERED, hdev);
/* Broadcast a DEVICE_DISCONNECTED event for an ACL/LE link and resolve
 * any pending DISCONNECT/UNPAIR_DEVICE commands tied to it. When the
 * last connection drops during power-down, the queued power_off work is
 * expedited.
 */
7385 void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr,
7386 			      u8 link_type, u8 addr_type, u8 reason,
7387 			      bool mgmt_connected)
7389 	struct mgmt_ev_device_disconnected ev;
7390 	struct sock *sk = NULL;
7392 	/* The connection is still in hci_conn_hash so test for 1
7393 	 * instead of 0 to know if this is the last one.
7395 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7396 		cancel_delayed_work(&hdev->power_off);
7397 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
7400 	if (!mgmt_connected)
7403 	if (link_type != ACL_LINK && link_type != LE_LINK)
	/* Collect the socket of any pending DISCONNECT so it is excluded
	 * from the broadcast below. */
7406 	mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk);
7408 	bacpy(&ev.addr.bdaddr, bdaddr);
7409 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7412 	mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk);
7417 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
/* A disconnect attempt failed: resolve pending UNPAIR_DEVICE commands
 * and, if a pending DISCONNECT command matches this address and type,
 * complete it with the translated HCI status.
 */
7421 void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr,
7422 			    u8 link_type, u8 addr_type, u8 status)
7424 	u8 bdaddr_type = link_to_bdaddr(link_type, addr_type);
7425 	struct mgmt_cp_disconnect *cp;
7426 	struct mgmt_pending_cmd *cmd;
7428 	mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp,
7431 	cmd = pending_find(MGMT_OP_DISCONNECT, hdev);
	/* Only complete the pending command if it targets this peer. */
7437 	if (bacmp(bdaddr, &cp->addr.bdaddr))
7440 	if (cp->addr.type != bdaddr_type)
7443 	cmd->cmd_complete(cmd, mgmt_status(status));
7444 	mgmt_pending_remove(cmd);
/* Broadcast a CONNECT_FAILED event with the translated HCI status.
 * Mirrors mgmt_device_disconnected(): if this was the last connection
 * while powering down, expedite the queued power_off work.
 */
7447 void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7448 			 u8 addr_type, u8 status)
7450 	struct mgmt_ev_connect_failed ev;
7452 	/* The connection is still in hci_conn_hash so test for 1
7453 	 * instead of 0 to know if this is the last one.
7455 	if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) {
7456 		cancel_delayed_work(&hdev->power_off);
7457 		queue_work(hdev->req_workqueue, &hdev->power_off.work);
7460 	bacpy(&ev.addr.bdaddr, bdaddr);
7461 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7462 	ev.status = mgmt_status(status);
7464 	mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL);
/* Broadcast a PIN_CODE_REQUEST event asking userspace to supply a PIN
 * for a BR/EDR pairing. @secure indicates a 16-digit secure PIN is
 * required (assigned in an elided line — confirm).
 */
7467 void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure)
7469 	struct mgmt_ev_pin_code_request ev;
7471 	bacpy(&ev.addr.bdaddr, bdaddr);
7472 	ev.addr.type = BDADDR_BREDR;
7475 	mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL);
/* Complete a pending PIN_CODE_REPLY command with the translated HCI
 * status once the controller has acknowledged the reply.
 */
7478 void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7481 	struct mgmt_pending_cmd *cmd;
7483 	cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev);
7487 	cmd->cmd_complete(cmd, mgmt_status(status));
7488 	mgmt_pending_remove(cmd);
/* Complete a pending PIN_CODE_NEG_REPLY command with the translated
 * HCI status. Same pattern as mgmt_pin_code_reply_complete().
 */
7491 void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7494 	struct mgmt_pending_cmd *cmd;
7496 	cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev);
7500 	cmd->cmd_complete(cmd, mgmt_status(status));
7501 	mgmt_pending_remove(cmd);
/* Emit a USER_CONFIRM_REQUEST event asking userspace to confirm a
 * numeric comparison @value during pairing. Returns the result of
 * mgmt_event().
 */
7504 int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7505 			      u8 link_type, u8 addr_type, u32 value,
7508 	struct mgmt_ev_user_confirm_request ev;
7510 	BT_DBG("%s", hdev->name);
7512 	bacpy(&ev.addr.bdaddr, bdaddr);
7513 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7514 	ev.confirm_hint = confirm_hint;
7515 	ev.value = cpu_to_le32(value);
7517 	return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev),
/* Emit a USER_PASSKEY_REQUEST event asking userspace to enter a passkey
 * during pairing. Returns the result of mgmt_event().
 */
7521 int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr,
7522 			      u8 link_type, u8 addr_type)
7524 	struct mgmt_ev_user_passkey_request ev;
7526 	BT_DBG("%s", hdev->name);
7528 	bacpy(&ev.addr.bdaddr, bdaddr);
7529 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7531 	return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev),
/* Common completion helper for the four user confirm/passkey reply
 * opcodes: find the pending command for @opcode and complete it with
 * the translated HCI status.
 */
7535 static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7536 				      u8 link_type, u8 addr_type, u8 status,
7539 	struct mgmt_pending_cmd *cmd;
7541 	cmd = pending_find(opcode, hdev);
7545 	cmd->cmd_complete(cmd, mgmt_status(status));
7546 	mgmt_pending_remove(cmd);
/* Thin wrapper: complete a pending USER_CONFIRM_REPLY. */
7551 int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7552 				     u8 link_type, u8 addr_type, u8 status)
7554 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7555 					  status, MGMT_OP_USER_CONFIRM_REPLY);
/* Thin wrapper: complete a pending USER_CONFIRM_NEG_REPLY. */
7558 int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7559 					 u8 link_type, u8 addr_type, u8 status)
7561 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7563 					  MGMT_OP_USER_CONFIRM_NEG_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_REPLY. */
7566 int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7567 				     u8 link_type, u8 addr_type, u8 status)
7569 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7570 					  status, MGMT_OP_USER_PASSKEY_REPLY);
/* Thin wrapper: complete a pending USER_PASSKEY_NEG_REPLY. */
7573 int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr,
7574 					 u8 link_type, u8 addr_type, u8 status)
7576 	return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type,
7578 					  MGMT_OP_USER_PASSKEY_NEG_REPLY);
/* Emit a PASSKEY_NOTIFY event so userspace can display the passkey the
 * remote side must enter. @entered counts keypresses so far. Returns
 * the result of mgmt_event().
 */
7581 int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr,
7582 			     u8 link_type, u8 addr_type, u32 passkey,
7585 	struct mgmt_ev_passkey_notify ev;
7587 	BT_DBG("%s", hdev->name);
7589 	bacpy(&ev.addr.bdaddr, bdaddr);
7590 	ev.addr.type = link_to_bdaddr(link_type, addr_type);
7591 	ev.passkey = __cpu_to_le32(passkey);
7592 	ev.entered = entered;
7594 	return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL);
/* Broadcast an AUTH_FAILED event for @conn and, if a pairing command is
 * pending for it, complete that command with the translated status. The
 * pending command's socket is skipped in the broadcast since it gets
 * the command response instead.
 */
7597 void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status)
7599 	struct mgmt_ev_auth_failed ev;
7600 	struct mgmt_pending_cmd *cmd;
7601 	u8 status = mgmt_status(hci_status);
7603 	bacpy(&ev.addr.bdaddr, &conn->dst);
7604 	ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type);
7607 	cmd = find_pairing(conn);
7609 	mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev),
7610 		   cmd ? cmd->sk : NULL);
7613 		cmd->cmd_complete(cmd, status);
7614 		mgmt_pending_remove(cmd);
/* Completion handler for toggling link-level authentication. On error,
 * fail pending SET_LINK_SECURITY commands; on success, sync the
 * HCI_LINK_SECURITY flag with the controller's HCI_AUTH state, answer
 * pending commands and broadcast new settings if the flag changed.
 */
7618 void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status)
7620 	struct cmd_lookup match = { NULL, hdev };
7624 		u8 mgmt_err = mgmt_status(status);
7625 		mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev,
7626 				     cmd_status_rsp, &mgmt_err);
7630 	if (test_bit(HCI_AUTH, &hdev->flags))
7631 		changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY);
7633 		changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY);
7635 	mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp,
7639 		new_settings(hdev, match.sk);
/* Queue an HCI Write EIR command that blanks the extended inquiry
 * response, and clear the cached copy in hdev->eir. No-op when the
 * controller lacks extended inquiry support.
 */
7645 static void clear_eir(struct hci_request *req)
7647 	struct hci_dev *hdev = req->hdev;
7648 	struct hci_cp_write_eir cp;
7650 	if (!lmp_ext_inq_capable(hdev))
7653 	memset(hdev->eir, 0, sizeof(hdev->eir));
7655 	memset(&cp, 0, sizeof(cp));
7657 	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
/* Completion handler for toggling Simple Secure Pairing. On error,
 * roll back SSP/HS flags and fail pending SET_SSP commands; on success,
 * sync the HCI_SSP_ENABLED flag, answer pending commands, broadcast new
 * settings if changed, and refresh debug-key mode plus EIR data.
 */
7660 void mgmt_ssp_enable_complete(struct hci_dev *hdev, u8 enable, u8 status)
7662 	struct cmd_lookup match = { NULL, hdev };
7663 	struct hci_request req;
7664 	bool changed = false;
7667 		u8 mgmt_err = mgmt_status(status);
		/* Enable failed: HS depends on SSP, so clear it too and
		 * tell everyone via new_settings(). */
7669 		if (enable && hci_dev_test_and_clear_flag(hdev,
7671 			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7672 			new_settings(hdev, NULL);
7675 		mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp,
7681 		changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED);
7683 		changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED);
7685 		changed = hci_dev_test_and_clear_flag(hdev,
7688 			hci_dev_clear_flag(hdev, HCI_HS_ENABLED);
7691 	mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match);
7694 		new_settings(hdev, match.sk);
7699 	hci_req_init(&req, hdev);
7701 	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) {
7702 		if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS))
7703 			hci_req_add(&req, HCI_OP_WRITE_SSP_DEBUG_MODE,
7704 				    sizeof(enable), &enable);
7705 		__hci_req_update_eir(&req);
7710 	hci_req_run(&req, NULL);
/* mgmt_pending_foreach() callback: remember the first pending command's
 * socket in the cmd_lookup, taking a reference so it survives command
 * removal.
 */
7713 static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data)
7715 	struct cmd_lookup *match = data;
7717 	if (match->sk == NULL) {
7718 		match->sk = cmd->sk;
7719 		sock_hold(match->sk);
/* Completion handler for a class-of-device update: find the socket of
 * whichever command (SET_DEV_CLASS / ADD_UUID / REMOVE_UUID) triggered
 * it and broadcast CLASS_OF_DEV_CHANGED to everyone else.
 */
7723 void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class,
7726 	struct cmd_lookup match = { NULL, hdev, mgmt_status(status) };
7728 	mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match);
7729 	mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match);
7730 	mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match);
7733 		mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class,
7734 				   3, HCI_MGMT_DEV_CLASS_EVENTS, NULL);
7735 		ext_info_changed(hdev, NULL);
/* Completion handler for a local-name change: cache the new name,
 * suppress events during power-on, and otherwise broadcast
 * LOCAL_NAME_CHANGED (skipping the requesting socket, if any).
 */
7742 void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status)
7744 	struct mgmt_cp_set_local_name ev;
7745 	struct mgmt_pending_cmd *cmd;
7750 	memset(&ev, 0, sizeof(ev));
7751 	memcpy(ev.name, name, HCI_MAX_NAME_LENGTH);
7752 	memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH);
7754 	cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev);
7756 		memcpy(hdev->dev_name, name, sizeof(hdev->dev_name));
7758 		/* If this is a HCI command related to powering on the
7759 		 * HCI dev don't send any mgmt signals.
7761 		if (pending_find(MGMT_OP_SET_POWERED, hdev))
7765 	mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev),
7766 			   HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL);
7767 	ext_info_changed(hdev, cmd ? cmd->sk : NULL);
/* Return true if @uuid (16 bytes) appears in the @uuids array of
 * @uuid_count entries. Used by the service-discovery result filter.
 */
7770 static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16])
7774 	for (i = 0; i < uuid_count; i++) {
7775 		if (!memcmp(uuid, uuids[i], 16))
/* Walk the EIR/advertising data in @eir and return true if any UUID
 * advertised there (16-, 32- or 128-bit, expanded against the Bluetooth
 * base UUID) matches an entry of @uuids. Stops on malformed length
 * fields that would overrun the buffer.
 */
7782 static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16])
7786 	while (parsed < eir_len) {
7787 		u8 field_len = eir[0];
		/* Reject a field that claims to extend past the buffer. */
7794 		if (eir_len - parsed < field_len + 1)
7798 		case EIR_UUID16_ALL:
7799 		case EIR_UUID16_SOME:
			/* 16-bit UUIDs occupy bytes 12-13 of the base UUID
			 * (little-endian on the wire). */
7800 			for (i = 0; i + 3 <= field_len; i += 2) {
7801 				memcpy(uuid, bluetooth_base_uuid, 16);
7802 				uuid[13] = eir[i + 3];
7803 				uuid[12] = eir[i + 2];
7804 				if (has_uuid(uuid, uuid_count, uuids))
7808 		case EIR_UUID32_ALL:
7809 		case EIR_UUID32_SOME:
7810 			for (i = 0; i + 5 <= field_len; i += 4) {
7811 				memcpy(uuid, bluetooth_base_uuid, 16);
7812 				uuid[15] = eir[i + 5];
7813 				uuid[14] = eir[i + 4];
7814 				uuid[13] = eir[i + 3];
7815 				uuid[12] = eir[i + 2];
7816 				if (has_uuid(uuid, uuid_count, uuids))
7820 		case EIR_UUID128_ALL:
7821 		case EIR_UUID128_SOME:
7822 			for (i = 0; i + 17 <= field_len; i += 16) {
7823 				memcpy(uuid, eir + i + 2, 16);
7824 				if (has_uuid(uuid, uuid_count, uuids))
		/* Advance past this field: length byte plus payload. */
7830 		parsed += field_len + 1;
7831 		eir += field_len + 1;
/* Schedule a delayed LE scan restart so that controllers with strict
 * duplicate filtering report fresh RSSI values. Skipped when not
 * scanning or when the current scan window would end before the
 * restart delay elapses.
 */
7837 static void restart_le_scan(struct hci_dev *hdev)
7839 	/* If controller is not scanning we are done. */
7840 	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
7843 	if (time_after(jiffies + DISCOV_LE_RESTART_DELAY,
7844 		       hdev->discovery.scan_start +
7845 		       hdev->discovery.scan_duration))
7848 	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart,
7849 			   DISCOV_LE_RESTART_DELAY);
/* Decide whether a discovery result passes the active service-discovery
 * filter: RSSI threshold first, then UUID whitelist against both the
 * EIR/advertising data and the scan response.
 */
7852 static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir,
7853 			    u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7855 	/* If a RSSI threshold has been specified, and
7856 	 * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with
7857 	 * a RSSI smaller than the RSSI threshold will be dropped. If the quirk
7858 	 * is set, let it through for further processing, as we might need to
7861 	 * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry,
7862 	 * the results are also dropped.
7864 	if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7865 	    (rssi == HCI_RSSI_INVALID ||
7866 	    (rssi < hdev->discovery.rssi &&
7867 	     !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks))))
7870 	if (hdev->discovery.uuid_count != 0) {
7871 		/* If a list of UUIDs is provided in filter, results with no
7872 		 * matching UUID should be dropped.
7874 		if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count,
7875 				   hdev->discovery.uuids) &&
7876 		    !eir_has_uuids(scan_rsp, scan_rsp_len,
7877 				   hdev->discovery.uuid_count,
7878 				   hdev->discovery.uuids))
7882 	/* If duplicate filtering does not report RSSI changes, then restart
7883 	 * scanning to ensure updated result with updated RSSI values.
7885 	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) {
7886 		restart_le_scan(hdev);
7888 		/* Validate RSSI value against the RSSI threshold once more. */
7889 		if (hdev->discovery.rssi != HCI_RSSI_INVALID &&
7890 		    rssi < hdev->discovery.rssi)
/* Broadcast a DEVICE_FOUND event for an inquiry/scan result. Applies
 * the active discovery filters (passive-scan policy, service filter,
 * limited-discovery check) before building the event, which carries the
 * EIR/advertising data, an appended class-of-device field when missing,
 * and the raw scan response.
 */
7897 void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7898 		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags,
7899 		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len)
7902 	struct mgmt_ev_device_found *ev = (void *)buf;
7905 	/* Don't send events for a non-kernel initiated discovery. With
7906 	 * LE one exception is if we have pend_le_reports > 0 in which
7907 	 * case we're doing passive scanning and want these events.
7909 	if (!hci_discovery_active(hdev)) {
7910 		if (link_type == ACL_LINK)
7912 		if (link_type == LE_LINK && list_empty(&hdev->pend_le_reports))
7916 	if (hdev->discovery.result_filtering) {
7917 		/* We are using service discovery */
7918 		if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp,
7923 	if (hdev->discovery.limited) {
7924 		/* Check for limited discoverable bit */
		/* BR/EDR: limited-discoverable lives in CoD bit 13. */
7926 			if (!(dev_class[1] & 0x20))
		/* LE: check the AD Flags field instead. */
7929 			u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL);
7930 			if (!flags || !(flags[0] & LE_AD_LIMITED))
7935 	/* Make sure that the buffer is big enough. The 5 extra bytes
7936 	 * are for the potential CoD field.
7938 	if (sizeof(*ev) + eir_len + scan_rsp_len + 5 > sizeof(buf))
7941 	memset(buf, 0, sizeof(buf));
7943 	/* In case of device discovery with BR/EDR devices (pre 1.2), the
7944 	 * RSSI value was reported as 0 when not available. This behavior
7945 	 * is kept when using device discovery. This is required for full
7946 	 * backwards compatibility with the API.
7948 	 * However when using service discovery, the value 127 will be
7949 	 * returned when the RSSI is not available.
7951 	if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi &&
7952 	    link_type == ACL_LINK)
7955 	bacpy(&ev->addr.bdaddr, bdaddr);
7956 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
7958 	ev->flags = cpu_to_le32(flags);
7961 		/* Copy EIR or advertising data into event */
7962 		memcpy(ev->eir, eir, eir_len);
	/* Append CoD only if the EIR does not already advertise one. */
7964 	if (dev_class && !eir_get_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7966 		eir_len = eir_append_data(ev->eir, eir_len, EIR_CLASS_OF_DEV,
7969 	if (scan_rsp_len > 0)
7970 		/* Append scan response data to event */
7971 		memcpy(ev->eir + eir_len, scan_rsp, scan_rsp_len);
7973 	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len);
7974 	ev_size = sizeof(*ev) + eir_len + scan_rsp_len;
7976 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, ev_size, NULL);
/* Broadcast a DEVICE_FOUND event that carries only the remote device's
 * name (as an EIR_NAME_COMPLETE field), used when a name resolution
 * completes after the initial discovery result.
 */
7979 void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
7980 		      u8 addr_type, s8 rssi, u8 *name, u8 name_len)
7982 	struct mgmt_ev_device_found *ev;
	/* +2 for the EIR field's length and type bytes. */
7983 	char buf[sizeof(*ev) + HCI_MAX_NAME_LENGTH + 2];
7986 	ev = (struct mgmt_ev_device_found *) buf;
7988 	memset(buf, 0, sizeof(buf));
7990 	bacpy(&ev->addr.bdaddr, bdaddr);
7991 	ev->addr.type = link_to_bdaddr(link_type, addr_type);
7994 	eir_len = eir_append_data(ev->eir, 0, EIR_NAME_COMPLETE, name,
7997 	ev->eir_len = cpu_to_le16(eir_len);
7999 	mgmt_event(MGMT_EV_DEVICE_FOUND, hdev, ev, sizeof(*ev) + eir_len, NULL);
/* Broadcast a DISCOVERING event signalling that discovery of the
 * current type has started (@discovering != 0) or stopped.
 */
8002 void mgmt_discovering(struct hci_dev *hdev, u8 discovering)
8004 	struct mgmt_ev_discovering ev;
8006 	BT_DBG("%s discovering %u", hdev->name, discovering);
8008 	memset(&ev, 0, sizeof(ev));
8009 	ev.type = hdev->discovery.type;
8010 	ev.discovering = discovering;
8012 	mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL);
/* Control-channel descriptor binding the mgmt_handlers table to
 * HCI_CHANNEL_CONTROL; registered with the HCI socket layer by
 * mgmt_init() (whose signature line is not visible in this excerpt).
 */
8015 static struct hci_mgmt_chan chan = {
8016 	.channel	= HCI_CHANNEL_CONTROL,
8017 	.handler_count	= ARRAY_SIZE(mgmt_handlers),
8018 	.handlers	= mgmt_handlers,
8019 	.hdev_init	= mgmt_init_hdev,
8024 	return hci_mgmt_chan_register(&chan);
/* Tear down the management interface: unregister the control channel. */
8027 void mgmt_exit(void)
8029 	hci_mgmt_chan_unregister(&chan);