/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI event handling. */

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "a2mp.h"
#include "amp.h"
#include "smp.h"

/* Handle HCI Event packets */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
        __u8 status = *((__u8 *) skb->data);

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        if (status)
                return;

        clear_bit(HCI_INQUIRY, &hdev->flags);
        smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
        wake_up_bit(&hdev->flags, HCI_INQUIRY);

        hci_dev_lock(hdev);
        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
        hci_dev_unlock(hdev);

        hci_conn_check_pending(hdev);
}
static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
        __u8 status = *((__u8 *) skb->data);

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        if (status)
                return;

        set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
}

static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
        __u8 status = *((__u8 *) skb->data);

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        if (status)
                return;

        clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);

        hci_conn_check_pending(hdev);
}
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
                                          struct sk_buff *skb)
{
        BT_DBG("%s", hdev->name);
}

static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_rp_role_discovery *rp = (void *) skb->data;
        struct hci_conn *conn;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        if (rp->status)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
        if (conn)
                conn->role = rp->role;

        hci_dev_unlock(hdev);
}
110 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
112 struct hci_rp_read_link_policy *rp = (void *) skb->data;
113 struct hci_conn *conn;
115 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
122 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
124 conn->link_policy = __le16_to_cpu(rp->policy);
126 hci_dev_unlock(hdev);
129 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
131 struct hci_rp_write_link_policy *rp = (void *) skb->data;
132 struct hci_conn *conn;
135 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
140 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
146 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
148 conn->link_policy = get_unaligned_le16(sent + 2);
150 hci_dev_unlock(hdev);
153 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
156 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
158 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
163 hdev->link_policy = __le16_to_cpu(rp->policy);
166 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
169 __u8 status = *((__u8 *) skb->data);
172 BT_DBG("%s status 0x%2.2x", hdev->name, status);
177 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
181 hdev->link_policy = get_unaligned_le16(sent);
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
        __u8 status = *((__u8 *) skb->data);

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        clear_bit(HCI_RESET, &hdev->flags);

        /* Reset all non-persistent flags */
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        hdev->discovery.state = DISCOVERY_STOPPED;
        hdev->inq_tx_power = HCI_TX_POWER_INVALID;
        hdev->adv_tx_power = HCI_TX_POWER_INVALID;

        memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
        hdev->adv_data_len = 0;

        memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
        hdev->scan_rsp_data_len = 0;

        hdev->le_scan_type = LE_SCAN_PASSIVE;

        hdev->ssp_debug_mode = 0;
}
210 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
212 __u8 status = *((__u8 *) skb->data);
215 BT_DBG("%s status 0x%2.2x", hdev->name, status);
217 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
223 if (test_bit(HCI_MGMT, &hdev->dev_flags))
224 mgmt_set_local_name_complete(hdev, sent, status);
226 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
228 hci_dev_unlock(hdev);
231 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
233 struct hci_rp_read_local_name *rp = (void *) skb->data;
235 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
240 if (test_bit(HCI_SETUP, &hdev->dev_flags))
241 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
244 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
246 __u8 status = *((__u8 *) skb->data);
249 BT_DBG("%s status 0x%2.2x", hdev->name, status);
251 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
256 __u8 param = *((__u8 *) sent);
258 if (param == AUTH_ENABLED)
259 set_bit(HCI_AUTH, &hdev->flags);
261 clear_bit(HCI_AUTH, &hdev->flags);
264 if (test_bit(HCI_MGMT, &hdev->dev_flags))
265 mgmt_auth_enable_complete(hdev, status);
268 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
270 __u8 status = *((__u8 *) skb->data);
274 BT_DBG("%s status 0x%2.2x", hdev->name, status);
279 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
283 param = *((__u8 *) sent);
286 set_bit(HCI_ENCRYPT, &hdev->flags);
288 clear_bit(HCI_ENCRYPT, &hdev->flags);
291 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
293 __u8 status = *((__u8 *) skb->data);
297 BT_DBG("%s status 0x%2.2x", hdev->name, status);
299 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
303 param = *((__u8 *) sent);
308 hdev->discov_timeout = 0;
312 if (param & SCAN_INQUIRY)
313 set_bit(HCI_ISCAN, &hdev->flags);
315 clear_bit(HCI_ISCAN, &hdev->flags);
317 if (param & SCAN_PAGE)
318 set_bit(HCI_PSCAN, &hdev->flags);
320 clear_bit(HCI_PSCAN, &hdev->flags);
323 hci_dev_unlock(hdev);
326 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
328 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
330 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
335 memcpy(hdev->dev_class, rp->dev_class, 3);
337 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
338 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
341 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
343 __u8 status = *((__u8 *) skb->data);
346 BT_DBG("%s status 0x%2.2x", hdev->name, status);
348 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
355 memcpy(hdev->dev_class, sent, 3);
357 if (test_bit(HCI_MGMT, &hdev->dev_flags))
358 mgmt_set_class_of_dev_complete(hdev, sent, status);
360 hci_dev_unlock(hdev);
363 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
365 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
368 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
373 setting = __le16_to_cpu(rp->voice_setting);
375 if (hdev->voice_setting == setting)
378 hdev->voice_setting = setting;
380 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
383 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
386 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
389 __u8 status = *((__u8 *) skb->data);
393 BT_DBG("%s status 0x%2.2x", hdev->name, status);
398 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
402 setting = get_unaligned_le16(sent);
404 if (hdev->voice_setting == setting)
407 hdev->voice_setting = setting;
409 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
412 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
415 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
418 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
420 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
425 hdev->num_iac = rp->num_iac;
427 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
430 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
432 __u8 status = *((__u8 *) skb->data);
433 struct hci_cp_write_ssp_mode *sent;
435 BT_DBG("%s status 0x%2.2x", hdev->name, status);
437 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
443 hdev->features[1][0] |= LMP_HOST_SSP;
445 hdev->features[1][0] &= ~LMP_HOST_SSP;
448 if (test_bit(HCI_MGMT, &hdev->dev_flags))
449 mgmt_ssp_enable_complete(hdev, sent->mode, status);
452 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
454 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
458 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
460 u8 status = *((u8 *) skb->data);
461 struct hci_cp_write_sc_support *sent;
463 BT_DBG("%s status 0x%2.2x", hdev->name, status);
465 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
471 hdev->features[1][0] |= LMP_HOST_SC;
473 hdev->features[1][0] &= ~LMP_HOST_SC;
476 if (test_bit(HCI_MGMT, &hdev->dev_flags))
477 mgmt_sc_enable_complete(hdev, sent->support, status);
480 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
482 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
486 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
488 struct hci_rp_read_local_version *rp = (void *) skb->data;
490 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
495 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
496 hdev->hci_ver = rp->hci_ver;
497 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
498 hdev->lmp_ver = rp->lmp_ver;
499 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
500 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
504 static void hci_cc_read_local_commands(struct hci_dev *hdev,
507 struct hci_rp_read_local_commands *rp = (void *) skb->data;
509 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
514 if (test_bit(HCI_SETUP, &hdev->dev_flags))
515 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
518 static void hci_cc_read_local_features(struct hci_dev *hdev,
521 struct hci_rp_read_local_features *rp = (void *) skb->data;
523 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
528 memcpy(hdev->features, rp->features, 8);
530 /* Adjust default settings according to features
531 * supported by device. */
533 if (hdev->features[0][0] & LMP_3SLOT)
534 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
536 if (hdev->features[0][0] & LMP_5SLOT)
537 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
539 if (hdev->features[0][1] & LMP_HV2) {
540 hdev->pkt_type |= (HCI_HV2);
541 hdev->esco_type |= (ESCO_HV2);
544 if (hdev->features[0][1] & LMP_HV3) {
545 hdev->pkt_type |= (HCI_HV3);
546 hdev->esco_type |= (ESCO_HV3);
549 if (lmp_esco_capable(hdev))
550 hdev->esco_type |= (ESCO_EV3);
552 if (hdev->features[0][4] & LMP_EV4)
553 hdev->esco_type |= (ESCO_EV4);
555 if (hdev->features[0][4] & LMP_EV5)
556 hdev->esco_type |= (ESCO_EV5);
558 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
559 hdev->esco_type |= (ESCO_2EV3);
561 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
562 hdev->esco_type |= (ESCO_3EV3);
564 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
565 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
568 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
571 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
573 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
578 if (hdev->max_page < rp->max_page)
579 hdev->max_page = rp->max_page;
581 if (rp->page < HCI_MAX_PAGES)
582 memcpy(hdev->features[rp->page], rp->features, 8);
585 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
588 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
590 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
595 hdev->flow_ctl_mode = rp->mode;
598 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
600 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
602 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
607 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
608 hdev->sco_mtu = rp->sco_mtu;
609 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
610 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
612 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
617 hdev->acl_cnt = hdev->acl_pkts;
618 hdev->sco_cnt = hdev->sco_pkts;
620 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
621 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
624 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
626 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
628 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
633 if (test_bit(HCI_INIT, &hdev->flags))
634 bacpy(&hdev->bdaddr, &rp->bdaddr);
636 if (test_bit(HCI_SETUP, &hdev->dev_flags))
637 bacpy(&hdev->setup_addr, &rp->bdaddr);
640 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
643 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
645 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
650 if (test_bit(HCI_INIT, &hdev->flags)) {
651 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
652 hdev->page_scan_window = __le16_to_cpu(rp->window);
656 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
659 u8 status = *((u8 *) skb->data);
660 struct hci_cp_write_page_scan_activity *sent;
662 BT_DBG("%s status 0x%2.2x", hdev->name, status);
667 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
671 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
672 hdev->page_scan_window = __le16_to_cpu(sent->window);
675 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
678 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
680 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
685 if (test_bit(HCI_INIT, &hdev->flags))
686 hdev->page_scan_type = rp->type;
689 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
692 u8 status = *((u8 *) skb->data);
695 BT_DBG("%s status 0x%2.2x", hdev->name, status);
700 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
702 hdev->page_scan_type = *type;
705 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
708 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
710 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
715 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
716 hdev->block_len = __le16_to_cpu(rp->block_len);
717 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
719 hdev->block_cnt = hdev->num_blocks;
721 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
722 hdev->block_cnt, hdev->block_len);
725 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
727 struct hci_rp_read_clock *rp = (void *) skb->data;
728 struct hci_cp_read_clock *cp;
729 struct hci_conn *conn;
731 BT_DBG("%s", hdev->name);
733 if (skb->len < sizeof(*rp))
741 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
745 if (cp->which == 0x00) {
746 hdev->clock = le32_to_cpu(rp->clock);
750 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
752 conn->clock = le32_to_cpu(rp->clock);
753 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
757 hci_dev_unlock(hdev);
760 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
763 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
765 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
770 hdev->amp_status = rp->amp_status;
771 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
772 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
773 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
774 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
775 hdev->amp_type = rp->amp_type;
776 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
777 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
778 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
779 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
782 a2mp_send_getinfo_rsp(hdev);
785 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
788 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
789 struct amp_assoc *assoc = &hdev->loc_assoc;
790 size_t rem_len, frag_len;
792 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
797 frag_len = skb->len - sizeof(*rp);
798 rem_len = __le16_to_cpu(rp->rem_len);
800 if (rem_len > frag_len) {
801 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
803 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
804 assoc->offset += frag_len;
806 /* Read other fragments */
807 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
812 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
813 assoc->len = assoc->offset + rem_len;
817 /* Send A2MP Rsp when all fragments are received */
818 a2mp_send_getampassoc_rsp(hdev, rp->status);
819 a2mp_send_create_phy_link_req(hdev, rp->status);
822 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
825 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
827 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
832 hdev->inq_tx_power = rp->tx_power;
835 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
837 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
838 struct hci_cp_pin_code_reply *cp;
839 struct hci_conn *conn;
841 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
845 if (test_bit(HCI_MGMT, &hdev->dev_flags))
846 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
851 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
855 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
857 conn->pin_length = cp->pin_len;
860 hci_dev_unlock(hdev);
863 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
865 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
867 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
871 if (test_bit(HCI_MGMT, &hdev->dev_flags))
872 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
875 hci_dev_unlock(hdev);
878 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
881 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
883 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
888 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
889 hdev->le_pkts = rp->le_max_pkt;
891 hdev->le_cnt = hdev->le_pkts;
893 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
896 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
899 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
901 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
906 memcpy(hdev->le_features, rp->features, 8);
909 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
912 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
914 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
919 hdev->adv_tx_power = rp->tx_power;
922 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
924 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
926 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
930 if (test_bit(HCI_MGMT, &hdev->dev_flags))
931 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
934 hci_dev_unlock(hdev);
937 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
940 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
942 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
946 if (test_bit(HCI_MGMT, &hdev->dev_flags))
947 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
948 ACL_LINK, 0, rp->status);
950 hci_dev_unlock(hdev);
953 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
955 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
957 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
961 if (test_bit(HCI_MGMT, &hdev->dev_flags))
962 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
965 hci_dev_unlock(hdev);
968 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
971 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
973 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
977 if (test_bit(HCI_MGMT, &hdev->dev_flags))
978 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
979 ACL_LINK, 0, rp->status);
981 hci_dev_unlock(hdev);
984 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
987 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
989 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
992 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
993 NULL, NULL, rp->status);
994 hci_dev_unlock(hdev);
997 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1000 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1002 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1005 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
1006 rp->hash256, rp->randomizer256,
1008 hci_dev_unlock(hdev);
1012 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1014 __u8 status = *((__u8 *) skb->data);
1017 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1022 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1028 bacpy(&hdev->random_addr, sent);
1030 hci_dev_unlock(hdev);
1033 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1035 __u8 *sent, status = *((__u8 *) skb->data);
1037 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1042 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
/* If we're doing connection initiation as peripheral, set a
 * timeout in case something goes wrong.
1052 struct hci_conn *conn;
1054 set_bit(HCI_LE_ADV, &hdev->dev_flags);
1056 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1058 queue_delayed_work(hdev->workqueue,
1059 &conn->le_conn_timeout,
1060 conn->conn_timeout);
1062 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1065 hci_dev_unlock(hdev);
1068 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1070 struct hci_cp_le_set_scan_param *cp;
1071 __u8 status = *((__u8 *) skb->data);
1073 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1078 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1084 hdev->le_scan_type = cp->type;
1086 hci_dev_unlock(hdev);
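/* During active LE scanning, the advertising report of a device is cached
 * here until the matching scan response (or the scan being disabled)
 * arrives, so both can be reported to the management interface together.
 */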
static bool has_pending_adv_report(struct hci_dev *hdev)
{
        struct discovery_state *d = &hdev->discovery;

        return bacmp(&d->last_adv_addr, BDADDR_ANY);
}

static void clear_pending_adv_report(struct hci_dev *hdev)
{
        struct discovery_state *d = &hdev->discovery;

        bacpy(&d->last_adv_addr, BDADDR_ANY);
        d->last_adv_data_len = 0;
}

static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 bdaddr_type, s8 rssi, u32 flags,
                                     u8 *data, u8 len)
{
        struct discovery_state *d = &hdev->discovery;

        bacpy(&d->last_adv_addr, bdaddr);
        d->last_adv_addr_type = bdaddr_type;
        d->last_adv_rssi = rssi;
        d->last_adv_flags = flags;
        memcpy(d->last_adv_data, data, len);
        d->last_adv_data_len = len;
}
1118 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1119 struct sk_buff *skb)
1121 struct hci_cp_le_set_scan_enable *cp;
1122 __u8 status = *((__u8 *) skb->data);
1124 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1129 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1133 switch (cp->enable) {
1134 case LE_SCAN_ENABLE:
1135 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1136 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1137 clear_pending_adv_report(hdev);
1140 case LE_SCAN_DISABLE:
1141 /* We do this here instead of when setting DISCOVERY_STOPPED
1142 * since the latter would potentially require waiting for
1143 * inquiry to stop too.
1145 if (has_pending_adv_report(hdev)) {
1146 struct discovery_state *d = &hdev->discovery;
1148 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1149 d->last_adv_addr_type, NULL,
1150 d->last_adv_rssi, d->last_adv_flags,
1152 d->last_adv_data_len, NULL, 0);
1155 /* Cancel this timer so that we don't try to disable scanning
1156 * when it's already disabled.
1158 cancel_delayed_work(&hdev->le_scan_disable);
1160 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
 * interrupted scanning due to a connect request, so mark
 * discovery as stopped. If scanning was not interrupted
 * because of a connect request, advertising might have
 * been disabled because of active scanning, so
 * re-enable it again if necessary.
1169 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1171 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1172 else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
1173 hdev->discovery.state == DISCOVERY_FINDING)
1174 mgmt_reenable_advertising(hdev);
1179 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1184 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1185 struct sk_buff *skb)
1187 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1189 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1194 hdev->le_white_list_size = rp->size;
1197 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1198 struct sk_buff *skb)
1200 __u8 status = *((__u8 *) skb->data);
1202 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1207 hci_bdaddr_list_clear(&hdev->le_white_list);
1210 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1211 struct sk_buff *skb)
1213 struct hci_cp_le_add_to_white_list *sent;
1214 __u8 status = *((__u8 *) skb->data);
1216 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1221 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1225 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1229 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1230 struct sk_buff *skb)
1232 struct hci_cp_le_del_from_white_list *sent;
1233 __u8 status = *((__u8 *) skb->data);
1235 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1240 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1244 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1248 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1249 struct sk_buff *skb)
1251 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1253 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1258 memcpy(hdev->le_states, rp->le_states, 8);
1261 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1262 struct sk_buff *skb)
1264 struct hci_cp_write_le_host_supported *sent;
1265 __u8 status = *((__u8 *) skb->data);
1267 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1272 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1277 hdev->features[1][0] |= LMP_HOST_LE;
1278 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1280 hdev->features[1][0] &= ~LMP_HOST_LE;
1281 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1282 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1286 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1288 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1291 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1293 struct hci_cp_le_set_adv_param *cp;
1294 u8 status = *((u8 *) skb->data);
1296 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1301 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1306 hdev->adv_addr_type = cp->own_address_type;
1307 hci_dev_unlock(hdev);
1310 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1311 struct sk_buff *skb)
1313 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1315 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1316 hdev->name, rp->status, rp->phy_handle);
1321 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1324 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1326 struct hci_rp_read_rssi *rp = (void *) skb->data;
1327 struct hci_conn *conn;
1329 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1336 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1338 conn->rssi = rp->rssi;
1340 hci_dev_unlock(hdev);
1343 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1345 struct hci_cp_read_tx_power *sent;
1346 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1347 struct hci_conn *conn;
1349 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1354 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1360 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1364 switch (sent->type) {
1366 conn->tx_power = rp->tx_power;
1369 conn->max_tx_power = rp->tx_power;
1374 hci_dev_unlock(hdev);
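/* The hci_cs_* handlers below process Command Status events for commands
 * whose final result is only reported through a later event (for example
 * Connect Complete), handling the early-failure case here.
 */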
1377 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1379 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1382 hci_conn_check_pending(hdev);
1386 set_bit(HCI_INQUIRY, &hdev->flags);
1389 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1391 struct hci_cp_create_conn *cp;
1392 struct hci_conn *conn;
1394 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1396 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1402 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1404 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1407 if (conn && conn->state == BT_CONNECT) {
1408 if (status != 0x0c || conn->attempt > 2) {
1409 conn->state = BT_CLOSED;
1410 hci_proto_connect_cfm(conn, status);
1413 conn->state = BT_CONNECT2;
1417 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
1420 BT_ERR("No memory for new connection");
1424 hci_dev_unlock(hdev);
1427 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1429 struct hci_cp_add_sco *cp;
1430 struct hci_conn *acl, *sco;
1433 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1438 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1442 handle = __le16_to_cpu(cp->handle);
1444 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1448 acl = hci_conn_hash_lookup_handle(hdev, handle);
1452 sco->state = BT_CLOSED;
1454 hci_proto_connect_cfm(sco, status);
1459 hci_dev_unlock(hdev);
1462 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1464 struct hci_cp_auth_requested *cp;
1465 struct hci_conn *conn;
1467 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1472 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1478 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1480 if (conn->state == BT_CONFIG) {
1481 hci_proto_connect_cfm(conn, status);
1482 hci_conn_drop(conn);
1486 hci_dev_unlock(hdev);
1489 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1491 struct hci_cp_set_conn_encrypt *cp;
1492 struct hci_conn *conn;
1494 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1499 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1505 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1507 if (conn->state == BT_CONFIG) {
1508 hci_proto_connect_cfm(conn, status);
1509 hci_conn_drop(conn);
1513 hci_dev_unlock(hdev);
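/* Check whether an outgoing connection in BT_CONFIG state still needs
 * authentication before it can be reported as connected.
 */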
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
                                    struct hci_conn *conn)
{
        if (conn->state != BT_CONFIG || !conn->out)
                return 0;

        if (conn->pending_sec_level == BT_SECURITY_SDP)
                return 0;

        /* Only request authentication for SSP connections or non-SSP
         * devices with sec_level MEDIUM or HIGH or if MITM protection
         * is requested.
         */
        if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
            conn->pending_sec_level != BT_SECURITY_FIPS &&
            conn->pending_sec_level != BT_SECURITY_HIGH &&
            conn->pending_sec_level != BT_SECURITY_MEDIUM)
                return 0;

        return 1;
}
static int hci_resolve_name(struct hci_dev *hdev,
                            struct inquiry_entry *e)
{
        struct hci_cp_remote_name_req cp;

        memset(&cp, 0, sizeof(cp));

        bacpy(&cp.bdaddr, &e->data.bdaddr);
        cp.pscan_rep_mode = e->data.pscan_rep_mode;
        cp.pscan_mode = e->data.pscan_mode;
        cp.clock_offset = e->data.clock_offset;

        return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}

static bool hci_resolve_next_name(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;
        struct inquiry_entry *e;

        if (list_empty(&discov->resolve))
                return false;

        e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
        if (!e)
                return false;

        if (hci_resolve_name(hdev, e) == 0) {
                e->name_state = NAME_PENDING;
                return true;
        }

        return false;
}
1573 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1574 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1576 struct discovery_state *discov = &hdev->discovery;
1577 struct inquiry_entry *e;
1579 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1580 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1581 name_len, conn->dev_class);
1583 if (discov->state == DISCOVERY_STOPPED)
1586 if (discov->state == DISCOVERY_STOPPING)
1587 goto discov_complete;
1589 if (discov->state != DISCOVERY_RESOLVING)
1592 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
/* If the device was not found in the list of found devices whose names
 * are pending, there is no need to continue resolving the next name, as
 * it will be done upon receiving another Remote Name Request Complete
1602 e->name_state = NAME_KNOWN;
1603 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1604 e->data.rssi, name, name_len);
1606 e->name_state = NAME_NOT_KNOWN;
1609 if (hci_resolve_next_name(hdev))
1613 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1616 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1618 struct hci_cp_remote_name_req *cp;
1619 struct hci_conn *conn;
1621 BT_DBG("%s status 0x%2.2x", hdev->name, status);
/* If successful, wait for the name req complete event before
 * checking for the need to do authentication */
1628 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1634 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1636 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1637 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1642 if (!hci_outgoing_auth_needed(hdev, conn))
1645 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1646 struct hci_cp_auth_requested auth_cp;
1648 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
1650 auth_cp.handle = __cpu_to_le16(conn->handle);
1651 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1652 sizeof(auth_cp), &auth_cp);
1656 hci_dev_unlock(hdev);
1659 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1661 struct hci_cp_read_remote_features *cp;
1662 struct hci_conn *conn;
1664 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1669 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1675 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1677 if (conn->state == BT_CONFIG) {
1678 hci_proto_connect_cfm(conn, status);
1679 hci_conn_drop(conn);
1683 hci_dev_unlock(hdev);
1686 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1688 struct hci_cp_read_remote_ext_features *cp;
1689 struct hci_conn *conn;
1691 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1696 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1702 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1704 if (conn->state == BT_CONFIG) {
1705 hci_proto_connect_cfm(conn, status);
1706 hci_conn_drop(conn);
1710 hci_dev_unlock(hdev);
1713 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1715 struct hci_cp_setup_sync_conn *cp;
1716 struct hci_conn *acl, *sco;
1719 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1724 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1728 handle = __le16_to_cpu(cp->handle);
1730 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1734 acl = hci_conn_hash_lookup_handle(hdev, handle);
1738 sco->state = BT_CLOSED;
1740 hci_proto_connect_cfm(sco, status);
1745 hci_dev_unlock(hdev);
1748 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1750 struct hci_cp_sniff_mode *cp;
1751 struct hci_conn *conn;
1753 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1758 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1764 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1766 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1768 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1769 hci_sco_setup(conn, status);
1772 hci_dev_unlock(hdev);
1775 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1777 struct hci_cp_exit_sniff_mode *cp;
1778 struct hci_conn *conn;
1780 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1785 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1791 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1793 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1795 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1796 hci_sco_setup(conn, status);
1799 hci_dev_unlock(hdev);
1802 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1804 struct hci_cp_disconnect *cp;
1805 struct hci_conn *conn;
1810 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1816 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1818 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1819 conn->dst_type, status);
1821 hci_dev_unlock(hdev);
1824 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1826 struct hci_cp_create_phy_link *cp;
1828 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1830 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1837 struct hci_conn *hcon;
1839 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1843 amp_write_remote_assoc(hdev, cp->phy_handle);
1846 hci_dev_unlock(hdev);
1849 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1851 struct hci_cp_accept_phy_link *cp;
1853 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1858 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1862 amp_write_remote_assoc(hdev, cp->phy_handle);
1865 static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
1867 struct hci_cp_le_create_conn *cp;
1868 struct hci_conn *conn;
1870 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1872 /* All connection failure handling is taken care of by the
1873 * hci_le_conn_failed function which is triggered by the HCI
1874 * request completion callbacks used for connecting.
1879 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1885 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1889 /* Store the initiator and responder address information which
1890 * is needed for SMP. These values will not change during the
1891 * lifetime of the connection.
1893 conn->init_addr_type = cp->own_address_type;
1894 if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
1895 bacpy(&conn->init_addr, &hdev->random_addr);
1897 bacpy(&conn->init_addr, &hdev->bdaddr);
1899 conn->resp_addr_type = cp->peer_addr_type;
1900 bacpy(&conn->resp_addr, &cp->peer_addr);
1902 /* We don't want the connection attempt to stick around
1903 * indefinitely since LE doesn't have a page timeout concept
1904 * like BR/EDR. Set a timer for any connection that doesn't use
1905 * the white list for connecting.
1907 if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
1908 queue_delayed_work(conn->hdev->workqueue,
1909 &conn->le_conn_timeout,
1910 conn->conn_timeout);
1913 hci_dev_unlock(hdev);
1916 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1918 struct hci_cp_le_start_enc *cp;
1919 struct hci_conn *conn;
1921 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1928 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1932 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1936 if (conn->state != BT_CONNECTED)
1939 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1940 hci_conn_drop(conn);
1943 hci_dev_unlock(hdev);
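/* From here on: handlers for asynchronous HCI events (inquiry results,
 * connection setup and teardown, authentication, encryption, ...).
 */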
1946 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1948 __u8 status = *((__u8 *) skb->data);
1949 struct discovery_state *discov = &hdev->discovery;
1950 struct inquiry_entry *e;
1952 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1954 hci_conn_check_pending(hdev);
1956 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1959 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
1960 wake_up_bit(&hdev->flags, HCI_INQUIRY);
1962 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1967 if (discov->state != DISCOVERY_FINDING)
1970 if (list_empty(&discov->resolve)) {
1971 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1975 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1976 if (e && hci_resolve_name(hdev, e) == 0) {
1977 e->name_state = NAME_PENDING;
1978 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1980 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1984 hci_dev_unlock(hdev);
1987 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1989 struct inquiry_data data;
1990 struct inquiry_info *info = (void *) (skb->data + 1);
1991 int num_rsp = *((__u8 *) skb->data);
1993 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1998 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2003 for (; num_rsp; num_rsp--, info++) {
2006 bacpy(&data.bdaddr, &info->bdaddr);
2007 data.pscan_rep_mode = info->pscan_rep_mode;
2008 data.pscan_period_mode = info->pscan_period_mode;
2009 data.pscan_mode = info->pscan_mode;
2010 memcpy(data.dev_class, info->dev_class, 3);
2011 data.clock_offset = info->clock_offset;
2013 data.ssp_mode = 0x00;
2015 flags = hci_inquiry_cache_update(hdev, &data, false);
2017 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2018 info->dev_class, 0, flags, NULL, 0, NULL, 0);
2021 hci_dev_unlock(hdev);
2024 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2026 struct hci_ev_conn_complete *ev = (void *) skb->data;
2027 struct hci_conn *conn;
2029 BT_DBG("%s", hdev->name);
2033 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2035 if (ev->link_type != SCO_LINK)
2038 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2042 conn->type = SCO_LINK;
2046 conn->handle = __le16_to_cpu(ev->handle);
2048 if (conn->type == ACL_LINK) {
2049 conn->state = BT_CONFIG;
2050 hci_conn_hold(conn);
2052 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
2053 !hci_find_link_key(hdev, &ev->bdaddr))
2054 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2056 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2058 conn->state = BT_CONNECTED;
2060 hci_conn_add_sysfs(conn);
2062 if (test_bit(HCI_AUTH, &hdev->flags))
2063 set_bit(HCI_CONN_AUTH, &conn->flags);
2065 if (test_bit(HCI_ENCRYPT, &hdev->flags))
2066 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2068 /* Get remote features */
2069 if (conn->type == ACL_LINK) {
2070 struct hci_cp_read_remote_features cp;
2071 cp.handle = ev->handle;
2072 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
2075 hci_update_page_scan(hdev, NULL);
2078 /* Set packet type for incoming connection */
2079 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
2080 struct hci_cp_change_conn_ptype cp;
2081 cp.handle = ev->handle;
2082 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2083 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
2087 conn->state = BT_CLOSED;
2088 if (conn->type == ACL_LINK)
2089 mgmt_connect_failed(hdev, &conn->dst, conn->type,
2090 conn->dst_type, ev->status);
2093 if (conn->type == ACL_LINK)
2094 hci_sco_setup(conn, ev->status);
2097 hci_proto_connect_cfm(conn, ev->status);
2099 } else if (ev->link_type != ACL_LINK)
2100 hci_proto_connect_cfm(conn, ev->status);
2103 hci_dev_unlock(hdev);
2105 hci_conn_check_pending(hdev);
static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct hci_cp_reject_conn_req cp;

        bacpy(&cp.bdaddr, bdaddr);
        cp.reason = HCI_ERROR_REJ_BAD_ADDR;
        hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}
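/* Incoming ACL/SCO connection request: consult the protocol layers and the
 * black/white lists, then either accept, defer or reject the connection.
 */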
2117 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2119 struct hci_ev_conn_request *ev = (void *) skb->data;
2120 int mask = hdev->link_mode;
2121 struct inquiry_entry *ie;
2122 struct hci_conn *conn;
2125 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
2128 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
2131 if (!(mask & HCI_LM_ACCEPT)) {
2132 hci_reject_conn(hdev, &ev->bdaddr);
2136 if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
2138 hci_reject_conn(hdev, &ev->bdaddr);
2142 if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
2143 !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
2145 hci_reject_conn(hdev, &ev->bdaddr);
2149 /* Connection accepted */
2153 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2155 memcpy(ie->data.dev_class, ev->dev_class, 3);
2157 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
2160 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
2163 BT_ERR("No memory for new connection");
2164 hci_dev_unlock(hdev);
2169 memcpy(conn->dev_class, ev->dev_class, 3);
2171 hci_dev_unlock(hdev);
2173 if (ev->link_type == ACL_LINK ||
2174 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
2175 struct hci_cp_accept_conn_req cp;
2176 conn->state = BT_CONNECT;
2178 bacpy(&cp.bdaddr, &ev->bdaddr);
2180 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
2181 cp.role = 0x00; /* Become master */
2183 cp.role = 0x01; /* Remain slave */
2185 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
2186 } else if (!(flags & HCI_PROTO_DEFER)) {
2187 struct hci_cp_accept_sync_conn_req cp;
2188 conn->state = BT_CONNECT;
2190 bacpy(&cp.bdaddr, &ev->bdaddr);
2191 cp.pkt_type = cpu_to_le16(conn->pkt_type);
2193 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
2194 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
2195 cp.max_latency = cpu_to_le16(0xffff);
2196 cp.content_format = cpu_to_le16(hdev->voice_setting);
2197 cp.retrans_effort = 0xff;
2199 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
2202 conn->state = BT_CONNECT2;
2203 hci_proto_connect_cfm(conn, 0);
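/* Translate an HCI disconnect reason code into the reason codes used by
 * the management (mgmt) interface.
 */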
static u8 hci_to_mgmt_reason(u8 err)
{
        switch (err) {
        case HCI_ERROR_CONNECTION_TIMEOUT:
                return MGMT_DEV_DISCONN_TIMEOUT;
        case HCI_ERROR_REMOTE_USER_TERM:
        case HCI_ERROR_REMOTE_LOW_RESOURCES:
        case HCI_ERROR_REMOTE_POWER_OFF:
                return MGMT_DEV_DISCONN_REMOTE;
        case HCI_ERROR_LOCAL_HOST_TERM:
                return MGMT_DEV_DISCONN_LOCAL_HOST;
        default:
                return MGMT_DEV_DISCONN_UNKNOWN;
        }
}
2223 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2225 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2226 u8 reason = hci_to_mgmt_reason(ev->reason);
2227 struct hci_conn_params *params;
2228 struct hci_conn *conn;
2229 bool mgmt_connected;
2232 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2236 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2241 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2242 conn->dst_type, ev->status);
2246 conn->state = BT_CLOSED;
2248 mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
2249 mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
2250 reason, mgmt_connected);
2252 if (conn->type == ACL_LINK) {
2253 if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
2254 hci_remove_link_key(hdev, &conn->dst);
2256 hci_update_page_scan(hdev, NULL);
2259 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
2261 switch (params->auto_connect) {
2262 case HCI_AUTO_CONN_LINK_LOSS:
2263 if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
2267 case HCI_AUTO_CONN_DIRECT:
2268 case HCI_AUTO_CONN_ALWAYS:
2269 list_del_init(¶ms->action);
2270 list_add(¶ms->action, &hdev->pend_le_conns);
2271 hci_update_background_scan(hdev);
2281 hci_proto_disconn_cfm(conn, ev->reason);
2284 /* Re-enable advertising if necessary, since it might
2285 * have been disabled by the connection. From the
2286 * HCI_LE_Set_Advertise_Enable command description in
2287 * the core specification (v4.0):
2288 * "The Controller shall continue advertising until the Host
2289 * issues an LE_Set_Advertise_Enable command with
2290 * Advertising_Enable set to 0x00 (Advertising is disabled)
2291 * or until a connection is created or until the Advertising
2292 * is timed out due to Directed Advertising."
2294 if (type == LE_LINK)
2295 mgmt_reenable_advertising(hdev);
2298 hci_dev_unlock(hdev);
2301 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2303 struct hci_ev_auth_complete *ev = (void *) skb->data;
2304 struct hci_conn *conn;
2306 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2310 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2315 if (!hci_conn_ssp_enabled(conn) &&
2316 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
2317 BT_INFO("re-auth of legacy device is not possible.");
2319 set_bit(HCI_CONN_AUTH, &conn->flags);
2320 conn->sec_level = conn->pending_sec_level;
2323 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
2327 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2328 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
2330 if (conn->state == BT_CONFIG) {
2331 if (!ev->status && hci_conn_ssp_enabled(conn)) {
2332 struct hci_cp_set_conn_encrypt cp;
2333 cp.handle = ev->handle;
2335 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2338 conn->state = BT_CONNECTED;
2339 hci_proto_connect_cfm(conn, ev->status);
2340 hci_conn_drop(conn);
2343 hci_auth_cfm(conn, ev->status);
2345 hci_conn_hold(conn);
2346 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2347 hci_conn_drop(conn);
2350 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
2352 struct hci_cp_set_conn_encrypt cp;
2353 cp.handle = ev->handle;
2355 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
2358 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2359 hci_encrypt_cfm(conn, ev->status, 0x00);
2364 hci_dev_unlock(hdev);
2367 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2369 struct hci_ev_remote_name *ev = (void *) skb->data;
2370 struct hci_conn *conn;
2372 BT_DBG("%s", hdev->name);
2374 hci_conn_check_pending(hdev);
2378 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2380 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2383 if (ev->status == 0)
2384 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2385 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2387 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2393 if (!hci_outgoing_auth_needed(hdev, conn))
2396 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2397 struct hci_cp_auth_requested cp;
2399 set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);
2401 cp.handle = __cpu_to_le16(conn->handle);
2402 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2406 hci_dev_unlock(hdev);
2409 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2411 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2412 struct hci_conn *conn;
2414 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2418 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2424 /* Encryption implies authentication */
2425 set_bit(HCI_CONN_AUTH, &conn->flags);
2426 set_bit(HCI_CONN_ENCRYPT, &conn->flags);
2427 conn->sec_level = conn->pending_sec_level;
2429 /* P-256 authentication key implies FIPS */
2430 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2431 set_bit(HCI_CONN_FIPS, &conn->flags);
2433 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2434 conn->type == LE_LINK)
2435 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2437 clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
2438 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2442 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2444 if (ev->status && conn->state == BT_CONNECTED) {
2445 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2446 hci_conn_drop(conn);
2450 if (conn->state == BT_CONFIG) {
2452 conn->state = BT_CONNECTED;
2454 /* In Secure Connections Only mode, do not allow any
2455 * connections that are not encrypted with AES-CCM
2456 * using a P-256 authenticated combination key.
2458 if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
2459 (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
2460 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
2461 hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
2462 hci_conn_drop(conn);
2466 hci_proto_connect_cfm(conn, ev->status);
2467 hci_conn_drop(conn);
2469 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2472 hci_dev_unlock(hdev);
2475 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2476 struct sk_buff *skb)
2478 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2479 struct hci_conn *conn;
2481 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2485 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2488 set_bit(HCI_CONN_SECURE, &conn->flags);
2490 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2492 hci_key_change_cfm(conn, ev->status);
2495 hci_dev_unlock(hdev);
2498 static void hci_remote_features_evt(struct hci_dev *hdev,
2499 struct sk_buff *skb)
2501 struct hci_ev_remote_features *ev = (void *) skb->data;
2502 struct hci_conn *conn;
2504 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2508 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2513 memcpy(conn->features[0], ev->features, 8);
2515 if (conn->state != BT_CONFIG)
2518 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2519 struct hci_cp_read_remote_ext_features cp;
2520 cp.handle = ev->handle;
2522 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2527 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2528 struct hci_cp_remote_name_req cp;
2529 memset(&cp, 0, sizeof(cp));
2530 bacpy(&cp.bdaddr, &conn->dst);
2531 cp.pscan_rep_mode = 0x02;
2532 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2533 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2534 mgmt_device_connected(hdev, &conn->dst, conn->type,
2535 conn->dst_type, 0, NULL, 0,
2538 if (!hci_outgoing_auth_needed(hdev, conn)) {
2539 conn->state = BT_CONNECTED;
2540 hci_proto_connect_cfm(conn, ev->status);
2541 hci_conn_drop(conn);
2545 hci_dev_unlock(hdev);
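/* Command Complete event: look up the opcode of the command that just
 * completed and dispatch to the matching hci_cc_* handler above.
 */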
2548 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2550 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2551 u8 status = skb->data[sizeof(*ev)];
2554 skb_pull(skb, sizeof(*ev));
2556 opcode = __le16_to_cpu(ev->opcode);
2559 case HCI_OP_INQUIRY_CANCEL:
2560 hci_cc_inquiry_cancel(hdev, skb);
2563 case HCI_OP_PERIODIC_INQ:
2564 hci_cc_periodic_inq(hdev, skb);
2567 case HCI_OP_EXIT_PERIODIC_INQ:
2568 hci_cc_exit_periodic_inq(hdev, skb);
2571 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2572 hci_cc_remote_name_req_cancel(hdev, skb);
2575 case HCI_OP_ROLE_DISCOVERY:
2576 hci_cc_role_discovery(hdev, skb);
2579 case HCI_OP_READ_LINK_POLICY:
2580 hci_cc_read_link_policy(hdev, skb);
2583 case HCI_OP_WRITE_LINK_POLICY:
2584 hci_cc_write_link_policy(hdev, skb);
2587 case HCI_OP_READ_DEF_LINK_POLICY:
2588 hci_cc_read_def_link_policy(hdev, skb);
2591 case HCI_OP_WRITE_DEF_LINK_POLICY:
2592 hci_cc_write_def_link_policy(hdev, skb);
2596 hci_cc_reset(hdev, skb);
2599 case HCI_OP_WRITE_LOCAL_NAME:
2600 hci_cc_write_local_name(hdev, skb);
2603 case HCI_OP_READ_LOCAL_NAME:
2604 hci_cc_read_local_name(hdev, skb);
2607 case HCI_OP_WRITE_AUTH_ENABLE:
2608 hci_cc_write_auth_enable(hdev, skb);
2611 case HCI_OP_WRITE_ENCRYPT_MODE:
2612 hci_cc_write_encrypt_mode(hdev, skb);
2615 case HCI_OP_WRITE_SCAN_ENABLE:
2616 hci_cc_write_scan_enable(hdev, skb);
2619 case HCI_OP_READ_CLASS_OF_DEV:
2620 hci_cc_read_class_of_dev(hdev, skb);
2623 case HCI_OP_WRITE_CLASS_OF_DEV:
2624 hci_cc_write_class_of_dev(hdev, skb);
2627 case HCI_OP_READ_VOICE_SETTING:
2628 hci_cc_read_voice_setting(hdev, skb);
2631 case HCI_OP_WRITE_VOICE_SETTING:
2632 hci_cc_write_voice_setting(hdev, skb);
2635 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2636 hci_cc_read_num_supported_iac(hdev, skb);
2639 case HCI_OP_WRITE_SSP_MODE:
2640 hci_cc_write_ssp_mode(hdev, skb);
2643 case HCI_OP_WRITE_SC_SUPPORT:
2644 hci_cc_write_sc_support(hdev, skb);
2647 case HCI_OP_READ_LOCAL_VERSION:
2648 hci_cc_read_local_version(hdev, skb);
2651 case HCI_OP_READ_LOCAL_COMMANDS:
2652 hci_cc_read_local_commands(hdev, skb);
2655 case HCI_OP_READ_LOCAL_FEATURES:
2656 hci_cc_read_local_features(hdev, skb);
2659 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2660 hci_cc_read_local_ext_features(hdev, skb);
2663 case HCI_OP_READ_BUFFER_SIZE:
2664 hci_cc_read_buffer_size(hdev, skb);
2667 case HCI_OP_READ_BD_ADDR:
2668 hci_cc_read_bd_addr(hdev, skb);
2671 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2672 hci_cc_read_page_scan_activity(hdev, skb);
2675 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2676 hci_cc_write_page_scan_activity(hdev, skb);
2679 case HCI_OP_READ_PAGE_SCAN_TYPE:
2680 hci_cc_read_page_scan_type(hdev, skb);
2683 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2684 hci_cc_write_page_scan_type(hdev, skb);
2687 case HCI_OP_READ_DATA_BLOCK_SIZE:
2688 hci_cc_read_data_block_size(hdev, skb);
2691 case HCI_OP_READ_FLOW_CONTROL_MODE:
2692 hci_cc_read_flow_control_mode(hdev, skb);
2695 case HCI_OP_READ_LOCAL_AMP_INFO:
2696 hci_cc_read_local_amp_info(hdev, skb);
2699 case HCI_OP_READ_CLOCK:
2700 hci_cc_read_clock(hdev, skb);
2703 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2704 hci_cc_read_local_amp_assoc(hdev, skb);
2707 case HCI_OP_READ_INQ_RSP_TX_POWER:
2708 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2711 case HCI_OP_PIN_CODE_REPLY:
2712 hci_cc_pin_code_reply(hdev, skb);
2715 case HCI_OP_PIN_CODE_NEG_REPLY:
2716 hci_cc_pin_code_neg_reply(hdev, skb);
2719 case HCI_OP_READ_LOCAL_OOB_DATA:
2720 hci_cc_read_local_oob_data(hdev, skb);
2723 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2724 hci_cc_read_local_oob_ext_data(hdev, skb);
2727 case HCI_OP_LE_READ_BUFFER_SIZE:
2728 hci_cc_le_read_buffer_size(hdev, skb);
2731 case HCI_OP_LE_READ_LOCAL_FEATURES:
2732 hci_cc_le_read_local_features(hdev, skb);
2735 case HCI_OP_LE_READ_ADV_TX_POWER:
2736 hci_cc_le_read_adv_tx_power(hdev, skb);
2739 case HCI_OP_USER_CONFIRM_REPLY:
2740 hci_cc_user_confirm_reply(hdev, skb);
2743 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2744 hci_cc_user_confirm_neg_reply(hdev, skb);
2747 case HCI_OP_USER_PASSKEY_REPLY:
2748 hci_cc_user_passkey_reply(hdev, skb);
2751 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2752 hci_cc_user_passkey_neg_reply(hdev, skb);
2755 case HCI_OP_LE_SET_RANDOM_ADDR:
2756 hci_cc_le_set_random_addr(hdev, skb);
2759 case HCI_OP_LE_SET_ADV_ENABLE:
2760 hci_cc_le_set_adv_enable(hdev, skb);
2763 case HCI_OP_LE_SET_SCAN_PARAM:
2764 hci_cc_le_set_scan_param(hdev, skb);
2767 case HCI_OP_LE_SET_SCAN_ENABLE:
2768 hci_cc_le_set_scan_enable(hdev, skb);
2771 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2772 hci_cc_le_read_white_list_size(hdev, skb);
2775 case HCI_OP_LE_CLEAR_WHITE_LIST:
2776 hci_cc_le_clear_white_list(hdev, skb);
2779 case HCI_OP_LE_ADD_TO_WHITE_LIST:
2780 hci_cc_le_add_to_white_list(hdev, skb);
2783 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
2784 hci_cc_le_del_from_white_list(hdev, skb);
2787 case HCI_OP_LE_READ_SUPPORTED_STATES:
2788 hci_cc_le_read_supported_states(hdev, skb);
2791 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2792 hci_cc_write_le_host_supported(hdev, skb);
2795 case HCI_OP_LE_SET_ADV_PARAM:
2796 hci_cc_set_adv_param(hdev, skb);
2799 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2800 hci_cc_write_remote_amp_assoc(hdev, skb);
2803 case HCI_OP_READ_RSSI:
2804 hci_cc_read_rssi(hdev, skb);
2807 case HCI_OP_READ_TX_POWER:
2808 hci_cc_read_tx_power(hdev, skb);
2812 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2816 if (opcode != HCI_OP_NOP)
2817 cancel_delayed_work(&hdev->cmd_timer);
2819 hci_req_cmd_complete(hdev, opcode, status);
2821 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2822 atomic_set(&hdev->cmd_cnt, 1);
2823 if (!skb_queue_empty(&hdev->cmd_q))
2824 queue_work(hdev->workqueue, &hdev->cmd_work);
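/* Dispatch an HCI Command Status event: route the status to the hci_cs_*
 * handler matching the opcode, cancel the command timer, complete the
 * pending request when the sent command is not waiting for a further
 * event, and restart the command queue when command slots are free.
 */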
2828 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2830 struct hci_ev_cmd_status *ev = (void *) skb->data;
2833 skb_pull(skb, sizeof(*ev));
2835 opcode = __le16_to_cpu(ev->opcode);
2838 case HCI_OP_INQUIRY:
2839 hci_cs_inquiry(hdev, ev->status);
2842 case HCI_OP_CREATE_CONN:
2843 hci_cs_create_conn(hdev, ev->status);
2846 case HCI_OP_ADD_SCO:
2847 hci_cs_add_sco(hdev, ev->status);
2850 case HCI_OP_AUTH_REQUESTED:
2851 hci_cs_auth_requested(hdev, ev->status);
2854 case HCI_OP_SET_CONN_ENCRYPT:
2855 hci_cs_set_conn_encrypt(hdev, ev->status);
2858 case HCI_OP_REMOTE_NAME_REQ:
2859 hci_cs_remote_name_req(hdev, ev->status);
2862 case HCI_OP_READ_REMOTE_FEATURES:
2863 hci_cs_read_remote_features(hdev, ev->status);
2866 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2867 hci_cs_read_remote_ext_features(hdev, ev->status);
2870 case HCI_OP_SETUP_SYNC_CONN:
2871 hci_cs_setup_sync_conn(hdev, ev->status);
2874 case HCI_OP_SNIFF_MODE:
2875 hci_cs_sniff_mode(hdev, ev->status);
2878 case HCI_OP_EXIT_SNIFF_MODE:
2879 hci_cs_exit_sniff_mode(hdev, ev->status);
2882 case HCI_OP_DISCONNECT:
2883 hci_cs_disconnect(hdev, ev->status);
2886 case HCI_OP_CREATE_PHY_LINK:
2887 hci_cs_create_phylink(hdev, ev->status);
2890 case HCI_OP_ACCEPT_PHY_LINK:
2891 hci_cs_accept_phylink(hdev, ev->status);
2894 case HCI_OP_LE_CREATE_CONN:
2895 hci_cs_le_create_conn(hdev, ev->status);
2898 case HCI_OP_LE_START_ENC:
2899 hci_cs_le_start_enc(hdev, ev->status);
2903 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2907 if (opcode != HCI_OP_NOP)
2908 cancel_delayed_work(&hdev->cmd_timer);
2911 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
2912 hci_req_cmd_complete(hdev, opcode, ev->status);
2914 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2915 atomic_set(&hdev->cmd_cnt, 1);
2916 if (!skb_queue_empty(&hdev->cmd_q))
2917 queue_work(hdev->workqueue, &hdev->cmd_work);
2921 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2923 struct hci_ev_role_change *ev = (void *) skb->data;
2924 struct hci_conn *conn;
2926 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2930 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2933 conn->role = ev->role;
2935 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2937 hci_role_switch_cfm(conn, ev->status, ev->role);
2940 hci_dev_unlock(hdev);
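/* Number of Completed Packets: give transmit credits back to each reported
 * connection and top up the per-type (ACL, LE, SCO) packet counters, capped
 * at the values negotiated with the controller, then schedule tx_work.
 */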
2943 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2945 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2948 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2949 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2953 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2954 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2955 BT_DBG("%s bad parameters", hdev->name);
2959 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2961 for (i = 0; i < ev->num_hndl; i++) {
2962 struct hci_comp_pkts_info *info = &ev->handles[i];
2963 struct hci_conn *conn;
2964 __u16 handle, count;
2966 handle = __le16_to_cpu(info->handle);
2967 count = __le16_to_cpu(info->count);
2969 conn = hci_conn_hash_lookup_handle(hdev, handle);
2973 conn->sent -= count;
2975 switch (conn->type) {
2977 hdev->acl_cnt += count;
2978 if (hdev->acl_cnt > hdev->acl_pkts)
2979 hdev->acl_cnt = hdev->acl_pkts;
2983 if (hdev->le_pkts) {
2984 hdev->le_cnt += count;
2985 if (hdev->le_cnt > hdev->le_pkts)
2986 hdev->le_cnt = hdev->le_pkts;
2988 hdev->acl_cnt += count;
2989 if (hdev->acl_cnt > hdev->acl_pkts)
2990 hdev->acl_cnt = hdev->acl_pkts;
2995 hdev->sco_cnt += count;
2996 if (hdev->sco_cnt > hdev->sco_pkts)
2997 hdev->sco_cnt = hdev->sco_pkts;
3001 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3006 queue_work(hdev->workqueue, &hdev->tx_work);
3009 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3012 struct hci_chan *chan;
3014 switch (hdev->dev_type) {
3016 return hci_conn_hash_lookup_handle(hdev, handle);
3018 chan = hci_chan_lookup_handle(hdev, handle);
3023 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3030 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
3032 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
3035 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
3036 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
3040 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
3041 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
3042 BT_DBG("%s bad parameters", hdev->name);
3046 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks, ev->num_hndl);
3049 for (i = 0; i < ev->num_hndl; i++) {
3050 struct hci_comp_blocks_info *info = &ev->handles[i];
3051 struct hci_conn *conn = NULL;
3052 __u16 handle, block_count;
3054 handle = __le16_to_cpu(info->handle);
3055 block_count = __le16_to_cpu(info->blocks);
3057 conn = __hci_conn_lookup_handle(hdev, handle);
3061 conn->sent -= block_count;
3063 switch (conn->type) {
3066 hdev->block_cnt += block_count;
3067 if (hdev->block_cnt > hdev->num_blocks)
3068 hdev->block_cnt = hdev->num_blocks;
3072 BT_ERR("Unknown type %d conn %p", conn->type, conn);
3077 queue_work(hdev->workqueue, &hdev->tx_work);
3080 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3082 struct hci_ev_mode_change *ev = (void *) skb->data;
3083 struct hci_conn *conn;
3085 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3089 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3091 conn->mode = ev->mode;
3093 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3095 if (conn->mode == HCI_CM_ACTIVE)
3096 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3098 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3101 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3102 hci_sco_setup(conn, ev->status);
3105 hci_dev_unlock(hdev);
3108 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3110 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3111 struct hci_conn *conn;
3113 BT_DBG("%s", hdev->name);
3117 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3121 if (conn->state == BT_CONNECTED) {
3122 hci_conn_hold(conn);
3123 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3124 hci_conn_drop(conn);
3127 if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
3128 !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
3129 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3130 sizeof(ev->bdaddr), &ev->bdaddr);
3131 } else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
3134 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3139 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3143 hci_dev_unlock(hdev);
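/* Link Key Request: look up a stored link key for the peer and reply with
 * it unless the key is too weak for the pending security level, in which
 * case (or if no key is known) a negative reply is sent instead.
 */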
3146 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3148 struct hci_ev_link_key_req *ev = (void *) skb->data;
3149 struct hci_cp_link_key_reply cp;
3150 struct hci_conn *conn;
3151 struct link_key *key;
3153 BT_DBG("%s", hdev->name);
3155 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3160 key = hci_find_link_key(hdev, &ev->bdaddr);
3162 BT_DBG("%s link key not found for %pMR", hdev->name,
3167 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3170 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3172 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3173 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3174 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3175 BT_DBG("%s ignoring unauthenticated key", hdev->name);
3179 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3180 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3181 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3182 BT_DBG("%s ignoring key unauthenticated for high security",
3187 conn->key_type = key->type;
3188 conn->pin_length = key->pin_len;
3191 bacpy(&cp.bdaddr, &ev->bdaddr);
3192 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3194 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3196 hci_dev_unlock(hdev);
3201 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3202 hci_dev_unlock(hdev);
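/* Link Key Notification: record the new key for the connection, notify
 * user space with a persistence hint, drop debug keys from the kernel list
 * unless HCI_KEEP_DEBUG_KEYS is set, and update the connection's flush-key
 * flag accordingly.
 */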
3205 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3207 struct hci_ev_link_key_notify *ev = (void *) skb->data;
3208 struct hci_conn *conn;
3209 struct link_key *key;
3213 BT_DBG("%s", hdev->name);
3217 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3219 hci_conn_hold(conn);
3220 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3221 pin_len = conn->pin_length;
3223 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
3224 conn->key_type = ev->key_type;
3226 hci_conn_drop(conn);
3229 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3232 key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
3233 ev->key_type, pin_len, &persistent);
3237 mgmt_new_link_key(hdev, key, persistent);
3239 /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
3240 * is set. If it's not set simply remove the key from the kernel
3241 * list (we've still notified user space about it but with
3242 * store_hint being 0). */
3244 if (key->type == HCI_LK_DEBUG_COMBINATION &&
3245 !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
3246 list_del(&key->list);
3250 clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3252 set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
3256 hci_dev_unlock(hdev);
3259 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3261 struct hci_ev_clock_offset *ev = (void *) skb->data;
3262 struct hci_conn *conn;
3264 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3268 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3269 if (conn && !ev->status) {
3270 struct inquiry_entry *ie;
3272 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3274 ie->data.clock_offset = ev->clock_offset;
3275 ie->timestamp = jiffies;
3279 hci_dev_unlock(hdev);
3282 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3284 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3285 struct hci_conn *conn;
3287 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3291 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3292 if (conn && !ev->status)
3293 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3295 hci_dev_unlock(hdev);
3298 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3300 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3301 struct inquiry_entry *ie;
3303 BT_DBG("%s", hdev->name);
3307 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3309 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3310 ie->timestamp = jiffies;
3313 hci_dev_unlock(hdev);
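/* Inquiry Result with RSSI: this event has two on-wire layouts, with and
 * without a pscan_mode field, distinguished here by the per-response size.
 * Each response is added to the inquiry cache and reported to user space
 * via mgmt_device_found().
 */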
3316 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3317 struct sk_buff *skb)
3319 struct inquiry_data data;
3320 int num_rsp = *((__u8 *) skb->data);
3322 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3327 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3332 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3333 struct inquiry_info_with_rssi_and_pscan_mode *info;
3334 info = (void *) (skb->data + 1);
3336 for (; num_rsp; num_rsp--, info++) {
3339 bacpy(&data.bdaddr, &info->bdaddr);
3340 data.pscan_rep_mode = info->pscan_rep_mode;
3341 data.pscan_period_mode = info->pscan_period_mode;
3342 data.pscan_mode = info->pscan_mode;
3343 memcpy(data.dev_class, info->dev_class, 3);
3344 data.clock_offset = info->clock_offset;
3345 data.rssi = info->rssi;
3346 data.ssp_mode = 0x00;
3348 flags = hci_inquiry_cache_update(hdev, &data, false);
3350 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3351 info->dev_class, info->rssi,
3352 flags, NULL, 0, NULL, 0);
3355 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3357 for (; num_rsp; num_rsp--, info++) {
3360 bacpy(&data.bdaddr, &info->bdaddr);
3361 data.pscan_rep_mode = info->pscan_rep_mode;
3362 data.pscan_period_mode = info->pscan_period_mode;
3363 data.pscan_mode = 0x00;
3364 memcpy(data.dev_class, info->dev_class, 3);
3365 data.clock_offset = info->clock_offset;
3366 data.rssi = info->rssi;
3367 data.ssp_mode = 0x00;
3369 flags = hci_inquiry_cache_update(hdev, &data, false);
3371 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3372 info->dev_class, info->rssi,
3373 flags, NULL, 0, NULL, 0);
3377 hci_dev_unlock(hdev);
3380 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
3381 struct sk_buff *skb)
3383 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
3384 struct hci_conn *conn;
3386 BT_DBG("%s", hdev->name);
3390 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3394 if (ev->page < HCI_MAX_PAGES)
3395 memcpy(conn->features[ev->page], ev->features, 8);
3397 if (!ev->status && ev->page == 0x01) {
3398 struct inquiry_entry *ie;
3400 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3402 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3404 if (ev->features[0] & LMP_HOST_SSP) {
3405 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3407 /* It is mandatory by the Bluetooth specification that
3408 * Extended Inquiry Results are only used when Secure
3409 * Simple Pairing is enabled, but some devices violate this.
3412 * To make these devices work, the internal SSP
3413 * enabled flag needs to be cleared if the remote host
3414 * features do not indicate SSP support */
3415 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
3418 if (ev->features[0] & LMP_HOST_SC)
3419 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
3422 if (conn->state != BT_CONFIG)
3425 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
3426 struct hci_cp_remote_name_req cp;
3427 memset(&cp, 0, sizeof(cp));
3428 bacpy(&cp.bdaddr, &conn->dst);
3429 cp.pscan_rep_mode = 0x02;
3430 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
3431 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3432 mgmt_device_connected(hdev, &conn->dst, conn->type,
3433 conn->dst_type, 0, NULL, 0,
3436 if (!hci_outgoing_auth_needed(hdev, conn)) {
3437 conn->state = BT_CONNECTED;
3438 hci_proto_connect_cfm(conn, ev->status);
3439 hci_conn_drop(conn);
3443 hci_dev_unlock(hdev);
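/* Synchronous Connection Complete: finish SCO/eSCO setup. For a set of
 * negotiation failures the allowed packet types are reduced and the setup
 * is retried; any other failure closes the connection.
 */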
3446 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
3447 struct sk_buff *skb)
3449 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
3450 struct hci_conn *conn;
3452 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3456 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
3458 if (ev->link_type == ESCO_LINK)
3461 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
3465 conn->type = SCO_LINK;
3468 switch (ev->status) {
3470 conn->handle = __le16_to_cpu(ev->handle);
3471 conn->state = BT_CONNECTED;
3473 hci_conn_add_sysfs(conn);
3476 case 0x10: /* Connection Accept Timeout */
3477 case 0x0d: /* Connection Rejected due to Limited Resources */
3478 case 0x11: /* Unsupported Feature or Parameter Value */
3479 case 0x1c: /* SCO interval rejected */
3480 case 0x1a: /* Unsupported Remote Feature */
3481 case 0x1f: /* Unspecified error */
3482 case 0x20: /* Unsupported LMP Parameter value */
3484 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3485 (hdev->esco_type & EDR_ESCO_MASK);
3486 if (hci_setup_sync(conn, conn->link->handle))
3492 conn->state = BT_CLOSED;
3496 hci_proto_connect_cfm(conn, ev->status);
3501 hci_dev_unlock(hdev);
3504 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3508 while (parsed < eir_len) {
3509 u8 field_len = eir[0];
3514 parsed += field_len + 1;
3515 eir += field_len + 1;
3521 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3522 struct sk_buff *skb)
3524 struct inquiry_data data;
3525 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3526 int num_rsp = *((__u8 *) skb->data);
3529 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3534 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3539 for (; num_rsp; num_rsp--, info++) {
3543 bacpy(&data.bdaddr, &info->bdaddr);
3544 data.pscan_rep_mode = info->pscan_rep_mode;
3545 data.pscan_period_mode = info->pscan_period_mode;
3546 data.pscan_mode = 0x00;
3547 memcpy(data.dev_class, info->dev_class, 3);
3548 data.clock_offset = info->clock_offset;
3549 data.rssi = info->rssi;
3550 data.ssp_mode = 0x01;
3552 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3553 name_known = eir_has_data_type(info->data,
3559 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3561 eir_len = eir_get_length(info->data, sizeof(info->data));
3563 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3564 info->dev_class, info->rssi,
3565 flags, info->data, eir_len, NULL, 0);
3568 hci_dev_unlock(hdev);
3571 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3572 struct sk_buff *skb)
3574 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3575 struct hci_conn *conn;
3577 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3578 __le16_to_cpu(ev->handle));
3582 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3586 /* For BR/EDR the necessary steps are taken through the
3587 * auth_complete event. */
3589 if (conn->type != LE_LINK)
3593 conn->sec_level = conn->pending_sec_level;
3595 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3597 if (ev->status && conn->state == BT_CONNECTED) {
3598 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3599 hci_conn_drop(conn);
3603 if (conn->state == BT_CONFIG) {
3605 conn->state = BT_CONNECTED;
3607 hci_proto_connect_cfm(conn, ev->status);
3608 hci_conn_drop(conn);
3610 hci_auth_cfm(conn, ev->status);
3612 hci_conn_hold(conn);
3613 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3614 hci_conn_drop(conn);
3618 hci_dev_unlock(hdev);
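/* Work out the authentication requirements for an IO Capability Reply from
 * the remote requirements, the local IO capability and the locally
 * requested MITM protection.
 */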
3621 static u8 hci_get_auth_req(struct hci_conn *conn)
3623 /* If remote requests no-bonding follow that lead */
3624 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3625 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3626 return conn->remote_auth | (conn->auth_type & 0x01);
3628 /* If both remote and local have enough IO capabilities, require MITM protection. */
3631 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3632 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3633 return conn->remote_auth | 0x01;
3635 /* No MITM protection possible so ignore remote requirement */
3636 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3639 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3641 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3642 struct hci_conn *conn;
3644 BT_DBG("%s", hdev->name);
3648 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3652 hci_conn_hold(conn);
3654 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3657 /* Allow pairing if we're pairable, we're the initiator of the
3658 * pairing, or if the remote is not requesting bonding. */
3660 if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
3661 test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
3662 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3663 struct hci_cp_io_capability_reply cp;
3665 bacpy(&cp.bdaddr, &ev->bdaddr);
3666 /* Change the IO capability from KeyboardDisplay
3667 * to DisplayYesNo as KeyboardDisplay is not supported by the BT spec. */
3668 cp.capability = (conn->io_capability == 0x04) ?
3669 HCI_IO_DISPLAY_YESNO : conn->io_capability;
3671 /* If we are initiators, there is no remote information yet */
3672 if (conn->remote_auth == 0xff) {
3673 /* Request MITM protection if our IO caps allow it
3674 * except for the no-bonding case. */
3676 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3677 conn->auth_type != HCI_AT_NO_BONDING)
3678 conn->auth_type |= 0x01;
3680 conn->auth_type = hci_get_auth_req(conn);
3683 /* If we're not bondable, force one of the non-bondable
3684 * authentication requirement values. */
3686 if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
3687 conn->auth_type &= HCI_AT_NO_BONDING_MITM;
3689 cp.authentication = conn->auth_type;
3691 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3692 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3697 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3700 struct hci_cp_io_capability_neg_reply cp;
3702 bacpy(&cp.bdaddr, &ev->bdaddr);
3703 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3705 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3710 hci_dev_unlock(hdev);
3713 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3715 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3716 struct hci_conn *conn;
3718 BT_DBG("%s", hdev->name);
3722 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3726 conn->remote_cap = ev->capability;
3727 conn->remote_auth = ev->authentication;
3729 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3732 hci_dev_unlock(hdev);
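/* User Confirmation Request: reject when MITM protection is required but
 * the remote device cannot provide it, auto-accept (optionally after a
 * configured delay) when neither side needs MITM, and otherwise hand the
 * decision to user space via mgmt_user_confirm_request().
 */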
3735 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3736 struct sk_buff *skb)
3738 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3739 int loc_mitm, rem_mitm, confirm_hint = 0;
3740 struct hci_conn *conn;
3742 BT_DBG("%s", hdev->name);
3746 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3749 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3753 loc_mitm = (conn->auth_type & 0x01);
3754 rem_mitm = (conn->remote_auth & 0x01);
3756 /* If we require MITM but the remote device can't provide that
3757 * (it has NoInputNoOutput) then reject the confirmation
3758 * request. We check the security level here since it doesn't
3759 * necessarily match conn->auth_type. */
3761 if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
3762 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3763 BT_DBG("Rejecting request: remote device can't provide MITM");
3764 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3765 sizeof(ev->bdaddr), &ev->bdaddr);
3769 /* If no side requires MITM protection, auto-accept */
3770 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
3771 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
3773 /* If we're not the initiator, request authorization to
3774 * proceed from user space (mgmt_user_confirm with
3775 * confirm_hint set to 1). The exception is if neither
3776 * side had MITM or if the local IO capability is
3777 * NoInputNoOutput, in which case we do auto-accept. */
3779 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
3780 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3781 (loc_mitm || rem_mitm)) {
3782 BT_DBG("Confirming auto-accept as acceptor");
3787 BT_DBG("Auto-accept of user confirmation with %ums delay",
3788 hdev->auto_accept_delay);
3790 if (hdev->auto_accept_delay > 0) {
3791 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3792 queue_delayed_work(conn->hdev->workqueue,
3793 &conn->auto_accept_work, delay);
3797 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3798 sizeof(ev->bdaddr), &ev->bdaddr);
3803 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
3804 le32_to_cpu(ev->passkey), confirm_hint);
3807 hci_dev_unlock(hdev);
3810 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3811 struct sk_buff *skb)
3813 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3815 BT_DBG("%s", hdev->name);
3817 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3818 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3821 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3822 struct sk_buff *skb)
3824 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3825 struct hci_conn *conn;
3827 BT_DBG("%s", hdev->name);
3829 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3833 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3834 conn->passkey_entered = 0;
3836 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3837 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3838 conn->dst_type, conn->passkey_notify,
3839 conn->passkey_entered);
3842 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3844 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3845 struct hci_conn *conn;
3847 BT_DBG("%s", hdev->name);
3849 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3854 case HCI_KEYPRESS_STARTED:
3855 conn->passkey_entered = 0;
3858 case HCI_KEYPRESS_ENTERED:
3859 conn->passkey_entered++;
3862 case HCI_KEYPRESS_ERASED:
3863 conn->passkey_entered--;
3866 case HCI_KEYPRESS_CLEARED:
3867 conn->passkey_entered = 0;
3870 case HCI_KEYPRESS_COMPLETED:
3874 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3875 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3876 conn->dst_type, conn->passkey_notify,
3877 conn->passkey_entered);
3880 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3881 struct sk_buff *skb)
3883 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3884 struct hci_conn *conn;
3886 BT_DBG("%s", hdev->name);
3890 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3894 /* Reset the authentication requirement to unknown */
3895 conn->remote_auth = 0xff;
3897 /* To avoid duplicate auth_failed events to user space we check
3898 * the HCI_CONN_AUTH_PEND flag which will be set if we
3899 * initiated the authentication. A traditional auth_complete
3900 * event is always produced as initiator and is also mapped to
3901 * the mgmt_auth_failed event */
3902 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3903 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3906 hci_conn_drop(conn);
3909 hci_dev_unlock(hdev);
3912 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3913 struct sk_buff *skb)
3915 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3916 struct inquiry_entry *ie;
3917 struct hci_conn *conn;
3919 BT_DBG("%s", hdev->name);
3923 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3925 memcpy(conn->features[1], ev->features, 8);
3927 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3929 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3931 hci_dev_unlock(hdev);
3934 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3935 struct sk_buff *skb)
3937 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3938 struct oob_data *data;
3940 BT_DBG("%s", hdev->name);
3944 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3947 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3949 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
3950 struct hci_cp_remote_oob_ext_data_reply cp;
3952 bacpy(&cp.bdaddr, &ev->bdaddr);
3953 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
3954 memcpy(cp.randomizer192, data->randomizer192,
3955 sizeof(cp.randomizer192));
3956 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
3957 memcpy(cp.randomizer256, data->randomizer256,
3958 sizeof(cp.randomizer256));
3960 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
3963 struct hci_cp_remote_oob_data_reply cp;
3965 bacpy(&cp.bdaddr, &ev->bdaddr);
3966 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
3967 memcpy(cp.randomizer, data->randomizer192,
3968 sizeof(cp.randomizer));
3970 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
3974 struct hci_cp_remote_oob_data_neg_reply cp;
3976 bacpy(&cp.bdaddr, &ev->bdaddr);
3977 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
3982 hci_dev_unlock(hdev);
3985 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3986 struct sk_buff *skb)
3988 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3989 struct hci_conn *hcon, *bredr_hcon;
3991 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3996 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3998 hci_dev_unlock(hdev);
4004 hci_dev_unlock(hdev);
4008 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4010 hcon->state = BT_CONNECTED;
4011 bacpy(&hcon->dst, &bredr_hcon->dst);
4013 hci_conn_hold(hcon);
4014 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4015 hci_conn_drop(hcon);
4017 hci_conn_add_sysfs(hcon);
4019 amp_physical_cfm(bredr_hcon, hcon);
4021 hci_dev_unlock(hdev);
4024 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4026 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
4027 struct hci_conn *hcon;
4028 struct hci_chan *hchan;
4029 struct amp_mgr *mgr;
4031 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
4032 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
4035 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4039 /* Create AMP hchan */
4040 hchan = hci_chan_create(hcon);
4044 hchan->handle = le16_to_cpu(ev->handle);
4046 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
4048 mgr = hcon->amp_mgr;
4049 if (mgr && mgr->bredr_chan) {
4050 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
4052 l2cap_chan_lock(bredr_chan);
4054 bredr_chan->conn->mtu = hdev->block_mtu;
4055 l2cap_logical_cfm(bredr_chan, hchan, 0);
4056 hci_conn_hold(hcon);
4058 l2cap_chan_unlock(bredr_chan);
4062 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4063 struct sk_buff *skb)
4065 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4066 struct hci_chan *hchan;
4068 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4069 le16_to_cpu(ev->handle), ev->status);
4076 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4080 amp_destroy_logical_link(hchan, ev->reason);
4083 hci_dev_unlock(hdev);
4086 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4087 struct sk_buff *skb)
4089 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4090 struct hci_conn *hcon;
4092 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4099 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4101 hcon->state = BT_CLOSED;
4105 hci_dev_unlock(hdev);
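/* LE Connection Complete: bind the event to the pending hci_conn or create
 * one for incoming connections, record the initiator and responder
 * addresses, resolve the peer back to its identity address when an IRK is
 * known, and notify mgmt and the upper protocols.
 */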
4108 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
4110 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
4111 struct hci_conn_params *params;
4112 struct hci_conn *conn;
4113 struct smp_irk *irk;
4116 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4120 /* All controllers implicitly stop advertising in the event of a
4121 * connection, so ensure that the state bit is cleared. */
4123 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
4125 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
4127 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
4129 BT_ERR("No memory for new connection");
4133 conn->dst_type = ev->bdaddr_type;
4135 /* If we didn't have a hci_conn object previously
4136 * but we're in master role this must be something
4137 * initiated using a white list. Since white list based
4138 * connections are not "first class citizens" we don't
4139 * have full tracking of them. Therefore, we go ahead
4140 * with a "best effort" approach of determining the
4141 * initiator address based on the HCI_PRIVACY flag. */
4144 conn->resp_addr_type = ev->bdaddr_type;
4145 bacpy(&conn->resp_addr, &ev->bdaddr);
4146 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
4147 conn->init_addr_type = ADDR_LE_DEV_RANDOM;
4148 bacpy(&conn->init_addr, &hdev->rpa);
4150 hci_copy_identity_address(hdev,
4152 &conn->init_addr_type);
4156 cancel_delayed_work(&conn->le_conn_timeout);
4160 /* Set the responder (our side) address type based on
4161 * the advertising address type. */
4163 conn->resp_addr_type = hdev->adv_addr_type;
4164 if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
4165 bacpy(&conn->resp_addr, &hdev->random_addr);
4167 bacpy(&conn->resp_addr, &hdev->bdaddr);
4169 conn->init_addr_type = ev->bdaddr_type;
4170 bacpy(&conn->init_addr, &ev->bdaddr);
4172 /* For incoming connections, set the default minimum
4173 * and maximum connection interval. They will be used
4174 * to check if the parameters are in range and if not
4175 * trigger the connection update procedure. */
4177 conn->le_conn_min_interval = hdev->le_conn_min_interval;
4178 conn->le_conn_max_interval = hdev->le_conn_max_interval;
4181 /* Lookup the identity address from the stored connection
4182 * address and address type.
4184 * When establishing connections to an identity address, the
4185 * connection procedure will store the resolvable random
4186 * address first. Now if it can be converted back into the
4187 * identity address, start using the identity address from now on. */
4190 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
4192 bacpy(&conn->dst, &irk->bdaddr);
4193 conn->dst_type = irk->addr_type;
4196 if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
4197 addr_type = BDADDR_LE_PUBLIC;
4199 addr_type = BDADDR_LE_RANDOM;
4202 hci_le_conn_failed(conn, ev->status);
4206 /* Drop the connection if the device is blocked */
4207 if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
4208 hci_conn_drop(conn);
4212 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
4213 mgmt_device_connected(hdev, &conn->dst, conn->type,
4214 conn->dst_type, 0, NULL, 0, NULL);
4216 conn->sec_level = BT_SECURITY_LOW;
4217 conn->handle = __le16_to_cpu(ev->handle);
4218 conn->state = BT_CONNECTED;
4220 conn->le_conn_interval = le16_to_cpu(ev->interval);
4221 conn->le_conn_latency = le16_to_cpu(ev->latency);
4222 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4224 hci_conn_add_sysfs(conn);
4226 hci_proto_connect_cfm(conn, ev->status);
4228 params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
4230 list_del_init(&params->action);
4233 hci_update_background_scan(hdev);
4234 hci_dev_unlock(hdev);
4237 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4238 struct sk_buff *skb)
4240 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4241 struct hci_conn *conn;
4243 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4250 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4252 conn->le_conn_interval = le16_to_cpu(ev->interval);
4253 conn->le_conn_latency = le16_to_cpu(ev->latency);
4254 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4257 hci_dev_unlock(hdev);
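/* Decide whether an advertising report should trigger an auto-connection:
 * only connectable advertising is considered, blocked devices are skipped,
 * and a pending connection parameter entry with a matching auto_connect
 * policy is required before hci_connect_le() is attempted.
 */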
4260 /* This function requires the caller holds hdev->lock */
4261 static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
4262 u8 addr_type, u8 adv_type)
4264 struct hci_conn *conn;
4265 struct hci_conn_params *params;
4267 /* If the event is not connectable don't proceed further */
4268 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4271 /* Ignore if the device is blocked */
4272 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4275 /* Most controllers will fail if we try to create new connections
4276 * while we have an existing one in slave role. */
4278 if (hdev->conn_hash.le_num_slave > 0)
4281 /* If we're not connectable only connect devices that we have in
4282 * our pend_le_conns list. */
4284 params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
4289 switch (params->auto_connect) {
4290 case HCI_AUTO_CONN_DIRECT:
4291 /* Only devices advertising with ADV_DIRECT_IND trigger
4292 * a connection attempt. This allows incoming
4293 * connections from slave devices. */
4295 if (adv_type != LE_ADV_DIRECT_IND)
4298 case HCI_AUTO_CONN_ALWAYS:
4299 /* Devices advertising with ADV_IND or ADV_DIRECT_IND
4300 * trigger a connection attempt. This means
4301 * that incoming connections from slave devices are
4302 * accepted and also outgoing connections to slave
4303 * devices are established when found. */
4310 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4311 HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
4315 switch (PTR_ERR(conn)) {
4317 /* If hci_connect() returns -EBUSY it means there is already
4318 * an LE connection attempt going on. Since controllers don't
4319 * support more than one connection attempt at a time, we
4320 * don't consider this an error case. */
4324 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
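/* Process a single advertising report: resolve the address through a known
 * IRK if possible, check for pending auto-connections and generate mgmt
 * Device Found events. During active scanning, ADV_IND and ADV_SCAN_IND
 * reports are cached so they can be merged with the following SCAN_RSP.
 */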
4328 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4329 u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
4331 struct discovery_state *d = &hdev->discovery;
4332 struct smp_irk *irk;
4336 /* Check if we need to convert to identity address */
4337 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4339 bdaddr = &irk->bdaddr;
4340 bdaddr_type = irk->addr_type;
4343 /* Check if we have been requested to connect to this device */
4344 check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4346 /* Passive scanning shouldn't trigger any device found events,
4347 * except for devices marked as CONN_REPORT for which we do send
4348 * device found events. */
4350 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4351 if (type == LE_ADV_DIRECT_IND)
4354 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4355 bdaddr, bdaddr_type))
4358 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4359 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4362 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4363 rssi, flags, data, len, NULL, 0);
4367 /* When receiving non-connectable or scannable undirected
4368 * advertising reports, this means that the remote device is
4369 * not connectable and then clearly indicate this in the
4370 * device found event.
4372 * When receiving a scan response, then there is no way to
4373 * know if the remote device is connectable or not. However
4374 * since scan responses are merged with a previously seen
4375 * advertising report, the flags field from that report will be used.
4378 * In the really unlikely case that a controller gets confused
4379 * and just sends a scan response event, then it is marked as
4380 * not connectable as well. */
4382 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4383 type == LE_ADV_SCAN_RSP)
4384 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4388 /* If there's nothing pending either store the data from this
4389 * event or send an immediate device found event if the data
4390 * should not be stored for later. */
4392 if (!has_pending_adv_report(hdev)) {
4393 /* If the report will trigger a SCAN_REQ store it for later merging. */
4396 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4397 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4398 rssi, flags, data, len);
4402 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4403 rssi, flags, data, len, NULL, 0);
4407 /* Check if the pending report is for the same device as the new one */
4408 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4409 bdaddr_type == d->last_adv_addr_type);
4411 /* If the pending data doesn't match this report or this isn't a
4412 * scan response (e.g. we got a duplicate ADV_IND) then force
4413 * sending of the pending data. */
4415 if (type != LE_ADV_SCAN_RSP || !match) {
4416 /* Send out whatever is in the cache, but skip duplicates */
4418 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4419 d->last_adv_addr_type, NULL,
4420 d->last_adv_rssi, d->last_adv_flags,
4422 d->last_adv_data_len, NULL, 0);
4424 /* If the new report will trigger a SCAN_REQ store it for later merging. */
4427 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4428 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4429 rssi, flags, data, len);
4433 /* The advertising reports cannot be merged, so clear
4434 * the pending report and send out a device found event. */
4436 clear_pending_adv_report(hdev);
4437 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4438 rssi, flags, data, len, NULL, 0);
4442 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4443 * the new event is a SCAN_RSP. We can therefore proceed with
4444 * sending a merged device found event. */
4446 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4447 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4448 d->last_adv_data, d->last_adv_data_len, data, len);
4449 clear_pending_adv_report(hdev);
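/* LE Advertising Report: iterate over the reports in the event. Each entry
 * is followed by a single RSSI byte, which is why the RSSI is read from
 * ev->data[ev->length] and the pointer advances by the report length
 * plus one.
 */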
4452 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4454 u8 num_reports = skb->data[0];
4455 void *ptr = &skb->data[1];
4459 while (num_reports--) {
4460 struct hci_ev_le_advertising_info *ev = ptr;
4463 rssi = ev->data[ev->length];
4464 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4465 ev->bdaddr_type, rssi, ev->data, ev->length);
4467 ptr += sizeof(*ev) + ev->length + 1;
4470 hci_dev_unlock(hdev);
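/* LE Long Term Key Request: look up an LTK for this connection by EDIV and
 * Rand, reply with it and adjust the pending security level to match the
 * key's authentication; STKs are single use and removed after the reply.
 * Without a matching key a negative reply is sent.
 */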
4473 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
4475 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
4476 struct hci_cp_le_ltk_reply cp;
4477 struct hci_cp_le_ltk_neg_reply neg;
4478 struct hci_conn *conn;
4479 struct smp_ltk *ltk;
4481 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
4485 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4489 ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->role);
4493 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
4494 cp.handle = cpu_to_le16(conn->handle);
4496 if (ltk->authenticated)
4497 conn->pending_sec_level = BT_SECURITY_HIGH;
4499 conn->pending_sec_level = BT_SECURITY_MEDIUM;
4501 conn->enc_key_size = ltk->enc_size;
4503 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
4505 /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
4506 * temporary key used to encrypt a connection following
4507 * pairing. It is used during the Encrypted Session Setup to
4508 * distribute the keys. Later, security can be re-established
4509 * using a distributed LTK. */
4511 if (ltk->type == SMP_STK) {
4512 set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
4513 list_del(&ltk->list);
4516 clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
4519 hci_dev_unlock(hdev);
4524 neg.handle = ev->handle;
4525 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
4526 hci_dev_unlock(hdev);
4529 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4532 struct hci_cp_le_conn_param_req_neg_reply cp;
4534 cp.handle = cpu_to_le16(handle);
4537 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4541 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4542 struct sk_buff *skb)
4544 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4545 struct hci_cp_le_conn_param_req_reply cp;
4546 struct hci_conn *hcon;
4547 u16 handle, min, max, latency, timeout;
4549 handle = le16_to_cpu(ev->handle);
4550 min = le16_to_cpu(ev->interval_min);
4551 max = le16_to_cpu(ev->interval_max);
4552 latency = le16_to_cpu(ev->latency);
4553 timeout = le16_to_cpu(ev->timeout);
4555 hcon = hci_conn_hash_lookup_handle(hdev, handle);
4556 if (!hcon || hcon->state != BT_CONNECTED)
4557 return send_conn_param_neg_reply(hdev, handle,
4558 HCI_ERROR_UNKNOWN_CONN_ID);
4560 if (hci_check_conn_params(min, max, latency, timeout))
4561 return send_conn_param_neg_reply(hdev, handle,
4562 HCI_ERROR_INVALID_LL_PARAMS);
4564 if (hcon->role == HCI_ROLE_MASTER) {
4565 struct hci_conn_params *params;
4570 params = hci_conn_params_lookup(hdev, &hcon->dst,
4573 params->conn_min_interval = min;
4574 params->conn_max_interval = max;
4575 params->conn_latency = latency;
4576 params->supervision_timeout = timeout;
4582 hci_dev_unlock(hdev);
4584 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4585 store_hint, min, max, latency, timeout);
4588 cp.handle = ev->handle;
4589 cp.interval_min = ev->interval_min;
4590 cp.interval_max = ev->interval_max;
4591 cp.latency = ev->latency;
4592 cp.timeout = ev->timeout;
4596 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
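/* LE Meta event: strip the meta header and demultiplex on the subevent
 * code to the individual LE event handlers.
 */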
4599 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4601 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4603 skb_pull(skb, sizeof(*le_ev));
4605 switch (le_ev->subevent) {
4606 case HCI_EV_LE_CONN_COMPLETE:
4607 hci_le_conn_complete_evt(hdev, skb);
4610 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4611 hci_le_conn_update_complete_evt(hdev, skb);
4614 case HCI_EV_LE_ADVERTISING_REPORT:
4615 hci_le_adv_report_evt(hdev, skb);
4618 case HCI_EV_LE_LTK_REQ:
4619 hci_le_ltk_request_evt(hdev, skb);
4622 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4623 hci_le_remote_conn_param_req_evt(hdev, skb);
4631 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4633 struct hci_ev_channel_selected *ev = (void *) skb->data;
4634 struct hci_conn *hcon;
4636 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4638 skb_pull(skb, sizeof(*ev));
4640 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4644 amp_read_loc_assoc_final_data(hdev, hcon);
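/* Main HCI event entry point: keep a clone of the event while a request is
 * pending, complete a sent command that was waiting for this specific
 * event, dispatch to the handler for the event code and update the
 * receive statistics.
 */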
4647 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4649 struct hci_event_hdr *hdr = (void *) skb->data;
4650 __u8 event = hdr->evt;
4654 /* Received events are (currently) only needed when a request is
4655 * ongoing so avoid unnecessary memory allocation. */
4657 if (hci_req_pending(hdev)) {
4658 kfree_skb(hdev->recv_evt);
4659 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
4662 hci_dev_unlock(hdev);
4664 skb_pull(skb, HCI_EVENT_HDR_SIZE);
4666 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
4667 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
4668 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
4670 hci_req_cmd_complete(hdev, opcode, 0);
4674 case HCI_EV_INQUIRY_COMPLETE:
4675 hci_inquiry_complete_evt(hdev, skb);
4678 case HCI_EV_INQUIRY_RESULT:
4679 hci_inquiry_result_evt(hdev, skb);
4682 case HCI_EV_CONN_COMPLETE:
4683 hci_conn_complete_evt(hdev, skb);
4686 case HCI_EV_CONN_REQUEST:
4687 hci_conn_request_evt(hdev, skb);
4690 case HCI_EV_DISCONN_COMPLETE:
4691 hci_disconn_complete_evt(hdev, skb);
4694 case HCI_EV_AUTH_COMPLETE:
4695 hci_auth_complete_evt(hdev, skb);
4698 case HCI_EV_REMOTE_NAME:
4699 hci_remote_name_evt(hdev, skb);
4702 case HCI_EV_ENCRYPT_CHANGE:
4703 hci_encrypt_change_evt(hdev, skb);
4706 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4707 hci_change_link_key_complete_evt(hdev, skb);
4710 case HCI_EV_REMOTE_FEATURES:
4711 hci_remote_features_evt(hdev, skb);
4714 case HCI_EV_CMD_COMPLETE:
4715 hci_cmd_complete_evt(hdev, skb);
4718 case HCI_EV_CMD_STATUS:
4719 hci_cmd_status_evt(hdev, skb);
4722 case HCI_EV_ROLE_CHANGE:
4723 hci_role_change_evt(hdev, skb);
4726 case HCI_EV_NUM_COMP_PKTS:
4727 hci_num_comp_pkts_evt(hdev, skb);
4730 case HCI_EV_MODE_CHANGE:
4731 hci_mode_change_evt(hdev, skb);
4734 case HCI_EV_PIN_CODE_REQ:
4735 hci_pin_code_request_evt(hdev, skb);
4738 case HCI_EV_LINK_KEY_REQ:
4739 hci_link_key_request_evt(hdev, skb);
4742 case HCI_EV_LINK_KEY_NOTIFY:
4743 hci_link_key_notify_evt(hdev, skb);
4746 case HCI_EV_CLOCK_OFFSET:
4747 hci_clock_offset_evt(hdev, skb);
4750 case HCI_EV_PKT_TYPE_CHANGE:
4751 hci_pkt_type_change_evt(hdev, skb);
4754 case HCI_EV_PSCAN_REP_MODE:
4755 hci_pscan_rep_mode_evt(hdev, skb);
4758 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
4759 hci_inquiry_result_with_rssi_evt(hdev, skb);
4762 case HCI_EV_REMOTE_EXT_FEATURES:
4763 hci_remote_ext_features_evt(hdev, skb);
4766 case HCI_EV_SYNC_CONN_COMPLETE:
4767 hci_sync_conn_complete_evt(hdev, skb);
4770 case HCI_EV_EXTENDED_INQUIRY_RESULT:
4771 hci_extended_inquiry_result_evt(hdev, skb);
4774 case HCI_EV_KEY_REFRESH_COMPLETE:
4775 hci_key_refresh_complete_evt(hdev, skb);
4778 case HCI_EV_IO_CAPA_REQUEST:
4779 hci_io_capa_request_evt(hdev, skb);
4782 case HCI_EV_IO_CAPA_REPLY:
4783 hci_io_capa_reply_evt(hdev, skb);
4786 case HCI_EV_USER_CONFIRM_REQUEST:
4787 hci_user_confirm_request_evt(hdev, skb);
4790 case HCI_EV_USER_PASSKEY_REQUEST:
4791 hci_user_passkey_request_evt(hdev, skb);
4794 case HCI_EV_USER_PASSKEY_NOTIFY:
4795 hci_user_passkey_notify_evt(hdev, skb);
4798 case HCI_EV_KEYPRESS_NOTIFY:
4799 hci_keypress_notify_evt(hdev, skb);
4802 case HCI_EV_SIMPLE_PAIR_COMPLETE:
4803 hci_simple_pair_complete_evt(hdev, skb);
4806 case HCI_EV_REMOTE_HOST_FEATURES:
4807 hci_remote_host_features_evt(hdev, skb);
4810 case HCI_EV_LE_META:
4811 hci_le_meta_evt(hdev, skb);
4814 case HCI_EV_CHANNEL_SELECTED:
4815 hci_chan_selected_evt(hdev, skb);
4818 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4819 hci_remote_oob_data_request_evt(hdev, skb);
4822 case HCI_EV_PHY_LINK_COMPLETE:
4823 hci_phy_link_complete_evt(hdev, skb);
4826 case HCI_EV_LOGICAL_LINK_COMPLETE:
4827 hci_loglink_complete_evt(hdev, skb);
4830 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4831 hci_disconn_loglink_complete_evt(hdev, skb);
4834 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4835 hci_disconn_phylink_complete_evt(hdev, skb);
4838 case HCI_EV_NUM_COMP_BLOCKS:
4839 hci_num_comp_blocks_evt(hdev, skb);
4843 BT_DBG("%s event 0x%2.2x", hdev->name, event);
4848 hdev->stat.evt_rx++;