/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <linux/kcov.h>
#include <linux/property.h>
#include <linux/suspend.h>
#include <linux/wait.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "hci_request.h"
#include "hci_debugfs.h"
#include "smp.h"
#include "leds.h"
#include "msft.h"
#include "aosp.h"
#include "hci_codec.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_MUTEX(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

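/* Request helpers for the legacy HCISET* ioctls below: each queues a
 * single HCI command via hci_req_add() and is run synchronously
 * through hci_req_sync() from hci_dev_cmd().
 */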
static int hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
        return 0;
}

static int hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
        return 0;
}

static int hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
        return 0;
}

static int hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
        return 0;
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

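/* Drive the discovery state machine and keep the management interface
 * informed: entering DISCOVERY_FINDING emits mgmt_discovering(1), and
 * returning to DISCOVERY_STOPPED emits mgmt_discovering(0) unless the
 * procedure never got past DISCOVERY_STARTING.
 */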
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        int old_state = hdev->discovery.state;

        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (old_state == state)
                return;

        hdev->discovery.state = state;

        switch (state) {
        case DISCOVERY_STOPPED:
                hci_update_passive_scan(hdev);

                if (old_state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

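/* Re-insert the entry so that the resolve list stays sorted for name
 * resolution: entries with the strongest signal (smallest |RSSI|) come
 * first, while entries already in NAME_PENDING state keep their place.
 */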
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

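/* Merge a new inquiry result into the cache and return the
 * MGMT_DEV_FOUND_* flags (legacy pairing, confirm name) that apply to
 * this device.
 */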
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                             bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;
        u32 flags = 0;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR);

        if (!data->ssp_mode)
                flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (!ie->data.ssp_mode)
                        flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
        if (!ie) {
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
                goto done;
        }

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
        return flags;
}

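/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info and return the number of entries copied.
 */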
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static int hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return 0;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

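/* HCIINQUIRY ioctl handler: (re)runs an inquiry when the cache is
 * stale, empty or a flush was requested, waits for it to finish and
 * then copies the cached results back to user space.
 */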
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* Restrict maximum inquiry length to 60 seconds */
        if (ir.length > 60) {
                err = -EINVAL;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo, NULL);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
                                TASK_INTERRUPTIBLE)) {
                        err = -EINTR;
                        goto done;
                }
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and
         * then copy it to user space.
         */
        buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        ret = hci_dev_open_sync(hdev);

        hci_req_sync_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* Devices that are marked as unconfigured can only be powered
         * up as user channel. Trying to bring them up as normal devices
         * will result in a failure. Only user channel operation is
         * possible.
         *
         * When this function is called for a user channel, the flag
         * HCI_USER_CHANNEL will be set before attempting to open the
         * device.
         */
        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
            !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        /* For controllers not using the management interface and that
         * are brought up using legacy ioctl, set the HCI_BONDABLE bit
         * so that pairing works for them. Once the management interface
         * is in use this bit will be cleared again and userspace has
         * to explicitly enable it.
         */
        if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
            !hci_dev_test_flag(hdev, HCI_MGMT))
                hci_dev_set_flag(hdev, HCI_BONDABLE);

        err = hci_dev_do_open(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_do_close(struct hci_dev *hdev)
{
        int err;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        err = hci_dev_close_sync(hdev);

        hci_req_sync_unlock(hdev);

        return err;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        cancel_work_sync(&hdev->power_on);
        if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

static int hci_dev_do_reset(struct hci_dev *hdev)
{
        int ret;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        /* Cancel these to avoid queueing non-chained pending work */
        hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);
        /* Wait for
         *
         *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
         *        queue_delayed_work(&hdev->{cmd,ncmd}_timer)
         *
         * inside RCU section to see the flag or complete scheduling.
         */
        synchronize_rcu();
        /* Explicitly cancel works in case scheduled after setting the flag. */
        cancel_delayed_work(&hdev->cmd_timer);
        cancel_delayed_work(&hdev->ncmd_timer);

        /* Avoid potential lockdep warnings from the *_flush() calls by
         * ensuring the workqueue is empty up front.
         */
        drain_workqueue(hdev->workqueue);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0;
        hdev->sco_cnt = 0;
        hdev->le_cnt = 0;
        hdev->iso_cnt = 0;

        ret = hci_reset_sync(hdev);

        hci_req_sync_unlock(hdev);
        return ret;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (!test_bit(HCI_UP, &hdev->flags)) {
                err = -ENETDOWN;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        err = hci_dev_do_reset(hdev);

done:
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                ret = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                ret = -EOPNOTSUPP;
                goto done;
        }

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
        hci_dev_put(hdev);
        return ret;
}

static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)
{
        bool conn_changed, discov_changed;

        BT_DBG("%s scan 0x%02x", hdev->name, scan);

        if ((scan & SCAN_PAGE))
                conn_changed = !hci_dev_test_and_set_flag(hdev,
                                                          HCI_CONNECTABLE);
        else
                conn_changed = hci_dev_test_and_clear_flag(hdev,
                                                           HCI_CONNECTABLE);

        if ((scan & SCAN_INQUIRY)) {
                discov_changed = !hci_dev_test_and_set_flag(hdev,
                                                            HCI_DISCOVERABLE);
        } else {
                hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
                discov_changed = hci_dev_test_and_clear_flag(hdev,
                                                             HCI_DISCOVERABLE);
        }

        if (!hci_dev_test_flag(hdev, HCI_MGMT))
                return;

        if (conn_changed || discov_changed) {
                /* In case this was disabled through mgmt */
                hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);

                if (hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                        hci_update_adv_data(hdev, hdev->cur_adv_instance);

                mgmt_new_settings(hdev);
        }
}

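/* Handler for the legacy HCISET* device ioctls: validates the device
 * state and dispatches to the matching synchronous request helper or
 * updates the corresponding hdev field directly.
 */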
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
                err = -EBUSY;
                goto done;
        }

        if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (hdev->dev_type != HCI_PRIMARY) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        switch (cmd) {
        case HCISETAUTH:
                err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
                                           HCI_INIT_TIMEOUT, NULL);
                        if (err)
                                break;
                }

                err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETSCAN:
                err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);

                /* Ensure that the connectable and discoverable states
                 * get correctly modified as this was a non-mgmt change.
                 */
                if (!err)
                        hci_update_passive_scan_state(hdev, dr.dev_opt);
                break;

        case HCISETLINKPOL:
                err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
                                   HCI_INIT_TIMEOUT, NULL);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                if (hdev->pkt_type == (__u16) dr.dev_opt)
                        break;

                hdev->pkt_type = (__u16) dr.dev_opt;
                mgmt_phy_configuration_changed(hdev, NULL);
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

done:
        hci_dev_put(hdev);
        return err;
}

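/* HCIGETDEVLIST ioctl helper: snapshots id and flags for up to dev_num
 * registered controllers under hci_dev_list_lock.
 */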
int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                unsigned long flags = hdev->flags;

                /* When auto-off is configured it means the transport
                 * is running, but in that case still indicate that the
                 * device is actually down.
                 */
                if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                        flags &= ~BIT(HCI_UP);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        unsigned long flags;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        /* When auto-off is configured it means the transport
         * is running, but in that case still indicate that the
         * device is actually down.
         */
        if (hci_dev_test_flag(hdev, HCI_AUTO_OFF))
                flags = hdev->flags & ~BIT(HCI_UP);
        else
                flags = hdev->flags;

        strscpy(di.name, hdev->name, sizeof(di.name));
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
        di.flags    = flags;
        di.pkt_type = hdev->pkt_type;
        if (lmp_bredr_capable(hdev)) {
                di.acl_mtu  = hdev->acl_mtu;
                di.acl_pkts = hdev->acl_pkts;
                di.sco_mtu  = hdev->sco_mtu;
                di.sco_pkts = hdev->sco_pkts;
        } else {
                di.acl_mtu  = hdev->le_mtu;
                di.acl_pkts = hdev->le_pkts;
                di.sco_mtu  = 0;
                di.sco_pkts = 0;
        }
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_dev_do_poweroff(struct hci_dev *hdev)
{
        int err;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_sync_lock(hdev);

        err = hci_set_powered_sync(hdev, false);

        hci_req_sync_unlock(hdev);

        return err;
}

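/* rfkill callback: on block, mark the controller HCI_RFKILLED and power
 * it down (unless it is still in setup or config); user channel devices
 * are left alone and report -EBUSY.
 */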
static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;
        int err;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
                return -EBUSY;

        if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
                return 0;

        if (blocked) {
                hci_dev_set_flag(hdev, HCI_RFKILLED);

                if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
                    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
                        err = hci_dev_do_poweroff(hdev);
                        if (err) {
                                bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
                                           err);

                                /* Make sure the device is still closed even if
                                 * anything during the power off sequence (e.g.
                                 * disconnecting devices) failed.
                                 */
                                hci_dev_do_close(hdev);
                        }
                }
        } else {
                hci_dev_clear_flag(hdev, HCI_RFKILLED);
        }

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
        int err;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_UP, &hdev->flags) &&
            hci_dev_test_flag(hdev, HCI_MGMT) &&
            hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
                cancel_delayed_work(&hdev->power_off);
                err = hci_powered_update_sync(hdev);
                mgmt_power_on(hdev, err);
                return;
        }

        err = hci_dev_do_open(hdev);
        if (err < 0) {
                hci_dev_lock(hdev);
                mgmt_set_powered_failed(hdev, err);
                hci_dev_unlock(hdev);
                return;
        }

        /* During the HCI setup phase, a few error conditions are
         * ignored and they need to be checked now. If they are still
         * valid, it is important to turn the device back off.
         */
        if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
            hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
            (hdev->dev_type == HCI_PRIMARY &&
             !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
             !bacmp(&hdev->static_addr, BDADDR_ANY))) {
                hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
                hci_dev_do_close(hdev);
        } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
                queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
                                   HCI_AUTO_OFF_TIMEOUT);
        }

        if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
                /* For unconfigured devices, set the HCI_RAW flag
                 * so that userspace can easily identify them.
                 */
                if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        set_bit(HCI_RAW, &hdev->flags);

                /* For fully configured devices, this will send
                 * the Index Added event. For unconfigured devices,
                 * it will send the Unconfigured Index Added event.
                 *
                 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
                 * and no event will be sent.
                 */
                mgmt_index_added(hdev);
        } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
                /* Once the controller is configured, it is important
                 * to clear the HCI_RAW flag.
                 */
                if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
                        clear_bit(HCI_RAW, &hdev->flags);

                /* Powering on the controller with HCI_CONFIG set only
                 * happens with the transition from unconfigured to
                 * configured. This will send the Index Added event.
                 */
                mgmt_index_added(hdev);
        }
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_error_reset(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset);

        hci_dev_hold(hdev);
        BT_DBG("%s", hdev->name);

        if (hdev->hw_error)
                hdev->hw_error(hdev, hdev->hw_error_code);
        else
                bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code);

        if (!hci_dev_do_close(hdev))
                hci_dev_do_open(hdev);

        hci_dev_put(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
        struct bt_uuid *uuid, *tmp;

        list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
                list_del(&uuid->list);
                kfree(uuid);
        }
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
        struct link_key *key, *tmp;

        list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) {
                list_del_rcu(&key->list);
                kfree_rcu(key, rcu);
        }
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
        struct smp_irk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

void hci_blocked_keys_clear(struct hci_dev *hdev)
{
        struct blocked_key *b, *tmp;

        list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) {
                list_del_rcu(&b->list);
                kfree_rcu(b, rcu);
        }
}

bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
{
        bool blocked = false;
        struct blocked_key *b;

        rcu_read_lock();
        list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
                if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
                        blocked = true;
                        break;
                }
        }

        rcu_read_unlock();
        return blocked;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->link_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) == 0) {
                        rcu_read_unlock();

                        if (hci_is_blocked_key(hdev,
                                               HCI_BLOCKED_KEY_TYPE_LINKKEY,
                                               k->val)) {
                                bt_dev_warn_ratelimited(hdev,
                                                        "Link key blocked for %pMR",
                                                        &k->bdaddr);
                                return NULL;
                        }

                        return k;
                }
        }
        rcu_read_unlock();

        return NULL;
}

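/* Decide whether a newly created BR/EDR link key should be stored
 * persistently, based on the key type and the bonding requirements of
 * both sides of the connection.
 */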
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* BR/EDR key derived using SC from an LE link */
        if (conn->type == LE_LINK)
                return true;

        /* Neither local nor remote side had no-bonding as a requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as a requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as a requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}

static u8 ltk_role(u8 type)
{
        if (type == SMP_LTK)
                return HCI_ROLE_MASTER;

        return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                             u8 addr_type, u8 role)
{
        struct smp_ltk *k;

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr))
                        continue;

                if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) {
                        rcu_read_unlock();

                        if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK,
                                               k->val)) {
                                bt_dev_warn_ratelimited(hdev,
                                                        "LTK blocked for %pMR",
                                                        &k->bdaddr);
                                return NULL;
                        }

                        return k;
                }
        }
        rcu_read_unlock();

        return NULL;
}

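/* Look up an IRK by resolvable private address: first check for a
 * cached RPA match, then try to resolve the RPA against each stored
 * IRK. Keys on the blocked list are never returned.
 */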
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
        struct smp_irk *irk_to_return = NULL;
        struct smp_irk *irk;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (!bacmp(&irk->rpa, rpa)) {
                        irk_to_return = irk;
                        goto done;
                }
        }

        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (smp_irk_matches(hdev, irk->val, rpa)) {
                        bacpy(&irk->rpa, rpa);
                        irk_to_return = irk;
                        goto done;
                }
        }

done:
        if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
                                                irk_to_return->val)) {
                bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
                                        &irk_to_return->bdaddr);
                irk_to_return = NULL;
        }

        rcu_read_unlock();

        return irk_to_return;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_irk *irk_to_return = NULL;
        struct smp_irk *irk;

        /* Identity Address must be public or static random */
        if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
                return NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
                if (addr_type == irk->addr_type &&
                    bacmp(bdaddr, &irk->bdaddr) == 0) {
                        irk_to_return = irk;
                        goto done;
                }
        }

done:
        if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK,
                                                irk_to_return->val)) {
                bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR",
                                        &irk_to_return->bdaddr);
                irk_to_return = NULL;
        }

        rcu_read_unlock();

        return irk_to_return;
}

struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
                                  bdaddr_t *bdaddr, u8 *val, u8 type,
                                  u8 pin_len, bool *persistent)
{
        struct link_key *key, *old_key;
        u8 old_key_type;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return NULL;
                list_add_rcu(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (persistent)
                *persistent = hci_persistent_key(hdev, conn, type,
                                                 old_key_type);

        return key;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 type, u8 authenticated,
                            u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
        struct smp_ltk *key, *old_key;
        u8 role = ltk_role(type);

        old_key = hci_find_ltk(hdev, bdaddr, addr_type, role);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_KERNEL);
                if (!key)
                        return NULL;
                list_add_rcu(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->rand = rand;
        key->enc_size = enc_size;
        key->type = type;

        return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
                            u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
        struct smp_irk *irk;

        irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
        if (!irk) {
                irk = kzalloc(sizeof(*irk), GFP_KERNEL);
                if (!irk)
                        return NULL;

                bacpy(&irk->bdaddr, bdaddr);
                irk->addr_type = addr_type;

                list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
        }

        memcpy(irk->val, val, 16);
        bacpy(&irk->rpa, rpa);

        return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del_rcu(&key->list);
        kfree_rcu(key, rcu);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
        struct smp_ltk *k, *tmp;
        int removed = 0;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
                removed++;
        }

        return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
        struct smp_irk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del_rcu(&k->list);
                kfree_rcu(k, rcu);
        }
}

bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct smp_ltk *k;
        struct smp_irk *irk;
        u8 addr_type;

        if (type == BDADDR_BREDR) {
                if (hci_find_link_key(hdev, bdaddr))
                        return true;
                return false;
        }

        /* Convert to HCI addr type which struct smp_ltk uses */
        if (type == BDADDR_LE_PUBLIC)
                addr_type = ADDR_LE_DEV_PUBLIC;
        else
                addr_type = ADDR_LE_DEV_RANDOM;

        irk = hci_get_irk(hdev, bdaddr, addr_type);
        if (irk) {
                bdaddr = &irk->bdaddr;
                addr_type = irk->addr_type;
        }

        rcu_read_lock();
        list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
                if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) {
                        rcu_read_unlock();
                        return true;
                }
        }
        rcu_read_unlock();

        return false;
}

/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            cmd_timer.work);

        if (hdev->req_skb) {
                u16 opcode = hci_skb_opcode(hdev->req_skb);

                bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);

                hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
        } else {
                bt_dev_err(hdev, "command tx timeout");
        }

        if (hdev->cmd_timeout)
                hdev->cmd_timeout(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

/* HCI ncmd timer function */
static void hci_ncmd_timeout(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            ncmd_timer.work);

        bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0");

        /* During HCI_INIT phase no events can be injected if the ncmd timer
         * triggers since the procedure has its own timeout handling.
         */
        if (test_bit(HCI_INIT, &hdev->flags))
                return;

        /* This is an irrecoverable state, inject hardware error event */
        hci_reset_dev(hdev);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr, u8 bdaddr_type)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list) {
                if (bacmp(bdaddr, &data->bdaddr) != 0)
                        continue;
                if (data->bdaddr_type != bdaddr_type)
                        continue;
                return data;
        }

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
                               u8 bdaddr_type)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type);

        list_del(&data->list);
        kfree(data);

        return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }
}

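/* Store remote OOB data for a device. The data->present field encodes
 * which hash/randomizer pairs are valid: 0x01 = P-192 only,
 * 0x02 = P-256 only, 0x03 = both.
 */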
1603 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
1604                             u8 bdaddr_type, u8 *hash192, u8 *rand192,
1605                             u8 *hash256, u8 *rand256)
1606 {
1607         struct oob_data *data;
1608
1609         data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type);
1610         if (!data) {
1611                 data = kmalloc(sizeof(*data), GFP_KERNEL);
1612                 if (!data)
1613                         return -ENOMEM;
1614
1615                 bacpy(&data->bdaddr, bdaddr);
1616                 data->bdaddr_type = bdaddr_type;
1617                 list_add(&data->list, &hdev->remote_oob_data);
1618         }
1619
1620         if (hash192 && rand192) {
1621                 memcpy(data->hash192, hash192, sizeof(data->hash192));
1622                 memcpy(data->rand192, rand192, sizeof(data->rand192));
1623                 if (hash256 && rand256)
1624                         data->present = 0x03;
1625         } else {
1626                 memset(data->hash192, 0, sizeof(data->hash192));
1627                 memset(data->rand192, 0, sizeof(data->rand192));
1628                 if (hash256 && rand256)
1629                         data->present = 0x02;
1630                 else
1631                         data->present = 0x00;
1632         }
1633
1634         if (hash256 && rand256) {
1635                 memcpy(data->hash256, hash256, sizeof(data->hash256));
1636                 memcpy(data->rand256, rand256, sizeof(data->rand256));
1637         } else {
1638                 memset(data->hash256, 0, sizeof(data->hash256));
1639                 memset(data->rand256, 0, sizeof(data->rand256));
1640                 if (hash192 && rand192)
1641                         data->present = 0x01;
1642         }
1643
1644         BT_DBG("%s for %pMR", hdev->name, bdaddr);
1645
1646         return 0;
1647 }
1648
1649 /* This function requires the caller holds hdev->lock */
1650 struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance)
1651 {
1652         struct adv_info *adv_instance;
1653
1654         list_for_each_entry(adv_instance, &hdev->adv_instances, list) {
1655                 if (adv_instance->instance == instance)
1656                         return adv_instance;
1657         }
1658
1659         return NULL;
1660 }
1661
1662 /* This function requires the caller holds hdev->lock */
1663 struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance)
1664 {
1665         struct adv_info *cur_instance;
1666
1667         cur_instance = hci_find_adv_instance(hdev, instance);
1668         if (!cur_instance)
1669                 return NULL;
1670
1671         if (cur_instance == list_last_entry(&hdev->adv_instances,
1672                                             struct adv_info, list))
1673                 return list_first_entry(&hdev->adv_instances,
1674                                                  struct adv_info, list);
1675         else
1676                 return list_next_entry(cur_instance, list);
1677 }
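
/* Illustrative sketch: hci_get_next_instance() treats the instance list as a
 * ring, wrapping from the last entry back to the first, so an advertising
 * rotation step never has to handle the end of the list itself. Hypothetical
 * helper, assuming hdev->lock is held:
 */
#if 0
static u8 example_next_adv_instance(struct hci_dev *hdev)
{
        struct adv_info *next;

        next = hci_get_next_instance(hdev, hdev->cur_adv_instance);
        return next ? next->instance : 0x00;
}
#endif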
1678
1679 /* This function requires the caller holds hdev->lock */
1680 int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance)
1681 {
1682         struct adv_info *adv_instance;
1683
1684         adv_instance = hci_find_adv_instance(hdev, instance);
1685         if (!adv_instance)
1686                 return -ENOENT;
1687
1688         BT_DBG("%s removing instance %d", hdev->name, instance);
1689
1690         if (hdev->cur_adv_instance == instance) {
1691                 if (hdev->adv_instance_timeout) {
1692                         cancel_delayed_work(&hdev->adv_instance_expire);
1693                         hdev->adv_instance_timeout = 0;
1694                 }
1695                 hdev->cur_adv_instance = 0x00;
1696         }
1697
1698         cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1699
1700         list_del(&adv_instance->list);
1701         kfree(adv_instance);
1702
1703         hdev->adv_instance_cnt--;
1704
1705         return 0;
1706 }
1707
1708 void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired)
1709 {
1710         struct adv_info *adv_instance, *n;
1711
1712         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list)
1713                 adv_instance->rpa_expired = rpa_expired;
1714 }
1715
1716 /* This function requires the caller holds hdev->lock */
1717 void hci_adv_instances_clear(struct hci_dev *hdev)
1718 {
1719         struct adv_info *adv_instance, *n;
1720
1721         if (hdev->adv_instance_timeout) {
1722                 cancel_delayed_work(&hdev->adv_instance_expire);
1723                 hdev->adv_instance_timeout = 0;
1724         }
1725
1726         list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) {
1727                 cancel_delayed_work_sync(&adv_instance->rpa_expired_cb);
1728                 list_del(&adv_instance->list);
1729                 kfree(adv_instance);
1730         }
1731
1732         hdev->adv_instance_cnt = 0;
1733         hdev->cur_adv_instance = 0x00;
1734 }
1735
1736 static void adv_instance_rpa_expired(struct work_struct *work)
1737 {
1738         struct adv_info *adv_instance = container_of(work, struct adv_info,
1739                                                      rpa_expired_cb.work);
1740
1741         BT_DBG("");
1742
1743         adv_instance->rpa_expired = true;
1744 }
1745
1746 /* This function requires the caller holds hdev->lock */
1747 struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,
1748                                       u32 flags, u16 adv_data_len, u8 *adv_data,
1749                                       u16 scan_rsp_len, u8 *scan_rsp_data,
1750                                       u16 timeout, u16 duration, s8 tx_power,
1751                                       u32 min_interval, u32 max_interval,
1752                                       u8 mesh_handle)
1753 {
1754         struct adv_info *adv;
1755
1756         adv = hci_find_adv_instance(hdev, instance);
1757         if (adv) {
1758                 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1759                 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1760                 memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));
1761         } else {
1762                 if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets ||
1763                     instance < 1 || instance > hdev->le_num_of_adv_sets + 1)
1764                         return ERR_PTR(-EOVERFLOW);
1765
1766                 adv = kzalloc(sizeof(*adv), GFP_KERNEL);
1767                 if (!adv)
1768                         return ERR_PTR(-ENOMEM);
1769
1770                 adv->pending = true;
1771                 adv->instance = instance;
1772                 list_add(&adv->list, &hdev->adv_instances);
1773                 hdev->adv_instance_cnt++;
1774         }
1775
1776         adv->flags = flags;
1777         adv->min_interval = min_interval;
1778         adv->max_interval = max_interval;
1779         adv->tx_power = tx_power;
1780         /* Defining a mesh_handle changes the timing units to ms,
1781          * rather than seconds, and ties the instance to the requested
1782          * mesh_tx queue.
1783          */
1784         adv->mesh = mesh_handle;
1785
1786         hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,
1787                                   scan_rsp_len, scan_rsp_data);
1788
1789         adv->timeout = timeout;
1790         adv->remaining_time = timeout;
1791
1792         if (duration == 0)
1793                 adv->duration = hdev->def_multi_adv_rotation_duration;
1794         else
1795                 adv->duration = duration;
1796
1797         INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired);
1798
1799         BT_DBG("%s for instance %d", hdev->name, instance);
1800
1801         return adv;
1802 }
1803
1804 /* This function requires the caller holds hdev->lock */
1805 struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,
1806                                       u32 flags, u8 data_len, u8 *data,
1807                                       u32 min_interval, u32 max_interval)
1808 {
1809         struct adv_info *adv;
1810
1811         adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,
1812                                    0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE,
1813                                    min_interval, max_interval, 0);
1814         if (IS_ERR(adv))
1815                 return adv;
1816
1817         adv->periodic = true;
1818         adv->per_adv_data_len = data_len;
1819
1820         if (data)
1821                 memcpy(adv->per_adv_data, data, data_len);
1822
1823         return adv;
1824 }
1825
1826 /* This function requires the caller holds hdev->lock */
1827 int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance,
1828                               u16 adv_data_len, u8 *adv_data,
1829                               u16 scan_rsp_len, u8 *scan_rsp_data)
1830 {
1831         struct adv_info *adv;
1832
1833         adv = hci_find_adv_instance(hdev, instance);
1834
1835         /* If the advertising instance doesn't exist, we can't modify its data */
1836         if (!adv)
1837                 return -ENOENT;
1838
1839         if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) {
1840                 memset(adv->adv_data, 0, sizeof(adv->adv_data));
1841                 memcpy(adv->adv_data, adv_data, adv_data_len);
1842                 adv->adv_data_len = adv_data_len;
1843                 adv->adv_data_changed = true;
1844         }
1845
1846         if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) {
1847                 memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data));
1848                 memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len);
1849                 adv->scan_rsp_len = scan_rsp_len;
1850                 adv->scan_rsp_changed = true;
1851         }
1852
1853         /* Mark as changed if there are flags which would affect it */
1854         if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) ||
1855             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1856                 adv->scan_rsp_changed = true;
1857
1858         return 0;
1859 }
1860
1861 /* This function requires the caller holds hdev->lock */
1862 u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance)
1863 {
1864         u32 flags;
1865         struct adv_info *adv;
1866
1867         if (instance == 0x00) {
1868                 /* Instance 0 always manages the "Tx Power" and "Flags"
1869                  * fields
1870                  */
1871                 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
1872
1873                 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
1874                  * corresponds to the "connectable" instance flag.
1875                  */
1876                 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
1877                         flags |= MGMT_ADV_FLAG_CONNECTABLE;
1878
1879                 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1880                         flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
1881                 else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1882                         flags |= MGMT_ADV_FLAG_DISCOV;
1883
1884                 return flags;
1885         }
1886
1887         adv = hci_find_adv_instance(hdev, instance);
1888
1889         /* Return 0 when given an invalid instance identifier. */
1890         if (!adv)
1891                 return 0;
1892
1893         return adv->flags;
1894 }
1895
1896 bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance)
1897 {
1898         struct adv_info *adv;
1899
1900         /* Instance 0x00 always sets the local name */
1901         if (instance == 0x00)
1902                 return true;
1903
1904         adv = hci_find_adv_instance(hdev, instance);
1905         if (!adv)
1906                 return false;
1907
1908         if (adv->flags & MGMT_ADV_FLAG_APPEARANCE ||
1909             adv->flags & MGMT_ADV_FLAG_LOCAL_NAME)
1910                 return true;
1911
1912         return adv->scan_rsp_len ? true : false;
1913 }
1914
1915 /* This function requires the caller holds hdev->lock */
1916 void hci_adv_monitors_clear(struct hci_dev *hdev)
1917 {
1918         struct adv_monitor *monitor;
1919         int handle;
1920
1921         idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle)
1922                 hci_free_adv_monitor(hdev, monitor);
1923
1924         idr_destroy(&hdev->adv_monitors_idr);
1925 }
1926
1927 /* Free the monitor structure and do some bookkeeping.
1928  * This function requires the caller holds hdev->lock.
1929  */
1930 void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1931 {
1932         struct adv_pattern *pattern;
1933         struct adv_pattern *tmp;
1934
1935         if (!monitor)
1936                 return;
1937
1938         list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) {
1939                 list_del(&pattern->list);
1940                 kfree(pattern);
1941         }
1942
1943         if (monitor->handle)
1944                 idr_remove(&hdev->adv_monitors_idr, monitor->handle);
1945
1946         if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) {
1947                 hdev->adv_monitors_cnt--;
1948                 mgmt_adv_monitor_removed(hdev, monitor->handle);
1949         }
1950
1951         kfree(monitor);
1952 }
1953
1954 /* Assign a handle to the monitor, and if offloading is supported and the
1955  * device is powered on, also attempt to forward the request to the controller.
1956  * This function requires the caller holds hci_req_sync_lock.
1957  */
1958 int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor)
1959 {
1960         int min, max, handle;
1961         int status = 0;
1962
1963         if (!monitor)
1964                 return -EINVAL;
1965
1966         hci_dev_lock(hdev);
1967
1968         min = HCI_MIN_ADV_MONITOR_HANDLE;
1969         max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES;
1970         handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max,
1971                            GFP_KERNEL);
1972
1973         hci_dev_unlock(hdev);
1974
1975         if (handle < 0)
1976                 return handle;
1977
1978         monitor->handle = handle;
1979
1980         if (!hdev_is_powered(hdev))
1981                 return status;
1982
1983         switch (hci_get_adv_monitor_offload_ext(hdev)) {
1984         case HCI_ADV_MONITOR_EXT_NONE:
1985                 bt_dev_dbg(hdev, "add monitor %d status %d",
1986                            monitor->handle, status);
1987                 /* Message was not forwarded to controller - not an error */
1988                 break;
1989
1990         case HCI_ADV_MONITOR_EXT_MSFT:
1991                 status = msft_add_monitor_pattern(hdev, monitor);
1992                 bt_dev_dbg(hdev, "add monitor %d msft status %d",
1993                            handle, status);
1994                 break;
1995         }
1996
1997         return status;
1998 }
1999
2000 /* Attempt to remove the monitor from the controller and free it. If the
2001  * controller somehow has no corresponding handle, remove it locally anyway.
2002  * This function requires the caller holds hci_req_sync_lock.
2003  */
2004 static int hci_remove_adv_monitor(struct hci_dev *hdev,
2005                                   struct adv_monitor *monitor)
2006 {
2007         int status = 0;
2008         int handle;
2009
2010         switch (hci_get_adv_monitor_offload_ext(hdev)) {
2011         case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */
2012                 bt_dev_dbg(hdev, "remove monitor %d status %d",
2013                            monitor->handle, status);
2014                 goto free_monitor;
2015
2016         case HCI_ADV_MONITOR_EXT_MSFT:
2017                 handle = monitor->handle;
2018                 status = msft_remove_monitor(hdev, monitor);
2019                 bt_dev_dbg(hdev, "remove monitor %d msft status %d",
2020                            handle, status);
2021                 break;
2022         }
2023
2024         /* In case no matching handle is registered, just free the monitor */
2025         if (status == -ENOENT)
2026                 goto free_monitor;
2027
2028         return status;
2029
2030 free_monitor:
2031         if (status == -ENOENT)
2032                 bt_dev_warn(hdev, "Removing monitor with no matching handle %d",
2033                             monitor->handle);
2034         hci_free_adv_monitor(hdev, monitor);
2035
2036         return status;
2037 }
2038
2039 /* This function requires the caller holds hci_req_sync_lock */
2040 int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle)
2041 {
2042         struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle);
2043
2044         if (!monitor)
2045                 return -EINVAL;
2046
2047         return hci_remove_adv_monitor(hdev, monitor);
2048 }
2049
2050 /* This function requires the caller holds hci_req_sync_lock */
2051 int hci_remove_all_adv_monitor(struct hci_dev *hdev)
2052 {
2053         struct adv_monitor *monitor;
2054         int idr_next_id = 0;
2055         int status = 0;
2056
2057         while (1) {
2058                 monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id);
2059                 if (!monitor)
2060                         break;
2061
2062                 status = hci_remove_adv_monitor(hdev, monitor);
2063                 if (status)
2064                         return status;
2065
2066                 idr_next_id++;
2067         }
2068
2069         return status;
2070 }
2071
2072 /* This function requires the caller holds hdev->lock */
2073 bool hci_is_adv_monitoring(struct hci_dev *hdev)
2074 {
2075         return !idr_is_empty(&hdev->adv_monitors_idr);
2076 }
2077
2078 int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev)
2079 {
2080         if (msft_monitor_supported(hdev))
2081                 return HCI_ADV_MONITOR_EXT_MSFT;
2082
2083         return HCI_ADV_MONITOR_EXT_NONE;
2084 }
2085
2086 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
2087                                          bdaddr_t *bdaddr, u8 type)
2088 {
2089         struct bdaddr_list *b;
2090
2091         list_for_each_entry(b, bdaddr_list, list) {
2092                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2093                         return b;
2094         }
2095
2096         return NULL;
2097 }
2098
2099 struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk(
2100                                 struct list_head *bdaddr_list, bdaddr_t *bdaddr,
2101                                 u8 type)
2102 {
2103         struct bdaddr_list_with_irk *b;
2104
2105         list_for_each_entry(b, bdaddr_list, list) {
2106                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2107                         return b;
2108         }
2109
2110         return NULL;
2111 }
2112
2113 struct bdaddr_list_with_flags *
2114 hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list,
2115                                   bdaddr_t *bdaddr, u8 type)
2116 {
2117         struct bdaddr_list_with_flags *b;
2118
2119         list_for_each_entry(b, bdaddr_list, list) {
2120                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2121                         return b;
2122         }
2123
2124         return NULL;
2125 }
2126
2127 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
2128 {
2129         struct bdaddr_list *b, *n;
2130
2131         list_for_each_entry_safe(b, n, bdaddr_list, list) {
2132                 list_del(&b->list);
2133                 kfree(b);
2134         }
2135 }
2136
2137 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2138 {
2139         struct bdaddr_list *entry;
2140
2141         if (!bacmp(bdaddr, BDADDR_ANY))
2142                 return -EBADF;
2143
2144         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2145                 return -EEXIST;
2146
2147         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2148         if (!entry)
2149                 return -ENOMEM;
2150
2151         bacpy(&entry->bdaddr, bdaddr);
2152         entry->bdaddr_type = type;
2153
2154         list_add(&entry->list, list);
2155
2156         return 0;
2157 }
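
/* Illustrative sketch: seeding the LE accept list with the helper above. The
 * address bytes are made up, and bdaddr_t stores them little-endian (b[0] is
 * the least significant byte). List updates elsewhere in this file happen
 * under hdev->lock, so the same is assumed here.
 */
#if 0
static int example_seed_accept_list(struct hci_dev *hdev)
{
        bdaddr_t peer = {{ 0x06, 0x05, 0x04, 0x03, 0x02, 0x01 }};
        int err;

        hci_dev_lock(hdev);
        err = hci_bdaddr_list_add(&hdev->le_accept_list, &peer,
                                  ADDR_LE_DEV_PUBLIC);
        hci_dev_unlock(hdev);

        return err;
}
#endif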
2158
2159 int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2160                                         u8 type, u8 *peer_irk, u8 *local_irk)
2161 {
2162         struct bdaddr_list_with_irk *entry;
2163
2164         if (!bacmp(bdaddr, BDADDR_ANY))
2165                 return -EBADF;
2166
2167         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2168                 return -EEXIST;
2169
2170         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2171         if (!entry)
2172                 return -ENOMEM;
2173
2174         bacpy(&entry->bdaddr, bdaddr);
2175         entry->bdaddr_type = type;
2176
2177         if (peer_irk)
2178                 memcpy(entry->peer_irk, peer_irk, 16);
2179
2180         if (local_irk)
2181                 memcpy(entry->local_irk, local_irk, 16);
2182
2183         list_add(&entry->list, list);
2184
2185         return 0;
2186 }
2187
2188 int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2189                                    u8 type, u32 flags)
2190 {
2191         struct bdaddr_list_with_flags *entry;
2192
2193         if (!bacmp(bdaddr, BDADDR_ANY))
2194                 return -EBADF;
2195
2196         if (hci_bdaddr_list_lookup(list, bdaddr, type))
2197                 return -EEXIST;
2198
2199         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
2200         if (!entry)
2201                 return -ENOMEM;
2202
2203         bacpy(&entry->bdaddr, bdaddr);
2204         entry->bdaddr_type = type;
2205         entry->flags = flags;
2206
2207         list_add(&entry->list, list);
2208
2209         return 0;
2210 }
2211
2212 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
2213 {
2214         struct bdaddr_list *entry;
2215
2216         if (!bacmp(bdaddr, BDADDR_ANY)) {
2217                 hci_bdaddr_list_clear(list);
2218                 return 0;
2219         }
2220
2221         entry = hci_bdaddr_list_lookup(list, bdaddr, type);
2222         if (!entry)
2223                 return -ENOENT;
2224
2225         list_del(&entry->list);
2226         kfree(entry);
2227
2228         return 0;
2229 }
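
/* Illustrative sketch: passing BDADDR_ANY to the delete helpers above empties
 * the whole list, so a hypothetical "forget everything" path needs no loop.
 */
#if 0
static void example_flush_reject_list(struct hci_dev *hdev)
{
        hci_dev_lock(hdev);
        hci_bdaddr_list_del(&hdev->reject_list, BDADDR_ANY, BDADDR_BREDR);
        hci_dev_unlock(hdev);
}
#endif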
2230
2231 int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr,
2232                                                         u8 type)
2233 {
2234         struct bdaddr_list_with_irk *entry;
2235
2236         if (!bacmp(bdaddr, BDADDR_ANY)) {
2237                 hci_bdaddr_list_clear(list);
2238                 return 0;
2239         }
2240
2241         entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type);
2242         if (!entry)
2243                 return -ENOENT;
2244
2245         list_del(&entry->list);
2246         kfree(entry);
2247
2248         return 0;
2249 }
2250
2251 int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr,
2252                                    u8 type)
2253 {
2254         struct bdaddr_list_with_flags *entry;
2255
2256         if (!bacmp(bdaddr, BDADDR_ANY)) {
2257                 hci_bdaddr_list_clear(list);
2258                 return 0;
2259         }
2260
2261         entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type);
2262         if (!entry)
2263                 return -ENOENT;
2264
2265         list_del(&entry->list);
2266         kfree(entry);
2267
2268         return 0;
2269 }
2270
2271 /* This function requires the caller holds hdev->lock */
2272 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2273                                                bdaddr_t *addr, u8 addr_type)
2274 {
2275         struct hci_conn_params *params;
2276
2277         list_for_each_entry(params, &hdev->le_conn_params, list) {
2278                 if (bacmp(&params->addr, addr) == 0 &&
2279                     params->addr_type == addr_type) {
2280                         return params;
2281                 }
2282         }
2283
2284         return NULL;
2285 }
2286
2287 /* This function requires the caller holds hdev->lock or rcu_read_lock */
2288 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
2289                                                   bdaddr_t *addr, u8 addr_type)
2290 {
2291         struct hci_conn_params *param;
2292
2293         rcu_read_lock();
2294
2295         list_for_each_entry_rcu(param, list, action) {
2296                 if (bacmp(&param->addr, addr) == 0 &&
2297                     param->addr_type == addr_type) {
2298                         rcu_read_unlock();
2299                         return param;
2300                 }
2301         }
2302
2303         rcu_read_unlock();
2304
2305         return NULL;
2306 }
2307
2308 /* This function requires the caller holds hdev->lock */
2309 void hci_pend_le_list_del_init(struct hci_conn_params *param)
2310 {
2311         if (list_empty(&param->action))
2312                 return;
2313
2314         list_del_rcu(&param->action);
2315         synchronize_rcu();
2316         INIT_LIST_HEAD(&param->action);
2317 }
2318
2319 /* This function requires the caller holds hdev->lock */
2320 void hci_pend_le_list_add(struct hci_conn_params *param,
2321                           struct list_head *list)
2322 {
2323         list_add_rcu(&param->action, list);
2324 }
2325
2326 /* This function requires the caller holds hdev->lock */
2327 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
2328                                             bdaddr_t *addr, u8 addr_type)
2329 {
2330         struct hci_conn_params *params;
2331
2332         params = hci_conn_params_lookup(hdev, addr, addr_type);
2333         if (params)
2334                 return params;
2335
2336         params = kzalloc(sizeof(*params), GFP_KERNEL);
2337         if (!params) {
2338                 bt_dev_err(hdev, "out of memory");
2339                 return NULL;
2340         }
2341
2342         bacpy(&params->addr, addr);
2343         params->addr_type = addr_type;
2344
2345         list_add(&params->list, &hdev->le_conn_params);
2346         INIT_LIST_HEAD(&params->action);
2347
2348         params->conn_min_interval = hdev->le_conn_min_interval;
2349         params->conn_max_interval = hdev->le_conn_max_interval;
2350         params->conn_latency = hdev->le_conn_latency;
2351         params->supervision_timeout = hdev->le_supv_timeout;
2352         params->auto_connect = HCI_AUTO_CONN_DISABLED;
2353
2354         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2355
2356         return params;
2357 }
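
/* Illustrative sketch (assumes hdev->lock is held, as the helpers require):
 * marking a peer for autonomous reconnection by combining hci_conn_params_add()
 * with the pend_le list helpers above. The function name is invented.
 */
#if 0
static int example_enable_autoconnect(struct hci_dev *hdev, bdaddr_t *addr,
                                      u8 addr_type)
{
        struct hci_conn_params *params;

        params = hci_conn_params_add(hdev, addr, addr_type);
        if (!params)
                return -ENOMEM;

        params->auto_connect = HCI_AUTO_CONN_ALWAYS;
        hci_pend_le_list_add(params, &hdev->pend_le_conns);

        return 0;
}
#endif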
2358
2359 void hci_conn_params_free(struct hci_conn_params *params)
2360 {
2361         hci_pend_le_list_del_init(params);
2362
2363         if (params->conn) {
2364                 hci_conn_drop(params->conn);
2365                 hci_conn_put(params->conn);
2366         }
2367
2368         list_del(&params->list);
2369         kfree(params);
2370 }
2371
2372 /* This function requires the caller holds hdev->lock */
2373 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2374 {
2375         struct hci_conn_params *params;
2376
2377         params = hci_conn_params_lookup(hdev, addr, addr_type);
2378         if (!params)
2379                 return;
2380
2381         hci_conn_params_free(params);
2382
2383         hci_update_passive_scan(hdev);
2384
2385         BT_DBG("addr %pMR (type %u)", addr, addr_type);
2386 }
2387
2388 /* This function requires the caller holds hdev->lock */
2389 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
2390 {
2391         struct hci_conn_params *params, *tmp;
2392
2393         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
2394                 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
2395                         continue;
2396
2397                 /* If trying to establish a one-time connection to a disabled
2398                  * device, leave the params but mark them for explicit connect only.
2399                  */
2400                 if (params->explicit_connect) {
2401                         params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
2402                         continue;
2403                 }
2404
2405                 hci_conn_params_free(params);
2406         }
2407
2408         BT_DBG("All LE disabled connection parameters were removed");
2409 }
2410
2411 /* This function requires the caller holds hdev->lock */
2412 static void hci_conn_params_clear_all(struct hci_dev *hdev)
2413 {
2414         struct hci_conn_params *params, *tmp;
2415
2416         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
2417                 hci_conn_params_free(params);
2418
2419         BT_DBG("All LE connection parameters were removed");
2420 }
2421
2422 /* Copy the Identity Address of the controller.
2423  *
2424  * If the controller has a public BD_ADDR, then by default use that one.
2425  * If this is an LE-only controller without a public address, default to
2426  * the static random address.
2427  *
2428  * For debugging purposes it is possible to force controllers with a
2429  * public address to use the static random address instead.
2430  *
2431  * In case BR/EDR has been disabled on a dual-mode controller and
2432  * userspace has configured a static address, then that address
2433  * becomes the identity address instead of the public BR/EDR address.
2434  */
2435 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
2436                                u8 *bdaddr_type)
2437 {
2438         if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
2439             !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
2440             (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
2441              bacmp(&hdev->static_addr, BDADDR_ANY))) {
2442                 bacpy(bdaddr, &hdev->static_addr);
2443                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
2444         } else {
2445                 bacpy(bdaddr, &hdev->bdaddr);
2446                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
2447         }
2448 }
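
/* Illustrative sketch: callers get a single (address, type) pair and never
 * repeat the selection logic above. Hypothetical debug helper:
 */
#if 0
static void example_log_identity(struct hci_dev *hdev)
{
        bdaddr_t bdaddr;
        u8 bdaddr_type;

        hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
        bt_dev_dbg(hdev, "identity %pMR (type %u)", &bdaddr, bdaddr_type);
}
#endif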
2449
2450 static void hci_clear_wake_reason(struct hci_dev *hdev)
2451 {
2452         hci_dev_lock(hdev);
2453
2454         hdev->wake_reason = 0;
2455         bacpy(&hdev->wake_addr, BDADDR_ANY);
2456         hdev->wake_addr_type = 0;
2457
2458         hci_dev_unlock(hdev);
2459 }
2460
2461 static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
2462                                 void *data)
2463 {
2464         struct hci_dev *hdev =
2465                 container_of(nb, struct hci_dev, suspend_notifier);
2466         int ret = 0;
2467
2468         /* Userspace has full control of this device. Do nothing. */
2469         if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
2470                 return NOTIFY_DONE;
2471
2472         /* To avoid a potential race with hci_unregister_dev. */
2473         hci_dev_hold(hdev);
2474
2475         if (action == PM_SUSPEND_PREPARE)
2476                 ret = hci_suspend_dev(hdev);
2477         else if (action == PM_POST_SUSPEND)
2478                 ret = hci_resume_dev(hdev);
2479
2480         if (ret)
2481                 bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d",
2482                            action, ret);
2483
2484         hci_dev_put(hdev);
2485         return NOTIFY_DONE;
2486 }
2487
2488 /* Alloc HCI device */
2489 struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
2490 {
2491         struct hci_dev *hdev;
2492         unsigned int alloc_size;
2493
2494         alloc_size = sizeof(*hdev);
2495         if (sizeof_priv) {
2496                 /* FIXME: May need ALIGN-ment? */
2497                 alloc_size += sizeof_priv;
2498         }
2499
2500         hdev = kzalloc(alloc_size, GFP_KERNEL);
2501         if (!hdev)
2502                 return NULL;
2503
2504         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2505         hdev->esco_type = (ESCO_HV1);
2506         hdev->link_mode = (HCI_LM_ACCEPT);
2507         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
2508         hdev->io_capability = 0x03;     /* No Input No Output */
2509         hdev->manufacturer = 0xffff;    /* Default to internal use */
2510         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2511         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2512         hdev->adv_instance_cnt = 0;
2513         hdev->cur_adv_instance = 0x00;
2514         hdev->adv_instance_timeout = 0;
2515
2516         hdev->advmon_allowlist_duration = 300;
2517         hdev->advmon_no_filter_duration = 500;
2518         hdev->enable_advmon_interleave_scan = 0x00;     /* Default to disable */
2519
2520         hdev->sniff_max_interval = 800;
2521         hdev->sniff_min_interval = 80;
2522
2523         hdev->le_adv_channel_map = 0x07;
2524         hdev->le_adv_min_interval = 0x0800;
2525         hdev->le_adv_max_interval = 0x0800;
2526         hdev->le_scan_interval = 0x0060;
2527         hdev->le_scan_window = 0x0030;
2528         hdev->le_scan_int_suspend = 0x0400;
2529         hdev->le_scan_window_suspend = 0x0012;
2530         hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT;
2531         hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN;
2532         hdev->le_scan_int_adv_monitor = 0x0060;
2533         hdev->le_scan_window_adv_monitor = 0x0030;
2534         hdev->le_scan_int_connect = 0x0060;
2535         hdev->le_scan_window_connect = 0x0060;
2536         hdev->le_conn_min_interval = 0x0018;
2537         hdev->le_conn_max_interval = 0x0028;
2538         hdev->le_conn_latency = 0x0000;
2539         hdev->le_supv_timeout = 0x002a;
2540         hdev->le_def_tx_len = 0x001b;
2541         hdev->le_def_tx_time = 0x0148;
2542         hdev->le_max_tx_len = 0x001b;
2543         hdev->le_max_tx_time = 0x0148;
2544         hdev->le_max_rx_len = 0x001b;
2545         hdev->le_max_rx_time = 0x0148;
2546         hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE;
2547         hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE;
2548         hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M;
2549         hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M;
2550         hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES;
2551         hdev->def_multi_adv_rotation_duration = HCI_DEFAULT_ADV_DURATION;
2552         hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT;
2553         hdev->min_le_tx_power = HCI_TX_POWER_INVALID;
2554         hdev->max_le_tx_power = HCI_TX_POWER_INVALID;
2555
2556         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
2557         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
2558         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
2559         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
2560         hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT;
2561         hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE;
2562
2563         /* default 1.28 sec page scan */
2564         hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD;
2565         hdev->def_page_scan_int = 0x0800;
2566         hdev->def_page_scan_window = 0x0012;
2567
2568         mutex_init(&hdev->lock);
2569         mutex_init(&hdev->req_lock);
2570
2571         ida_init(&hdev->unset_handle_ida);
2572
2573         INIT_LIST_HEAD(&hdev->mesh_pending);
2574         INIT_LIST_HEAD(&hdev->mgmt_pending);
2575         INIT_LIST_HEAD(&hdev->reject_list);
2576         INIT_LIST_HEAD(&hdev->accept_list);
2577         INIT_LIST_HEAD(&hdev->uuids);
2578         INIT_LIST_HEAD(&hdev->link_keys);
2579         INIT_LIST_HEAD(&hdev->long_term_keys);
2580         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
2581         INIT_LIST_HEAD(&hdev->remote_oob_data);
2582         INIT_LIST_HEAD(&hdev->le_accept_list);
2583         INIT_LIST_HEAD(&hdev->le_resolv_list);
2584         INIT_LIST_HEAD(&hdev->le_conn_params);
2585         INIT_LIST_HEAD(&hdev->pend_le_conns);
2586         INIT_LIST_HEAD(&hdev->pend_le_reports);
2587         INIT_LIST_HEAD(&hdev->conn_hash.list);
2588         INIT_LIST_HEAD(&hdev->adv_instances);
2589         INIT_LIST_HEAD(&hdev->blocked_keys);
2590         INIT_LIST_HEAD(&hdev->monitored_devices);
2591
2592         INIT_LIST_HEAD(&hdev->local_codecs);
2593         INIT_WORK(&hdev->rx_work, hci_rx_work);
2594         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2595         INIT_WORK(&hdev->tx_work, hci_tx_work);
2596         INIT_WORK(&hdev->power_on, hci_power_on);
2597         INIT_WORK(&hdev->error_reset, hci_error_reset);
2598
2599         hci_cmd_sync_init(hdev);
2600
2601         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2602
2603         skb_queue_head_init(&hdev->rx_q);
2604         skb_queue_head_init(&hdev->cmd_q);
2605         skb_queue_head_init(&hdev->raw_q);
2606
2607         init_waitqueue_head(&hdev->req_wait_q);
2608
2609         INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
2610         INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout);
2611
2612         hci_devcd_setup(hdev);
2613         hci_request_setup(hdev);
2614
2615         hci_init_sysfs(hdev);
2616         discovery_init(hdev);
2617
2618         return hdev;
2619 }
2620 EXPORT_SYMBOL(hci_alloc_dev_priv);
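
/* Illustrative sketch: a driver passing a non-zero sizeof_priv reaches its
 * private area via hci_get_priv(). The example_priv struct and its contents
 * are invented for the example.
 */
#if 0
struct example_priv {
        int irq;
};

static struct hci_dev *example_alloc(void)
{
        struct hci_dev *hdev;

        hdev = hci_alloc_dev_priv(sizeof(struct example_priv));
        if (hdev) {
                struct example_priv *priv = hci_get_priv(hdev);

                priv->irq = -1; /* hypothetical default */
        }

        return hdev;
}
#endif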
2621
2622 /* Free HCI device */
2623 void hci_free_dev(struct hci_dev *hdev)
2624 {
2625         /* Will be freed via the device release callback */
2626         put_device(&hdev->dev);
2627 }
2628 EXPORT_SYMBOL(hci_free_dev);
2629
2630 /* Register HCI device */
2631 int hci_register_dev(struct hci_dev *hdev)
2632 {
2633         int id, error;
2634
2635         if (!hdev->open || !hdev->close || !hdev->send)
2636                 return -EINVAL;
2637
2638         /* Do not allow HCI_AMP devices to register at index 0,
2639          * so the index can be used as the AMP controller ID.
2640          */
2641         switch (hdev->dev_type) {
2642         case HCI_PRIMARY:
2643                 id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
2644                 break;
2645         case HCI_AMP:
2646                 id = ida_alloc_range(&hci_index_ida, 1, HCI_MAX_ID - 1,
2647                                      GFP_KERNEL);
2648                 break;
2649         default:
2650                 return -EINVAL;
2651         }
2652
2653         if (id < 0)
2654                 return id;
2655
2656         error = dev_set_name(&hdev->dev, "hci%u", id);
2657         if (error)
2658                 return error;
2659
2660         hdev->name = dev_name(&hdev->dev);
2661         hdev->id = id;
2662
2663         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2664
2665         hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name);
2666         if (!hdev->workqueue) {
2667                 error = -ENOMEM;
2668                 goto err;
2669         }
2670
2671         hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI,
2672                                                       hdev->name);
2673         if (!hdev->req_workqueue) {
2674                 destroy_workqueue(hdev->workqueue);
2675                 error = -ENOMEM;
2676                 goto err;
2677         }
2678
2679         if (!IS_ERR_OR_NULL(bt_debugfs))
2680                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
2681
2682         error = device_add(&hdev->dev);
2683         if (error < 0)
2684                 goto err_wqueue;
2685
2686         hci_leds_init(hdev);
2687
2688         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
2689                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2690                                     hdev);
2691         if (hdev->rfkill) {
2692                 if (rfkill_register(hdev->rfkill) < 0) {
2693                         rfkill_destroy(hdev->rfkill);
2694                         hdev->rfkill = NULL;
2695                 }
2696         }
2697
2698         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2699                 hci_dev_set_flag(hdev, HCI_RFKILLED);
2700
2701         hci_dev_set_flag(hdev, HCI_SETUP);
2702         hci_dev_set_flag(hdev, HCI_AUTO_OFF);
2703
2704         if (hdev->dev_type == HCI_PRIMARY) {
2705                 /* Assume BR/EDR support until proven otherwise (such as
2706                  * through reading supported features during init).
2707                  */
2708                 hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);
2709         }
2710
2711         write_lock(&hci_dev_list_lock);
2712         list_add(&hdev->list, &hci_dev_list);
2713         write_unlock(&hci_dev_list_lock);
2714
2715         /* Devices that are marked for raw-only usage are unconfigured
2716          * and should not be included in normal operation.
2717          */
2718         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2719                 hci_dev_set_flag(hdev, HCI_UNCONFIGURED);
2720
2721         /* Mark Remote Wakeup connection flag as supported if driver has wakeup
2722          * callback.
2723          */
2724         if (hdev->wakeup)
2725                 hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP;
2726
2727         hci_sock_dev_event(hdev, HCI_DEV_REG);
2728         hci_dev_hold(hdev);
2729
2730         error = hci_register_suspend_notifier(hdev);
2731         if (error)
2732                 BT_WARN("register suspend notifier failed error:%d\n", error);
2733
2734         queue_work(hdev->req_workqueue, &hdev->power_on);
2735
2736         idr_init(&hdev->adv_monitors_idr);
2737         msft_register(hdev);
2738
2739         return id;
2740
2741 err_wqueue:
2742         debugfs_remove_recursive(hdev->debugfs);
2743         destroy_workqueue(hdev->workqueue);
2744         destroy_workqueue(hdev->req_workqueue);
2745 err:
2746         ida_free(&hci_index_ida, hdev->id);
2747
2748         return error;
2749 }
2750 EXPORT_SYMBOL(hci_register_dev);
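
/* Illustrative sketch of the minimal driver contract enforced above: a
 * transport only has to provide open/close/send before registering. All names
 * here are invented; on success hci_register_dev() returns the new index.
 */
#if 0
static int example_open(struct hci_dev *hdev) { return 0; }
static int example_close(struct hci_dev *hdev) { return 0; }

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
        kfree_skb(skb); /* a real driver hands the skb to its transport */
        return 0;
}

static int example_probe(void)
{
        struct hci_dev *hdev;
        int id;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus = HCI_VIRTUAL;
        hdev->open = example_open;
        hdev->close = example_close;
        hdev->send = example_send;

        id = hci_register_dev(hdev);
        if (id < 0) {
                hci_free_dev(hdev);
                return id;
        }

        return 0;
}
#endif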
2751
2752 /* Unregister HCI device */
2753 void hci_unregister_dev(struct hci_dev *hdev)
2754 {
2755         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2756
2757         mutex_lock(&hdev->unregister_lock);
2758         hci_dev_set_flag(hdev, HCI_UNREGISTER);
2759         mutex_unlock(&hdev->unregister_lock);
2760
2761         write_lock(&hci_dev_list_lock);
2762         list_del(&hdev->list);
2763         write_unlock(&hci_dev_list_lock);
2764
2765         cancel_work_sync(&hdev->power_on);
2766
2767         hci_cmd_sync_clear(hdev);
2768
2769         hci_unregister_suspend_notifier(hdev);
2770
2771         msft_unregister(hdev);
2772
2773         hci_dev_do_close(hdev);
2774
2775         if (!test_bit(HCI_INIT, &hdev->flags) &&
2776             !hci_dev_test_flag(hdev, HCI_SETUP) &&
2777             !hci_dev_test_flag(hdev, HCI_CONFIG)) {
2778                 hci_dev_lock(hdev);
2779                 mgmt_index_removed(hdev);
2780                 hci_dev_unlock(hdev);
2781         }
2782
2783         /* mgmt_index_removed should take care of emptying the
2784          * pending list. */
2785         BUG_ON(!list_empty(&hdev->mgmt_pending));
2786
2787         hci_sock_dev_event(hdev, HCI_DEV_UNREG);
2788
2789         if (hdev->rfkill) {
2790                 rfkill_unregister(hdev->rfkill);
2791                 rfkill_destroy(hdev->rfkill);
2792         }
2793
2794         device_del(&hdev->dev);
2795         /* Actual cleanup is deferred until hci_release_dev(). */
2796         hci_dev_put(hdev);
2797 }
2798 EXPORT_SYMBOL(hci_unregister_dev);
2799
2800 /* Release HCI device */
2801 void hci_release_dev(struct hci_dev *hdev)
2802 {
2803         debugfs_remove_recursive(hdev->debugfs);
2804         kfree_const(hdev->hw_info);
2805         kfree_const(hdev->fw_info);
2806
2807         destroy_workqueue(hdev->workqueue);
2808         destroy_workqueue(hdev->req_workqueue);
2809
2810         hci_dev_lock(hdev);
2811         hci_bdaddr_list_clear(&hdev->reject_list);
2812         hci_bdaddr_list_clear(&hdev->accept_list);
2813         hci_uuids_clear(hdev);
2814         hci_link_keys_clear(hdev);
2815         hci_smp_ltks_clear(hdev);
2816         hci_smp_irks_clear(hdev);
2817         hci_remote_oob_data_clear(hdev);
2818         hci_adv_instances_clear(hdev);
2819         hci_adv_monitors_clear(hdev);
2820         hci_bdaddr_list_clear(&hdev->le_accept_list);
2821         hci_bdaddr_list_clear(&hdev->le_resolv_list);
2822         hci_conn_params_clear_all(hdev);
2823         hci_discovery_filter_clear(hdev);
2824         hci_blocked_keys_clear(hdev);
2825         hci_codec_list_clear(&hdev->local_codecs);
2826         hci_dev_unlock(hdev);
2827
2828         ida_destroy(&hdev->unset_handle_ida);
2829         ida_free(&hci_index_ida, hdev->id);
2830         kfree_skb(hdev->sent_cmd);
2831         kfree_skb(hdev->req_skb);
2832         kfree_skb(hdev->recv_event);
2833         kfree(hdev);
2834 }
2835 EXPORT_SYMBOL(hci_release_dev);
2836
2837 int hci_register_suspend_notifier(struct hci_dev *hdev)
2838 {
2839         int ret = 0;
2840
2841         if (!hdev->suspend_notifier.notifier_call &&
2842             !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) {
2843                 hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
2844                 ret = register_pm_notifier(&hdev->suspend_notifier);
2845         }
2846
2847         return ret;
2848 }
2849
2850 int hci_unregister_suspend_notifier(struct hci_dev *hdev)
2851 {
2852         int ret = 0;
2853
2854         if (hdev->suspend_notifier.notifier_call) {
2855                 ret = unregister_pm_notifier(&hdev->suspend_notifier);
2856                 if (!ret)
2857                         hdev->suspend_notifier.notifier_call = NULL;
2858         }
2859
2860         return ret;
2861 }
2862
2863 /* Cancel ongoing command synchronously:
2864  *
2865  * - Cancel command timer
2866  * - Reset command counter
2867  * - Cancel command request
2868  */
2869 static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
2870 {
2871         bt_dev_dbg(hdev, "err 0x%2.2x", err);
2872
2873         cancel_delayed_work_sync(&hdev->cmd_timer);
2874         cancel_delayed_work_sync(&hdev->ncmd_timer);
2875         atomic_set(&hdev->cmd_cnt, 1);
2876
2877         hci_cmd_sync_cancel_sync(hdev, err);
2878 }
2879
2880 /* Suspend HCI device */
2881 int hci_suspend_dev(struct hci_dev *hdev)
2882 {
2883         int ret;
2884
2885         bt_dev_dbg(hdev, "");
2886
2887         /* Suspend should only act when powered. */
2888         if (!hdev_is_powered(hdev) ||
2889             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2890                 return 0;
2891
2892         /* If powering down, don't attempt to suspend */
2893         if (mgmt_powering_down(hdev))
2894                 return 0;
2895
2896         /* Cancel potentially blocking sync operation before suspend */
2897         hci_cancel_cmd_sync(hdev, EHOSTDOWN);
2898
2899         hci_req_sync_lock(hdev);
2900         ret = hci_suspend_sync(hdev);
2901         hci_req_sync_unlock(hdev);
2902
2903         hci_clear_wake_reason(hdev);
2904         mgmt_suspending(hdev, hdev->suspend_state);
2905
2906         hci_sock_dev_event(hdev, HCI_DEV_SUSPEND);
2907         return ret;
2908 }
2909 EXPORT_SYMBOL(hci_suspend_dev);
2910
2911 /* Resume HCI device */
2912 int hci_resume_dev(struct hci_dev *hdev)
2913 {
2914         int ret;
2915
2916         bt_dev_dbg(hdev, "");
2917
2918         /* Resume should only act when powered. */
2919         if (!hdev_is_powered(hdev) ||
2920             hci_dev_test_flag(hdev, HCI_UNREGISTER))
2921                 return 0;
2922
2923         /* If powering down, don't attempt to resume */
2924         if (mgmt_powering_down(hdev))
2925                 return 0;
2926
2927         hci_req_sync_lock(hdev);
2928         ret = hci_resume_sync(hdev);
2929         hci_req_sync_unlock(hdev);
2930
2931         mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr,
2932                       hdev->wake_addr_type);
2933
2934         hci_sock_dev_event(hdev, HCI_DEV_RESUME);
2935         return ret;
2936 }
2937 EXPORT_SYMBOL(hci_resume_dev);
2938
2939 /* Reset HCI device */
2940 int hci_reset_dev(struct hci_dev *hdev)
2941 {
2942         static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
2943         struct sk_buff *skb;
2944
2945         skb = bt_skb_alloc(3, GFP_ATOMIC);
2946         if (!skb)
2947                 return -ENOMEM;
2948
2949         hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
2950         skb_put_data(skb, hw_err, 3);
2951
2952         bt_dev_err(hdev, "Injecting HCI hardware error event");
2953
2954         /* Send Hardware Error to upper stack */
2955         return hci_recv_frame(hdev, skb);
2956 }
2957 EXPORT_SYMBOL(hci_reset_dev);
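
/* Illustrative sketch: a transport driver that detects an unrecoverable
 * controller state can use hci_reset_dev() to inject the hardware error
 * event above and let the core drive recovery. The helper name is invented.
 */
#if 0
static void example_handle_fatal_error(struct hci_dev *hdev)
{
        bt_dev_err(hdev, "fatal transport error, requesting reset");
        hci_reset_dev(hdev);
}
#endif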
2958
2959 /* Receive frame from HCI drivers */
2960 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
2961 {
2962         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
2963                       && !test_bit(HCI_INIT, &hdev->flags))) {
2964                 kfree_skb(skb);
2965                 return -ENXIO;
2966         }
2967
2968         switch (hci_skb_pkt_type(skb)) {
2969         case HCI_EVENT_PKT:
2970                 break;
2971         case HCI_ACLDATA_PKT:
2972                 /* Detect if ISO packet has been sent as ACL */
2973                 if (hci_conn_num(hdev, ISO_LINK)) {
2974                         __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle);
2975                         __u8 type;
2976
2977                         type = hci_conn_lookup_type(hdev, hci_handle(handle));
2978                         if (type == ISO_LINK)
2979                                 hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
2980                 }
2981                 break;
2982         case HCI_SCODATA_PKT:
2983                 break;
2984         case HCI_ISODATA_PKT:
2985                 break;
2986         default:
2987                 kfree_skb(skb);
2988                 return -EINVAL;
2989         }
2990
2991         /* Incoming skb */
2992         bt_cb(skb)->incoming = 1;
2993
2994         /* Time stamp */
2995         __net_timestamp(skb);
2996
2997         skb_queue_tail(&hdev->rx_q, skb);
2998         queue_work(hdev->workqueue, &hdev->rx_work);
2999
3000         return 0;
3001 }
3002 EXPORT_SYMBOL(hci_recv_frame);
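
/* Illustrative sketch of the driver-side RX contract: tag the skb with its
 * packet type before handing it over; hci_recv_frame() takes ownership of the
 * skb in both the success and error paths. The helper is hypothetical.
 */
#if 0
static int example_deliver_event(struct hci_dev *hdev, const void *buf,
                                 size_t len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
        skb_put_data(skb, buf, len);

        return hci_recv_frame(hdev, skb);
}
#endif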
3003
3004 /* Receive diagnostic message from HCI drivers */
3005 int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
3006 {
3007         /* Mark as diagnostic packet */
3008         hci_skb_pkt_type(skb) = HCI_DIAG_PKT;
3009
3010         /* Time stamp */
3011         __net_timestamp(skb);
3012
3013         skb_queue_tail(&hdev->rx_q, skb);
3014         queue_work(hdev->workqueue, &hdev->rx_work);
3015
3016         return 0;
3017 }
3018 EXPORT_SYMBOL(hci_recv_diag);
3019
3020 void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...)
3021 {
3022         va_list vargs;
3023
3024         va_start(vargs, fmt);
3025         kfree_const(hdev->hw_info);
3026         hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3027         va_end(vargs);
3028 }
3029 EXPORT_SYMBOL(hci_set_hw_info);
3030
3031 void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...)
3032 {
3033         va_list vargs;
3034
3035         va_start(vargs, fmt);
3036         kfree_const(hdev->fw_info);
3037         hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs);
3038         va_end(vargs);
3039 }
3040 EXPORT_SYMBOL(hci_set_fw_info);
3041
3042 /* ---- Interface to upper protocols ---- */
3043
3044 int hci_register_cb(struct hci_cb *cb)
3045 {
3046         BT_DBG("%p name %s", cb, cb->name);
3047
3048         mutex_lock(&hci_cb_list_lock);
3049         list_add_tail(&cb->list, &hci_cb_list);
3050         mutex_unlock(&hci_cb_list_lock);
3051
3052         return 0;
3053 }
3054 EXPORT_SYMBOL(hci_register_cb);
3055
3056 int hci_unregister_cb(struct hci_cb *cb)
3057 {
3058         BT_DBG("%p name %s", cb, cb->name);
3059
3060         mutex_lock(&hci_cb_list_lock);
3061         list_del(&cb->list);
3062         mutex_unlock(&hci_cb_list_lock);
3063
3064         return 0;
3065 }
3066 EXPORT_SYMBOL(hci_unregister_cb);
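
/* Illustrative sketch: how an upper protocol hooks connection events through
 * the callback list above. The callback set is hypothetical; real users such
 * as L2CAP fill in whichever hci_cb callbacks they need.
 */
#if 0
static void example_connect_cfm(struct hci_conn *conn, __u8 status)
{
        BT_DBG("conn %p status 0x%2.2x", conn, status);
}

static struct hci_cb example_cb = {
        .name           = "example",
        .connect_cfm    = example_connect_cfm,
};

static int __init example_init(void)
{
        return hci_register_cb(&example_cb);
}
#endif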
3067
3068 static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3069 {
3070         int err;
3071
3072         BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb),
3073                skb->len);
3074
3075         /* Time stamp */
3076         __net_timestamp(skb);
3077
3078         /* Send copy to monitor */
3079         hci_send_to_monitor(hdev, skb);
3080
3081         if (atomic_read(&hdev->promisc)) {
3082                 /* Send copy to the sockets */
3083                 hci_send_to_sock(hdev, skb);
3084         }
3085
3086         /* Get rid of skb owner prior to sending to the driver. */
3087         skb_orphan(skb);
3088
3089         if (!test_bit(HCI_RUNNING, &hdev->flags)) {
3090                 kfree_skb(skb);
3091                 return -EINVAL;
3092         }
3093
3094         err = hdev->send(hdev, skb);
3095         if (err < 0) {
3096                 bt_dev_err(hdev, "sending frame failed (%d)", err);
3097                 kfree_skb(skb);
3098                 return err;
3099         }
3100
3101         return 0;
3102 }
3103
3104 /* Send HCI command */
3105 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3106                  const void *param)
3107 {
3108         struct sk_buff *skb;
3109
3110         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3111
3112         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3113         if (!skb) {
3114                 bt_dev_err(hdev, "no memory for command");
3115                 return -ENOMEM;
3116         }
3117
3118         /* Stand-alone HCI commands must be flagged as
3119          * single-command requests.
3120          */
3121         bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
3122
3123         skb_queue_tail(&hdev->cmd_q, skb);
3124         queue_work(hdev->workqueue, &hdev->cmd_work);
3125
3126         return 0;
3127 }
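
/* Illustrative sketch: queueing a parameterless command through the normal
 * command work path. The completion is delivered via the event handlers, not
 * returned here.
 */
#if 0
static int example_queue_reset(struct hci_dev *hdev)
{
        return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
#endif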
3128
3129 int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
3130                    const void *param)
3131 {
3132         struct sk_buff *skb;
3133
3134         if (hci_opcode_ogf(opcode) != 0x3f) {
3135                 /* A controller receiving a command shall respond with either
3136                  * a Command Status Event or a Command Complete Event.
3137                  * Therefore, all standard HCI commands must be sent via the
3138                  * standard API, using hci_send_cmd or hci_cmd_sync helpers.
3139                  * Some vendors do not comply with this rule for vendor-specific
3140                  * commands and do not return any event. We want to support
3141                  * unresponded commands for such cases only.
3142                  */
3143                 bt_dev_err(hdev, "unresponded command not supported");
3144                 return -EINVAL;
3145         }
3146
3147         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3148         if (!skb) {
3149                 bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
3150                            opcode);
3151                 return -ENOMEM;
3152         }
3153
3154         hci_send_frame(hdev, skb);
3155
3156         return 0;
3157 }
3158 EXPORT_SYMBOL(__hci_cmd_send);
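
/* Illustrative sketch: __hci_cmd_send() only accepts vendor commands
 * (OGF 0x3f), so the opcode is packed explicitly here. The OCF value and
 * parameter are placeholders, not a real vendor interface.
 */
#if 0
static int example_send_vendor_cmd(struct hci_dev *hdev)
{
        u8 param = 0x01; /* hypothetical vendor parameter */

        return __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
                              sizeof(param), &param);
}
#endif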
3159
3160 /* Get data from the previously sent command */
3161 static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
3162 {
3163         struct hci_command_hdr *hdr;
3164
3165         if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
3166                 return NULL;
3167
3168         hdr = (void *)skb->data;
3169
3170         if (hdr->opcode != cpu_to_le16(opcode))
3171                 return NULL;
3172
3173         return skb->data + HCI_COMMAND_HDR_SIZE;
3174 }
3175
3176 /* Get data from the previously sent command */
3177 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3178 {
3179         void *data;
3180
3181         /* Check if opcode matches last sent command */
3182         data = hci_cmd_data(hdev->sent_cmd, opcode);
3183         if (!data)
3184                 /* Check if opcode matches last request */
3185                 data = hci_cmd_data(hdev->req_skb, opcode);
3186
3187         return data;
3188 }
3189
3190 /* Get data from last received event */
3191 void *hci_recv_event_data(struct hci_dev *hdev, __u8 event)
3192 {
3193         struct hci_event_hdr *hdr;
3194         int offset;
3195
3196         if (!hdev->recv_event)
3197                 return NULL;
3198
3199         hdr = (void *)hdev->recv_event->data;
3200         offset = sizeof(*hdr);
3201
3202         if (hdr->evt != event) {
3203                 /* In case of an LE meta event, check for a subevent match */
3204                 if (hdr->evt == HCI_EV_LE_META) {
3205                         struct hci_ev_le_meta *ev;
3206
3207                         ev = (void *)hdev->recv_event->data + offset;
3208                         offset += sizeof(*ev);
3209                         if (ev->subevent == event)
3210                                 goto found;
3211                 }
3212                 return NULL;
3213         }
3214
3215 found:
3216         bt_dev_dbg(hdev, "event 0x%2.2x", event);
3217
3218         return hdev->recv_event->data + offset;
3219 }
3220
3221 /* Send ACL data */
3222 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3223 {
3224         struct hci_acl_hdr *hdr;
3225         int len = skb->len;
3226
3227         skb_push(skb, HCI_ACL_HDR_SIZE);
3228         skb_reset_transport_header(skb);
3229         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3230         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3231         hdr->dlen   = cpu_to_le16(len);
3232 }
3233
3234 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3235                           struct sk_buff *skb, __u16 flags)
3236 {
3237         struct hci_conn *conn = chan->conn;
3238         struct hci_dev *hdev = conn->hdev;
3239         struct sk_buff *list;
3240
3241         skb->len = skb_headlen(skb);
3242         skb->data_len = 0;
3243
3244         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3245
3246         switch (hdev->dev_type) {
3247         case HCI_PRIMARY:
3248                 hci_add_acl_hdr(skb, conn->handle, flags);
3249                 break;
3250         case HCI_AMP:
3251                 hci_add_acl_hdr(skb, chan->handle, flags);
3252                 break;
3253         default:
3254                 bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type);
3255                 return;
3256         }
3257
3258         list = skb_shinfo(skb)->frag_list;
3259         if (!list) {
3260                 /* Non fragmented */
3261                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3262
3263                 skb_queue_tail(queue, skb);
3264         } else {
3265                 /* Fragmented */
3266                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3267
3268                 skb_shinfo(skb)->frag_list = NULL;
3269
3270                 /* Queue all fragments atomically. spin_lock_bh() is
3271                  * needed here because with 6LoWPAN links this function
3272                  * can be called from softirq context, where taking a
3273                  * plain spin lock could deadlock.
3274                  */
3275                 spin_lock_bh(&queue->lock);
3276
3277                 __skb_queue_tail(queue, skb);
3278
3279                 flags &= ~ACL_START;
3280                 flags |= ACL_CONT;
3281                 do {
3282                         skb = list; list = list->next;
3283
3284                         hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
3285                         hci_add_acl_hdr(skb, conn->handle, flags);
3286
3287                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3288
3289                         __skb_queue_tail(queue, skb);
3290                 } while (list);
3291
3292                 spin_unlock_bh(&queue->lock);
3293         }
3294 }
3295
3296 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3297 {
3298         struct hci_dev *hdev = chan->conn->hdev;
3299
3300         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3301
3302         hci_queue_acl(chan, &chan->data_q, skb, flags);
3303
3304         queue_work(hdev->workqueue, &hdev->tx_work);
3305 }
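
/* Illustrative caller (roughly what the L2CAP send path in
 * l2cap_core.c does; the exact flags depend on the channel's flush
 * behaviour):
 *
 *      flags = ACL_START;
 *      hci_send_acl(conn->hchan, skb, flags);
 */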
3306
3307 /* Send SCO data */
3308 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3309 {
3310         struct hci_dev *hdev = conn->hdev;
3311         struct hci_sco_hdr hdr;
3312
3313         BT_DBG("%s len %d", hdev->name, skb->len);
3314
3315         hdr.handle = cpu_to_le16(conn->handle);
3316         hdr.dlen   = skb->len;
3317
3318         skb_push(skb, HCI_SCO_HDR_SIZE);
3319         skb_reset_transport_header(skb);
3320         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3321
3322         hci_skb_pkt_type(skb) = HCI_SCODATA_PKT;
3323
3324         skb_queue_tail(&conn->data_q, skb);
3325         queue_work(hdev->workqueue, &hdev->tx_work);
3326 }
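
/* Note that struct hci_sco_hdr carries an 8-bit dlen, so the payload
 * must fit in a single SCO packet; callers (see sco.c) are expected to
 * respect the connection MTU.
 */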
3327
3328 /* Send ISO data */
3329 static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags)
3330 {
3331         struct hci_iso_hdr *hdr;
3332         int len = skb->len;
3333
3334         skb_push(skb, HCI_ISO_HDR_SIZE);
3335         skb_reset_transport_header(skb);
3336         hdr = (struct hci_iso_hdr *)skb_transport_header(skb);
3337         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3338         hdr->dlen   = cpu_to_le16(len);
3339 }
3340
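/* ISO fragmentation below mirrors the Core spec PB_Flag values:
 * ISO_SINGLE = complete SDU, ISO_START = first fragment, ISO_CONT =
 * continuation and ISO_END = last fragment, packed together with the
 * timestamp bit by hci_iso_flags_pack().
 */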
3341 static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue,
3342                           struct sk_buff *skb)
3343 {
3344         struct hci_dev *hdev = conn->hdev;
3345         struct sk_buff *list;
3346         __u16 flags;
3347
3348         skb->len = skb_headlen(skb);
3349         skb->data_len = 0;
3350
3351         hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3352
3353         list = skb_shinfo(skb)->frag_list;
3354
3355         flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00);
3356         hci_add_iso_hdr(skb, conn->handle, flags);
3357
3358         if (!list) {
3359                 /* Non-fragmented */
3360                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3361
3362                 skb_queue_tail(queue, skb);
3363         } else {
3364                 /* Fragmented */
3365                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3366
3367                 skb_shinfo(skb)->frag_list = NULL;
3368
3369                 __skb_queue_tail(queue, skb);
3370
3371                 do {
3372                         skb = list; list = list->next;
3373
3374                         hci_skb_pkt_type(skb) = HCI_ISODATA_PKT;
3375                         flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END,
3376                                                    0x00);
3377                         hci_add_iso_hdr(skb, conn->handle, flags);
3378
3379                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3380
3381                         __skb_queue_tail(queue, skb);
3382                 } while (list);
3383         }
3384 }
3385
3386 void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb)
3387 {
3388         struct hci_dev *hdev = conn->hdev;
3389
3390         BT_DBG("%s len %d", hdev->name, skb->len);
3391
3392         hci_queue_iso(conn, &conn->data_q, skb);
3393
3394         queue_work(hdev->workqueue, &hdev->tx_work);
3395 }
3396
3397 /* ---- HCI TX task (outgoing data) ---- */
3398
3399 /* HCI Connection scheduler */
3400 static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote)
3401 {
3402         struct hci_dev *hdev;
3403         int cnt, q;
3404
3405         if (!conn) {
3406                 *quote = 0;
3407                 return;
3408         }
3409
3410         hdev = conn->hdev;
3411
3412         switch (conn->type) {
3413         case ACL_LINK:
3414                 cnt = hdev->acl_cnt;
3415                 break;
3416         case AMP_LINK:
3417                 cnt = hdev->block_cnt;
3418                 break;
3419         case SCO_LINK:
3420         case ESCO_LINK:
3421                 cnt = hdev->sco_cnt;
3422                 break;
3423         case LE_LINK:
3424                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3425                 break;
3426         case ISO_LINK:
3427                 cnt = hdev->iso_mtu ? hdev->iso_cnt :
3428                         hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3429                 break;
3430         default:
3431                 cnt = 0;
3432                 bt_dev_err(hdev, "unknown link type %d", conn->type);
3433         }
3434
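        /* Fair share: e.g. 8 free buffers split across 3 busy
         * connections yields a quote of 2 each; a connection always
         * gets at least 1.
         */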
3435         q = cnt / num;
3436         *quote = q ? q : 1;
3437 }
3438
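/* Pick the connection of the given type that has data queued and the
 * fewest packets in flight, so controller buffers are shared fairly
 * across connections.
 */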
3439 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3440                                      int *quote)
3441 {
3442         struct hci_conn_hash *h = &hdev->conn_hash;
3443         struct hci_conn *conn = NULL, *c;
3444         unsigned int num = 0, min = ~0;
3445
3446         /* We don't have to lock the device here. Connections are always
3447          * added and removed with the TX task disabled. */
3448
3449         rcu_read_lock();
3450
3451         list_for_each_entry_rcu(c, &h->list, list) {
3452                 if (c->type != type || skb_queue_empty(&c->data_q))
3453                         continue;
3454
3455                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3456                         continue;
3457
3458                 num++;
3459
3460                 if (c->sent < min) {
3461                         min  = c->sent;
3462                         conn = c;
3463                 }
3464
3465                 if (hci_conn_num(hdev, type) == num)
3466                         break;
3467         }
3468
3469         rcu_read_unlock();
3470
3471         hci_quote_sent(conn, num, quote);
3472
3473         BT_DBG("conn %p quote %d", conn, *quote);
3474         return conn;
3475 }
3476
3477 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3478 {
3479         struct hci_conn_hash *h = &hdev->conn_hash;
3480         struct hci_conn *c;
3481
3482         bt_dev_err(hdev, "link tx timeout");
3483
3484         rcu_read_lock();
3485
3486         /* Kill stalled connections */
3487         list_for_each_entry_rcu(c, &h->list, list) {
3488                 if (c->type == type && c->sent) {
3489                         bt_dev_err(hdev, "killing stalled connection %pMR",
3490                                    &c->dst);
3491                         /* hci_disconnect might sleep, so, we have to release
3492                          * the RCU read lock before calling it.
3493                          */
3494                         rcu_read_unlock();
3495                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3496                         rcu_read_lock();
3497                 }
3498         }
3499
3500         rcu_read_unlock();
3501 }
3502
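/* Pick the channel with the highest priority at the head of its queue;
 * among channels of equal priority, prefer the connection that has
 * sent the least.
 */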
3503 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3504                                       int *quote)
3505 {
3506         struct hci_conn_hash *h = &hdev->conn_hash;
3507         struct hci_chan *chan = NULL;
3508         unsigned int num = 0, min = ~0, cur_prio = 0;
3509         struct hci_conn *conn;
3510         int conn_num = 0;
3511
3512         BT_DBG("%s", hdev->name);
3513
3514         rcu_read_lock();
3515
3516         list_for_each_entry_rcu(conn, &h->list, list) {
3517                 struct hci_chan *tmp;
3518
3519                 if (conn->type != type)
3520                         continue;
3521
3522                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3523                         continue;
3524
3525                 conn_num++;
3526
3527                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3528                         struct sk_buff *skb;
3529
3530                         if (skb_queue_empty(&tmp->data_q))
3531                                 continue;
3532
3533                         skb = skb_peek(&tmp->data_q);
3534                         if (skb->priority < cur_prio)
3535                                 continue;
3536
3537                         if (skb->priority > cur_prio) {
3538                                 num = 0;
3539                                 min = ~0;
3540                                 cur_prio = skb->priority;
3541                         }
3542
3543                         num++;
3544
3545                         if (conn->sent < min) {
3546                                 min  = conn->sent;
3547                                 chan = tmp;
3548                         }
3549                 }
3550
3551                 if (hci_conn_num(hdev, type) == conn_num)
3552                         break;
3553         }
3554
3555         rcu_read_unlock();
3556
3557         if (!chan)
3558                 return NULL;
3559
3560         hci_quote_sent(chan->conn, num, quote);
3561
3562         BT_DBG("chan %p quote %d", chan, *quote);
3563         return chan;
3564 }
3565
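/* Channels that got no airtime in the last scheduling round get the
 * priority of their head skb promoted to HCI_PRIO_MAX - 1 so that
 * higher-priority traffic cannot starve them indefinitely.
 */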
3566 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3567 {
3568         struct hci_conn_hash *h = &hdev->conn_hash;
3569         struct hci_conn *conn;
3570         int num = 0;
3571
3572         BT_DBG("%s", hdev->name);
3573
3574         rcu_read_lock();
3575
3576         list_for_each_entry_rcu(conn, &h->list, list) {
3577                 struct hci_chan *chan;
3578
3579                 if (conn->type != type)
3580                         continue;
3581
3582                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3583                         continue;
3584
3585                 num++;
3586
3587                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3588                         struct sk_buff *skb;
3589
3590                         if (chan->sent) {
3591                                 chan->sent = 0;
3592                                 continue;
3593                         }
3594
3595                         if (skb_queue_empty(&chan->data_q))
3596                                 continue;
3597
3598                         skb = skb_peek(&chan->data_q);
3599                         if (skb->priority >= HCI_PRIO_MAX - 1)
3600                                 continue;
3601
3602                         skb->priority = HCI_PRIO_MAX - 1;
3603
3604                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3605                                skb->priority);
3606                 }
3607
3608                 if (hci_conn_num(hdev, type) == num)
3609                         break;
3610         }
3611
3612         rcu_read_unlock();
3613 }
3615
3616 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3617 {
3618         /* Calculate count of blocks used by this packet */
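        /* e.g. with a (hypothetical) 256-byte block_len, a 1024-byte skb
         * (4-byte ACL header + 1020 bytes of payload) needs
         * DIV_ROUND_UP(1020, 256) = 4 blocks.
         */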
3619         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3620 }
3621
3622 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
3623 {
3624         unsigned long last_tx;
3625
3626         if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
3627                 return;
3628
3629         switch (type) {
3630         case LE_LINK:
3631                 last_tx = hdev->le_last_tx;
3632                 break;
3633         default:
3634                 last_tx = hdev->acl_last_tx;
3635                 break;
3636         }
3637
3638         /* tx timeout must be longer than maximum link supervision timeout
3639          * (40.9 seconds)
3640          */
3641         if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
3642                 hci_link_tx_to(hdev, type);
3643 }
3644
3645 /* Schedule SCO */
3646 static void hci_sched_sco(struct hci_dev *hdev)
3647 {
3648         struct hci_conn *conn;
3649         struct sk_buff *skb;
3650         int quote;
3651
3652         BT_DBG("%s", hdev->name);
3653
3654         if (!hci_conn_num(hdev, SCO_LINK))
3655                 return;
3656
3657         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3658                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3659                         BT_DBG("skb %p len %d", skb, skb->len);
3660                         hci_send_frame(hdev, skb);
3661
3662                         conn->sent++;
3663                         if (conn->sent == ~0)
3664                                 conn->sent = 0;
3665                 }
3666         }
3667 }
3668
3669 static void hci_sched_esco(struct hci_dev *hdev)
3670 {
3671         struct hci_conn *conn;
3672         struct sk_buff *skb;
3673         int quote;
3674
3675         BT_DBG("%s", hdev->name);
3676
3677         if (!hci_conn_num(hdev, ESCO_LINK))
3678                 return;
3679
3680         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3681                                                      &quote))) {
3682                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3683                         BT_DBG("skb %p len %d", skb, skb->len);
3684                         hci_send_frame(hdev, skb);
3685
3686                         conn->sent++;
3687                         if (conn->sent == ~0)
3688                                 conn->sent = 0;
3689                 }
3690         }
3691 }
3692
3693 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3694 {
3695         unsigned int cnt = hdev->acl_cnt;
3696         struct hci_chan *chan;
3697         struct sk_buff *skb;
3698         int quote;
3699
3700         __check_timeout(hdev, cnt, ACL_LINK);
3701
3702         while (hdev->acl_cnt &&
3703                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3704                 u32 priority = (skb_peek(&chan->data_q))->priority;
3705                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3706                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3707                                skb->len, skb->priority);
3708
3709                         /* Stop if priority has changed */
3710                         if (skb->priority < priority)
3711                                 break;
3712
3713                         skb = skb_dequeue(&chan->data_q);
3714
3715                         hci_conn_enter_active_mode(chan->conn,
3716                                                    bt_cb(skb)->force_active);
3717
3718                         hci_send_frame(hdev, skb);
3719                         hdev->acl_last_tx = jiffies;
3720
3721                         hdev->acl_cnt--;
3722                         chan->sent++;
3723                         chan->conn->sent++;
3724
3725                         /* Send pending SCO packets right away */
3726                         hci_sched_sco(hdev);
3727                         hci_sched_esco(hdev);
3728                 }
3729         }
3730
3731         if (cnt != hdev->acl_cnt)
3732                 hci_prio_recalculate(hdev, ACL_LINK);
3733 }
3734
3735 static void hci_sched_acl_blk(struct hci_dev *hdev)
3736 {
3737         unsigned int cnt = hdev->block_cnt;
3738         struct hci_chan *chan;
3739         struct sk_buff *skb;
3740         int quote;
3741         u8 type;
3742
3743         BT_DBG("%s", hdev->name);
3744
3745         if (hdev->dev_type == HCI_AMP)
3746                 type = AMP_LINK;
3747         else
3748                 type = ACL_LINK;
3749
3750         __check_timeout(hdev, cnt, type);
3751
3752         while (hdev->block_cnt > 0 &&
3753                (chan = hci_chan_sent(hdev, type, &quote))) {
3754                 u32 priority = (skb_peek(&chan->data_q))->priority;
3755                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3756                         int blocks;
3757
3758                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3759                                skb->len, skb->priority);
3760
3761                         /* Stop if priority has changed */
3762                         if (skb->priority < priority)
3763                                 break;
3764
3765                         skb = skb_dequeue(&chan->data_q);
3766
3767                         blocks = __get_blocks(hdev, skb);
3768                         if (blocks > hdev->block_cnt)
3769                                 return;
3770
3771                         hci_conn_enter_active_mode(chan->conn,
3772                                                    bt_cb(skb)->force_active);
3773
3774                         hci_send_frame(hdev, skb);
3775                         hdev->acl_last_tx = jiffies;
3776
3777                         hdev->block_cnt -= blocks;
3778                         quote -= blocks;
3779
3780                         chan->sent += blocks;
3781                         chan->conn->sent += blocks;
3782                 }
3783         }
3784
3785         if (cnt != hdev->block_cnt)
3786                 hci_prio_recalculate(hdev, type);
3787 }
3788
3789 static void hci_sched_acl(struct hci_dev *hdev)
3790 {
3791         BT_DBG("%s", hdev->name);
3792
3793         /* Nothing to do if a BR/EDR (primary) controller has no ACL links */
3794         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
3795                 return;
3796
3797         /* Nothing to do if an AMP controller has no AMP links */
3798         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3799                 return;
3800
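        /* Packet-based flow control counts whole ACL packets (acl_cnt),
         * while block-based flow control, as used by AMP controllers,
         * counts fixed-size data blocks (block_cnt).
         */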
3801         switch (hdev->flow_ctl_mode) {
3802         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3803                 hci_sched_acl_pkt(hdev);
3804                 break;
3805
3806         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3807                 hci_sched_acl_blk(hdev);
3808                 break;
3809         }
3810 }
3811
3812 static void hci_sched_le(struct hci_dev *hdev)
3813 {
3814         struct hci_chan *chan;
3815         struct sk_buff *skb;
3816         int quote, cnt, tmp;
3817
3818         BT_DBG("%s", hdev->name);
3819
3820         if (!hci_conn_num(hdev, LE_LINK))
3821                 return;
3822
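        /* A controller that reports no dedicated LE buffers (le_pkts == 0)
         * shares its ACL buffer pool with LE traffic, so fall back to
         * acl_cnt in that case.
         */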
3823         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
3824
3825         __check_timeout(hdev, cnt, LE_LINK);
3826
3827         tmp = cnt;
3828         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
3829                 u32 priority = (skb_peek(&chan->data_q))->priority;
3830                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3831                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3832                                skb->len, skb->priority);
3833
3834                         /* Stop if priority has changed */
3835                         if (skb->priority < priority)
3836                                 break;
3837
3838                         skb = skb_dequeue(&chan->data_q);
3839
3840                         hci_send_frame(hdev, skb);
3841                         hdev->le_last_tx = jiffies;
3842
3843                         cnt--;
3844                         chan->sent++;
3845                         chan->conn->sent++;
3846
3847                         /* Send pending SCO packets right away */
3848                         hci_sched_sco(hdev);
3849                         hci_sched_esco(hdev);
3850                 }
3851         }
3852
3853         if (hdev->le_pkts)
3854                 hdev->le_cnt = cnt;
3855         else
3856                 hdev->acl_cnt = cnt;
3857
3858         if (cnt != tmp)
3859                 hci_prio_recalculate(hdev, LE_LINK);
3860 }
3861
3862 /* Schedule ISO (CIS/BIS) */
3863 static void hci_sched_iso(struct hci_dev *hdev)
3864 {
3865         struct hci_conn *conn;
3866         struct sk_buff *skb;
3867         int quote, *cnt;
3868
3869         BT_DBG("%s", hdev->name);
3870
3871         if (!hci_conn_num(hdev, ISO_LINK))
3872                 return;
3873
3874         cnt = hdev->iso_pkts ? &hdev->iso_cnt :
3875                 hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
3876         while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
3877                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3878                         BT_DBG("skb %p len %d", skb, skb->len);
3879                         hci_send_frame(hdev, skb);
3880
3881                         conn->sent++;
3882                         if (conn->sent == ~0)
3883                                 conn->sent = 0;
3884                         (*cnt)--;
3885                 }
3886         }
3887 }
3888
3889 static void hci_tx_work(struct work_struct *work)
3890 {
3891         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
3892         struct sk_buff *skb;
3893
3894         BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
3895                hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
3896
3897         if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
3898                 /* Schedule queues and send pending data to the HCI driver */
3899                 hci_sched_sco(hdev);
3900                 hci_sched_esco(hdev);
3901                 hci_sched_iso(hdev);
3902                 hci_sched_acl(hdev);
3903                 hci_sched_le(hdev);
3904         }
3905
3906         /* Send next queued raw (unknown type) packet */
3907         while ((skb = skb_dequeue(&hdev->raw_q)))
3908                 hci_send_frame(hdev, skb);
3909 }
3910
3911 /* ----- HCI RX task (incoming data processing) ----- */
3912
3913 /* ACL data packet */
3914 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3915 {
3916         struct hci_acl_hdr *hdr = (void *) skb->data;
3917         struct hci_conn *conn;
3918         __u16 handle, flags;
3919
3920         skb_pull(skb, HCI_ACL_HDR_SIZE);
3921
3922         handle = __le16_to_cpu(hdr->handle);
3923         flags  = hci_flags(handle);
3924         handle = hci_handle(handle);
3925
3926         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3927                handle, flags);
3928
3929         hdev->stat.acl_rx++;
3930
3931         hci_dev_lock(hdev);
3932         conn = hci_conn_hash_lookup_handle(hdev, handle);
3933         hci_dev_unlock(hdev);
3934
3935         if (conn) {
3936                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
3937
3938                 /* Send to upper protocol */
3939                 l2cap_recv_acldata(conn, skb, flags);
3940                 return;
3941         }
3942
3943         bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
3944                    handle);
3945
3946         kfree_skb(skb);
3947 }
3948
3949 /* SCO data packet */
3950 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3951 {
3952         struct hci_sco_hdr *hdr = (void *) skb->data;
3953         struct hci_conn *conn;
3954         __u16 handle, flags;
3955
3956         skb_pull(skb, HCI_SCO_HDR_SIZE);
3957
3958         handle = __le16_to_cpu(hdr->handle);
3959         flags  = hci_flags(handle);
3960         handle = hci_handle(handle);
3961
3962         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
3963                handle, flags);
3964
3965         hdev->stat.sco_rx++;
3966
3967         hci_dev_lock(hdev);
3968         conn = hci_conn_hash_lookup_handle(hdev, handle);
3969         hci_dev_unlock(hdev);
3970
3971         if (conn) {
3972                 /* Send to upper protocol */
3973                 hci_skb_pkt_status(skb) = flags & 0x03;
3974                 sco_recv_scodata(conn, skb);
3975                 return;
3976         }
3977
3978         bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
3979                                handle);
3980
3981         kfree_skb(skb);
3982 }
3983
3984 static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
3985 {
3986         struct hci_iso_hdr *hdr;
3987         struct hci_conn *conn;
3988         __u16 handle, flags;
3989
3990         hdr = skb_pull_data(skb, sizeof(*hdr));
3991         if (!hdr) {
3992                 bt_dev_err(hdev, "ISO packet too small");
3993                 goto drop;
3994         }
3995
3996         handle = __le16_to_cpu(hdr->handle);
3997         flags  = hci_flags(handle);
3998         handle = hci_handle(handle);
3999
4000         bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
4001                    handle, flags);
4002
4003         hci_dev_lock(hdev);
4004         conn = hci_conn_hash_lookup_handle(hdev, handle);
4005         hci_dev_unlock(hdev);
4006
4007         if (!conn) {
4008                 bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
4009                            handle);
4010                 goto drop;
4011         }
4012
4013         /* Send to upper protocol */
4014         iso_recv(conn, skb, flags);
4015         return;
4016
4017 drop:
4018         kfree_skb(skb);
4019 }
4020
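/* A request is complete when the command queue is empty or when the
 * next queued command is flagged as the start of a new request.
 */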
4021 static bool hci_req_is_complete(struct hci_dev *hdev)
4022 {
4023         struct sk_buff *skb;
4024
4025         skb = skb_peek(&hdev->cmd_q);
4026         if (!skb)
4027                 return true;
4028
4029         return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
4030 }
4031
4032 static void hci_resend_last(struct hci_dev *hdev)
4033 {
4034         struct hci_command_hdr *sent;
4035         struct sk_buff *skb;
4036         u16 opcode;
4037
4038         if (!hdev->sent_cmd)
4039                 return;
4040
4041         sent = (void *) hdev->sent_cmd->data;
4042         opcode = __le16_to_cpu(sent->opcode);
4043         if (opcode == HCI_OP_RESET)
4044                 return;
4045
4046         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4047         if (!skb)
4048                 return;
4049
4050         skb_queue_head(&hdev->cmd_q, skb);
4051         queue_work(hdev->workqueue, &hdev->cmd_work);
4052 }
4053
4054 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
4055                           hci_req_complete_t *req_complete,
4056                           hci_req_complete_skb_t *req_complete_skb)
4057 {
4058         struct sk_buff *skb;
4059         unsigned long flags;
4060
4061         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4062
4063         /* If the completed command doesn't match the last one that was
4064          * sent, it needs special handling.
4065          */
4066         if (!hci_sent_cmd_data(hdev, opcode)) {
4067                 /* Some CSR based controllers generate a spontaneous
4068                  * reset complete event during init and any pending
4069                  * command will never be completed. In such a case we
4070                  * need to resend whatever was the last sent
4071                  * command.
4072                  */
4073                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4074                         hci_resend_last(hdev);
4075
4076                 return;
4077         }
4078
4079         /* If we reach this point this event matches the last command sent */
4080         hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
4081
4082         /* If the command succeeded and there's still more commands in
4083          * this request the request is not yet complete.
4084          */
4085         if (!status && !hci_req_is_complete(hdev))
4086                 return;
4087
4088         skb = hdev->req_skb;
4089
4090         /* If this was the last command in a request the complete
4091          * callback would be found in hdev->req_skb instead of the
4092          * command queue (hdev->cmd_q).
4093          */
4094         if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
4095                 *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4096                 return;
4097         }
4098
4099         if (skb && bt_cb(skb)->hci.req_complete) {
4100                 *req_complete = bt_cb(skb)->hci.req_complete;
4101                 return;
4102         }
4103
4104         /* Remove all pending commands belonging to this request */
4105         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4106         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4107                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
4108                         __skb_queue_head(&hdev->cmd_q, skb);
4109                         break;
4110                 }
4111
4112                 if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
4113                         *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
4114                 else
4115                         *req_complete = bt_cb(skb)->hci.req_complete;
4116                 dev_kfree_skb_irq(skb);
4117         }
4118         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4119 }
4120
4121 static void hci_rx_work(struct work_struct *work)
4122 {
4123         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4124         struct sk_buff *skb;
4125
4126         BT_DBG("%s", hdev->name);
4127
4128         /* The kcov_remote functions are used to collect packet-parsing
4129          * coverage from this background thread and to associate that
4130          * coverage with the thread of the syscall that originally
4131          * injected the packet. This helps with fuzzing the kernel.
4132          */
4133         for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
4134                 kcov_remote_start_common(skb_get_kcov_handle(skb));
4135
4136                 /* Send copy to monitor */
4137                 hci_send_to_monitor(hdev, skb);
4138
4139                 if (atomic_read(&hdev->promisc)) {
4140                         /* Send copy to the sockets */
4141                         hci_send_to_sock(hdev, skb);
4142                 }
4143
4144                 /* If the device has been opened in HCI_USER_CHANNEL,
4145                  * userspace has exclusive access to the device.
4146                  * While the device is in HCI_INIT we still need to
4147                  * process packets for the driver in order to complete
4148                  * its setup().
4149                  */
4150                 if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
4151                     !test_bit(HCI_INIT, &hdev->flags)) {
4152                         kfree_skb(skb);
4153                         continue;
4154                 }
4155
4156                 if (test_bit(HCI_INIT, &hdev->flags)) {
4157                         /* Don't process data packets in this state. */
4158                         switch (hci_skb_pkt_type(skb)) {
4159                         case HCI_ACLDATA_PKT:
4160                         case HCI_SCODATA_PKT:
4161                         case HCI_ISODATA_PKT:
4162                                 kfree_skb(skb);
4163                                 continue;
4164                         }
4165                 }
4166
4167                 /* Process frame */
4168                 switch (hci_skb_pkt_type(skb)) {
4169                 case HCI_EVENT_PKT:
4170                         BT_DBG("%s Event packet", hdev->name);
4171                         hci_event_packet(hdev, skb);
4172                         break;
4173
4174                 case HCI_ACLDATA_PKT:
4175                         BT_DBG("%s ACL data packet", hdev->name);
4176                         hci_acldata_packet(hdev, skb);
4177                         break;
4178
4179                 case HCI_SCODATA_PKT:
4180                         BT_DBG("%s SCO data packet", hdev->name);
4181                         hci_scodata_packet(hdev, skb);
4182                         break;
4183
4184                 case HCI_ISODATA_PKT:
4185                         BT_DBG("%s ISO data packet", hdev->name);
4186                         hci_isodata_packet(hdev, skb);
4187                         break;
4188
4189                 default:
4190                         kfree_skb(skb);
4191                         break;
4192                 }
4193         }
4194 }
4195
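/* Send a single queued command to the driver, keeping a clone in
 * hdev->sent_cmd so that a later hci_sent_cmd_data() lookup can match
 * the response against it.
 */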
4196 static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
4197 {
4198         int err;
4199
4200         bt_dev_dbg(hdev, "skb %p", skb);
4201
4202         kfree_skb(hdev->sent_cmd);
4203
4204         hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4205         if (!hdev->sent_cmd) {
4206                 skb_queue_head(&hdev->cmd_q, skb);
4207                 queue_work(hdev->workqueue, &hdev->cmd_work);
4208                 return;
4209         }
4210
4211         err = hci_send_frame(hdev, skb);
4212         if (err < 0) {
4213                 hci_cmd_sync_cancel_sync(hdev, -err);
4214                 return;
4215         }
4216
4217         if (hci_req_status_pend(hdev) &&
4218             !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
4219                 kfree_skb(hdev->req_skb);
4220                 hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4221         }
4222
4223         atomic_dec(&hdev->cmd_cnt);
4224 }
4225
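/* Drain the command queue one command at a time: cmd_cnt reflects the
 * command credit advertised by the controller and is decremented on
 * send; event processing replenishes it when the controller reports
 * new credits.
 */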
4226 static void hci_cmd_work(struct work_struct *work)
4227 {
4228         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4229         struct sk_buff *skb;
4230
4231         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4232                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4233
4234         /* Send queued commands */
4235         if (atomic_read(&hdev->cmd_cnt)) {
4236                 skb = skb_dequeue(&hdev->cmd_q);
4237                 if (!skb)
4238                         return;
4239
4240                 hci_send_cmd_sync(hdev, skb);
4241
4242                 rcu_read_lock();
4243                 if (test_bit(HCI_RESET, &hdev->flags) ||
4244                     hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
4245                         cancel_delayed_work(&hdev->cmd_timer);
4246                 else
4247                         queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
4248                                            HCI_CMD_TIMEOUT);
4249                 rcu_read_unlock();
4250         }
4251 }