/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase check if the completed command matches
         * the last init command, and if not just return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);
                struct sk_buff *skb;

                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */

                if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
                        return;

                skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
                if (skb) {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }

                return;
        }

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
                         void (*req)(struct hci_dev *hdev, unsigned long opt),
                         unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

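/* Locked wrapper around __hci_request(): fails with -ENETDOWN if the
 * device is not up and serializes all requests via the request lock.
 */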
static int hci_request(struct hci_dev *hdev,
                       void (*req)(struct hci_dev *hdev, unsigned long opt),
                       unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

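/* BR/EDR controller init sequence: the mandatory post-reset reads
 * (features, version, buffer size, address, class, name, voice setting)
 * followed by optional setup (clear event filter, connection accept
 * timeout, delete stored link keys). All commands are fired off here;
 * their responses are handled asynchronously by the event path.
 */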
static void bredr_init(struct hci_dev *hdev)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Mandatory initialization */

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 1;
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void amp_init(struct hci_dev *hdev)
{
        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                queue_work(hdev->workqueue, &hdev->cmd_work);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(hdev, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(hdev);
                break;

        case HCI_AMP:
                amp_init(hdev);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s", hdev->name);

        /* Read LE buffer size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

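/* Drive the discovery state machine and emit mgmt "discovering" events
 * on the transitions to STOPPED and FINDING.
 */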
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

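/* Re-insert @ie so that the resolve list stays ordered by signal
 * strength: entries with a pending name request keep their position,
 * and stronger entries (smaller |RSSI|) get their names resolved first.
 */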
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

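/* Add a new inquiry cache entry for @data or refresh an existing one.
 * Returns true if the remote name is already known (so no name request
 * is needed) and false otherwise, including on allocation failure.
 */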
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

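/* Copy up to @num entries from the inquiry cache into @buf as an array
 * of struct inquiry_info and return how many were copied. Runs under
 * the device lock and must not sleep.
 */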
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

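/* Handler for the HCIINQUIRY ioctl: flush the cache and run a fresh
 * inquiry if the cached results are stale, empty or IREQ_CACHE_FLUSH is
 * set, then dump the inquiry cache back to user space.
 */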
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and
         * then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

/* ---- HCI ioctl helpers ---- */

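/* Bring up an HCI device: open the driver, run the HCI init sequence
 * (unless the device is raw) and on success set HCI_UP and notify the
 * stack. If init fails, all queued work is flushed and the driver is
 * closed again.
 */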
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non-BR/EDR controllers as raw devices if
         * enable_hs is not set.
         */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

                if (lmp_host_le_capable(hdev))
                        ret = __hci_request(hdev, hci_le_init_req, 0,
                                            HCI_INIT_TIMEOUT);

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

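/* Common close path used by hci_dev_close(), rfkill and device
 * unregistration: cancel pending work and requests, flush the queues,
 * optionally reset the controller (HCI_QUIRK_RESET_ON_CLOSE) and call
 * the driver's close().
 */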
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Clear flags */
        hdev->flags = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                  HCI_INIT_TIMEOUT);
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                          HCI_INIT_TIMEOUT);
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                  HCI_INIT_TIMEOUT);
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                  HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                  HCI_INIT_TIMEOUT);
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

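/* Handler for the HCIGETDEVLIST ioctl: copy the id and flags of up to
 * dev_num registered devices back to user space.
 */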
int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->uuids) {
                struct bt_uuid *uuid;

                uuid = list_entry(p, struct bt_uuid, list);

                list_del(p);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

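/* Decide whether a link key should be stored persistently, based on the
 * key type and the bonding requirements of both sides. Debug keys and
 * unexpected changed-combination keys are never stored.
 */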
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                               u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv ||
                    memcmp(rand, k->rand, sizeof(k->rand)))
                        continue;

                return k;
        }

        return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
                    bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;
        bool persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
            (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, HCI_LINK_KEY_SIZE);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        if (conn)
                conn->flush_key = !persistent;

        return 0;
}

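/* Store or update an SMP Short/Long Term Key for the given address and,
 * for genuinely new LTKs, notify the management interface.
 */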
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
                int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
                ediv, u8 rand[8])
{
        struct smp_ltk *key, *old_key;

        if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
                return 0;

        old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->enc_size = enc_size;
        key->type = type;
        memcpy(key->rand, rand, sizeof(key->rand));

        if (!new_key)
                return 0;

        if (type & HCI_SMP_LTK)
                mgmt_new_ltk(hdev, key, 1);

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del(&key->list);
        kfree(key);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr))
                        continue;

                BT_DBG("%s removing %pMR", hdev->name, bdaddr);

                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        if (hdev->sent_cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                u16 opcode = __le16_to_cpu(sent->opcode);

                BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
        } else {
                BT_ERR("%s command tx timeout", hdev->name);
        }

        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list)
                if (bacmp(bdaddr, &data->bdaddr) == 0)
                        return data;

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %pMR", hdev->name, bdaddr);

        list_del(&data->list);
        kfree(data);

        return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }

        return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
                            u8 *randomizer)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);

        if (!data) {
                data = kmalloc(sizeof(*data), GFP_ATOMIC);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                list_add(&data->list, &hdev->remote_oob_data);
        }

        memcpy(data->hash, hash, sizeof(data->hash));
        memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

        BT_DBG("%s for %pMR", hdev->name, bdaddr);

        return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->blacklist, list)
                if (bacmp(bdaddr, &b->bdaddr) == 0)
                        return b;

        return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->blacklist) {
                struct bdaddr_list *b;

                b = list_entry(p, struct bdaddr_list, list);

                list_del(p);
                kfree(b);
        }

        return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return -EBADF;

        if (hci_blacklist_lookup(hdev, bdaddr))
                return -EEXIST;

        entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, bdaddr);

        list_add(&entry->list, &hdev->blacklist);

        return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return hci_blacklist_clear(hdev);

        entry = hci_blacklist_lookup(hdev, bdaddr);
        if (!entry)
                return -ENOENT;

        list_del(&entry->list);
        kfree(entry);

        return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
        struct le_scan_params *param = (struct le_scan_params *) opt;
        struct hci_cp_le_set_scan_param cp;

        memset(&cp, 0, sizeof(cp));
        cp.type = param->type;
        cp.interval = cpu_to_le16(param->interval);
        cp.window = cpu_to_le16(param->window);

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = 1;
        cp.filter_dup = 1;

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

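/* Program the LE scan parameters, enable scanning and schedule
 * le_scan_disable to stop the scan after @timeout ms. Returns
 * -EINPROGRESS if an LE scan is already active.
 */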
1537 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1538                           u16 window, int timeout)
1539 {
1540         long timeo = msecs_to_jiffies(3000);
1541         struct le_scan_params param;
1542         int err;
1543
1544         BT_DBG("%s", hdev->name);
1545
1546         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1547                 return -EINPROGRESS;
1548
1549         param.type = type;
1550         param.interval = interval;
1551         param.window = window;
1552
1553         hci_req_lock(hdev);
1554
1555         err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1556                             timeo);
1557         if (!err)
1558                 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1559
1560         hci_req_unlock(hdev);
1561
1562         if (err < 0)
1563                 return err;
1564
1565         schedule_delayed_work(&hdev->le_scan_disable,
1566                               msecs_to_jiffies(timeout));
1567
1568         return 0;
1569 }
1570
1571 int hci_cancel_le_scan(struct hci_dev *hdev)
1572 {
1573         BT_DBG("%s", hdev->name);
1574
1575         if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1576                 return -EALREADY;
1577
1578         if (cancel_delayed_work(&hdev->le_scan_disable)) {
1579                 struct hci_cp_le_set_scan_enable cp;
1580
1581                 /* Send HCI command to disable LE Scan */
1582                 memset(&cp, 0, sizeof(cp));
1583                 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1584         }
1585
1586         return 0;
1587 }
1588
1589 static void le_scan_disable_work(struct work_struct *work)
1590 {
1591         struct hci_dev *hdev = container_of(work, struct hci_dev,
1592                                             le_scan_disable.work);
1593         struct hci_cp_le_set_scan_enable cp;
1594
1595         BT_DBG("%s", hdev->name);
1596
1597         memset(&cp, 0, sizeof(cp));
1598
1599         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1600 }
1601
1602 static void le_scan_work(struct work_struct *work)
1603 {
1604         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1605         struct le_scan_params *param = &hdev->le_scan_params;
1606
1607         BT_DBG("%s", hdev->name);
1608
1609         hci_do_le_scan(hdev, param->type, param->interval, param->window,
1610                        param->timeout);
1611 }
1612
1613 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1614                 int timeout)
1615 {
1616         struct le_scan_params *param = &hdev->le_scan_params;
1617
1618         BT_DBG("%s", hdev->name);
1619
1620         if (work_busy(&hdev->le_scan))
1621                 return -EINPROGRESS;
1622
1623         param->type = type;
1624         param->interval = interval;
1625         param->window = window;
1626         param->timeout = timeout;
1627
1628         queue_work(system_long_wq, &hdev->le_scan);
1629
1630         return 0;
1631 }
1632
1633 /* Alloc HCI device */
1634 struct hci_dev *hci_alloc_dev(void)
1635 {
1636         struct hci_dev *hdev;
1637
1638         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1639         if (!hdev)
1640                 return NULL;
1641
1642         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1643         hdev->esco_type = (ESCO_HV1);
1644         hdev->link_mode = (HCI_LM_ACCEPT);
1645         hdev->io_capability = 0x03; /* No Input No Output */
1646
1647         hdev->sniff_max_interval = 800;
1648         hdev->sniff_min_interval = 80;
1649
1650         mutex_init(&hdev->lock);
1651         mutex_init(&hdev->req_lock);
1652
1653         INIT_LIST_HEAD(&hdev->mgmt_pending);
1654         INIT_LIST_HEAD(&hdev->blacklist);
1655         INIT_LIST_HEAD(&hdev->uuids);
1656         INIT_LIST_HEAD(&hdev->link_keys);
1657         INIT_LIST_HEAD(&hdev->long_term_keys);
1658         INIT_LIST_HEAD(&hdev->remote_oob_data);
1659         INIT_LIST_HEAD(&hdev->conn_hash.list);
1660
1661         INIT_WORK(&hdev->rx_work, hci_rx_work);
1662         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1663         INIT_WORK(&hdev->tx_work, hci_tx_work);
1664         INIT_WORK(&hdev->power_on, hci_power_on);
1665         INIT_WORK(&hdev->le_scan, le_scan_work);
1666
1667         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1668         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1669         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1670
1671         skb_queue_head_init(&hdev->driver_init);
1672         skb_queue_head_init(&hdev->rx_q);
1673         skb_queue_head_init(&hdev->cmd_q);
1674         skb_queue_head_init(&hdev->raw_q);
1675
1676         init_waitqueue_head(&hdev->req_wait_q);
1677
1678         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
1679
1680         hci_init_sysfs(hdev);
1681         discovery_init(hdev);
1682
1683         return hdev;
1684 }
1685 EXPORT_SYMBOL(hci_alloc_dev);
1686
1687 /* Free HCI device */
1688 void hci_free_dev(struct hci_dev *hdev)
1689 {
1690         skb_queue_purge(&hdev->driver_init);
1691
1692         /* will free via device release */
1693         put_device(&hdev->dev);
1694 }
1695 EXPORT_SYMBOL(hci_free_dev);
1696
1697 /* Register HCI device */
1698 int hci_register_dev(struct hci_dev *hdev)
1699 {
1700         int id, error;
1701
1702         if (!hdev->open || !hdev->close)
1703                 return -EINVAL;
1704
1705         /* Do not allow HCI_AMP devices to register at index 0,
1706          * so the index can be used as the AMP controller ID.
1707          */
1708         switch (hdev->dev_type) {
1709         case HCI_BREDR:
1710                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1711                 break;
1712         case HCI_AMP:
1713                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1714                 break;
1715         default:
1716                 return -EINVAL;
1717         }
1718
1719         if (id < 0)
1720                 return id;
1721
1722         sprintf(hdev->name, "hci%d", id);
1723         hdev->id = id;
1724
1725         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1726
1727         write_lock(&hci_dev_list_lock);
1728         list_add(&hdev->list, &hci_dev_list);
1729         write_unlock(&hci_dev_list_lock);
1730
1731         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1732                                           WQ_MEM_RECLAIM, 1);
1733         if (!hdev->workqueue) {
1734                 error = -ENOMEM;
1735                 goto err;
1736         }
1737
1738         error = hci_add_sysfs(hdev);
1739         if (error < 0)
1740                 goto err_wqueue;
1741
1742         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1743                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1744                                     hdev);
1745         if (hdev->rfkill) {
1746                 if (rfkill_register(hdev->rfkill) < 0) {
1747                         rfkill_destroy(hdev->rfkill);
1748                         hdev->rfkill = NULL;
1749                 }
1750         }
1751
1752         set_bit(HCI_SETUP, &hdev->dev_flags);
1753
1754         if (hdev->dev_type != HCI_AMP)
1755                 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1756
1757         hci_notify(hdev, HCI_DEV_REG);
1758         hci_dev_hold(hdev);
1759
1760         schedule_work(&hdev->power_on);
1761
1762         return id;
1763
1764 err_wqueue:
1765         destroy_workqueue(hdev->workqueue);
1766 err:
1767         ida_simple_remove(&hci_index_ida, hdev->id);
1768         write_lock(&hci_dev_list_lock);
1769         list_del(&hdev->list);
1770         write_unlock(&hci_dev_list_lock);
1771
1772         return error;
1773 }
1774 EXPORT_SYMBOL(hci_register_dev);
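
/* A minimal registration sketch, loosely following what a transport
 * driver probe routine does (the foo_* names are hypothetical; see
 * drivers/bluetooth/btusb.c for a real example):
 *
 *      struct hci_dev *hdev = hci_alloc_dev();
 *      if (!hdev)
 *              return -ENOMEM;
 *
 *      hdev->bus   = HCI_USB;
 *      hdev->open  = foo_open;
 *      hdev->close = foo_close;
 *      hdev->flush = foo_flush;
 *      hdev->send  = foo_send;
 *      hci_set_drvdata(hdev, data);
 *
 *      err = hci_register_dev(hdev);
 *      if (err < 0)
 *              hci_free_dev(hdev);
 */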
1775
1776 /* Unregister HCI device */
1777 void hci_unregister_dev(struct hci_dev *hdev)
1778 {
1779         int i, id;
1780
1781         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1782
1783         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1784
1785         id = hdev->id;
1786
1787         write_lock(&hci_dev_list_lock);
1788         list_del(&hdev->list);
1789         write_unlock(&hci_dev_list_lock);
1790
1791         hci_dev_do_close(hdev);
1792
1793         for (i = 0; i < NUM_REASSEMBLY; i++)
1794                 kfree_skb(hdev->reassembly[i]);
1795
1796         if (!test_bit(HCI_INIT, &hdev->flags) &&
1797             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1798                 hci_dev_lock(hdev);
1799                 mgmt_index_removed(hdev);
1800                 hci_dev_unlock(hdev);
1801         }
1802
1803         /* mgmt_index_removed should take care of emptying the
1804          * pending list */
1805         BUG_ON(!list_empty(&hdev->mgmt_pending));
1806
1807         hci_notify(hdev, HCI_DEV_UNREG);
1808
1809         if (hdev->rfkill) {
1810                 rfkill_unregister(hdev->rfkill);
1811                 rfkill_destroy(hdev->rfkill);
1812         }
1813
1814         hci_del_sysfs(hdev);
1815
1816         destroy_workqueue(hdev->workqueue);
1817
1818         hci_dev_lock(hdev);
1819         hci_blacklist_clear(hdev);
1820         hci_uuids_clear(hdev);
1821         hci_link_keys_clear(hdev);
1822         hci_smp_ltks_clear(hdev);
1823         hci_remote_oob_data_clear(hdev);
1824         hci_dev_unlock(hdev);
1825
1826         hci_dev_put(hdev);
1827
1828         ida_simple_remove(&hci_index_ida, id);
1829 }
1830 EXPORT_SYMBOL(hci_unregister_dev);
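
/* The usual teardown order in a driver disconnect/remove path is
 * hci_unregister_dev(hdev) followed by hci_free_dev(hdev).  Note that
 * hci_unregister_dev() sleeps (hci_dev_do_close() takes mutexes and
 * flushes work), so it must not be called from atomic context.
 */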
1831
1832 /* Suspend HCI device */
1833 int hci_suspend_dev(struct hci_dev *hdev)
1834 {
1835         hci_notify(hdev, HCI_DEV_SUSPEND);
1836         return 0;
1837 }
1838 EXPORT_SYMBOL(hci_suspend_dev);
1839
1840 /* Resume HCI device */
1841 int hci_resume_dev(struct hci_dev *hdev)
1842 {
1843         hci_notify(hdev, HCI_DEV_RESUME);
1844         return 0;
1845 }
1846 EXPORT_SYMBOL(hci_resume_dev);
1847
1848 /* Receive frame from HCI drivers */
1849 int hci_recv_frame(struct sk_buff *skb)
1850 {
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
                      !test_bit(HCI_INIT, &hdev->flags))) {
1854                 kfree_skb(skb);
1855                 return -ENXIO;
1856         }
1857
        /* Incoming skb */
1859         bt_cb(skb)->incoming = 1;
1860
1861         /* Time stamp */
1862         __net_timestamp(skb);
1863
1864         skb_queue_tail(&hdev->rx_q, skb);
1865         queue_work(hdev->workqueue, &hdev->rx_work);
1866
1867         return 0;
1868 }
1869 EXPORT_SYMBOL(hci_recv_frame);
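
/* A driver delivering complete packets fills in the packet type and
 * owner before handing the skb over (a sketch; 'type' stands for one of
 * the HCI_*_PKT constants):
 *
 *      bt_cb(skb)->pkt_type = type;
 *      skb->dev = (void *) hdev;
 *      err = hci_recv_frame(skb);
 *
 * On error the skb has already been freed, so the caller must not touch
 * it afterwards.
 */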
1870
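/* Append count bytes from data to the partial packet kept in
 * hdev->reassembly[index], allocating a fresh skb for the first
 * fragment.  Returns the number of input bytes left over after a
 * complete frame has been handed to hci_recv_frame() (zero if all input
 * was consumed) or a negative error code.
 */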
1871 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1872                           int count, __u8 index)
1873 {
1874         int len = 0;
1875         int hlen = 0;
1876         int remain = count;
1877         struct sk_buff *skb;
1878         struct bt_skb_cb *scb;
1879
1880         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1881             index >= NUM_REASSEMBLY)
1882                 return -EILSEQ;
1883
1884         skb = hdev->reassembly[index];
1885
1886         if (!skb) {
1887                 switch (type) {
1888                 case HCI_ACLDATA_PKT:
1889                         len = HCI_MAX_FRAME_SIZE;
1890                         hlen = HCI_ACL_HDR_SIZE;
1891                         break;
1892                 case HCI_EVENT_PKT:
1893                         len = HCI_MAX_EVENT_SIZE;
1894                         hlen = HCI_EVENT_HDR_SIZE;
1895                         break;
1896                 case HCI_SCODATA_PKT:
1897                         len = HCI_MAX_SCO_SIZE;
1898                         hlen = HCI_SCO_HDR_SIZE;
1899                         break;
1900                 }
1901
1902                 skb = bt_skb_alloc(len, GFP_ATOMIC);
1903                 if (!skb)
1904                         return -ENOMEM;
1905
1906                 scb = (void *) skb->cb;
1907                 scb->expect = hlen;
1908                 scb->pkt_type = type;
1909
1910                 skb->dev = (void *) hdev;
1911                 hdev->reassembly[index] = skb;
1912         }
1913
1914         while (count) {
1915                 scb = (void *) skb->cb;
1916                 len = min_t(uint, scb->expect, count);
1917
1918                 memcpy(skb_put(skb, len), data, len);
1919
1920                 count -= len;
1921                 data += len;
1922                 scb->expect -= len;
1923                 remain = count;
1924
1925                 switch (type) {
1926                 case HCI_EVENT_PKT:
1927                         if (skb->len == HCI_EVENT_HDR_SIZE) {
1928                                 struct hci_event_hdr *h = hci_event_hdr(skb);
1929                                 scb->expect = h->plen;
1930
1931                                 if (skb_tailroom(skb) < scb->expect) {
1932                                         kfree_skb(skb);
1933                                         hdev->reassembly[index] = NULL;
1934                                         return -ENOMEM;
1935                                 }
1936                         }
1937                         break;
1938
1939                 case HCI_ACLDATA_PKT:
                        if (skb->len == HCI_ACL_HDR_SIZE) {
1941                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1942                                 scb->expect = __le16_to_cpu(h->dlen);
1943
1944                                 if (skb_tailroom(skb) < scb->expect) {
1945                                         kfree_skb(skb);
1946                                         hdev->reassembly[index] = NULL;
1947                                         return -ENOMEM;
1948                                 }
1949                         }
1950                         break;
1951
1952                 case HCI_SCODATA_PKT:
1953                         if (skb->len == HCI_SCO_HDR_SIZE) {
1954                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1955                                 scb->expect = h->dlen;
1956
1957                                 if (skb_tailroom(skb) < scb->expect) {
1958                                         kfree_skb(skb);
1959                                         hdev->reassembly[index] = NULL;
1960                                         return -ENOMEM;
1961                                 }
1962                         }
1963                         break;
1964                 }
1965
1966                 if (scb->expect == 0) {
1967                         /* Complete frame */
1968
1969                         bt_cb(skb)->pkt_type = type;
1970                         hci_recv_frame(skb);
1971
1972                         hdev->reassembly[index] = NULL;
1973                         return remain;
1974                 }
1975         }
1976
1977         return remain;
1978 }
1979
1980 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1981 {
1982         int rem = 0;
1983
1984         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1985                 return -EILSEQ;
1986
1987         while (count) {
1988                 rem = hci_reassembly(hdev, type, data, count, type - 1);
1989                 if (rem < 0)
1990                         return rem;
1991
1992                 data += (count - rem);
1993                 count = rem;
1994         }
1995
1996         return rem;
1997 }
1998 EXPORT_SYMBOL(hci_recv_fragment);
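
/* Example: a driver that receives per-type chunks from its hardware can
 * forward them directly (a sketch; buf and len are whatever the bus
 * delivered):
 *
 *      err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
 *      if (err < 0)
 *              BT_ERR("Corrupted ACL fragment");
 *
 * Fragments of one packet must arrive in order, but different packet
 * types may be interleaved since each type reassembles in its own slot
 * (type - 1).
 */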
1999
2000 #define STREAM_REASSEMBLY 0
2001
2002 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2003 {
2004         int type;
2005         int rem = 0;
2006
2007         while (count) {
2008                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2009
2010                 if (!skb) {
2011                         struct { char type; } *pkt;
2012
2013                         /* Start of the frame */
2014                         pkt = data;
2015                         type = pkt->type;
2016
2017                         data++;
2018                         count--;
                } else {
                        type = bt_cb(skb)->pkt_type;
                }
2021
2022                 rem = hci_reassembly(hdev, type, data, count,
2023                                      STREAM_REASSEMBLY);
2024                 if (rem < 0)
2025                         return rem;
2026
2027                 data += (count - rem);
2028                 count = rem;
2029         }
2030
2031         return rem;
2032 }
2033 EXPORT_SYMBOL(hci_recv_stream_fragment);
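
/* Example: an H4-style UART driver feeds the raw byte stream, packet
 * type markers included, straight from its receive path (a sketch):
 *
 *      err = hci_recv_stream_fragment(hdev, data, count);
 *      if (err < 0)
 *              BT_ERR("Frame reassembly failed");
 *
 * The first byte of every frame selects the packet type; the rest is
 * reassembled through the single STREAM_REASSEMBLY slot.
 */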
2034
2035 /* ---- Interface to upper protocols ---- */
2036
2037 int hci_register_cb(struct hci_cb *cb)
2038 {
2039         BT_DBG("%p name %s", cb, cb->name);
2040
2041         write_lock(&hci_cb_list_lock);
2042         list_add(&cb->list, &hci_cb_list);
2043         write_unlock(&hci_cb_list_lock);
2044
2045         return 0;
2046 }
2047 EXPORT_SYMBOL(hci_register_cb);
2048
2049 int hci_unregister_cb(struct hci_cb *cb)
2050 {
2051         BT_DBG("%p name %s", cb, cb->name);
2052
2053         write_lock(&hci_cb_list_lock);
2054         list_del(&cb->list);
2055         write_unlock(&hci_cb_list_lock);
2056
2057         return 0;
2058 }
2059 EXPORT_SYMBOL(hci_unregister_cb);
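
/* Upper protocols such as L2CAP and SCO hook into security related
 * connection events through struct hci_cb.  A minimal sketch
 * (foo_security_cfm is hypothetical):
 *
 *      static struct hci_cb foo_cb = {
 *              .name           = "foo",
 *              .security_cfm   = foo_security_cfm,
 *      };
 *
 *      hci_register_cb(&foo_cb);
 *      ...
 *      hci_unregister_cb(&foo_cb);
 */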
2060
2061 static int hci_send_frame(struct sk_buff *skb)
2062 {
2063         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2064
2065         if (!hdev) {
2066                 kfree_skb(skb);
2067                 return -ENODEV;
2068         }
2069
2070         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2071
2072         /* Time stamp */
2073         __net_timestamp(skb);
2074
2075         /* Send copy to monitor */
2076         hci_send_to_monitor(hdev, skb);
2077
2078         if (atomic_read(&hdev->promisc)) {
2079                 /* Send copy to the sockets */
2080                 hci_send_to_sock(hdev, skb);
2081         }
2082
2083         /* Get rid of skb owner, prior to sending to the driver. */
2084         skb_orphan(skb);
2085
2086         return hdev->send(skb);
2087 }
2088
2089 /* Send HCI command */
2090 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2091 {
2092         int len = HCI_COMMAND_HDR_SIZE + plen;
2093         struct hci_command_hdr *hdr;
2094         struct sk_buff *skb;
2095
2096         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2097
2098         skb = bt_skb_alloc(len, GFP_ATOMIC);
2099         if (!skb) {
2100                 BT_ERR("%s no memory for command", hdev->name);
2101                 return -ENOMEM;
2102         }
2103
2104         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2105         hdr->opcode = cpu_to_le16(opcode);
2106         hdr->plen   = plen;
2107
2108         if (plen)
2109                 memcpy(skb_put(skb, plen), param, plen);
2110
2111         BT_DBG("skb len %d", skb->len);
2112
2113         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2114         skb->dev = (void *) hdev;
2115
2116         if (test_bit(HCI_INIT, &hdev->flags))
2117                 hdev->init_last_cmd = opcode;
2118
2119         skb_queue_tail(&hdev->cmd_q, skb);
2120         queue_work(hdev->workqueue, &hdev->cmd_work);
2121
2122         return 0;
2123 }
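
/* Example: enabling page and inquiry scan takes a single parameter byte
 * (commands with larger payloads use the hci_cp_* structs from hci.h):
 *
 *      __u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *      hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *
 * The command is only queued here; hci_cmd_work() transmits it once the
 * controller has command credit available (hdev->cmd_cnt).
 */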
2124
2125 /* Get data from the previously sent command */
2126 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2127 {
2128         struct hci_command_hdr *hdr;
2129
2130         if (!hdev->sent_cmd)
2131                 return NULL;
2132
2133         hdr = (void *) hdev->sent_cmd->data;
2134
2135         if (hdr->opcode != cpu_to_le16(opcode))
2136                 return NULL;
2137
2138         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2139
2140         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2141 }
2142
2143 /* Send ACL data */
2144 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2145 {
2146         struct hci_acl_hdr *hdr;
2147         int len = skb->len;
2148
2149         skb_push(skb, HCI_ACL_HDR_SIZE);
2150         skb_reset_transport_header(skb);
2151         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2152         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2153         hdr->dlen   = cpu_to_le16(len);
2154 }
2155
2156 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
2157                           struct sk_buff *skb, __u16 flags)
2158 {
2159         struct hci_conn *conn = chan->conn;
2160         struct hci_dev *hdev = conn->hdev;
2161         struct sk_buff *list;
2162
2163         skb->len = skb_headlen(skb);
2164         skb->data_len = 0;
2165
2166         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2167
2168         switch (hdev->dev_type) {
2169         case HCI_BREDR:
2170                 hci_add_acl_hdr(skb, conn->handle, flags);
2171                 break;
2172         case HCI_AMP:
2173                 hci_add_acl_hdr(skb, chan->handle, flags);
2174                 break;
2175         default:
2176                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2177                 return;
2178         }
2179
2180         list = skb_shinfo(skb)->frag_list;
2181         if (!list) {
                /* Non-fragmented */
2183                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2184
2185                 skb_queue_tail(queue, skb);
2186         } else {
2187                 /* Fragmented */
2188                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2189
2190                 skb_shinfo(skb)->frag_list = NULL;
2191
2192                 /* Queue all fragments atomically */
2193                 spin_lock(&queue->lock);
2194
2195                 __skb_queue_tail(queue, skb);
2196
2197                 flags &= ~ACL_START;
2198                 flags |= ACL_CONT;
2199                 do {
                        skb = list;
                        list = list->next;
2201
2202                         skb->dev = (void *) hdev;
2203                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2204                         hci_add_acl_hdr(skb, conn->handle, flags);
2205
2206                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2207
2208                         __skb_queue_tail(queue, skb);
2209                 } while (list);
2210
2211                 spin_unlock(&queue->lock);
2212         }
2213 }
2214
2215 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2216 {
2217         struct hci_dev *hdev = chan->conn->hdev;
2218
2219         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2220
2221         skb->dev = (void *) hdev;
2222
2223         hci_queue_acl(chan, &chan->data_q, skb, flags);
2224
2225         queue_work(hdev->workqueue, &hdev->tx_work);
2226 }
2227
2228 /* Send SCO data */
2229 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2230 {
2231         struct hci_dev *hdev = conn->hdev;
2232         struct hci_sco_hdr hdr;
2233
2234         BT_DBG("%s len %d", hdev->name, skb->len);
2235
2236         hdr.handle = cpu_to_le16(conn->handle);
2237         hdr.dlen   = skb->len;
2238
2239         skb_push(skb, HCI_SCO_HDR_SIZE);
2240         skb_reset_transport_header(skb);
2241         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2242
2243         skb->dev = (void *) hdev;
2244         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2245
2246         skb_queue_tail(&conn->data_q, skb);
2247         queue_work(hdev->workqueue, &hdev->tx_work);
2248 }
2249
2250 /* ---- HCI TX task (outgoing data) ---- */
2251
2252 /* HCI Connection scheduler */
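/* hci_low_sent() implements a simple fairness policy: among all
 * connections of the given type that have queued data, pick the one
 * with the fewest packets in flight (lowest ->sent) and grant it a
 * quote of cnt / num packets (at least one), where cnt is the free
 * controller buffer count and num the number of contending connections.
 */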
2253 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2254                                      int *quote)
2255 {
2256         struct hci_conn_hash *h = &hdev->conn_hash;
2257         struct hci_conn *conn = NULL, *c;
2258         unsigned int num = 0, min = ~0;
2259
2260         /* We don't have to lock device here. Connections are always
2261          * added and removed with TX task disabled. */
2262
2263         rcu_read_lock();
2264
2265         list_for_each_entry_rcu(c, &h->list, list) {
2266                 if (c->type != type || skb_queue_empty(&c->data_q))
2267                         continue;
2268
2269                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2270                         continue;
2271
2272                 num++;
2273
2274                 if (c->sent < min) {
2275                         min  = c->sent;
2276                         conn = c;
2277                 }
2278
2279                 if (hci_conn_num(hdev, type) == num)
2280                         break;
2281         }
2282
2283         rcu_read_unlock();
2284
2285         if (conn) {
2286                 int cnt, q;
2287
2288                 switch (conn->type) {
2289                 case ACL_LINK:
2290                         cnt = hdev->acl_cnt;
2291                         break;
2292                 case SCO_LINK:
2293                 case ESCO_LINK:
2294                         cnt = hdev->sco_cnt;
2295                         break;
2296                 case LE_LINK:
2297                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2298                         break;
2299                 default:
2300                         cnt = 0;
2301                         BT_ERR("Unknown link type");
2302                 }
2303
2304                 q = cnt / num;
2305                 *quote = q ? q : 1;
        } else {
                *quote = 0;
        }
2308
2309         BT_DBG("conn %p quote %d", conn, *quote);
2310         return conn;
2311 }
2312
2313 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2314 {
2315         struct hci_conn_hash *h = &hdev->conn_hash;
2316         struct hci_conn *c;
2317
2318         BT_ERR("%s link tx timeout", hdev->name);
2319
2320         rcu_read_lock();
2321
2322         /* Kill stalled connections */
2323         list_for_each_entry_rcu(c, &h->list, list) {
2324                 if (c->type == type && c->sent) {
2325                         BT_ERR("%s killing stalled connection %pMR",
2326                                hdev->name, &c->dst);
2327                         hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
2328                 }
2329         }
2330
2331         rcu_read_unlock();
2332 }
2333
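/* Per-channel variant of the scheduler used for ACL and LE traffic: the
 * skb->priority of each queue head is considered first, so channels at
 * a higher priority always win, and among channels at the current
 * highest priority the least recently served connection (lowest
 * conn->sent) is picked.  The quote is computed as in hci_low_sent().
 */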
2334 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2335                                       int *quote)
2336 {
2337         struct hci_conn_hash *h = &hdev->conn_hash;
2338         struct hci_chan *chan = NULL;
2339         unsigned int num = 0, min = ~0, cur_prio = 0;
2340         struct hci_conn *conn;
2341         int cnt, q, conn_num = 0;
2342
2343         BT_DBG("%s", hdev->name);
2344
2345         rcu_read_lock();
2346
2347         list_for_each_entry_rcu(conn, &h->list, list) {
2348                 struct hci_chan *tmp;
2349
2350                 if (conn->type != type)
2351                         continue;
2352
2353                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2354                         continue;
2355
2356                 conn_num++;
2357
2358                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2359                         struct sk_buff *skb;
2360
2361                         if (skb_queue_empty(&tmp->data_q))
2362                                 continue;
2363
2364                         skb = skb_peek(&tmp->data_q);
2365                         if (skb->priority < cur_prio)
2366                                 continue;
2367
2368                         if (skb->priority > cur_prio) {
2369                                 num = 0;
2370                                 min = ~0;
2371                                 cur_prio = skb->priority;
2372                         }
2373
2374                         num++;
2375
2376                         if (conn->sent < min) {
2377                                 min  = conn->sent;
2378                                 chan = tmp;
2379                         }
2380                 }
2381
2382                 if (hci_conn_num(hdev, type) == conn_num)
2383                         break;
2384         }
2385
2386         rcu_read_unlock();
2387
2388         if (!chan)
2389                 return NULL;
2390
2391         switch (chan->conn->type) {
2392         case ACL_LINK:
2393                 cnt = hdev->acl_cnt;
2394                 break;
2395         case AMP_LINK:
2396                 cnt = hdev->block_cnt;
2397                 break;
2398         case SCO_LINK:
2399         case ESCO_LINK:
2400                 cnt = hdev->sco_cnt;
2401                 break;
2402         case LE_LINK:
2403                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2404                 break;
2405         default:
2406                 cnt = 0;
2407                 BT_ERR("Unknown link type");
2408         }
2409
2410         q = cnt / num;
2411         *quote = q ? q : 1;
2412         BT_DBG("chan %p quote %d", chan, *quote);
2413         return chan;
2414 }
2415
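/* After a scheduling round, promote the head skb of every queue that
 * did not get to send anything (chan->sent == 0) to HCI_PRIO_MAX - 1,
 * so that low priority channels are not starved indefinitely by higher
 * priority traffic.
 */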
2416 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2417 {
2418         struct hci_conn_hash *h = &hdev->conn_hash;
2419         struct hci_conn *conn;
2420         int num = 0;
2421
2422         BT_DBG("%s", hdev->name);
2423
2424         rcu_read_lock();
2425
2426         list_for_each_entry_rcu(conn, &h->list, list) {
2427                 struct hci_chan *chan;
2428
2429                 if (conn->type != type)
2430                         continue;
2431
2432                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2433                         continue;
2434
2435                 num++;
2436
2437                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2438                         struct sk_buff *skb;
2439
2440                         if (chan->sent) {
2441                                 chan->sent = 0;
2442                                 continue;
2443                         }
2444
2445                         if (skb_queue_empty(&chan->data_q))
2446                                 continue;
2447
2448                         skb = skb_peek(&chan->data_q);
2449                         if (skb->priority >= HCI_PRIO_MAX - 1)
2450                                 continue;
2451
2452                         skb->priority = HCI_PRIO_MAX - 1;
2453
2454                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2455                                skb->priority);
2456                 }
2457
2458                 if (hci_conn_num(hdev, type) == num)
2459                         break;
2460         }
2461
        rcu_read_unlock();
}
2465
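/* With block based flow control every ACL fragment consumes
 * DIV_ROUND_UP(payload, block_len) controller buffer blocks.  For
 * example, with a (hypothetical) block_len of 48 bytes a 100 byte skb
 * carries 100 - 4 = 96 payload bytes and therefore costs two blocks.
 */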
2466 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2467 {
2468         /* Calculate count of blocks used by this packet */
2469         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2470 }
2471
2472 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2473 {
2474         if (!test_bit(HCI_RAW, &hdev->flags)) {
2475                 /* ACL tx timeout must be longer than maximum
2476                  * link supervision timeout (40.9 seconds) */
2477                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2478                                        HCI_ACL_TX_TIMEOUT))
2479                         hci_link_tx_to(hdev, ACL_LINK);
2480         }
2481 }
2482
2483 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2484 {
2485         unsigned int cnt = hdev->acl_cnt;
2486         struct hci_chan *chan;
2487         struct sk_buff *skb;
2488         int quote;
2489
2490         __check_timeout(hdev, cnt);
2491
2492         while (hdev->acl_cnt &&
2493                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2494                 u32 priority = (skb_peek(&chan->data_q))->priority;
2495                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2496                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2497                                skb->len, skb->priority);
2498
2499                         /* Stop if priority has changed */
2500                         if (skb->priority < priority)
2501                                 break;
2502
2503                         skb = skb_dequeue(&chan->data_q);
2504
2505                         hci_conn_enter_active_mode(chan->conn,
2506                                                    bt_cb(skb)->force_active);
2507
2508                         hci_send_frame(skb);
2509                         hdev->acl_last_tx = jiffies;
2510
2511                         hdev->acl_cnt--;
2512                         chan->sent++;
2513                         chan->conn->sent++;
2514                 }
2515         }
2516
2517         if (cnt != hdev->acl_cnt)
2518                 hci_prio_recalculate(hdev, ACL_LINK);
2519 }
2520
2521 static void hci_sched_acl_blk(struct hci_dev *hdev)
2522 {
2523         unsigned int cnt = hdev->block_cnt;
2524         struct hci_chan *chan;
2525         struct sk_buff *skb;
2526         int quote;
2527         u8 type;
2528
2529         __check_timeout(hdev, cnt);
2530
2531         BT_DBG("%s", hdev->name);
2532
2533         if (hdev->dev_type == HCI_AMP)
2534                 type = AMP_LINK;
2535         else
2536                 type = ACL_LINK;
2537
2538         while (hdev->block_cnt > 0 &&
2539                (chan = hci_chan_sent(hdev, type, &quote))) {
2540                 u32 priority = (skb_peek(&chan->data_q))->priority;
2541                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2542                         int blocks;
2543
2544                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2545                                skb->len, skb->priority);
2546
2547                         /* Stop if priority has changed */
2548                         if (skb->priority < priority)
2549                                 break;
2550
2551                         skb = skb_dequeue(&chan->data_q);
2552
2553                         blocks = __get_blocks(hdev, skb);
2554                         if (blocks > hdev->block_cnt)
2555                                 return;
2556
2557                         hci_conn_enter_active_mode(chan->conn,
2558                                                    bt_cb(skb)->force_active);
2559
2560                         hci_send_frame(skb);
2561                         hdev->acl_last_tx = jiffies;
2562
2563                         hdev->block_cnt -= blocks;
2564                         quote -= blocks;
2565
2566                         chan->sent += blocks;
2567                         chan->conn->sent += blocks;
2568                 }
2569         }
2570
2571         if (cnt != hdev->block_cnt)
2572                 hci_prio_recalculate(hdev, type);
2573 }
2574
2575 static void hci_sched_acl(struct hci_dev *hdev)
2576 {
2577         BT_DBG("%s", hdev->name);
2578
2579         /* No ACL link over BR/EDR controller */
2580         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2581                 return;
2582
2583         /* No AMP link over AMP controller */
2584         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
2585                 return;
2586
2587         switch (hdev->flow_ctl_mode) {
2588         case HCI_FLOW_CTL_MODE_PACKET_BASED:
2589                 hci_sched_acl_pkt(hdev);
2590                 break;
2591
2592         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2593                 hci_sched_acl_blk(hdev);
2594                 break;
2595         }
2596 }
2597
2598 /* Schedule SCO */
2599 static void hci_sched_sco(struct hci_dev *hdev)
2600 {
2601         struct hci_conn *conn;
2602         struct sk_buff *skb;
2603         int quote;
2604
2605         BT_DBG("%s", hdev->name);
2606
2607         if (!hci_conn_num(hdev, SCO_LINK))
2608                 return;
2609
2610         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2611                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2612                         BT_DBG("skb %p len %d", skb, skb->len);
2613                         hci_send_frame(skb);
2614
2615                         conn->sent++;
2616                         if (conn->sent == ~0)
2617                                 conn->sent = 0;
2618                 }
2619         }
2620 }
2621
2622 static void hci_sched_esco(struct hci_dev *hdev)
2623 {
2624         struct hci_conn *conn;
2625         struct sk_buff *skb;
2626         int quote;
2627
2628         BT_DBG("%s", hdev->name);
2629
2630         if (!hci_conn_num(hdev, ESCO_LINK))
2631                 return;
2632
2633         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2634                                                      &quote))) {
2635                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2636                         BT_DBG("skb %p len %d", skb, skb->len);
2637                         hci_send_frame(skb);
2638
2639                         conn->sent++;
2640                         if (conn->sent == ~0)
2641                                 conn->sent = 0;
2642                 }
2643         }
2644 }
2645
2646 static void hci_sched_le(struct hci_dev *hdev)
2647 {
2648         struct hci_chan *chan;
2649         struct sk_buff *skb;
2650         int quote, cnt, tmp;
2651
2652         BT_DBG("%s", hdev->name);
2653
2654         if (!hci_conn_num(hdev, LE_LINK))
2655                 return;
2656
2657         if (!test_bit(HCI_RAW, &hdev->flags)) {
2658                 /* LE tx timeout must be longer than maximum
2659                  * link supervision timeout (40.9 seconds) */
2660                 if (!hdev->le_cnt && hdev->le_pkts &&
2661                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
2662                         hci_link_tx_to(hdev, LE_LINK);
2663         }
2664
2665         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2666         tmp = cnt;
2667         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2668                 u32 priority = (skb_peek(&chan->data_q))->priority;
2669                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2670                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2671                                skb->len, skb->priority);
2672
2673                         /* Stop if priority has changed */
2674                         if (skb->priority < priority)
2675                                 break;
2676
2677                         skb = skb_dequeue(&chan->data_q);
2678
2679                         hci_send_frame(skb);
2680                         hdev->le_last_tx = jiffies;
2681
2682                         cnt--;
2683                         chan->sent++;
2684                         chan->conn->sent++;
2685                 }
2686         }
2687
2688         if (hdev->le_pkts)
2689                 hdev->le_cnt = cnt;
2690         else
2691                 hdev->acl_cnt = cnt;
2692
2693         if (cnt != tmp)
2694                 hci_prio_recalculate(hdev, LE_LINK);
2695 }
2696
2697 static void hci_tx_work(struct work_struct *work)
2698 {
2699         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2700         struct sk_buff *skb;
2701
2702         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2703                hdev->sco_cnt, hdev->le_cnt);
2704
2705         /* Schedule queues and send stuff to HCI driver */
2706
2707         hci_sched_acl(hdev);
2708
2709         hci_sched_sco(hdev);
2710
2711         hci_sched_esco(hdev);
2712
2713         hci_sched_le(hdev);
2714
2715         /* Send next queued raw (unknown type) packet */
2716         while ((skb = skb_dequeue(&hdev->raw_q)))
2717                 hci_send_frame(skb);
2718 }
2719
2720 /* ----- HCI RX task (incoming data processing) ----- */
2721
2722 /* ACL data packet */
2723 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2724 {
2725         struct hci_acl_hdr *hdr = (void *) skb->data;
2726         struct hci_conn *conn;
2727         __u16 handle, flags;
2728
2729         skb_pull(skb, HCI_ACL_HDR_SIZE);
2730
2731         handle = __le16_to_cpu(hdr->handle);
2732         flags  = hci_flags(handle);
2733         handle = hci_handle(handle);
2734
2735         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
2736                handle, flags);
2737
2738         hdev->stat.acl_rx++;
2739
2740         hci_dev_lock(hdev);
2741         conn = hci_conn_hash_lookup_handle(hdev, handle);
2742         hci_dev_unlock(hdev);
2743
2744         if (conn) {
2745                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2746
2747                 hci_dev_lock(hdev);
2748                 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2749                     !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2750                         mgmt_device_connected(hdev, &conn->dst, conn->type,
2751                                               conn->dst_type, 0, NULL, 0,
2752                                               conn->dev_class);
2753                 hci_dev_unlock(hdev);
2754
2755                 /* Send to upper protocol */
2756                 l2cap_recv_acldata(conn, skb, flags);
                return;
        }

        BT_ERR("%s ACL packet for unknown connection handle %d",
               hdev->name, handle);
2762
2763         kfree_skb(skb);
2764 }
2765
2766 /* SCO data packet */
2767 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2768 {
2769         struct hci_sco_hdr *hdr = (void *) skb->data;
2770         struct hci_conn *conn;
2771         __u16 handle;
2772
2773         skb_pull(skb, HCI_SCO_HDR_SIZE);
2774
2775         handle = __le16_to_cpu(hdr->handle);
2776
2777         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
2778
2779         hdev->stat.sco_rx++;
2780
2781         hci_dev_lock(hdev);
2782         conn = hci_conn_hash_lookup_handle(hdev, handle);
2783         hci_dev_unlock(hdev);
2784
2785         if (conn) {
2786                 /* Send to upper protocol */
2787                 sco_recv_scodata(conn, skb);
                return;
        }

        BT_ERR("%s SCO packet for unknown connection handle %d",
               hdev->name, handle);
2793
2794         kfree_skb(skb);
2795 }
2796
2797 static void hci_rx_work(struct work_struct *work)
2798 {
2799         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2800         struct sk_buff *skb;
2801
2802         BT_DBG("%s", hdev->name);
2803
2804         while ((skb = skb_dequeue(&hdev->rx_q))) {
2805                 /* Send copy to monitor */
2806                 hci_send_to_monitor(hdev, skb);
2807
2808                 if (atomic_read(&hdev->promisc)) {
2809                         /* Send copy to the sockets */
2810                         hci_send_to_sock(hdev, skb);
2811                 }
2812
2813                 if (test_bit(HCI_RAW, &hdev->flags)) {
2814                         kfree_skb(skb);
2815                         continue;
2816                 }
2817
2818                 if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
2820                         switch (bt_cb(skb)->pkt_type) {
2821                         case HCI_ACLDATA_PKT:
2822                         case HCI_SCODATA_PKT:
2823                                 kfree_skb(skb);
2824                                 continue;
2825                         }
2826                 }
2827
2828                 /* Process frame */
2829                 switch (bt_cb(skb)->pkt_type) {
2830                 case HCI_EVENT_PKT:
2831                         BT_DBG("%s Event packet", hdev->name);
2832                         hci_event_packet(hdev, skb);
2833                         break;
2834
2835                 case HCI_ACLDATA_PKT:
2836                         BT_DBG("%s ACL data packet", hdev->name);
2837                         hci_acldata_packet(hdev, skb);
2838                         break;
2839
2840                 case HCI_SCODATA_PKT:
2841                         BT_DBG("%s SCO data packet", hdev->name);
2842                         hci_scodata_packet(hdev, skb);
2843                         break;
2844
2845                 default:
2846                         kfree_skb(skb);
2847                         break;
2848                 }
2849         }
2850 }
2851
2852 static void hci_cmd_work(struct work_struct *work)
2853 {
2854         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2855         struct sk_buff *skb;
2856
2857         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2858                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
2859
2860         /* Send queued commands */
2861         if (atomic_read(&hdev->cmd_cnt)) {
2862                 skb = skb_dequeue(&hdev->cmd_q);
2863                 if (!skb)
2864                         return;
2865
2866                 kfree_skb(hdev->sent_cmd);
2867
2868                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2869                 if (hdev->sent_cmd) {
2870                         atomic_dec(&hdev->cmd_cnt);
2871                         hci_send_frame(skb);
2872                         if (test_bit(HCI_RESET, &hdev->flags))
2873                                 del_timer(&hdev->cmd_timer);
2874                         else
2875                                 mod_timer(&hdev->cmd_timer,
2876                                           jiffies + HCI_CMD_TIMEOUT);
2877                 } else {
2878                         skb_queue_head(&hdev->cmd_q, skb);
2879                         queue_work(hdev->workqueue, &hdev->cmd_work);
2880                 }
2881         }
2882 }
2883
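/* Start a general inquiry.  The length parameter is the Inquiry_Length
 * value from the HCI specification, counted in units of 1.28 seconds,
 * so e.g. 0x08 scans for roughly 10.24 seconds.
 */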
2884 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2885 {
2886         /* General inquiry access code (GIAC) */
2887         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2888         struct hci_cp_inquiry cp;
2889
2890         BT_DBG("%s", hdev->name);
2891
2892         if (test_bit(HCI_INQUIRY, &hdev->flags))
2893                 return -EINPROGRESS;
2894
2895         inquiry_cache_flush(hdev);
2896
2897         memset(&cp, 0, sizeof(cp));
2898         memcpy(&cp.lap, lap, sizeof(cp.lap));
2899         cp.length  = length;
2900
2901         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2902 }
2903
2904 int hci_cancel_inquiry(struct hci_dev *hdev)
2905 {
2906         BT_DBG("%s", hdev->name);
2907
2908         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2909                 return -EALREADY;
2910
2911         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2912 }
2913
2914 u8 bdaddr_to_le(u8 bdaddr_type)
2915 {
2916         switch (bdaddr_type) {
2917         case BDADDR_LE_PUBLIC:
2918                 return ADDR_LE_DEV_PUBLIC;
2919
2920         default:
2921                 /* Fallback to LE Random address type */
2922                 return ADDR_LE_DEV_RANDOM;
2923         }
2924 }