/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

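/* The boolean debugfs attributes below share one pattern: reads copy a
 * "Y\n" or "N\n" string to userspace via simple_read_from_buffer(), and
 * writes parse the input with strtobool() before flipping the matching
 * dev_flags bit. A minimal illustrative sketch of the read side (flag_set
 * is a placeholder, not an actual flag in this file):
 *
 *      buf[0] = flag_set ? 'Y' : 'N';
 *      buf[1] = '\n';
 *      buf[2] = '\0';
 *      return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
 */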
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dev_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
                                   size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
        .open           = simple_open,
        .read           = use_debug_keys_read,
        .llseek         = default_llseek,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;
        struct sk_buff *skb;
        __u8 mode;
        int err;

        if (val != 0 && val != 1)
                return -EINVAL;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        hci_req_lock(hdev);
        mode = val;
        skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
                             &mode, HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_lock(hdev);
        hdev->ssp_debug_mode = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->ssp_debug_mode;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
                        ssp_debug_mode_set, "%llu\n");

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int own_address_type_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && val != 1)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->own_addr_type = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->own_addr_type;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
                        own_address_type_set, "%llu\n");

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           8, ltk->rand, 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
                            size_t count, loff_t *position)
{
        struct hci_dev *hdev = fp->private_data;
        bool enable;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));

        if (copy_from_user(buf, user_buffer, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';

        if (strtobool(buf, &enable) < 0)
                return -EINVAL;

        if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

        return count;
}

static const struct file_operations lowpan_debugfs_fops = {
        .open           = simple_open,
        .read           = lowpan_read,
        .write          = lowpan_write,
        .llseek         = default_llseek,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

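/* Consume hdev->recv_evt and validate it against the request that just
 * completed. If a specific event type was requested, any other event is
 * discarded; otherwise the skb must be a Command Complete event whose
 * opcode matches the issued command. On success the skb is returned with
 * the event headers already pulled, so callers see only the return
 * parameters.
 */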
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

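/* Send a single HCI command and block until the matching event arrives or
 * the timeout fires. Callers serialize through hci_req_lock(), as the
 * debugfs writers earlier in this file do. A typical call sequence
 * (mirroring dut_mode_write() above, where skb->data[0] carries the
 * command status):
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      err = -bt_to_errno(skb->data[0]);
 *      kfree_skb(skb);
 */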
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

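/* Request synchronization runs through three hdev->req_status states:
 * HCI_REQ_PEND while commands are in flight, then either HCI_REQ_DONE
 * (result stored in hdev->req_result) via hci_req_sync_complete(), or
 * HCI_REQ_CANCELED via hci_req_cancel(). Anything else after the wait
 * maps to -ETIMEDOUT.
 */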
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                      unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

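/* Pick the inquiry mode to configure: 0x02 selects Extended Inquiry
 * Result, 0x01 Inquiry Result with RSSI, and 0x00 the standard Inquiry
 * Result format. A few controllers (matched below by manufacturer,
 * revision and LMP subversion) deliver RSSI results without advertising
 * the feature bit, so they are special-cased to 0x01.
 */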
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

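/* The event mask is a 64-bit little-endian bitfield: event bit n lives in
 * events[n / 8] under mask 1 << (n % 8). For example, the Sniff Subrating
 * entry below sets bit 45:
 *
 *      events[5] |= 0x20;      (5 * 8 + 5 == bit 45)
 */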
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

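/* Build the default link policy from the LMP feature bits the controller
 * reported, so role switch, hold, sniff and park modes are only enabled
 * when the hardware actually supports them.
 */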
static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01;      /* Synchronization Train Received */
                events[2] |= 0x02;      /* CSB Receive */
                events[2] |= 0x04;      /* CSB Timeout */
                events[2] |= 0x08;      /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         *
         * Some controllers indicate that they support handling deleting
         * stored link keys, but they don't. The quirk lets a driver
         * just disable this command.
         */
        if (hdev->commands[6] & 0x80 &&
            !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                        /* If the controller has a public BD_ADDR, then
                         * by default use that one. If this is a LE only
                         * controller without a public address, default
                         * to the random address.
                         */
                        if (bacmp(&hdev->bdaddr, BDADDR_ANY))
                                hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
                        else
                                hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

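/* Controller bring-up runs up to four synchronous request stages: stage 1
 * (reset plus basic reads) for every controller type, stages 2-4 (event
 * masks, link policy, LE setup, extended features) only for HCI_BREDR
 * controllers; AMP controllers return after stage 1. Debugfs entries are
 * created once during the HCI_SETUP phase, not on every power-on.
 */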
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* Only create debugfs entries during the initial setup
         * phase and not every time the controller gets powered on.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                return 0;

        debugfs_create_file("features", 0444, hdev->debugfs, hdev,
                            &features_fops);
        debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
                           &hdev->manufacturer);
        debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
        debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
        debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
                            &blacklist_fops);
        debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

        if (lmp_bredr_capable(hdev)) {
                debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
                                    hdev, &inquiry_cache_fops);
                debugfs_create_file("link_keys", 0400, hdev->debugfs,
                                    hdev, &link_keys_fops);
                debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
                                    hdev, &use_debug_keys_fops);
                debugfs_create_file("dev_class", 0444, hdev->debugfs,
                                    hdev, &dev_class_fops);
                debugfs_create_file("voice_setting", 0444, hdev->debugfs,
                                    hdev, &voice_setting_fops);
        }

        if (lmp_ssp_capable(hdev)) {
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);
                debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
                                    hdev, &ssp_debug_mode_fops);
        }

        if (lmp_sniff_capable(hdev)) {
                debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
                                    hdev, &idle_timeout_fops);
                debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_min_interval_fops);
                debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_max_interval_fops);
        }

        if (lmp_le_capable(hdev)) {
                debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
                                  &hdev->le_white_list_size);
                debugfs_create_file("static_address", 0444, hdev->debugfs,
                                    hdev, &static_address_fops);
                debugfs_create_file("own_address_type", 0644, hdev->debugfs,
                                    hdev, &own_address_type_fops);
                debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
                                    hdev, &long_term_keys_fops);
                debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
                                    hdev, &conn_min_interval_fops);
                debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
                                    hdev, &conn_max_interval_fops);
                debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
                                    &lowpan_debugfs_fops);
        }

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

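/* Discovery state transitions: STOPPED -> STARTING -> FINDING ->
 * (optionally RESOLVING for remote name lookups) -> STOPPING -> STOPPED.
 * Only the transition into FINDING and the one back to STOPPED are
 * reported to the management interface via mgmt_discovering().
 */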
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

1633 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1634                                       struct inquiry_entry *ie)
1635 {
1636         struct discovery_state *cache = &hdev->discovery;
1637         struct list_head *pos = &cache->resolve;
1638         struct inquiry_entry *p;
1639
1640         list_del(&ie->list);
1641
1642         list_for_each_entry(p, &cache->resolve, list) {
1643                 if (p->name_state != NAME_PENDING &&
1644                     abs(p->data.rssi) >= abs(ie->data.rssi))
1645                         break;
1646                 pos = &p->list;
1647         }
1648
1649         list_add(&ie->list, pos);
1650 }
1651
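/* Add or refresh the cache entry for @data. Returns false when the
 * entry is (still) in NAME_NOT_KNOWN state or cannot be allocated,
 * true otherwise.
 */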
1652 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1653                               bool name_known, bool *ssp)
1654 {
1655         struct discovery_state *cache = &hdev->discovery;
1656         struct inquiry_entry *ie;
1657
1658         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1659
1660         hci_remove_remote_oob_data(hdev, &data->bdaddr);
1661
1662         if (ssp)
1663                 *ssp = data->ssp_mode;
1664
1665         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1666         if (ie) {
1667                 if (ie->data.ssp_mode && ssp)
1668                         *ssp = true;
1669
1670                 if (ie->name_state == NAME_NEEDED &&
1671                     data->rssi != ie->data.rssi) {
1672                         ie->data.rssi = data->rssi;
1673                         hci_inquiry_cache_update_resolve(hdev, ie);
1674                 }
1675
1676                 goto update;
1677         }
1678
1679         /* Entry not in the cache. Add new one. */
1680         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1681         if (!ie)
1682                 return false;
1683
1684         list_add(&ie->all, &cache->all);
1685
1686         if (name_known) {
1687                 ie->name_state = NAME_KNOWN;
1688         } else {
1689                 ie->name_state = NAME_NOT_KNOWN;
1690                 list_add(&ie->list, &cache->unknown);
1691         }
1692
1693 update:
1694         if (name_known && ie->name_state != NAME_KNOWN &&
1695             ie->name_state != NAME_PENDING) {
1696                 ie->name_state = NAME_KNOWN;
1697                 list_del(&ie->list);
1698         }
1699
1700         memcpy(&ie->data, data, sizeof(*data));
1701         ie->timestamp = jiffies;
1702         cache->timestamp = jiffies;
1703
1704         if (ie->name_state == NAME_NOT_KNOWN)
1705                 return false;
1706
1707         return true;
1708 }
1709
1710 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1711 {
1712         struct discovery_state *cache = &hdev->discovery;
1713         struct inquiry_info *info = (struct inquiry_info *) buf;
1714         struct inquiry_entry *e;
1715         int copied = 0;
1716
1717         list_for_each_entry(e, &cache->all, all) {
1718                 struct inquiry_data *data = &e->data;
1719
1720                 if (copied >= num)
1721                         break;
1722
1723                 bacpy(&info->bdaddr, &data->bdaddr);
1724                 info->pscan_rep_mode    = data->pscan_rep_mode;
1725                 info->pscan_period_mode = data->pscan_period_mode;
1726                 info->pscan_mode        = data->pscan_mode;
1727                 memcpy(info->dev_class, data->dev_class, 3);
1728                 info->clock_offset      = data->clock_offset;
1729
1730                 info++;
1731                 copied++;
1732         }
1733
1734         BT_DBG("cache %p, copied %d", cache, copied);
1735         return copied;
1736 }
1737
1738 static void hci_inq_req(struct hci_request *req, unsigned long opt)
1739 {
1740         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
1741         struct hci_dev *hdev = req->hdev;
1742         struct hci_cp_inquiry cp;
1743
1744         BT_DBG("%s", hdev->name);
1745
1746         if (test_bit(HCI_INQUIRY, &hdev->flags))
1747                 return;
1748
1749         /* Start Inquiry */
1750         memcpy(&cp.lap, &ir->lap, 3);
1751         cp.length  = ir->length;
1752         cp.num_rsp = ir->num_rsp;
1753         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1754 }
1755
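/* Sleep action for wait_on_bit(): yield the CPU and abort the wait
 * early if a signal is pending.
 */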
1756 static int wait_inquiry(void *word)
1757 {
1758         schedule();
1759         return signal_pending(current);
1760 }
1761
1762 int hci_inquiry(void __user *arg)
1763 {
1764         __u8 __user *ptr = arg;
1765         struct hci_inquiry_req ir;
1766         struct hci_dev *hdev;
1767         int err = 0, do_inquiry = 0, max_rsp;
1768         long timeo;
1769         __u8 *buf;
1770
1771         if (copy_from_user(&ir, ptr, sizeof(ir)))
1772                 return -EFAULT;
1773
1774         hdev = hci_dev_get(ir.dev_id);
1775         if (!hdev)
1776                 return -ENODEV;
1777
1778         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1779                 err = -EBUSY;
1780                 goto done;
1781         }
1782
1783         if (hdev->dev_type != HCI_BREDR) {
1784                 err = -EOPNOTSUPP;
1785                 goto done;
1786         }
1787
1788         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1789                 err = -EOPNOTSUPP;
1790                 goto done;
1791         }
1792
1793         hci_dev_lock(hdev);
1794         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
1795             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1796                 hci_inquiry_cache_flush(hdev);
1797                 do_inquiry = 1;
1798         }
1799         hci_dev_unlock(hdev);
1800
1801         timeo = ir.length * msecs_to_jiffies(2000);
1802
1803         if (do_inquiry) {
1804                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1805                                    timeo);
1806                 if (err < 0)
1807                         goto done;
1808
1809                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1810                  * cleared). If it is interrupted by a signal, return -EINTR.
1811                  */
1812                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1813                                 TASK_INTERRUPTIBLE))
1814                         return -EINTR;
1815         }
1816
1817         /* For an unlimited number of responses we use a buffer with
1818          * 255 entries.
1819          */
1820         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1821
1822         /* inquiry_cache_dump() can't sleep. Therefore we allocate a
1823          * temporary buffer and then copy it to user space.
1824          */
1825         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
1826         if (!buf) {
1827                 err = -ENOMEM;
1828                 goto done;
1829         }
1830
1831         hci_dev_lock(hdev);
1832         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
1833         hci_dev_unlock(hdev);
1834
1835         BT_DBG("num_rsp %d", ir.num_rsp);
1836
1837         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1838                 ptr += sizeof(ir);
1839                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
1840                                  ir.num_rsp))
1841                         err = -EFAULT;
1842         } else
1843                 err = -EFAULT;
1844
1845         kfree(buf);
1846
1847 done:
1848         hci_dev_put(hdev);
1849         return err;
1850 }
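
/* For reference, user space reaches the handler above through the
 * HCIINQUIRY ioctl on a raw HCI socket. A minimal sketch, assuming the
 * matching BlueZ <bluetooth/hci.h> definitions and omitting error
 * handling; the LAP below is the GIAC:
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info    info[8];
 *	} buf = {
 *		.ir = {
 *			.dev_id  = 0,
 *			.lap     = { 0x33, 0x8b, 0x9e },
 *			.length  = 8,
 *			.num_rsp = 8,
 *		},
 *	};
 *
 *	ioctl(dd, HCIINQUIRY, &buf);
 */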
1851
1852 static int hci_dev_do_open(struct hci_dev *hdev)
1853 {
1854         int ret = 0;
1855
1856         BT_DBG("%s %p", hdev->name, hdev);
1857
1858         hci_req_lock(hdev);
1859
1860         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1861                 ret = -ENODEV;
1862                 goto done;
1863         }
1864
1865         if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1866                 /* Check for rfkill but allow the HCI setup stage to
1867                  * proceed (which in itself doesn't cause any RF activity).
1868                  */
1869                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1870                         ret = -ERFKILL;
1871                         goto done;
1872                 }
1873
1874                 /* Check for valid public address or a configured static
1875                  * random address, but let the HCI setup proceed to
1876                  * be able to determine if there is a public address
1877                  * or not.
1878                  *
1879                  * This check is only valid for BR/EDR controllers
1880                  * since AMP controllers do not have an address.
1881                  */
1882                 if (hdev->dev_type == HCI_BREDR &&
1883                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1884                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1885                         ret = -EADDRNOTAVAIL;
1886                         goto done;
1887                 }
1888         }
1889
1890         if (test_bit(HCI_UP, &hdev->flags)) {
1891                 ret = -EALREADY;
1892                 goto done;
1893         }
1894
1895         if (hdev->open(hdev)) {
1896                 ret = -EIO;
1897                 goto done;
1898         }
1899
1900         atomic_set(&hdev->cmd_cnt, 1);
1901         set_bit(HCI_INIT, &hdev->flags);
1902
1903         if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1904                 ret = hdev->setup(hdev);
1905
1906         if (!ret) {
1907                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1908                         set_bit(HCI_RAW, &hdev->flags);
1909
1910                 if (!test_bit(HCI_RAW, &hdev->flags) &&
1911                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1912                         ret = __hci_init(hdev);
1913         }
1914
1915         clear_bit(HCI_INIT, &hdev->flags);
1916
1917         if (!ret) {
1918                 hci_dev_hold(hdev);
1919                 set_bit(HCI_UP, &hdev->flags);
1920                 hci_notify(hdev, HCI_DEV_UP);
1921                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
1922                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1923                     hdev->dev_type == HCI_BREDR) {
1924                         hci_dev_lock(hdev);
1925                         mgmt_powered(hdev, 1);
1926                         hci_dev_unlock(hdev);
1927                 }
1928         } else {
1929                 /* Init failed, cleanup */
1930                 flush_work(&hdev->tx_work);
1931                 flush_work(&hdev->cmd_work);
1932                 flush_work(&hdev->rx_work);
1933
1934                 skb_queue_purge(&hdev->cmd_q);
1935                 skb_queue_purge(&hdev->rx_q);
1936
1937                 if (hdev->flush)
1938                         hdev->flush(hdev);
1939
1940                 if (hdev->sent_cmd) {
1941                         kfree_skb(hdev->sent_cmd);
1942                         hdev->sent_cmd = NULL;
1943                 }
1944
1945                 hdev->close(hdev);
1946                 hdev->flags = 0;
1947         }
1948
1949 done:
1950         hci_req_unlock(hdev);
1951         return ret;
1952 }
1953
1954 /* ---- HCI ioctl helpers ---- */
1955
1956 int hci_dev_open(__u16 dev)
1957 {
1958         struct hci_dev *hdev;
1959         int err;
1960
1961         hdev = hci_dev_get(dev);
1962         if (!hdev)
1963                 return -ENODEV;
1964
1965         /* We need to ensure that no other power on/off work is pending
1966          * before proceeding to call hci_dev_do_open. This is
1967          * particularly important if the setup procedure has not yet
1968          * completed.
1969          */
1970         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1971                 cancel_delayed_work(&hdev->power_off);
1972
1973         /* After this call it is guaranteed that the setup procedure
1974          * has finished. This means that error conditions like RFKILL
1975          * or a missing public or static random address now apply.
1976          */
1977         flush_workqueue(hdev->req_workqueue);
1978
1979         err = hci_dev_do_open(hdev);
1980
1981         hci_dev_put(hdev);
1982
1983         return err;
1984 }
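
/* User space normally triggers the open through the HCIDEVUP ioctl;
 * a minimal sketch (error handling omitted), bringing up hci0:
 *
 *	int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *	ioctl(dd, HCIDEVUP, 0);
 */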
1985
1986 static int hci_dev_do_close(struct hci_dev *hdev)
1987 {
1988         BT_DBG("%s %p", hdev->name, hdev);
1989
1990         cancel_delayed_work(&hdev->power_off);
1991
1992         hci_req_cancel(hdev, ENODEV);
1993         hci_req_lock(hdev);
1994
1995         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
1996                 del_timer_sync(&hdev->cmd_timer);
1997                 hci_req_unlock(hdev);
1998                 return 0;
1999         }
2000
2001         /* Flush RX and TX work items */
2002         flush_work(&hdev->tx_work);
2003         flush_work(&hdev->rx_work);
2004
2005         if (hdev->discov_timeout > 0) {
2006                 cancel_delayed_work(&hdev->discov_off);
2007                 hdev->discov_timeout = 0;
2008                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2009                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2010         }
2011
2012         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2013                 cancel_delayed_work(&hdev->service_cache);
2014
2015         cancel_delayed_work_sync(&hdev->le_scan_disable);
2016
2017         hci_dev_lock(hdev);
2018         hci_inquiry_cache_flush(hdev);
2019         hci_conn_hash_flush(hdev);
2020         hci_dev_unlock(hdev);
2021
2022         hci_notify(hdev, HCI_DEV_DOWN);
2023
2024         if (hdev->flush)
2025                 hdev->flush(hdev);
2026
2027         /* Reset device */
2028         skb_queue_purge(&hdev->cmd_q);
2029         atomic_set(&hdev->cmd_cnt, 1);
2030         if (!test_bit(HCI_RAW, &hdev->flags) &&
2031             !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2032             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2033                 set_bit(HCI_INIT, &hdev->flags);
2034                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2035                 clear_bit(HCI_INIT, &hdev->flags);
2036         }
2037
2038         /* Flush cmd work */
2039         flush_work(&hdev->cmd_work);
2040
2041         /* Drop queues */
2042         skb_queue_purge(&hdev->rx_q);
2043         skb_queue_purge(&hdev->cmd_q);
2044         skb_queue_purge(&hdev->raw_q);
2045
2046         /* Drop last sent command */
2047         if (hdev->sent_cmd) {
2048                 del_timer_sync(&hdev->cmd_timer);
2049                 kfree_skb(hdev->sent_cmd);
2050                 hdev->sent_cmd = NULL;
2051         }
2052
2053         kfree_skb(hdev->recv_evt);
2054         hdev->recv_evt = NULL;
2055
2056         /* After this point our queues are empty
2057          * and no tasks are scheduled. */
2058         hdev->close(hdev);
2059
2060         /* Clear flags */
2061         hdev->flags = 0;
2062         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2063
2064         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2065                 if (hdev->dev_type == HCI_BREDR) {
2066                         hci_dev_lock(hdev);
2067                         mgmt_powered(hdev, 0);
2068                         hci_dev_unlock(hdev);
2069                 }
2070         }
2071
2072         /* Controller radio is available but is currently powered down */
2073         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2074
2075         memset(hdev->eir, 0, sizeof(hdev->eir));
2076         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2077
2078         hci_req_unlock(hdev);
2079
2080         hci_dev_put(hdev);
2081         return 0;
2082 }
2083
2084 int hci_dev_close(__u16 dev)
2085 {
2086         struct hci_dev *hdev;
2087         int err;
2088
2089         hdev = hci_dev_get(dev);
2090         if (!hdev)
2091                 return -ENODEV;
2092
2093         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2094                 err = -EBUSY;
2095                 goto done;
2096         }
2097
2098         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2099                 cancel_delayed_work(&hdev->power_off);
2100
2101         err = hci_dev_do_close(hdev);
2102
2103 done:
2104         hci_dev_put(hdev);
2105         return err;
2106 }
2107
2108 int hci_dev_reset(__u16 dev)
2109 {
2110         struct hci_dev *hdev;
2111         int ret = 0;
2112
2113         hdev = hci_dev_get(dev);
2114         if (!hdev)
2115                 return -ENODEV;
2116
2117         hci_req_lock(hdev);
2118
2119         if (!test_bit(HCI_UP, &hdev->flags)) {
2120                 ret = -ENETDOWN;
2121                 goto done;
2122         }
2123
2124         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2125                 ret = -EBUSY;
2126                 goto done;
2127         }
2128
2129         /* Drop queues */
2130         skb_queue_purge(&hdev->rx_q);
2131         skb_queue_purge(&hdev->cmd_q);
2132
2133         hci_dev_lock(hdev);
2134         hci_inquiry_cache_flush(hdev);
2135         hci_conn_hash_flush(hdev);
2136         hci_dev_unlock(hdev);
2137
2138         if (hdev->flush)
2139                 hdev->flush(hdev);
2140
2141         atomic_set(&hdev->cmd_cnt, 1);
2142         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2143
2144         if (!test_bit(HCI_RAW, &hdev->flags))
2145                 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2146
2147 done:
2148         hci_req_unlock(hdev);
2149         hci_dev_put(hdev);
2150         return ret;
2151 }
2152
2153 int hci_dev_reset_stat(__u16 dev)
2154 {
2155         struct hci_dev *hdev;
2156         int ret = 0;
2157
2158         hdev = hci_dev_get(dev);
2159         if (!hdev)
2160                 return -ENODEV;
2161
2162         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2163                 ret = -EBUSY;
2164                 goto done;
2165         }
2166
2167         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2168
2169 done:
2170         hci_dev_put(hdev);
2171         return ret;
2172 }
2173
2174 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2175 {
2176         struct hci_dev *hdev;
2177         struct hci_dev_req dr;
2178         int err = 0;
2179
2180         if (copy_from_user(&dr, arg, sizeof(dr)))
2181                 return -EFAULT;
2182
2183         hdev = hci_dev_get(dr.dev_id);
2184         if (!hdev)
2185                 return -ENODEV;
2186
2187         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2188                 err = -EBUSY;
2189                 goto done;
2190         }
2191
2192         if (hdev->dev_type != HCI_BREDR) {
2193                 err = -EOPNOTSUPP;
2194                 goto done;
2195         }
2196
2197         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2198                 err = -EOPNOTSUPP;
2199                 goto done;
2200         }
2201
2202         switch (cmd) {
2203         case HCISETAUTH:
2204                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2205                                    HCI_INIT_TIMEOUT);
2206                 break;
2207
2208         case HCISETENCRYPT:
2209                 if (!lmp_encrypt_capable(hdev)) {
2210                         err = -EOPNOTSUPP;
2211                         break;
2212                 }
2213
2214                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2215                         /* Auth must be enabled first */
2216                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2217                                            HCI_INIT_TIMEOUT);
2218                         if (err)
2219                                 break;
2220                 }
2221
2222                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2223                                    HCI_INIT_TIMEOUT);
2224                 break;
2225
2226         case HCISETSCAN:
2227                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2228                                    HCI_INIT_TIMEOUT);
2229                 break;
2230
2231         case HCISETLINKPOL:
2232                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2233                                    HCI_INIT_TIMEOUT);
2234                 break;
2235
2236         case HCISETLINKMODE:
2237                 hdev->link_mode = ((__u16) dr.dev_opt) &
2238                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2239                 break;
2240
2241         case HCISETPTYPE:
2242                 hdev->pkt_type = (__u16) dr.dev_opt;
2243                 break;
2244
2245         case HCISETACLMTU:
2246                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2247                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2248                 break;
2249
2250         case HCISETSCOMTU:
2251                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2252                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2253                 break;
2254
2255         default:
2256                 err = -EINVAL;
2257                 break;
2258         }
2259
2260 done:
2261         hci_dev_put(hdev);
2262         return err;
2263 }
2264
2265 int hci_get_dev_list(void __user *arg)
2266 {
2267         struct hci_dev *hdev;
2268         struct hci_dev_list_req *dl;
2269         struct hci_dev_req *dr;
2270         int n = 0, size, err;
2271         __u16 dev_num;
2272
2273         if (get_user(dev_num, (__u16 __user *) arg))
2274                 return -EFAULT;
2275
2276         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2277                 return -EINVAL;
2278
2279         size = sizeof(*dl) + dev_num * sizeof(*dr);
2280
2281         dl = kzalloc(size, GFP_KERNEL);
2282         if (!dl)
2283                 return -ENOMEM;
2284
2285         dr = dl->dev_req;
2286
2287         read_lock(&hci_dev_list_lock);
2288         list_for_each_entry(hdev, &hci_dev_list, list) {
2289                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2290                         cancel_delayed_work(&hdev->power_off);
2291
2292                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2293                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2294
2295                 (dr + n)->dev_id  = hdev->id;
2296                 (dr + n)->dev_opt = hdev->flags;
2297
2298                 if (++n >= dev_num)
2299                         break;
2300         }
2301         read_unlock(&hci_dev_list_lock);
2302
2303         dl->dev_num = n;
2304         size = sizeof(*dl) + n * sizeof(*dr);
2305
2306         err = copy_to_user(arg, dl, size);
2307         kfree(dl);
2308
2309         return err ? -EFAULT : 0;
2310 }
2311
2312 int hci_get_dev_info(void __user *arg)
2313 {
2314         struct hci_dev *hdev;
2315         struct hci_dev_info di;
2316         int err = 0;
2317
2318         if (copy_from_user(&di, arg, sizeof(di)))
2319                 return -EFAULT;
2320
2321         hdev = hci_dev_get(di.dev_id);
2322         if (!hdev)
2323                 return -ENODEV;
2324
2325         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2326                 cancel_delayed_work_sync(&hdev->power_off);
2327
2328         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2329                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2330
2331         strcpy(di.name, hdev->name);
2332         di.bdaddr   = hdev->bdaddr;
2333         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2334         di.flags    = hdev->flags;
2335         di.pkt_type = hdev->pkt_type;
2336         if (lmp_bredr_capable(hdev)) {
2337                 di.acl_mtu  = hdev->acl_mtu;
2338                 di.acl_pkts = hdev->acl_pkts;
2339                 di.sco_mtu  = hdev->sco_mtu;
2340                 di.sco_pkts = hdev->sco_pkts;
2341         } else {
2342                 di.acl_mtu  = hdev->le_mtu;
2343                 di.acl_pkts = hdev->le_pkts;
2344                 di.sco_mtu  = 0;
2345                 di.sco_pkts = 0;
2346         }
2347         di.link_policy = hdev->link_policy;
2348         di.link_mode   = hdev->link_mode;
2349
2350         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2351         memcpy(&di.features, &hdev->features, sizeof(di.features));
2352
2353         if (copy_to_user(arg, &di, sizeof(di)))
2354                 err = -EFAULT;
2355
2356         hci_dev_put(hdev);
2357
2358         return err;
2359 }
2360
2361 /* ---- Interface to HCI drivers ---- */
2362
2363 static int hci_rfkill_set_block(void *data, bool blocked)
2364 {
2365         struct hci_dev *hdev = data;
2366
2367         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2368
2369         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2370                 return -EBUSY;
2371
2372         if (blocked) {
2373                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2374                 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2375                         hci_dev_do_close(hdev);
2376         } else {
2377                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2378         }
2379
2380         return 0;
2381 }
2382
2383 static const struct rfkill_ops hci_rfkill_ops = {
2384         .set_block = hci_rfkill_set_block,
2385 };
2386
2387 static void hci_power_on(struct work_struct *work)
2388 {
2389         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2390         int err;
2391
2392         BT_DBG("%s", hdev->name);
2393
2394         err = hci_dev_do_open(hdev);
2395         if (err < 0) {
2396                 mgmt_set_powered_failed(hdev, err);
2397                 return;
2398         }
2399
2400         /* During the HCI setup phase, a few error conditions are
2401          * ignored and they need to be checked now. If they are still
2402          * valid, it is important to turn the device back off.
2403          */
2404         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2405             (hdev->dev_type == HCI_BREDR &&
2406              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2407              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2408                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2409                 hci_dev_do_close(hdev);
2410         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2411                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2412                                    HCI_AUTO_OFF_TIMEOUT);
2413         }
2414
2415         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2416                 mgmt_index_added(hdev);
2417 }
2418
2419 static void hci_power_off(struct work_struct *work)
2420 {
2421         struct hci_dev *hdev = container_of(work, struct hci_dev,
2422                                             power_off.work);
2423
2424         BT_DBG("%s", hdev->name);
2425
2426         hci_dev_do_close(hdev);
2427 }
2428
2429 static void hci_discov_off(struct work_struct *work)
2430 {
2431         struct hci_dev *hdev;
2432
2433         hdev = container_of(work, struct hci_dev, discov_off.work);
2434
2435         BT_DBG("%s", hdev->name);
2436
2437         mgmt_discoverable_timeout(hdev);
2438 }
2439
2440 int hci_uuids_clear(struct hci_dev *hdev)
2441 {
2442         struct bt_uuid *uuid, *tmp;
2443
2444         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2445                 list_del(&uuid->list);
2446                 kfree(uuid);
2447         }
2448
2449         return 0;
2450 }
2451
2452 int hci_link_keys_clear(struct hci_dev *hdev)
2453 {
2454         struct list_head *p, *n;
2455
2456         list_for_each_safe(p, n, &hdev->link_keys) {
2457                 struct link_key *key;
2458
2459                 key = list_entry(p, struct link_key, list);
2460
2461                 list_del(p);
2462                 kfree(key);
2463         }
2464
2465         return 0;
2466 }
2467
2468 int hci_smp_ltks_clear(struct hci_dev *hdev)
2469 {
2470         struct smp_ltk *k, *tmp;
2471
2472         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2473                 list_del(&k->list);
2474                 kfree(k);
2475         }
2476
2477         return 0;
2478 }
2479
2480 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2481 {
2482         struct link_key *k;
2483
2484         list_for_each_entry(k, &hdev->link_keys, list)
2485                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2486                         return k;
2487
2488         return NULL;
2489 }
2490
2491 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2492                                u8 key_type, u8 old_key_type)
2493 {
2494         /* Legacy key */
2495         if (key_type < 0x03)
2496                 return true;
2497
2498         /* Debug keys are insecure so don't store them persistently */
2499         if (key_type == HCI_LK_DEBUG_COMBINATION)
2500                 return false;
2501
2502         /* Changed combination key and there's no previous one */
2503         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2504                 return false;
2505
2506         /* Security mode 3 case */
2507         if (!conn)
2508                 return true;
2509
2510         /* Neither local nor remote side listed no-bonding as a requirement */
2511         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2512                 return true;
2513
2514         /* Local side had dedicated bonding as requirement */
2515         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2516                 return true;
2517
2518         /* Remote side had dedicated bonding as requirement */
2519         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2520                 return true;
2521
2522         /* If none of the above criteria match, then don't store the key
2523          * persistently */
2524         return false;
2525 }
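
/* Worked example of the rules above: an unauthenticated combination key
 * (type 0x04) created while the local side used dedicated bonding
 * (auth_type 0x02) hits the dedicated-bonding check and is stored
 * persistently. The same key type negotiated with no-bonding on both
 * sides (auth_type and remote_auth both 0x00) falls through every check
 * and is kept only for the lifetime of the connection.
 */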
2526
2527 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
2528 {
2529         struct smp_ltk *k;
2530
2531         list_for_each_entry(k, &hdev->long_term_keys, list) {
2532                 if (k->ediv != ediv ||
2533                     memcmp(rand, k->rand, sizeof(k->rand)))
2534                         continue;
2535
2536                 return k;
2537         }
2538
2539         return NULL;
2540 }
2541
2542 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2543                                      u8 addr_type)
2544 {
2545         struct smp_ltk *k;
2546
2547         list_for_each_entry(k, &hdev->long_term_keys, list)
2548                 if (addr_type == k->bdaddr_type &&
2549                     bacmp(bdaddr, &k->bdaddr) == 0)
2550                         return k;
2551
2552         return NULL;
2553 }
2554
2555 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2556                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2557 {
2558         struct link_key *key, *old_key;
2559         u8 old_key_type;
2560         bool persistent;
2561
2562         old_key = hci_find_link_key(hdev, bdaddr);
2563         if (old_key) {
2564                 old_key_type = old_key->type;
2565                 key = old_key;
2566         } else {
2567                 old_key_type = conn ? conn->key_type : 0xff;
2568                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2569                 if (!key)
2570                         return -ENOMEM;
2571                 list_add(&key->list, &hdev->link_keys);
2572         }
2573
2574         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
2575
2576         /* Some buggy controller combinations generate a changed
2577          * combination key for legacy pairing even when there's no
2578          * previous key */
2579         if (type == HCI_LK_CHANGED_COMBINATION &&
2580             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
2581                 type = HCI_LK_COMBINATION;
2582                 if (conn)
2583                         conn->key_type = type;
2584         }
2585
2586         bacpy(&key->bdaddr, bdaddr);
2587         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
2588         key->pin_len = pin_len;
2589
2590         if (type == HCI_LK_CHANGED_COMBINATION)
2591                 key->type = old_key_type;
2592         else
2593                 key->type = type;
2594
2595         if (!new_key)
2596                 return 0;
2597
2598         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2599
2600         mgmt_new_link_key(hdev, key, persistent);
2601
2602         if (conn)
2603                 conn->flush_key = !persistent;
2604
2605         return 0;
2606 }
2607
2608 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
2609                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
2610                 ediv, u8 rand[8])
2611 {
2612         struct smp_ltk *key, *old_key;
2613
2614         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2615                 return 0;
2616
2617         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2618         if (old_key) {
2619                 key = old_key;
2620         } else {
2621                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2622                 if (!key)
2623                         return -ENOMEM;
2624                 list_add(&key->list, &hdev->long_term_keys);
2625         }
2626
2627         bacpy(&key->bdaddr, bdaddr);
2628         key->bdaddr_type = addr_type;
2629         memcpy(key->val, tk, sizeof(key->val));
2630         key->authenticated = authenticated;
2631         key->ediv = ediv;
2632         key->enc_size = enc_size;
2633         key->type = type;
2634         memcpy(key->rand, rand, sizeof(key->rand));
2635
2636         if (!new_key)
2637                 return 0;
2638
2639         if (type & HCI_SMP_LTK)
2640                 mgmt_new_ltk(hdev, key, 1);
2641
2642         return 0;
2643 }
2644
2645 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2646 {
2647         struct link_key *key;
2648
2649         key = hci_find_link_key(hdev, bdaddr);
2650         if (!key)
2651                 return -ENOENT;
2652
2653         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2654
2655         list_del(&key->list);
2656         kfree(key);
2657
2658         return 0;
2659 }
2660
2661 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2662 {
2663         struct smp_ltk *k, *tmp;
2664
2665         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2666                 if (bacmp(bdaddr, &k->bdaddr))
2667                         continue;
2668
2669                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2670
2671                 list_del(&k->list);
2672                 kfree(k);
2673         }
2674
2675         return 0;
2676 }
2677
2678 /* HCI command timer function */
2679 static void hci_cmd_timeout(unsigned long arg)
2680 {
2681         struct hci_dev *hdev = (void *) arg;
2682
2683         if (hdev->sent_cmd) {
2684                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2685                 u16 opcode = __le16_to_cpu(sent->opcode);
2686
2687                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2688         } else {
2689                 BT_ERR("%s command tx timeout", hdev->name);
2690         }
2691
2692         atomic_set(&hdev->cmd_cnt, 1);
2693         queue_work(hdev->workqueue, &hdev->cmd_work);
2694 }
2695
2696 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
2697                                           bdaddr_t *bdaddr)
2698 {
2699         struct oob_data *data;
2700
2701         list_for_each_entry(data, &hdev->remote_oob_data, list)
2702                 if (bacmp(bdaddr, &data->bdaddr) == 0)
2703                         return data;
2704
2705         return NULL;
2706 }
2707
2708 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2709 {
2710         struct oob_data *data;
2711
2712         data = hci_find_remote_oob_data(hdev, bdaddr);
2713         if (!data)
2714                 return -ENOENT;
2715
2716         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2717
2718         list_del(&data->list);
2719         kfree(data);
2720
2721         return 0;
2722 }
2723
2724 int hci_remote_oob_data_clear(struct hci_dev *hdev)
2725 {
2726         struct oob_data *data, *n;
2727
2728         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2729                 list_del(&data->list);
2730                 kfree(data);
2731         }
2732
2733         return 0;
2734 }
2735
2736 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
2737                             u8 *randomizer)
2738 {
2739         struct oob_data *data;
2740
2741         data = hci_find_remote_oob_data(hdev, bdaddr);
2742
2743         if (!data) {
2744                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2745                 if (!data)
2746                         return -ENOMEM;
2747
2748                 bacpy(&data->bdaddr, bdaddr);
2749                 list_add(&data->list, &hdev->remote_oob_data);
2750         }
2751
2752         memcpy(data->hash, hash, sizeof(data->hash));
2753         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2754
2755         BT_DBG("%s for %pMR", hdev->name, bdaddr);
2756
2757         return 0;
2758 }
2759
2760 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2761                                          bdaddr_t *bdaddr, u8 type)
2762 {
2763         struct bdaddr_list *b;
2764
2765         list_for_each_entry(b, &hdev->blacklist, list) {
2766                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
2767                         return b;
2768         }
2769
2770         return NULL;
2771 }
2772
2773 int hci_blacklist_clear(struct hci_dev *hdev)
2774 {
2775         struct list_head *p, *n;
2776
2777         list_for_each_safe(p, n, &hdev->blacklist) {
2778                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
2779
2780                 list_del(p);
2781                 kfree(b);
2782         }
2783
2784         return 0;
2785 }
2786
2787 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2788 {
2789         struct bdaddr_list *entry;
2790
2791         if (!bacmp(bdaddr, BDADDR_ANY))
2792                 return -EBADF;
2793
2794         if (hci_blacklist_lookup(hdev, bdaddr, type))
2795                 return -EEXIST;
2796
2797         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
2798         if (!entry)
2799                 return -ENOMEM;
2800
2801         bacpy(&entry->bdaddr, bdaddr);
2802         entry->bdaddr_type = type;
2803
2804         list_add(&entry->list, &hdev->blacklist);
2805
2806         return mgmt_device_blocked(hdev, bdaddr, type);
2807 }
2808
2809 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2810 {
2811         struct bdaddr_list *entry;
2812
2813         if (!bacmp(bdaddr, BDADDR_ANY))
2814                 return hci_blacklist_clear(hdev);
2815
2816         entry = hci_blacklist_lookup(hdev, bdaddr, type);
2817         if (!entry)
2818                 return -ENOENT;
2819
2820         list_del(&entry->list);
2821         kfree(entry);
2822
2823         return mgmt_device_unblocked(hdev, bdaddr, type);
2824 }
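
/* Usage sketch (illustrative): blocking and later unblocking a BR/EDR
 * address, with hdev->lock held as the mgmt code does:
 *
 *	hci_dev_lock(hdev);
 *	err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
 *	...
 *	err = hci_blacklist_del(hdev, &bdaddr, BDADDR_BREDR);
 *	hci_dev_unlock(hdev);
 */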
2825
2826 static void inquiry_complete(struct hci_dev *hdev, u8 status)
2827 {
2828         if (status) {
2829                 BT_ERR("Failed to start inquiry: status %d", status);
2830
2831                 hci_dev_lock(hdev);
2832                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2833                 hci_dev_unlock(hdev);
2834                 return;
2835         }
2836 }
2837
2838 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
2839 {
2840         /* General inquiry access code (GIAC) */
2841         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2842         struct hci_request req;
2843         struct hci_cp_inquiry cp;
2844         int err;
2845
2846         if (status) {
2847                 BT_ERR("Failed to disable LE scanning: status %d", status);
2848                 return;
2849         }
2850
2851         switch (hdev->discovery.type) {
2852         case DISCOV_TYPE_LE:
2853                 hci_dev_lock(hdev);
2854                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2855                 hci_dev_unlock(hdev);
2856                 break;
2857
2858         case DISCOV_TYPE_INTERLEAVED:
2859                 hci_req_init(&req, hdev);
2860
2861                 memset(&cp, 0, sizeof(cp));
2862                 memcpy(&cp.lap, lap, sizeof(cp.lap));
2863                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2864                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2865
2866                 hci_dev_lock(hdev);
2867
2868                 hci_inquiry_cache_flush(hdev);
2869
2870                 err = hci_req_run(&req, inquiry_complete);
2871                 if (err) {
2872                         BT_ERR("Inquiry request failed: err %d", err);
2873                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2874                 }
2875
2876                 hci_dev_unlock(hdev);
2877                 break;
2878         }
2879 }
2880
2881 static void le_scan_disable_work(struct work_struct *work)
2882 {
2883         struct hci_dev *hdev = container_of(work, struct hci_dev,
2884                                             le_scan_disable.work);
2885         struct hci_cp_le_set_scan_enable cp;
2886         struct hci_request req;
2887         int err;
2888
2889         BT_DBG("%s", hdev->name);
2890
2891         hci_req_init(&req, hdev);
2892
2893         memset(&cp, 0, sizeof(cp));
2894         cp.enable = LE_SCAN_DISABLE;
2895         hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2896
2897         err = hci_req_run(&req, le_scan_disable_work_complete);
2898         if (err)
2899                 BT_ERR("Disable LE scanning request failed: err %d", err);
2900 }
2901
2902 /* Alloc HCI device */
2903 struct hci_dev *hci_alloc_dev(void)
2904 {
2905         struct hci_dev *hdev;
2906
2907         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2908         if (!hdev)
2909                 return NULL;
2910
2911         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2912         hdev->esco_type = (ESCO_HV1);
2913         hdev->link_mode = (HCI_LM_ACCEPT);
2914         hdev->num_iac = 0x01;           /* Support for one IAC is mandatory */
2915         hdev->io_capability = 0x03;     /* No Input No Output */
2916         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2917         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
2918
2919         hdev->sniff_max_interval = 800;
2920         hdev->sniff_min_interval = 80;
2921
2922         hdev->le_scan_interval = 0x0060;
2923         hdev->le_scan_window = 0x0030;
2924         hdev->le_conn_min_interval = 0x0028;
2925         hdev->le_conn_max_interval = 0x0038;
2926
2927         mutex_init(&hdev->lock);
2928         mutex_init(&hdev->req_lock);
2929
2930         INIT_LIST_HEAD(&hdev->mgmt_pending);
2931         INIT_LIST_HEAD(&hdev->blacklist);
2932         INIT_LIST_HEAD(&hdev->uuids);
2933         INIT_LIST_HEAD(&hdev->link_keys);
2934         INIT_LIST_HEAD(&hdev->long_term_keys);
2935         INIT_LIST_HEAD(&hdev->remote_oob_data);
2936         INIT_LIST_HEAD(&hdev->conn_hash.list);
2937
2938         INIT_WORK(&hdev->rx_work, hci_rx_work);
2939         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2940         INIT_WORK(&hdev->tx_work, hci_tx_work);
2941         INIT_WORK(&hdev->power_on, hci_power_on);
2942
2943         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2944         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2945         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2946
2947         skb_queue_head_init(&hdev->rx_q);
2948         skb_queue_head_init(&hdev->cmd_q);
2949         skb_queue_head_init(&hdev->raw_q);
2950
2951         init_waitqueue_head(&hdev->req_wait_q);
2952
2953         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
2954
2955         hci_init_sysfs(hdev);
2956         discovery_init(hdev);
2957
2958         return hdev;
2959 }
2960 EXPORT_SYMBOL(hci_alloc_dev);
2961
2962 /* Free HCI device */
2963 void hci_free_dev(struct hci_dev *hdev)
2964 {
2965         /* will free via device release */
2966         put_device(&hdev->dev);
2967 }
2968 EXPORT_SYMBOL(hci_free_dev);
2969
2970 /* Register HCI device */
2971 int hci_register_dev(struct hci_dev *hdev)
2972 {
2973         int id, error;
2974
2975         if (!hdev->open || !hdev->close)
2976                 return -EINVAL;
2977
2978         /* Do not allow HCI_AMP devices to register at index 0,
2979          * so the index can be used as the AMP controller ID.
2980          */
2981         switch (hdev->dev_type) {
2982         case HCI_BREDR:
2983                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2984                 break;
2985         case HCI_AMP:
2986                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2987                 break;
2988         default:
2989                 return -EINVAL;
2990         }
2991
2992         if (id < 0)
2993                 return id;
2994
2995         sprintf(hdev->name, "hci%d", id);
2996         hdev->id = id;
2997
2998         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2999
3000         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3001                                           WQ_MEM_RECLAIM, 1, hdev->name);
3002         if (!hdev->workqueue) {
3003                 error = -ENOMEM;
3004                 goto err;
3005         }
3006
3007         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3008                                               WQ_MEM_RECLAIM, 1, hdev->name);
3009         if (!hdev->req_workqueue) {
3010                 destroy_workqueue(hdev->workqueue);
3011                 error = -ENOMEM;
3012                 goto err;
3013         }
3014
3015         if (!IS_ERR_OR_NULL(bt_debugfs))
3016                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3017
3018         dev_set_name(&hdev->dev, "%s", hdev->name);
3019
3020         error = device_add(&hdev->dev);
3021         if (error < 0)
3022                 goto err_wqueue;
3023
3024         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3025                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3026                                     hdev);
3027         if (hdev->rfkill) {
3028                 if (rfkill_register(hdev->rfkill) < 0) {
3029                         rfkill_destroy(hdev->rfkill);
3030                         hdev->rfkill = NULL;
3031                 }
3032         }
3033
3034         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3035                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3036
3037         set_bit(HCI_SETUP, &hdev->dev_flags);
3038         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3039
3040         if (hdev->dev_type == HCI_BREDR) {
3041                 /* Assume BR/EDR support until proven otherwise (such as
3042                  * through reading supported features during init).
3043                  */
3044                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3045         }
3046
3047         write_lock(&hci_dev_list_lock);
3048         list_add(&hdev->list, &hci_dev_list);
3049         write_unlock(&hci_dev_list_lock);
3050
3051         hci_notify(hdev, HCI_DEV_REG);
3052         hci_dev_hold(hdev);
3053
3054         queue_work(hdev->req_workqueue, &hdev->power_on);
3055
3056         return id;
3057
3058 err_wqueue:
3059         destroy_workqueue(hdev->workqueue);
3060         destroy_workqueue(hdev->req_workqueue);
3061 err:
3062         ida_simple_remove(&hci_index_ida, hdev->id);
3063
3064         return error;
3065 }
3066 EXPORT_SYMBOL(hci_register_dev);
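
/* A minimal sketch of how a transport driver is expected to use the
 * registration interface; my_open, my_close and my_send are placeholder
 * callbacks, not part of this file:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */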
3067
3068 /* Unregister HCI device */
3069 void hci_unregister_dev(struct hci_dev *hdev)
3070 {
3071         int i, id;
3072
3073         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3074
3075         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3076
3077         id = hdev->id;
3078
3079         write_lock(&hci_dev_list_lock);
3080         list_del(&hdev->list);
3081         write_unlock(&hci_dev_list_lock);
3082
3083         hci_dev_do_close(hdev);
3084
3085         for (i = 0; i < NUM_REASSEMBLY; i++)
3086                 kfree_skb(hdev->reassembly[i]);
3087
3088         cancel_work_sync(&hdev->power_on);
3089
3090         if (!test_bit(HCI_INIT, &hdev->flags) &&
3091             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
3092                 hci_dev_lock(hdev);
3093                 mgmt_index_removed(hdev);
3094                 hci_dev_unlock(hdev);
3095         }
3096
3097         /* mgmt_index_removed should take care of emptying the
3098          * pending list */
3099         BUG_ON(!list_empty(&hdev->mgmt_pending));
3100
3101         hci_notify(hdev, HCI_DEV_UNREG);
3102
3103         if (hdev->rfkill) {
3104                 rfkill_unregister(hdev->rfkill);
3105                 rfkill_destroy(hdev->rfkill);
3106         }
3107
3108         device_del(&hdev->dev);
3109
3110         debugfs_remove_recursive(hdev->debugfs);
3111
3112         destroy_workqueue(hdev->workqueue);
3113         destroy_workqueue(hdev->req_workqueue);
3114
3115         hci_dev_lock(hdev);
3116         hci_blacklist_clear(hdev);
3117         hci_uuids_clear(hdev);
3118         hci_link_keys_clear(hdev);
3119         hci_smp_ltks_clear(hdev);
3120         hci_remote_oob_data_clear(hdev);
3121         hci_dev_unlock(hdev);
3122
3123         hci_dev_put(hdev);
3124
3125         ida_simple_remove(&hci_index_ida, id);
3126 }
3127 EXPORT_SYMBOL(hci_unregister_dev);
3128
3129 /* Suspend HCI device */
3130 int hci_suspend_dev(struct hci_dev *hdev)
3131 {
3132         hci_notify(hdev, HCI_DEV_SUSPEND);
3133         return 0;
3134 }
3135 EXPORT_SYMBOL(hci_suspend_dev);
3136
3137 /* Resume HCI device */
3138 int hci_resume_dev(struct hci_dev *hdev)
3139 {
3140         hci_notify(hdev, HCI_DEV_RESUME);
3141         return 0;
3142 }
3143 EXPORT_SYMBOL(hci_resume_dev);
3144
3145 /* Receive frame from HCI drivers */
3146 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
3147 {
3148         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
3149                       !test_bit(HCI_INIT, &hdev->flags))) {
3150                 kfree_skb(skb);
3151                 return -ENXIO;
3152         }
3153
3154         /* Incoming skb */
3155         bt_cb(skb)->incoming = 1;
3156
3157         /* Time stamp */
3158         __net_timestamp(skb);
3159
3160         skb_queue_tail(&hdev->rx_q, skb);
3161         queue_work(hdev->workqueue, &hdev->rx_work);
3162
3163         return 0;
3164 }
3165 EXPORT_SYMBOL(hci_recv_frame);
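
/* Sketch of a driver handing a completed packet up the stack; len and
 * data stand in for whatever the hardware delivered:
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (skb) {
 *		bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *		memcpy(skb_put(skb, len), data, len);
 *		hci_recv_frame(hdev, skb);
 *	}
 */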
3166
3167 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
3168                           int count, __u8 index)
3169 {
3170         int len = 0;
3171         int hlen = 0;
3172         int remain = count;
3173         struct sk_buff *skb;
3174         struct bt_skb_cb *scb;
3175
3176         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
3177             index >= NUM_REASSEMBLY)
3178                 return -EILSEQ;
3179
3180         skb = hdev->reassembly[index];
3181
3182         if (!skb) {
3183                 switch (type) {
3184                 case HCI_ACLDATA_PKT:
3185                         len = HCI_MAX_FRAME_SIZE;
3186                         hlen = HCI_ACL_HDR_SIZE;
3187                         break;
3188                 case HCI_EVENT_PKT:
3189                         len = HCI_MAX_EVENT_SIZE;
3190                         hlen = HCI_EVENT_HDR_SIZE;
3191                         break;
3192                 case HCI_SCODATA_PKT:
3193                         len = HCI_MAX_SCO_SIZE;
3194                         hlen = HCI_SCO_HDR_SIZE;
3195                         break;
3196                 }
3197
3198                 skb = bt_skb_alloc(len, GFP_ATOMIC);
3199                 if (!skb)
3200                         return -ENOMEM;
3201
3202                 scb = (void *) skb->cb;
3203                 scb->expect = hlen;
3204                 scb->pkt_type = type;
3205
3206                 hdev->reassembly[index] = skb;
3207         }
3208
3209         while (count) {
3210                 scb = (void *) skb->cb;
3211                 len = min_t(uint, scb->expect, count);
3212
3213                 memcpy(skb_put(skb, len), data, len);
3214
3215                 count -= len;
3216                 data += len;
3217                 scb->expect -= len;
3218                 remain = count;
3219
3220                 switch (type) {
3221                 case HCI_EVENT_PKT:
3222                         if (skb->len == HCI_EVENT_HDR_SIZE) {
3223                                 struct hci_event_hdr *h = hci_event_hdr(skb);
3224                                 scb->expect = h->plen;
3225
3226                                 if (skb_tailroom(skb) < scb->expect) {
3227                                         kfree_skb(skb);
3228                                         hdev->reassembly[index] = NULL;
3229                                         return -ENOMEM;
3230                                 }
3231                         }
3232                         break;
3233
3234                 case HCI_ACLDATA_PKT:
3235                         if (skb->len == HCI_ACL_HDR_SIZE) {
3236                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3237                                 scb->expect = __le16_to_cpu(h->dlen);
3238
3239                                 if (skb_tailroom(skb) < scb->expect) {
3240                                         kfree_skb(skb);
3241                                         hdev->reassembly[index] = NULL;
3242                                         return -ENOMEM;
3243                                 }
3244                         }
3245                         break;
3246
3247                 case HCI_SCODATA_PKT:
3248                         if (skb->len == HCI_SCO_HDR_SIZE) {
3249                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3250                                 scb->expect = h->dlen;
3251
3252                                 if (skb_tailroom(skb) < scb->expect) {
3253                                         kfree_skb(skb);
3254                                         hdev->reassembly[index] = NULL;
3255                                         return -ENOMEM;
3256                                 }
3257                         }
3258                         break;
3259                 }
3260
3261                 if (scb->expect == 0) {
3262                         /* Complete frame */
3263
3264                         bt_cb(skb)->pkt_type = type;
3265                         hci_recv_frame(hdev, skb);
3266
3267                         hdev->reassembly[index] = NULL;
3268                         return remain;
3269                 }
3270         }
3271
3272         return remain;
3273 }
3274
3275 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3276 {
3277         int rem = 0;
3278
3279         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3280                 return -EILSEQ;
3281
3282         while (count) {
3283                 rem = hci_reassembly(hdev, type, data, count, type - 1);
3284                 if (rem < 0)
3285                         return rem;
3286
3287                 data += (count - rem);
3288                 count = rem;
3289         }
3290
3291         return rem;
3292 }
3293 EXPORT_SYMBOL(hci_recv_fragment);
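
/* Sketch of a driver delivering one event packet in two chunks; the
 * reassembly code above buffers the partial frame in between (buf and
 * len are placeholders):
 *
 *	hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, 3);
 *	hci_recv_fragment(hdev, HCI_EVENT_PKT, buf + 3, len - 3);
 */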
3294
3295 #define STREAM_REASSEMBLY 0
3296
3297 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3298 {
3299         int type;
3300         int rem = 0;
3301
3302         while (count) {
3303                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3304
3305                 if (!skb) {
3306                         struct { char type; } *pkt;
3307
3308                         /* Start of the frame */
3309                         pkt = data;
3310                         type = pkt->type;
3311
3312                         data++;
3313                         count--;
3314                 } else
3315                         type = bt_cb(skb)->pkt_type;
3316
3317                 rem = hci_reassembly(hdev, type, data, count,
3318                                      STREAM_REASSEMBLY);
3319                 if (rem < 0)
3320                         return rem;
3321
3322                 data += (count - rem);
3323                 count = rem;
3324         }
3325
3326         return rem;
3327 }
3328 EXPORT_SYMBOL(hci_recv_stream_fragment);
3329
3330 /* ---- Interface to upper protocols ---- */
3331
3332 int hci_register_cb(struct hci_cb *cb)
3333 {
3334         BT_DBG("%p name %s", cb, cb->name);
3335
3336         write_lock(&hci_cb_list_lock);
3337         list_add(&cb->list, &hci_cb_list);
3338         write_unlock(&hci_cb_list_lock);
3339
3340         return 0;
3341 }
3342 EXPORT_SYMBOL(hci_register_cb);
3343
3344 int hci_unregister_cb(struct hci_cb *cb)
3345 {
3346         BT_DBG("%p name %s", cb, cb->name);
3347
3348         write_lock(&hci_cb_list_lock);
3349         list_del(&cb->list);
3350         write_unlock(&hci_cb_list_lock);
3351
3352         return 0;
3353 }
3354 EXPORT_SYMBOL(hci_unregister_cb);
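     /* Upper protocols hook into connection events by registering a
      * struct hci_cb. A minimal sketch (my_proto_cb and "my_proto" are
      * purely illustrative names):
      *
      *	static struct hci_cb my_proto_cb = {
      *		.name = "my_proto",
      *	};
      *
      *	hci_register_cb(&my_proto_cb);
      *	...
      *	hci_unregister_cb(&my_proto_cb);
      */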
3355
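     /* Hand one frame to the driver: timestamp it, mirror it to the
      * monitor and to promiscuous sockets, then pass ownership on via
      * hdev->send().
      */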
3356 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
3357 {
3358         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
3359
3360         /* Time stamp */
3361         __net_timestamp(skb);
3362
3363         /* Send copy to monitor */
3364         hci_send_to_monitor(hdev, skb);
3365
3366         if (atomic_read(&hdev->promisc)) {
3367                 /* Send copy to the sockets */
3368                 hci_send_to_sock(hdev, skb);
3369         }
3370
3371         /* Get rid of skb owner, prior to sending to the driver. */
3372         skb_orphan(skb);
3373
3374         if (hdev->send(hdev, skb) < 0)
3375                 BT_ERR("%s sending frame failed", hdev->name);
3376 }
3377
3378 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3379 {
3380         skb_queue_head_init(&req->cmd_q);
3381         req->hdev = hdev;
3382         req->err = 0;
3383 }
3384
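     /* Submit a built request: attach the completion callback to the
      * last queued command and splice the whole batch onto hdev->cmd_q
      * atomically. Returns -ENODATA for an empty request.
      */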
3385 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3386 {
3387         struct hci_dev *hdev = req->hdev;
3388         struct sk_buff *skb;
3389         unsigned long flags;
3390
3391         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3392
3393         /* If an error occurred during request building, remove all HCI
3394          * commands queued on the HCI request queue.
3395          */
3396         if (req->err) {
3397                 skb_queue_purge(&req->cmd_q);
3398                 return req->err;
3399         }
3400
3401         /* Do not allow empty requests */
3402         if (skb_queue_empty(&req->cmd_q))
3403                 return -ENODATA;
3404
3405         skb = skb_peek_tail(&req->cmd_q);
3406         bt_cb(skb)->req.complete = complete;
3407
3408         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3409         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3410         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3411
3412         queue_work(hdev->workqueue, &hdev->cmd_work);
3413
3414         return 0;
3415 }
3416
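     /* Allocate an skb carrying a single HCI command (header plus
      * optional parameters), ready for queueing on the command queue.
      */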
3417 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
3418                                        u32 plen, const void *param)
3419 {
3420         int len = HCI_COMMAND_HDR_SIZE + plen;
3421         struct hci_command_hdr *hdr;
3422         struct sk_buff *skb;
3423
3424         skb = bt_skb_alloc(len, GFP_ATOMIC);
3425         if (!skb)
3426                 return NULL;
3427
3428         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
3429         hdr->opcode = cpu_to_le16(opcode);
3430         hdr->plen   = plen;
3431
3432         if (plen)
3433                 memcpy(skb_put(skb, plen), param, plen);
3434
3435         BT_DBG("skb len %d", skb->len);
3436
3437         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
3438
3439         return skb;
3440 }
3441
3442 /* Send HCI command */
3443 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3444                  const void *param)
3445 {
3446         struct sk_buff *skb;
3447
3448         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3449
3450         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3451         if (!skb) {
3452                 BT_ERR("%s no memory for command", hdev->name);
3453                 return -ENOMEM;
3454         }
3455
3456         /* Stand-alone HCI commands must be flagged as
3457          * single-command requests.
3458          */
3459         bt_cb(skb)->req.start = true;
3460
3461         skb_queue_tail(&hdev->cmd_q, skb);
3462         queue_work(hdev->workqueue, &hdev->cmd_work);
3463
3464         return 0;
3465 }
3466
3467 /* Queue a command to an asynchronous HCI request */
3468 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3469                     const void *param, u8 event)
3470 {
3471         struct hci_dev *hdev = req->hdev;
3472         struct sk_buff *skb;
3473
3474         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3475
3476         /* If an error occurred during request building, there is no point in
3477          * queueing the HCI command. We can simply return.
3478          */
3479         if (req->err)
3480                 return;
3481
3482         skb = hci_prepare_cmd(hdev, opcode, plen, param);
3483         if (!skb) {
3484                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3485                        hdev->name, opcode);
3486                 req->err = -ENOMEM;
3487                 return;
3488         }
3489
3490         if (skb_queue_empty(&req->cmd_q))
3491                 bt_cb(skb)->req.start = true;
3492
3493         bt_cb(skb)->req.event = event;
3494
3495         skb_queue_tail(&req->cmd_q, skb);
3496 }
3497
3498 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3499                  const void *param)
3500 {
3501         hci_req_add_ev(req, opcode, plen, param, 0);
3502 }
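     /* Typical use of the request API, as a sketch (reset_complete is a
      * caller-provided hci_req_complete_t, not defined in this file):
      *
      *	struct hci_request req;
      *
      *	hci_req_init(&req, hdev);
      *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
      *	err = hci_req_run(&req, reset_complete);
      */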
3503
3504 /* Get data from the previously sent command */
3505 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
3506 {
3507         struct hci_command_hdr *hdr;
3508
3509         if (!hdev->sent_cmd)
3510                 return NULL;
3511
3512         hdr = (void *) hdev->sent_cmd->data;
3513
3514         if (hdr->opcode != cpu_to_le16(opcode))
3515                 return NULL;
3516
3517         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
3518
3519         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3520 }
3521
3522 /* Send ACL data */
3523 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3524 {
3525         struct hci_acl_hdr *hdr;
3526         int len = skb->len;
3527
3528         skb_push(skb, HCI_ACL_HDR_SIZE);
3529         skb_reset_transport_header(skb);
3530         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
3531         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3532         hdr->dlen   = cpu_to_le16(len);
3533 }
3534
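     /* Prepend ACL headers and queue a possibly fragmented ACL packet.
      * The first fragment keeps the caller's flags; fragments hanging
      * off frag_list are re-flagged with ACL_CONT and queued under the
      * queue lock so the frame stays contiguous.
      */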
3535 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
3536                           struct sk_buff *skb, __u16 flags)
3537 {
3538         struct hci_conn *conn = chan->conn;
3539         struct hci_dev *hdev = conn->hdev;
3540         struct sk_buff *list;
3541
3542         skb->len = skb_headlen(skb);
3543         skb->data_len = 0;
3544
3545         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3546
3547         switch (hdev->dev_type) {
3548         case HCI_BREDR:
3549                 hci_add_acl_hdr(skb, conn->handle, flags);
3550                 break;
3551         case HCI_AMP:
3552                 hci_add_acl_hdr(skb, chan->handle, flags);
3553                 break;
3554         default:
3555                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3556                 return;
3557         }
3558
3559         list = skb_shinfo(skb)->frag_list;
3560         if (!list) {
3561                 /* Non-fragmented */
3562                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3563
3564                 skb_queue_tail(queue, skb);
3565         } else {
3566                 /* Fragmented */
3567                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3568
3569                 skb_shinfo(skb)->frag_list = NULL;
3570
3571                 /* Queue all fragments atomically */
3572                 spin_lock(&queue->lock);
3573
3574                 __skb_queue_tail(queue, skb);
3575
3576                 flags &= ~ACL_START;
3577                 flags |= ACL_CONT;
3578                 do {
3579                         skb = list; list = list->next;
3580
3581                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
3582                         hci_add_acl_hdr(skb, conn->handle, flags);
3583
3584                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3585
3586                         __skb_queue_tail(queue, skb);
3587                 } while (list);
3588
3589                 spin_unlock(&queue->lock);
3590         }
3591 }
3592
3593 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3594 {
3595         struct hci_dev *hdev = chan->conn->hdev;
3596
3597         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
3598
3599         hci_queue_acl(chan, &chan->data_q, skb, flags);
3600
3601         queue_work(hdev->workqueue, &hdev->tx_work);
3602 }
3603
3604 /* Send SCO data */
3605 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
3606 {
3607         struct hci_dev *hdev = conn->hdev;
3608         struct hci_sco_hdr hdr;
3609
3610         BT_DBG("%s len %d", hdev->name, skb->len);
3611
3612         hdr.handle = cpu_to_le16(conn->handle);
3613         hdr.dlen   = skb->len;
3614
3615         skb_push(skb, HCI_SCO_HDR_SIZE);
3616         skb_reset_transport_header(skb);
3617         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
3618
3619         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
3620
3621         skb_queue_tail(&conn->data_q, skb);
3622         queue_work(hdev->workqueue, &hdev->tx_work);
3623 }
3624
3625 /* ---- HCI TX task (outgoing data) ---- */
3626
3627 /* HCI Connection scheduler */
3628 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3629                                      int *quote)
3630 {
3631         struct hci_conn_hash *h = &hdev->conn_hash;
3632         struct hci_conn *conn = NULL, *c;
3633         unsigned int num = 0, min = ~0;
3634
3635         /* We don't have to lock the device here. Connections are always
3636          * added and removed with the TX task disabled. */
3637
3638         rcu_read_lock();
3639
3640         list_for_each_entry_rcu(c, &h->list, list) {
3641                 if (c->type != type || skb_queue_empty(&c->data_q))
3642                         continue;
3643
3644                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3645                         continue;
3646
3647                 num++;
3648
3649                 if (c->sent < min) {
3650                         min  = c->sent;
3651                         conn = c;
3652                 }
3653
3654                 if (hci_conn_num(hdev, type) == num)
3655                         break;
3656         }
3657
3658         rcu_read_unlock();
3659
3660         if (conn) {
3661                 int cnt, q;
3662
3663                 switch (conn->type) {
3664                 case ACL_LINK:
3665                         cnt = hdev->acl_cnt;
3666                         break;
3667                 case SCO_LINK:
3668                 case ESCO_LINK:
3669                         cnt = hdev->sco_cnt;
3670                         break;
3671                 case LE_LINK:
3672                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3673                         break;
3674                 default:
3675                         cnt = 0;
3676                         BT_ERR("Unknown link type");
3677                 }
3678
3679                 q = cnt / num;
3680                 *quote = q ? q : 1;
3681         } else
3682                 *quote = 0;
3683
3684         BT_DBG("conn %p quote %d", conn, *quote);
3685         return conn;
3686 }
3687
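     /* TX timeout handling: forcibly disconnect every connection of the
      * given link type that still has unacknowledged packets in flight.
      */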
3688 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
3689 {
3690         struct hci_conn_hash *h = &hdev->conn_hash;
3691         struct hci_conn *c;
3692
3693         BT_ERR("%s link tx timeout", hdev->name);
3694
3695         rcu_read_lock();
3696
3697         /* Kill stalled connections */
3698         list_for_each_entry_rcu(c, &h->list, list) {
3699                 if (c->type == type && c->sent) {
3700                         BT_ERR("%s killing stalled connection %pMR",
3701                                hdev->name, &c->dst);
3702                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
3703                 }
3704         }
3705
3706         rcu_read_unlock();
3707 }
3708
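     /* Channel scheduler: among connections of the given type, pick the
      * channel whose queued data has the highest priority, preferring
      * the connection with the fewest packets in flight, and return its
      * fair share of the controller buffers in *quote.
      */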
3709 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3710                                       int *quote)
3711 {
3712         struct hci_conn_hash *h = &hdev->conn_hash;
3713         struct hci_chan *chan = NULL;
3714         unsigned int num = 0, min = ~0, cur_prio = 0;
3715         struct hci_conn *conn;
3716         int cnt, q, conn_num = 0;
3717
3718         BT_DBG("%s", hdev->name);
3719
3720         rcu_read_lock();
3721
3722         list_for_each_entry_rcu(conn, &h->list, list) {
3723                 struct hci_chan *tmp;
3724
3725                 if (conn->type != type)
3726                         continue;
3727
3728                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3729                         continue;
3730
3731                 conn_num++;
3732
3733                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
3734                         struct sk_buff *skb;
3735
3736                         if (skb_queue_empty(&tmp->data_q))
3737                                 continue;
3738
3739                         skb = skb_peek(&tmp->data_q);
3740                         if (skb->priority < cur_prio)
3741                                 continue;
3742
3743                         if (skb->priority > cur_prio) {
3744                                 num = 0;
3745                                 min = ~0;
3746                                 cur_prio = skb->priority;
3747                         }
3748
3749                         num++;
3750
3751                         if (conn->sent < min) {
3752                                 min  = conn->sent;
3753                                 chan = tmp;
3754                         }
3755                 }
3756
3757                 if (hci_conn_num(hdev, type) == conn_num)
3758                         break;
3759         }
3760
3761         rcu_read_unlock();
3762
3763         if (!chan)
3764                 return NULL;
3765
3766         switch (chan->conn->type) {
3767         case ACL_LINK:
3768                 cnt = hdev->acl_cnt;
3769                 break;
3770         case AMP_LINK:
3771                 cnt = hdev->block_cnt;
3772                 break;
3773         case SCO_LINK:
3774         case ESCO_LINK:
3775                 cnt = hdev->sco_cnt;
3776                 break;
3777         case LE_LINK:
3778                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3779                 break;
3780         default:
3781                 cnt = 0;
3782                 BT_ERR("Unknown link type");
3783         }
3784
3785         q = cnt / num;
3786         *quote = q ? q : 1;
3787         BT_DBG("chan %p quote %d", chan, *quote);
3788         return chan;
3789 }
3790
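     /* Priority aging: a channel that sent nothing in the last
      * scheduling round gets the head of its queue promoted to
      * HCI_PRIO_MAX - 1 so higher-priority traffic cannot starve it
      * indefinitely.
      */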
3791 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3792 {
3793         struct hci_conn_hash *h = &hdev->conn_hash;
3794         struct hci_conn *conn;
3795         int num = 0;
3796
3797         BT_DBG("%s", hdev->name);
3798
3799         rcu_read_lock();
3800
3801         list_for_each_entry_rcu(conn, &h->list, list) {
3802                 struct hci_chan *chan;
3803
3804                 if (conn->type != type)
3805                         continue;
3806
3807                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3808                         continue;
3809
3810                 num++;
3811
3812                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
3813                         struct sk_buff *skb;
3814
3815                         if (chan->sent) {
3816                                 chan->sent = 0;
3817                                 continue;
3818                         }
3819
3820                         if (skb_queue_empty(&chan->data_q))
3821                                 continue;
3822
3823                         skb = skb_peek(&chan->data_q);
3824                         if (skb->priority >= HCI_PRIO_MAX - 1)
3825                                 continue;
3826
3827                         skb->priority = HCI_PRIO_MAX - 1;
3828
3829                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
3830                                skb->priority);
3831                 }
3832
3833                 if (hci_conn_num(hdev, type) == num)
3834                         break;
3835         }
3836
3837         rcu_read_unlock();
3838
3839 }
3840
3841 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3842 {
3843         /* Calculate count of blocks used by this packet */
3844         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3845 }
3846
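     /* Kick the ACL TX timeout handling if the controller has returned
      * no buffer credits for longer than HCI_ACL_TX_TIMEOUT. Skipped for
      * raw devices.
      */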
3847 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
3848 {
3849         if (!test_bit(HCI_RAW, &hdev->flags)) {
3850                 /* ACL tx timeout must be longer than maximum
3851                  * link supervision timeout (40.9 seconds) */
3852                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
3853                                        HCI_ACL_TX_TIMEOUT))
3854                         hci_link_tx_to(hdev, ACL_LINK);
3855         }
3856 }
3857
3858 static void hci_sched_acl_pkt(struct hci_dev *hdev)
3859 {
3860         unsigned int cnt = hdev->acl_cnt;
3861         struct hci_chan *chan;
3862         struct sk_buff *skb;
3863         int quote;
3864
3865         __check_timeout(hdev, cnt);
3866
3867         while (hdev->acl_cnt &&
3868                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
3869                 u32 priority = (skb_peek(&chan->data_q))->priority;
3870                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
3871                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3872                                skb->len, skb->priority);
3873
3874                         /* Stop if priority has changed */
3875                         if (skb->priority < priority)
3876                                 break;
3877
3878                         skb = skb_dequeue(&chan->data_q);
3879
3880                         hci_conn_enter_active_mode(chan->conn,
3881                                                    bt_cb(skb)->force_active);
3882
3883                         hci_send_frame(hdev, skb);
3884                         hdev->acl_last_tx = jiffies;
3885
3886                         hdev->acl_cnt--;
3887                         chan->sent++;
3888                         chan->conn->sent++;
3889                 }
3890         }
3891
3892         if (cnt != hdev->acl_cnt)
3893                 hci_prio_recalculate(hdev, ACL_LINK);
3894 }
3895
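     /* Block-based ACL scheduling, used with the flow control mode of
      * AMP controllers: credits and quotes are accounted in data blocks
      * rather than in packets.
      */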
3896 static void hci_sched_acl_blk(struct hci_dev *hdev)
3897 {
3898         unsigned int cnt = hdev->block_cnt;
3899         struct hci_chan *chan;
3900         struct sk_buff *skb;
3901         int quote;
3902         u8 type;
3903
3904         __check_timeout(hdev, cnt);
3905
3906         BT_DBG("%s", hdev->name);
3907
3908         if (hdev->dev_type == HCI_AMP)
3909                 type = AMP_LINK;
3910         else
3911                 type = ACL_LINK;
3912
3913         while (hdev->block_cnt > 0 &&
3914                (chan = hci_chan_sent(hdev, type, &quote))) {
3915                 u32 priority = (skb_peek(&chan->data_q))->priority;
3916                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3917                         int blocks;
3918
3919                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
3920                                skb->len, skb->priority);
3921
3922                         /* Stop if priority has changed */
3923                         if (skb->priority < priority)
3924                                 break;
3925
3926                         skb = skb_dequeue(&chan->data_q);
3927
3928                         blocks = __get_blocks(hdev, skb);
3929                         if (blocks > hdev->block_cnt) {
                                     /* Out of block credits: put the
                                      * packet back instead of leaking it.
                                      */
                                     skb_queue_head(&chan->data_q, skb);
3930                                 return;
                             }
3931
3932                         hci_conn_enter_active_mode(chan->conn,
3933                                                    bt_cb(skb)->force_active);
3934
3935                         hci_send_frame(hdev, skb);
3936                         hdev->acl_last_tx = jiffies;
3937
3938                         hdev->block_cnt -= blocks;
3939                         quote -= blocks;
3940
3941                         chan->sent += blocks;
3942                         chan->conn->sent += blocks;
3943                 }
3944         }
3945
3946         if (cnt != hdev->block_cnt)
3947                 hci_prio_recalculate(hdev, type);
3948 }
3949
3950 static void hci_sched_acl(struct hci_dev *hdev)
3951 {
3952         BT_DBG("%s", hdev->name);
3953
3954         /* No ACL link over BR/EDR controller */
3955         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3956                 return;
3957
3958         /* No AMP link over AMP controller */
3959         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
3960                 return;
3961
3962         switch (hdev->flow_ctl_mode) {
3963         case HCI_FLOW_CTL_MODE_PACKET_BASED:
3964                 hci_sched_acl_pkt(hdev);
3965                 break;
3966
3967         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3968                 hci_sched_acl_blk(hdev);
3969                 break;
3970         }
3971 }
3972
3973 /* Schedule SCO */
3974 static void hci_sched_sco(struct hci_dev *hdev)
3975 {
3976         struct hci_conn *conn;
3977         struct sk_buff *skb;
3978         int quote;
3979
3980         BT_DBG("%s", hdev->name);
3981
3982         if (!hci_conn_num(hdev, SCO_LINK))
3983                 return;
3984
3985         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3986                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3987                         BT_DBG("skb %p len %d", skb, skb->len);
3988                         hci_send_frame(hdev, skb);
3989
3990                         conn->sent++;
3991                         if (conn->sent == ~0)
3992                                 conn->sent = 0;
3993                 }
3994         }
3995 }
3996
3997 static void hci_sched_esco(struct hci_dev *hdev)
3998 {
3999         struct hci_conn *conn;
4000         struct sk_buff *skb;
4001         int quote;
4002
4003         BT_DBG("%s", hdev->name);
4004
4005         if (!hci_conn_num(hdev, ESCO_LINK))
4006                 return;
4007
4008         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4009                                                      &quote))) {
4010                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4011                         BT_DBG("skb %p len %d", skb, skb->len);
4012                         hci_send_frame(hdev, skb);
4013
4014                         conn->sent++;
4015                         if (conn->sent == ~0)
4016                                 conn->sent = 0;
4017                 }
4018         }
4019 }
4020
4021 static void hci_sched_le(struct hci_dev *hdev)
4022 {
4023         struct hci_chan *chan;
4024         struct sk_buff *skb;
4025         int quote, cnt, tmp;
4026
4027         BT_DBG("%s", hdev->name);
4028
4029         if (!hci_conn_num(hdev, LE_LINK))
4030                 return;
4031
4032         if (!test_bit(HCI_RAW, &hdev->flags)) {
4033                 /* LE tx timeout must be longer than maximum
4034                  * link supervision timeout (40.9 seconds) */
4035                 if (!hdev->le_cnt && hdev->le_pkts &&
4036                     time_after(jiffies, hdev->le_last_tx + HZ * 45))
4037                         hci_link_tx_to(hdev, LE_LINK);
4038         }
4039
4040         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4041         tmp = cnt;
4042         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4043                 u32 priority = (skb_peek(&chan->data_q))->priority;
4044                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4045                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4046                                skb->len, skb->priority);
4047
4048                         /* Stop if priority has changed */
4049                         if (skb->priority < priority)
4050                                 break;
4051
4052                         skb = skb_dequeue(&chan->data_q);
4053
4054                         hci_send_frame(hdev, skb);
4055                         hdev->le_last_tx = jiffies;
4056
4057                         cnt--;
4058                         chan->sent++;
4059                         chan->conn->sent++;
4060                 }
4061         }
4062
4063         if (hdev->le_pkts)
4064                 hdev->le_cnt = cnt;
4065         else
4066                 hdev->acl_cnt = cnt;
4067
4068         if (cnt != tmp)
4069                 hci_prio_recalculate(hdev, LE_LINK);
4070 }
4071
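     /* TX work: run the per-link-type schedulers (skipped while the
      * device is in user channel mode) and then flush any queued raw
      * packets.
      */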
4072 static void hci_tx_work(struct work_struct *work)
4073 {
4074         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
4075         struct sk_buff *skb;
4076
4077         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
4078                hdev->sco_cnt, hdev->le_cnt);
4079
4080         if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4081                 /* Schedule queues and send stuff to HCI driver */
4082                 hci_sched_acl(hdev);
4083                 hci_sched_sco(hdev);
4084                 hci_sched_esco(hdev);
4085                 hci_sched_le(hdev);
4086         }
4087
4088         /* Send next queued raw (unknown type) packet */
4089         while ((skb = skb_dequeue(&hdev->raw_q)))
4090                 hci_send_frame(hdev, skb);
4091 }
4092
4093 /* ----- HCI RX task (incoming data processing) ----- */
4094
4095 /* ACL data packet */
4096 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4097 {
4098         struct hci_acl_hdr *hdr = (void *) skb->data;
4099         struct hci_conn *conn;
4100         __u16 handle, flags;
4101
4102         skb_pull(skb, HCI_ACL_HDR_SIZE);
4103
4104         handle = __le16_to_cpu(hdr->handle);
4105         flags  = hci_flags(handle);
4106         handle = hci_handle(handle);
4107
4108         BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
4109                handle, flags);
4110
4111         hdev->stat.acl_rx++;
4112
4113         hci_dev_lock(hdev);
4114         conn = hci_conn_hash_lookup_handle(hdev, handle);
4115         hci_dev_unlock(hdev);
4116
4117         if (conn) {
4118                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
4119
4120                 /* Send to upper protocol */
4121                 l2cap_recv_acldata(conn, skb, flags);
4122                 return;
4123         } else {
4124                 BT_ERR("%s ACL packet for unknown connection handle %d",
4125                        hdev->name, handle);
4126         }
4127
4128         kfree_skb(skb);
4129 }
4130
4131 /* SCO data packet */
4132 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
4133 {
4134         struct hci_sco_hdr *hdr = (void *) skb->data;
4135         struct hci_conn *conn;
4136         __u16 handle;
4137
4138         skb_pull(skb, HCI_SCO_HDR_SIZE);
4139
4140         handle = __le16_to_cpu(hdr->handle);
4141
4142         BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
4143
4144         hdev->stat.sco_rx++;
4145
4146         hci_dev_lock(hdev);
4147         conn = hci_conn_hash_lookup_handle(hdev, handle);
4148         hci_dev_unlock(hdev);
4149
4150         if (conn) {
4151                 /* Send to upper protocol */
4152                 sco_recv_scodata(conn, skb);
4153                 return;
4154         } else {
4155                 BT_ERR("%s SCO packet for unknown connection handle %d",
4156                        hdev->name, handle);
4157         }
4158
4159         kfree_skb(skb);
4160 }
4161
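     /* A request is complete when the head of the command queue starts a
      * new request, or when the queue is empty.
      */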
4162 static bool hci_req_is_complete(struct hci_dev *hdev)
4163 {
4164         struct sk_buff *skb;
4165
4166         skb = skb_peek(&hdev->cmd_q);
4167         if (!skb)
4168                 return true;
4169
4170         return bt_cb(skb)->req.start;
4171 }
4172
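     /* Requeue a clone of the last sent command unless it was a reset.
      * Helper for the spontaneous-reset quirk handling below.
      */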
4173 static void hci_resend_last(struct hci_dev *hdev)
4174 {
4175         struct hci_command_hdr *sent;
4176         struct sk_buff *skb;
4177         u16 opcode;
4178
4179         if (!hdev->sent_cmd)
4180                 return;
4181
4182         sent = (void *) hdev->sent_cmd->data;
4183         opcode = __le16_to_cpu(sent->opcode);
4184         if (opcode == HCI_OP_RESET)
4185                 return;
4186
4187         skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4188         if (!skb)
4189                 return;
4190
4191         skb_queue_head(&hdev->cmd_q, skb);
4192         queue_work(hdev->workqueue, &hdev->cmd_work);
4193 }
4194
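     /* Resolve the completion callback of the request that the command
      * with the given opcode belonged to, discarding any of that
      * request's commands still pending on the command queue.
      */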
4195 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
4196 {
4197         hci_req_complete_t req_complete = NULL;
4198         struct sk_buff *skb;
4199         unsigned long flags;
4200
4201         BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
4202
4203         /* If the completed command doesn't match the last one that was
4204          * sent, we need to do special handling of it.
4205          */
4206         if (!hci_sent_cmd_data(hdev, opcode)) {
4207                 /* Some CSR based controllers generate a spontaneous
4208                  * reset complete event during init and any pending
4209                  * command will never be completed. In such a case we
4210                  * need to resend whatever was the last sent
4211                  * command.
4212                  */
4213                 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
4214                         hci_resend_last(hdev);
4215
4216                 return;
4217         }
4218
4219         /* If the command succeeded and there are still more commands in
4220          * this request, the request is not yet complete.
4221          */
4222         if (!status && !hci_req_is_complete(hdev))
4223                 return;
4224
4225         /* If this was the last command in a request, the complete
4226          * callback would be found in hdev->sent_cmd instead of the
4227          * command queue (hdev->cmd_q).
4228          */
4229         if (hdev->sent_cmd) {
4230                 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
4231
4232                 if (req_complete) {
4233                         /* We must set the complete callback to NULL to
4234                          * avoid calling the callback more than once if
4235                          * this function gets called again.
4236                          */
4237                         bt_cb(hdev->sent_cmd)->req.complete = NULL;
4238
4239                         goto call_complete;
4240                 }
4241         }
4242
4243         /* Remove all pending commands belonging to this request */
4244         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4245         while ((skb = __skb_dequeue(&hdev->cmd_q))) {
4246                 if (bt_cb(skb)->req.start) {
4247                         __skb_queue_head(&hdev->cmd_q, skb);
4248                         break;
4249                 }
4250
4251                 req_complete = bt_cb(skb)->req.complete;
4252                 kfree_skb(skb);
4253         }
4254         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4255
4256 call_complete:
4257         if (req_complete)
4258                 req_complete(hdev, status);
4259 }
4260
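     /* RX work: dequeue received frames, mirror them to the monitor and
      * to promiscuous sockets, then dispatch by packet type to the
      * event, ACL and SCO handlers.
      */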
4261 static void hci_rx_work(struct work_struct *work)
4262 {
4263         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
4264         struct sk_buff *skb;
4265
4266         BT_DBG("%s", hdev->name);
4267
4268         while ((skb = skb_dequeue(&hdev->rx_q))) {
4269                 /* Send copy to monitor */
4270                 hci_send_to_monitor(hdev, skb);
4271
4272                 if (atomic_read(&hdev->promisc)) {
4273                         /* Send copy to the sockets */
4274                         hci_send_to_sock(hdev, skb);
4275                 }
4276
4277                 if (test_bit(HCI_RAW, &hdev->flags) ||
4278                     test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4279                         kfree_skb(skb);
4280                         continue;
4281                 }
4282
4283                 if (test_bit(HCI_INIT, &hdev->flags)) {
4284                         /* Don't process data packets in this state. */
4285                         switch (bt_cb(skb)->pkt_type) {
4286                         case HCI_ACLDATA_PKT:
4287                         case HCI_SCODATA_PKT:
4288                                 kfree_skb(skb);
4289                                 continue;
4290                         }
4291                 }
4292
4293                 /* Process frame */
4294                 switch (bt_cb(skb)->pkt_type) {
4295                 case HCI_EVENT_PKT:
4296                         BT_DBG("%s Event packet", hdev->name);
4297                         hci_event_packet(hdev, skb);
4298                         break;
4299
4300                 case HCI_ACLDATA_PKT:
4301                         BT_DBG("%s ACL data packet", hdev->name);
4302                         hci_acldata_packet(hdev, skb);
4303                         break;
4304
4305                 case HCI_SCODATA_PKT:
4306                         BT_DBG("%s SCO data packet", hdev->name);
4307                         hci_scodata_packet(hdev, skb);
4308                         break;
4309
4310                 default:
4311                         kfree_skb(skb);
4312                         break;
4313                 }
4314         }
4315 }
4316
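     /* Command work: if a command credit is available, send the next
      * queued command, keep a clone in hdev->sent_cmd for matching the
      * completion event, and (re)arm the command timeout.
      */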
4317 static void hci_cmd_work(struct work_struct *work)
4318 {
4319         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
4320         struct sk_buff *skb;
4321
4322         BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4323                atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
4324
4325         /* Send queued commands */
4326         if (atomic_read(&hdev->cmd_cnt)) {
4327                 skb = skb_dequeue(&hdev->cmd_q);
4328                 if (!skb)
4329                         return;
4330
4331                 kfree_skb(hdev->sent_cmd);
4332
4333                 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
4334                 if (hdev->sent_cmd) {
4335                         atomic_dec(&hdev->cmd_cnt);
4336                         hci_send_frame(hdev, skb);
4337                         if (test_bit(HCI_RESET, &hdev->flags))
4338                                 del_timer(&hdev->cmd_timer);
4339                         else
4340                                 mod_timer(&hdev->cmd_timer,
4341                                           jiffies + HCI_CMD_TIMEOUT);
4342                 } else {
4343                         skb_queue_head(&hdev->cmd_q, skb);
4344                         queue_work(hdev->workqueue, &hdev->cmd_work);
4345                 }
4346         }
4347 }