/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dev_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
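
/* Usage sketch (illustrative): assuming debugfs is mounted at
 * /sys/kernel/debug and the controller is hci0 and powered up,
 * the dut_mode file above can be exercised from a shell:
 *
 *   # cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   N
 *   # echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing Y issues HCI_OP_ENABLE_DUT_MODE; writing N issues
 * HCI_OP_RESET, since Device Under Test mode can only be left by
 * resetting the controller.
 */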

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
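
/* Worked example (illustrative) for the byte reversal in uuids_show():
 * the SIG base UUID 00000000-0000-1000-8000-00805f9b34fb is kept in
 * uuid->uuid with its bytes reversed, i.e. starting fb 34 9b 5f 80 ...
 * and ending ... 00 00 00 00. The copy loop flips it back so that the
 * %pUb modifier prints the canonical big-endian form.
 */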

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");
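
/* Attributes declared with DEFINE_SIMPLE_ATTRIBUTE() become plain
 * read/write debugfs files once registered. Illustrative shell use,
 * assuming the attribute is created under its own name later in this
 * file:
 *
 *   # echo 5161 > /sys/kernel/debug/bluetooth/hci0/auto_accept_delay
 *   # cat /sys/kernel/debug/bluetooth/hci0/auto_accept_delay
 *   5161
 *
 * The "%llu\n" format string controls both output formatting and
 * input parsing.
 */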

static int ssp_debug_mode_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;
        struct sk_buff *skb;
        __u8 mode;
        int err;

        if (val != 0 && val != 1)
                return -EINVAL;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        hci_req_lock(hdev);
        mode = val;
        skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
                             &mode, HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_lock(hdev);
        hdev->ssp_debug_mode = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->ssp_debug_mode;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
                        ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dev_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");
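
/* Note: the sniff interval values above are expressed in baseband
 * slots of 0.625 ms, which is why odd values are rejected. As a
 * worked example, 800 slots correspond to 800 * 0.625 ms = 500 ms.
 */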

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");
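
/* Note: LE connection interval values are in units of 1.25 ms, so the
 * range 0x0006-0x0c80 enforced above corresponds to 7.5 ms
 * (6 * 1.25 ms) up to 4 seconds (3200 * 1.25 ms).
 */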

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");
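
/* Note: the advertising channel map is a bitmask over the three LE
 * advertising channels: bit 0 = channel 37, bit 1 = channel 38 and
 * bit 2 = channel 39. The range check above therefore accepts any
 * non-empty subset, with 0x07 enabling all three channels.
 */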

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
                            size_t count, loff_t *position)
{
        struct hci_dev *hdev = fp->private_data;
        bool enable;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));

        if (copy_from_user(buf, user_buffer, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';

        if (strtobool(buf, &enable) < 0)
                return -EINVAL;

        if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

        return count;
}

static const struct file_operations lowpan_debugfs_fops = {
        .open           = simple_open,
        .read           = lowpan_read,
        .write          = lowpan_write,
        .llseek         = default_llseek,
};

static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
        struct hci_dev *hdev = sf->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);

        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
        return single_open(file, le_auto_conn_show, inode->i_private);
}

static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
                                  size_t count, loff_t *offset)
{
        struct seq_file *sf = file->private_data;
        struct hci_dev *hdev = sf->private;
        u8 auto_connect = 0;
        bdaddr_t addr;
        u8 addr_type;
        char *buf;
        int err = 0;
        int n;

        /* Don't allow partial write */
        if (*offset != 0)
                return -EINVAL;

        if (count < 3)
                return -EINVAL;

        buf = kzalloc(count, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (copy_from_user(buf, data, count)) {
                err = -EFAULT;
                goto done;
        }

        if (memcmp(buf, "add", 3) == 0) {
                n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
                           &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
                           &addr.b[1], &addr.b[0], &addr_type,
                           &auto_connect);

                if (n < 7) {
                        err = -EINVAL;
                        goto done;
                }

                hci_dev_lock(hdev);
                err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
                                          hdev->le_conn_min_interval,
                                          hdev->le_conn_max_interval);
                hci_dev_unlock(hdev);

                if (err)
                        goto done;
        } else if (memcmp(buf, "del", 3) == 0) {
                n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
                           &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
                           &addr.b[1], &addr.b[0], &addr_type);

                if (n < 7) {
                        err = -EINVAL;
                        goto done;
                }

                hci_dev_lock(hdev);
                hci_conn_params_del(hdev, &addr, addr_type);
                hci_dev_unlock(hdev);
        } else if (memcmp(buf, "clr", 3) == 0) {
                hci_dev_lock(hdev);
                hci_conn_params_clear(hdev);
                hci_pend_le_conns_clear(hdev);
                hci_update_background_scan(hdev);
                hci_dev_unlock(hdev);
        } else {
                err = -EINVAL;
        }

done:
        kfree(buf);

        if (err)
                return err;
        else
                return count;
}

static const struct file_operations le_auto_conn_fops = {
        .open           = le_auto_conn_open,
        .read           = seq_read,
        .write          = le_auto_conn_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};
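
/* Illustrative writes accepted by le_auto_conn_write(), derived from
 * the sscanf() formats above (address, then address type and, for
 * "add", an optional auto-connect policy defaulting to 0):
 *
 *   # echo "add 00:11:22:33:44:55 0 2" > le_auto_conn
 *   # echo "del 00:11:22:33:44:55 0" > le_auto_conn
 *   # echo "clr" > le_auto_conn
 */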

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
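
/* Usage sketch (illustrative): a caller that wants a controller
 * response synchronously, in the style of dut_mode_write() above,
 * takes the request lock around the call and frees the returned skb:
 *
 *   hci_req_lock(hdev);
 *   skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT);
 *   hci_req_unlock(hdev);
 *   if (IS_ERR(skb))
 *           return PTR_ERR(skb);
 *   err = -bt_to_errno(skb->data[0]);
 *   kfree_skb(skb);
 *
 * On success the skb holds the Command Complete parameters, starting
 * with the status byte, and must be freed by the caller.
 */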

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}
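
/* The connection accept timeout written in bredr_setup() is expressed
 * in 0.625 ms slots: 0x7d00 = 32000 slots, and 32000 * 0.625 ms is
 * 20 seconds, matching the "~20 secs" comment above.
 */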

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}
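
/* The values returned by hci_get_inquiry_mode() map onto the Write
 * Inquiry Mode command: 0x00 = standard inquiry result format,
 * 0x01 = inquiry result with RSSI and 0x02 = inquiry result with RSSI
 * or extended inquiry result. The manufacturer/revision checks cover
 * controllers that support RSSI results without advertising the
 * corresponding LMP feature bit.
 */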
1418
1419 static void hci_setup_inquiry_mode(struct hci_request *req)
1420 {
1421         u8 mode;
1422
1423         mode = hci_get_inquiry_mode(req->hdev);
1424
1425         hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1426 }
1427
1428 static void hci_setup_event_mask(struct hci_request *req)
1429 {
1430         struct hci_dev *hdev = req->hdev;
1431
1432         /* The second byte is 0xff instead of 0x9f (two reserved bits
1433          * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1434          * command otherwise.
1435          */
1436         u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1437
1438         /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1439          * any event mask for pre 1.2 devices.
1440          */
1441         if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1442                 return;
1443
1444         if (lmp_bredr_capable(hdev)) {
1445                 events[4] |= 0x01; /* Flow Specification Complete */
1446                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1447                 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1448                 events[5] |= 0x08; /* Synchronous Connection Complete */
1449                 events[5] |= 0x10; /* Synchronous Connection Changed */
1450         } else {
1451                 /* Use a different default for LE-only devices */
1452                 memset(events, 0, sizeof(events));
1453                 events[0] |= 0x10; /* Disconnection Complete */
1454                 events[0] |= 0x80; /* Encryption Change */
1455                 events[1] |= 0x08; /* Read Remote Version Information Complete */
1456                 events[1] |= 0x20; /* Command Complete */
1457                 events[1] |= 0x40; /* Command Status */
1458                 events[1] |= 0x80; /* Hardware Error */
1459                 events[2] |= 0x04; /* Number of Completed Packets */
1460                 events[3] |= 0x02; /* Data Buffer Overflow */
1461                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1462         }
1463
1464         if (lmp_inq_rssi_capable(hdev))
1465                 events[4] |= 0x02; /* Inquiry Result with RSSI */
1466
1467         if (lmp_sniffsubr_capable(hdev))
1468                 events[5] |= 0x20; /* Sniff Subrating */
1469
1470         if (lmp_pause_enc_capable(hdev))
1471                 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1472
1473         if (lmp_ext_inq_capable(hdev))
1474                 events[5] |= 0x40; /* Extended Inquiry Result */
1475
1476         if (lmp_no_flush_capable(hdev))
1477                 events[7] |= 0x01; /* Enhanced Flush Complete */
1478
1479         if (lmp_lsto_capable(hdev))
1480                 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1481
1482         if (lmp_ssp_capable(hdev)) {
1483                 events[6] |= 0x01;      /* IO Capability Request */
1484                 events[6] |= 0x02;      /* IO Capability Response */
1485                 events[6] |= 0x04;      /* User Confirmation Request */
1486                 events[6] |= 0x08;      /* User Passkey Request */
1487                 events[6] |= 0x10;      /* Remote OOB Data Request */
1488                 events[6] |= 0x20;      /* Simple Pairing Complete */
1489                 events[7] |= 0x04;      /* User Passkey Notification */
1490                 events[7] |= 0x08;      /* Keypress Notification */
1491                 events[7] |= 0x10;      /* Remote Host Supported
1492                                          * Features Notification
1493                                          */
1494         }
1495
1496         if (lmp_le_capable(hdev))
1497                 events[7] |= 0x20;      /* LE Meta-Event */
1498
1499         hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1500
1501         if (lmp_le_capable(hdev)) {
1502                 memset(events, 0, sizeof(events));
1503                 events[0] = 0x1f;
1504                 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1505                             sizeof(events), events);
1506         }
1507 }
1508
1509 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1510 {
1511         struct hci_dev *hdev = req->hdev;
1512
1513         if (lmp_bredr_capable(hdev))
1514                 bredr_setup(req);
1515         else
1516                 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1517
1518         if (lmp_le_capable(hdev))
1519                 le_setup(req);
1520
1521         hci_setup_event_mask(req);
1522
1523         /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1524          * local supported commands HCI command.
1525          */
1526         if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1527                 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1528
1529         if (lmp_ssp_capable(hdev)) {
1530                 /* When SSP is available, then the host features page
1531                  * should also be available as well. However some
1532                  * controllers list the max_page as 0 as long as SSP
1533                  * has not been enabled. To achieve proper debugging
1534                  * output, force the minimum max_page to 1 at least.
1535                  */
1536                 hdev->max_page = 0x01;
1537
1538                 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1539                         u8 mode = 0x01;
1540                         hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1541                                     sizeof(mode), &mode);
1542                 } else {
1543                         struct hci_cp_write_eir cp;
1544
1545                         memset(hdev->eir, 0, sizeof(hdev->eir));
1546                         memset(&cp, 0, sizeof(cp));
1547
1548                         hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1549                 }
1550         }
1551
1552         if (lmp_inq_rssi_capable(hdev))
1553                 hci_setup_inquiry_mode(req);
1554
1555         if (lmp_inq_tx_pwr_capable(hdev))
1556                 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1557
1558         if (lmp_ext_feat_capable(hdev)) {
1559                 struct hci_cp_read_local_ext_features cp;
1560
1561                 cp.page = 0x01;
1562                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1563                             sizeof(cp), &cp);
1564         }
1565
1566         if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1567                 u8 enable = 1;
1568                 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1569                             &enable);
1570         }
1571 }
1572
1573 static void hci_setup_link_policy(struct hci_request *req)
1574 {
1575         struct hci_dev *hdev = req->hdev;
1576         struct hci_cp_write_def_link_policy cp;
1577         u16 link_policy = 0;
1578
1579         if (lmp_rswitch_capable(hdev))
1580                 link_policy |= HCI_LP_RSWITCH;
1581         if (lmp_hold_capable(hdev))
1582                 link_policy |= HCI_LP_HOLD;
1583         if (lmp_sniff_capable(hdev))
1584                 link_policy |= HCI_LP_SNIFF;
1585         if (lmp_park_capable(hdev))
1586                 link_policy |= HCI_LP_PARK;
1587
1588         cp.policy = cpu_to_le16(link_policy);
1589         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1590 }
1591
1592 static void hci_set_le_support(struct hci_request *req)
1593 {
1594         struct hci_dev *hdev = req->hdev;
1595         struct hci_cp_write_le_host_supported cp;
1596
1597         /* LE-only devices do not support explicit enablement */
1598         if (!lmp_bredr_capable(hdev))
1599                 return;
1600
1601         memset(&cp, 0, sizeof(cp));
1602
1603         if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1604                 cp.le = 0x01;
1605                 cp.simul = lmp_le_br_capable(hdev);
1606         }
1607
1608         if (cp.le != lmp_host_le_capable(hdev))
1609                 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1610                             &cp);
1611 }
1612
1613 static void hci_set_event_mask_page_2(struct hci_request *req)
1614 {
1615         struct hci_dev *hdev = req->hdev;
1616         u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1617
1618         /* If Connectionless Slave Broadcast master role is supported
1619          * enable all necessary events for it.
1620          */
1621         if (lmp_csb_master_capable(hdev)) {
1622                 events[1] |= 0x40;      /* Triggered Clock Capture */
1623                 events[1] |= 0x80;      /* Synchronization Train Complete */
1624                 events[2] |= 0x10;      /* Slave Page Response Timeout */
1625                 events[2] |= 0x20;      /* CSB Channel Map Change */
1626         }
1627
1628         /* If Connectionless Slave Broadcast slave role is supported
1629          * enable all necessary events for it.
1630          */
1631         if (lmp_csb_slave_capable(hdev)) {
1632                 events[2] |= 0x01;      /* Synchronization Train Received */
1633                 events[2] |= 0x02;      /* CSB Receive */
1634                 events[2] |= 0x04;      /* CSB Timeout */
1635                 events[2] |= 0x08;      /* Truncated Page Complete */
1636         }
1637
1638         /* Enable Authenticated Payload Timeout Expired event if supported */
1639         if (lmp_ping_capable(hdev))
1640                 events[2] |= 0x80;
1641
1642         hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1643 }
1644
1645 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1646 {
1647         struct hci_dev *hdev = req->hdev;
1648         u8 p;
1649
1650         /* Some Broadcom based Bluetooth controllers do not support the
1651          * Delete Stored Link Key command. They clearly indicate its
1652          * absence in the bit mask of supported commands.
1653          *
1654          * Check the supported commands and only if the command is marked
1655          * as supported, send it. If it is not supported, assume that the
1656          * controller has no actual support for stored link keys, which
1657          * makes this command redundant anyway.
1658          *
1659          * Some controllers indicate that they support deleting stored
1660          * link keys, but in reality they don't. The quirk lets a driver
1661          * just disable this command.
1662          */
1663         if (hdev->commands[6] & 0x80 &&
1664             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1665                 struct hci_cp_delete_stored_link_key cp;
1666
1667                 bacpy(&cp.bdaddr, BDADDR_ANY);
1668                 cp.delete_all = 0x01;
1669                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1670                             sizeof(cp), &cp);
1671         }
1672
1673         if (hdev->commands[5] & 0x10)
1674                 hci_setup_link_policy(req);
1675
1676         if (lmp_le_capable(hdev))
1677                 hci_set_le_support(req);
1678
1679         /* Read features beyond page 1 if available */
1680         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1681                 struct hci_cp_read_local_ext_features cp;
1682
1683                 cp.page = p;
1684                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1685                             sizeof(cp), &cp);
1686         }
1687 }
1688
1689 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1690 {
1691         struct hci_dev *hdev = req->hdev;
1692
1693         /* Set event mask page 2 if the HCI command for it is supported */
1694         if (hdev->commands[22] & 0x04)
1695                 hci_set_event_mask_page_2(req);
1696
1697         /* Check for Synchronization Train support */
1698         if (lmp_sync_train_capable(hdev))
1699                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1700
1701         /* Enable Secure Connections if supported and configured */
1702         if ((lmp_sc_capable(hdev) ||
1703              test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
1704             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1705                 u8 support = 0x01;
1706                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1707                             sizeof(support), &support);
1708         }
1709 }
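/* hdev->commands[] mirrors the Supported Commands bitmask returned by the
 * Read Local Supported Commands command, so the checks above read as
 * "octet 6 bit 7" (Delete Stored Link Key) and "octet 22 bit 2" (Set Event
 * Mask Page 2). A generic test could look like this sketch (hypothetical
 * helper, not part of this file):
 */
#if 0
static bool example_cmd_supported(struct hci_dev *hdev, int octet, int bit)
{
        return hdev->commands[octet] & (1 << bit);
}
#endif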
1710
1711 static int __hci_init(struct hci_dev *hdev)
1712 {
1713         int err;
1714
1715         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1716         if (err < 0)
1717                 return err;
1718
1719         /* The Device Under Test (DUT) mode is special and available for
1720          * all controller types. So just create it early on.
1721          */
1722         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1723                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1724                                     &dut_mode_fops);
1725         }
1726
1727         /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1728          * dual-mode BR/EDR/LE controllers. AMP controllers only need
1729          * the first stage init.
1730          */
1731         if (hdev->dev_type != HCI_BREDR)
1732                 return 0;
1733
1734         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1735         if (err < 0)
1736                 return err;
1737
1738         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1739         if (err < 0)
1740                 return err;
1741
1742         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1743         if (err < 0)
1744                 return err;
1745
1746         /* Only create debugfs entries during the initial setup
1747          * phase and not every time the controller gets powered on.
1748          */
1749         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1750                 return 0;
1751
1752         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1753                             &features_fops);
1754         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1755                            &hdev->manufacturer);
1756         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1757         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1758         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1759                             &blacklist_fops);
1760         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1761
1762         if (lmp_bredr_capable(hdev)) {
1763                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1764                                     hdev, &inquiry_cache_fops);
1765                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1766                                     hdev, &link_keys_fops);
1767                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1768                                     hdev, &dev_class_fops);
1769                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1770                                     hdev, &voice_setting_fops);
1771         }
1772
1773         if (lmp_ssp_capable(hdev)) {
1774                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1775                                     hdev, &auto_accept_delay_fops);
1776                 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1777                                     hdev, &ssp_debug_mode_fops);
1778                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1779                                     hdev, &force_sc_support_fops);
1780                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1781                                     hdev, &sc_only_mode_fops);
1782         }
1783
1784         if (lmp_sniff_capable(hdev)) {
1785                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1786                                     hdev, &idle_timeout_fops);
1787                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1788                                     hdev, &sniff_min_interval_fops);
1789                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1790                                     hdev, &sniff_max_interval_fops);
1791         }
1792
1793         if (lmp_le_capable(hdev)) {
1794                 debugfs_create_file("identity", 0400, hdev->debugfs,
1795                                     hdev, &identity_fops);
1796                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1797                                     hdev, &rpa_timeout_fops);
1798                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1799                                     hdev, &random_address_fops);
1800                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1801                                     hdev, &static_address_fops);
1802
1803                 /* For controllers with a public address, provide a debug
1804                  * option to force the usage of the configured static
1805                  * address. By default the public address is used.
1806                  */
1807                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1808                         debugfs_create_file("force_static_address", 0644,
1809                                             hdev->debugfs, hdev,
1810                                             &force_static_address_fops);
1811
1812                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1813                                   &hdev->le_white_list_size);
1814                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1815                                     &white_list_fops);
1816                 debugfs_create_file("identity_resolving_keys", 0400,
1817                                     hdev->debugfs, hdev,
1818                                     &identity_resolving_keys_fops);
1819                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1820                                     hdev, &long_term_keys_fops);
1821                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1822                                     hdev, &conn_min_interval_fops);
1823                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1824                                     hdev, &conn_max_interval_fops);
1825                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1826                                     hdev, &adv_channel_map_fops);
1827                 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1828                                     &lowpan_debugfs_fops);
1829                 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1830                                     &le_auto_conn_fops);
1831         }
1832
1833         return 0;
1834 }
1835
1836 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1837 {
1838         __u8 scan = opt;
1839
1840         BT_DBG("%s %x", req->hdev->name, scan);
1841
1842         /* Inquiry and Page scans */
1843         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1844 }
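/* These one-command request builders are driven through hci_req_sync();
 * enabling both scan types, as the HCISETSCAN ioctl below does, looks
 * roughly like this sketch (assumes the usual SCAN_* constants from
 * hci.h):
 */
#if 0
        int err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
                               HCI_INIT_TIMEOUT);
#endif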
1845
1846 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1847 {
1848         __u8 auth = opt;
1849
1850         BT_DBG("%s %x", req->hdev->name, auth);
1851
1852         /* Authentication */
1853         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1854 }
1855
1856 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1857 {
1858         __u8 encrypt = opt;
1859
1860         BT_DBG("%s %x", req->hdev->name, encrypt);
1861
1862         /* Encryption */
1863         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1864 }
1865
1866 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1867 {
1868         __le16 policy = cpu_to_le16(opt);
1869
1870         BT_DBG("%s %x", req->hdev->name, policy);
1871
1872         /* Default link policy */
1873         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1874 }
1875
1876 /* Get HCI device by index.
1877  * Device is held on return. */
1878 struct hci_dev *hci_dev_get(int index)
1879 {
1880         struct hci_dev *hdev = NULL, *d;
1881
1882         BT_DBG("%d", index);
1883
1884         if (index < 0)
1885                 return NULL;
1886
1887         read_lock(&hci_dev_list_lock);
1888         list_for_each_entry(d, &hci_dev_list, list) {
1889                 if (d->id == index) {
1890                         hdev = hci_dev_hold(d);
1891                         break;
1892                 }
1893         }
1894         read_unlock(&hci_dev_list_lock);
1895         return hdev;
1896 }
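/* Typical caller pattern (sketch): every successful hci_dev_get() must be
 * balanced with hci_dev_put(), as the ioctl helpers below demonstrate.
 */
#if 0
        struct hci_dev *hdev = hci_dev_get(index);

        if (!hdev)
                return -ENODEV;
        /* ... operate on hdev ... */
        hci_dev_put(hdev);
#endif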
1897
1898 /* ---- Inquiry support ---- */
1899
1900 bool hci_discovery_active(struct hci_dev *hdev)
1901 {
1902         struct discovery_state *discov = &hdev->discovery;
1903
1904         switch (discov->state) {
1905         case DISCOVERY_FINDING:
1906         case DISCOVERY_RESOLVING:
1907                 return true;
1908
1909         default:
1910                 return false;
1911         }
1912 }
1913
1914 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1915 {
1916         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1917
1918         if (hdev->discovery.state == state)
1919                 return;
1920
1921         switch (state) {
1922         case DISCOVERY_STOPPED:
1923                 hci_update_background_scan(hdev);
1924
1925                 if (hdev->discovery.state != DISCOVERY_STARTING)
1926                         mgmt_discovering(hdev, 0);
1927                 break;
1928         case DISCOVERY_STARTING:
1929                 break;
1930         case DISCOVERY_FINDING:
1931                 mgmt_discovering(hdev, 1);
1932                 break;
1933         case DISCOVERY_RESOLVING:
1934                 break;
1935         case DISCOVERY_STOPPING:
1936                 break;
1937         }
1938
1939         hdev->discovery.state = state;
1940 }
1941
1942 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1943 {
1944         struct discovery_state *cache = &hdev->discovery;
1945         struct inquiry_entry *p, *n;
1946
1947         list_for_each_entry_safe(p, n, &cache->all, all) {
1948                 list_del(&p->all);
1949                 kfree(p);
1950         }
1951
1952         INIT_LIST_HEAD(&cache->unknown);
1953         INIT_LIST_HEAD(&cache->resolve);
1954 }
1955
1956 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1957                                                bdaddr_t *bdaddr)
1958 {
1959         struct discovery_state *cache = &hdev->discovery;
1960         struct inquiry_entry *e;
1961
1962         BT_DBG("cache %p, %pMR", cache, bdaddr);
1963
1964         list_for_each_entry(e, &cache->all, all) {
1965                 if (!bacmp(&e->data.bdaddr, bdaddr))
1966                         return e;
1967         }
1968
1969         return NULL;
1970 }
1971
1972 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1973                                                        bdaddr_t *bdaddr)
1974 {
1975         struct discovery_state *cache = &hdev->discovery;
1976         struct inquiry_entry *e;
1977
1978         BT_DBG("cache %p, %pMR", cache, bdaddr);
1979
1980         list_for_each_entry(e, &cache->unknown, list) {
1981                 if (!bacmp(&e->data.bdaddr, bdaddr))
1982                         return e;
1983         }
1984
1985         return NULL;
1986 }
1987
1988 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1989                                                        bdaddr_t *bdaddr,
1990                                                        int state)
1991 {
1992         struct discovery_state *cache = &hdev->discovery;
1993         struct inquiry_entry *e;
1994
1995         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1996
1997         list_for_each_entry(e, &cache->resolve, list) {
1998                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1999                         return e;
2000                 if (!bacmp(&e->data.bdaddr, bdaddr))
2001                         return e;
2002         }
2003
2004         return NULL;
2005 }
2006
2007 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2008                                       struct inquiry_entry *ie)
2009 {
2010         struct discovery_state *cache = &hdev->discovery;
2011         struct list_head *pos = &cache->resolve;
2012         struct inquiry_entry *p;
2013
2014         list_del(&ie->list);
2015
2016         list_for_each_entry(p, &cache->resolve, list) {
2017                 if (p->name_state != NAME_PENDING &&
2018                     abs(p->data.rssi) >= abs(ie->data.rssi))
2019                         break;
2020                 pos = &p->list;
2021         }
2022
2023         list_add(&ie->list, pos);
2024 }
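/* The resolve list is thus kept sorted so that name resolution is tried
 * for the strongest signal first: NAME_PENDING entries keep their place,
 * and the re-inserted entry lands before the first settled entry with an
 * equal or larger |RSSI|. For example, re-inserting an entry with RSSI
 * -55 dBm into a settled list of -40, -60, -75 yields -40, -55, -60, -75.
 */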
2025
2026 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2027                               bool name_known, bool *ssp)
2028 {
2029         struct discovery_state *cache = &hdev->discovery;
2030         struct inquiry_entry *ie;
2031
2032         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2033
2034         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2035
2036         if (ssp)
2037                 *ssp = data->ssp_mode;
2038
2039         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2040         if (ie) {
2041                 if (ie->data.ssp_mode && ssp)
2042                         *ssp = true;
2043
2044                 if (ie->name_state == NAME_NEEDED &&
2045                     data->rssi != ie->data.rssi) {
2046                         ie->data.rssi = data->rssi;
2047                         hci_inquiry_cache_update_resolve(hdev, ie);
2048                 }
2049
2050                 goto update;
2051         }
2052
2053         /* Entry not in the cache. Add new one. */
2054         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2055         if (!ie)
2056                 return false;
2057
2058         list_add(&ie->all, &cache->all);
2059
2060         if (name_known) {
2061                 ie->name_state = NAME_KNOWN;
2062         } else {
2063                 ie->name_state = NAME_NOT_KNOWN;
2064                 list_add(&ie->list, &cache->unknown);
2065         }
2066
2067 update:
2068         if (name_known && ie->name_state != NAME_KNOWN &&
2069             ie->name_state != NAME_PENDING) {
2070                 ie->name_state = NAME_KNOWN;
2071                 list_del(&ie->list);
2072         }
2073
2074         memcpy(&ie->data, data, sizeof(*data));
2075         ie->timestamp = jiffies;
2076         cache->timestamp = jiffies;
2077
2078         if (ie->name_state == NAME_NOT_KNOWN)
2079                 return false;
2080
2081         return true;
2082 }
2083
2084 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2085 {
2086         struct discovery_state *cache = &hdev->discovery;
2087         struct inquiry_info *info = (struct inquiry_info *) buf;
2088         struct inquiry_entry *e;
2089         int copied = 0;
2090
2091         list_for_each_entry(e, &cache->all, all) {
2092                 struct inquiry_data *data = &e->data;
2093
2094                 if (copied >= num)
2095                         break;
2096
2097                 bacpy(&info->bdaddr, &data->bdaddr);
2098                 info->pscan_rep_mode    = data->pscan_rep_mode;
2099                 info->pscan_period_mode = data->pscan_period_mode;
2100                 info->pscan_mode        = data->pscan_mode;
2101                 memcpy(info->dev_class, data->dev_class, 3);
2102                 info->clock_offset      = data->clock_offset;
2103
2104                 info++;
2105                 copied++;
2106         }
2107
2108         BT_DBG("cache %p, copied %d", cache, copied);
2109         return copied;
2110 }
2111
2112 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2113 {
2114         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2115         struct hci_dev *hdev = req->hdev;
2116         struct hci_cp_inquiry cp;
2117
2118         BT_DBG("%s", hdev->name);
2119
2120         if (test_bit(HCI_INQUIRY, &hdev->flags))
2121                 return;
2122
2123         /* Start Inquiry */
2124         memcpy(&cp.lap, &ir->lap, 3);
2125         cp.length  = ir->length;
2126         cp.num_rsp = ir->num_rsp;
2127         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2128 }
2129
2130 static int wait_inquiry(void *word)
2131 {
2132         schedule();
2133         return signal_pending(current);
2134 }
2135
2136 int hci_inquiry(void __user *arg)
2137 {
2138         __u8 __user *ptr = arg;
2139         struct hci_inquiry_req ir;
2140         struct hci_dev *hdev;
2141         int err = 0, do_inquiry = 0, max_rsp;
2142         long timeo;
2143         __u8 *buf;
2144
2145         if (copy_from_user(&ir, ptr, sizeof(ir)))
2146                 return -EFAULT;
2147
2148         hdev = hci_dev_get(ir.dev_id);
2149         if (!hdev)
2150                 return -ENODEV;
2151
2152         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2153                 err = -EBUSY;
2154                 goto done;
2155         }
2156
2157         if (hdev->dev_type != HCI_BREDR) {
2158                 err = -EOPNOTSUPP;
2159                 goto done;
2160         }
2161
2162         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2163                 err = -EOPNOTSUPP;
2164                 goto done;
2165         }
2166
2167         hci_dev_lock(hdev);
2168         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2169             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2170                 hci_inquiry_cache_flush(hdev);
2171                 do_inquiry = 1;
2172         }
2173         hci_dev_unlock(hdev);
2174
2175         timeo = ir.length * msecs_to_jiffies(2000);
2176
2177         if (do_inquiry) {
2178                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2179                                    timeo);
2180                 if (err < 0)
2181                         goto done;
2182
2183                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2184                  * cleared). If it is interrupted by a signal, return -EINTR.
2185                  */
2186                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2187                                 TASK_INTERRUPTIBLE))
2188                         return -EINTR;
2189         }
2190
2191         /* For an unlimited number of responses, use a buffer with
2192          * 255 entries.
2193          */
2194         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2195
2196         /* inquiry_cache_dump() can't sleep, so allocate a temporary
2197          * buffer here and copy it to user space afterwards.
2198          */
2199         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2200         if (!buf) {
2201                 err = -ENOMEM;
2202                 goto done;
2203         }
2204
2205         hci_dev_lock(hdev);
2206         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2207         hci_dev_unlock(hdev);
2208
2209         BT_DBG("num_rsp %d", ir.num_rsp);
2210
2211         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2212                 ptr += sizeof(ir);
2213                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2214                                  ir.num_rsp))
2215                         err = -EFAULT;
2216         } else
2217                 err = -EFAULT;
2218
2219         kfree(buf);
2220
2221 done:
2222         hci_dev_put(hdev);
2223         return err;
2224 }
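/* Userspace view of this ioctl (illustrative sketch, not part of this
 * file; needs <sys/ioctl.h>, <bluetooth/bluetooth.h> and <bluetooth/hci.h>):
 * the buffer passed to HCIINQUIRY is a struct hci_inquiry_req immediately
 * followed by room for the requested number of inquiry_info records.
 */
#if 0
        int dd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
        struct {
                struct hci_inquiry_req ir;
                struct inquiry_info info[255];
        } req = {
                .ir = {
                        .dev_id  = 0,                    /* hci0 */
                        .flags   = IREQ_CACHE_FLUSH,
                        .lap     = { 0x33, 0x8b, 0x9e }, /* GIAC */
                        .length  = 8,                    /* 8 * 1.28s */
                        .num_rsp = 255,
                },
        };

        if (ioctl(dd, HCIINQUIRY, &req) < 0)
                perror("HCIINQUIRY");
#endif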
2225
2226 static int hci_dev_do_open(struct hci_dev *hdev)
2227 {
2228         int ret = 0;
2229
2230         BT_DBG("%s %p", hdev->name, hdev);
2231
2232         hci_req_lock(hdev);
2233
2234         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2235                 ret = -ENODEV;
2236                 goto done;
2237         }
2238
2239         if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2240                 /* Check for rfkill but allow the HCI setup stage to
2241                  * proceed (which in itself doesn't cause any RF activity).
2242                  */
2243                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2244                         ret = -ERFKILL;
2245                         goto done;
2246                 }
2247
2248                 /* Check for valid public address or a configured static
2249          * random address, but let the HCI setup proceed to
2250                  * be able to determine if there is a public address
2251                  * or not.
2252                  *
2253                  * In case of user channel usage, it is not important
2254                  * if a public address or static random address is
2255                  * available.
2256                  *
2257                  * This check is only valid for BR/EDR controllers
2258                  * since AMP controllers do not have an address.
2259                  */
2260                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2261                     hdev->dev_type == HCI_BREDR &&
2262                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2263                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2264                         ret = -EADDRNOTAVAIL;
2265                         goto done;
2266                 }
2267         }
2268
2269         if (test_bit(HCI_UP, &hdev->flags)) {
2270                 ret = -EALREADY;
2271                 goto done;
2272         }
2273
2274         if (hdev->open(hdev)) {
2275                 ret = -EIO;
2276                 goto done;
2277         }
2278
2279         atomic_set(&hdev->cmd_cnt, 1);
2280         set_bit(HCI_INIT, &hdev->flags);
2281
2282         if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2283                 ret = hdev->setup(hdev);
2284
2285         if (!ret) {
2286                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2287                         set_bit(HCI_RAW, &hdev->flags);
2288
2289                 if (!test_bit(HCI_RAW, &hdev->flags) &&
2290                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2291                         ret = __hci_init(hdev);
2292         }
2293
2294         clear_bit(HCI_INIT, &hdev->flags);
2295
2296         if (!ret) {
2297                 hci_dev_hold(hdev);
2298                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2299                 set_bit(HCI_UP, &hdev->flags);
2300                 hci_notify(hdev, HCI_DEV_UP);
2301                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2302                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2303                     hdev->dev_type == HCI_BREDR) {
2304                         hci_dev_lock(hdev);
2305                         mgmt_powered(hdev, 1);
2306                         hci_dev_unlock(hdev);
2307                 }
2308         } else {
2309                 /* Init failed, cleanup */
2310                 flush_work(&hdev->tx_work);
2311                 flush_work(&hdev->cmd_work);
2312                 flush_work(&hdev->rx_work);
2313
2314                 skb_queue_purge(&hdev->cmd_q);
2315                 skb_queue_purge(&hdev->rx_q);
2316
2317                 if (hdev->flush)
2318                         hdev->flush(hdev);
2319
2320                 if (hdev->sent_cmd) {
2321                         kfree_skb(hdev->sent_cmd);
2322                         hdev->sent_cmd = NULL;
2323                 }
2324
2325                 hdev->close(hdev);
2326                 hdev->flags = 0;
2327         }
2328
2329 done:
2330         hci_req_unlock(hdev);
2331         return ret;
2332 }
2333
2334 /* ---- HCI ioctl helpers ---- */
2335
2336 int hci_dev_open(__u16 dev)
2337 {
2338         struct hci_dev *hdev;
2339         int err;
2340
2341         hdev = hci_dev_get(dev);
2342         if (!hdev)
2343                 return -ENODEV;
2344
2345         /* We need to ensure that no other power on/off work is pending
2346          * before proceeding to call hci_dev_do_open. This is
2347          * particularly important if the setup procedure has not yet
2348          * completed.
2349          */
2350         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2351                 cancel_delayed_work(&hdev->power_off);
2352
2353         /* After this call it is guaranteed that the setup procedure
2354          * has finished. This means that error conditions like RFKILL
2355          * or no valid public or static random address apply.
2356          */
2357         flush_workqueue(hdev->req_workqueue);
2358
2359         err = hci_dev_do_open(hdev);
2360
2361         hci_dev_put(hdev);
2362
2363         return err;
2364 }
2365
2366 static int hci_dev_do_close(struct hci_dev *hdev)
2367 {
2368         BT_DBG("%s %p", hdev->name, hdev);
2369
2370         cancel_delayed_work(&hdev->power_off);
2371
2372         hci_req_cancel(hdev, ENODEV);
2373         hci_req_lock(hdev);
2374
2375         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2376                 del_timer_sync(&hdev->cmd_timer);
2377                 hci_req_unlock(hdev);
2378                 return 0;
2379         }
2380
2381         /* Flush RX and TX works */
2382         flush_work(&hdev->tx_work);
2383         flush_work(&hdev->rx_work);
2384
2385         if (hdev->discov_timeout > 0) {
2386                 cancel_delayed_work(&hdev->discov_off);
2387                 hdev->discov_timeout = 0;
2388                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2389                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2390         }
2391
2392         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2393                 cancel_delayed_work(&hdev->service_cache);
2394
2395         cancel_delayed_work_sync(&hdev->le_scan_disable);
2396
2397         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2398                 cancel_delayed_work_sync(&hdev->rpa_expired);
2399
2400         hci_dev_lock(hdev);
2401         hci_inquiry_cache_flush(hdev);
2402         hci_conn_hash_flush(hdev);
2403         hci_pend_le_conns_clear(hdev);
2404         hci_dev_unlock(hdev);
2405
2406         hci_notify(hdev, HCI_DEV_DOWN);
2407
2408         if (hdev->flush)
2409                 hdev->flush(hdev);
2410
2411         /* Reset device */
2412         skb_queue_purge(&hdev->cmd_q);
2413         atomic_set(&hdev->cmd_cnt, 1);
2414         if (!test_bit(HCI_RAW, &hdev->flags) &&
2415             !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2416             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2417                 set_bit(HCI_INIT, &hdev->flags);
2418                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2419                 clear_bit(HCI_INIT, &hdev->flags);
2420         }
2421
2422         /* Flush cmd work */
2423         flush_work(&hdev->cmd_work);
2424
2425         /* Drop queues */
2426         skb_queue_purge(&hdev->rx_q);
2427         skb_queue_purge(&hdev->cmd_q);
2428         skb_queue_purge(&hdev->raw_q);
2429
2430         /* Drop last sent command */
2431         if (hdev->sent_cmd) {
2432                 del_timer_sync(&hdev->cmd_timer);
2433                 kfree_skb(hdev->sent_cmd);
2434                 hdev->sent_cmd = NULL;
2435         }
2436
2437         kfree_skb(hdev->recv_evt);
2438         hdev->recv_evt = NULL;
2439
2440         /* After this point our queues are empty
2441          * and no tasks are scheduled. */
2442         hdev->close(hdev);
2443
2444         /* Clear flags */
2445         hdev->flags = 0;
2446         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2447
2448         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2449                 if (hdev->dev_type == HCI_BREDR) {
2450                         hci_dev_lock(hdev);
2451                         mgmt_powered(hdev, 0);
2452                         hci_dev_unlock(hdev);
2453                 }
2454         }
2455
2456         /* Controller radio is available but is currently powered down */
2457         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2458
2459         memset(hdev->eir, 0, sizeof(hdev->eir));
2460         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2461         bacpy(&hdev->random_addr, BDADDR_ANY);
2462
2463         hci_req_unlock(hdev);
2464
2465         hci_dev_put(hdev);
2466         return 0;
2467 }
2468
2469 int hci_dev_close(__u16 dev)
2470 {
2471         struct hci_dev *hdev;
2472         int err;
2473
2474         hdev = hci_dev_get(dev);
2475         if (!hdev)
2476                 return -ENODEV;
2477
2478         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2479                 err = -EBUSY;
2480                 goto done;
2481         }
2482
2483         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2484                 cancel_delayed_work(&hdev->power_off);
2485
2486         err = hci_dev_do_close(hdev);
2487
2488 done:
2489         hci_dev_put(hdev);
2490         return err;
2491 }
2492
2493 int hci_dev_reset(__u16 dev)
2494 {
2495         struct hci_dev *hdev;
2496         int ret = 0;
2497
2498         hdev = hci_dev_get(dev);
2499         if (!hdev)
2500                 return -ENODEV;
2501
2502         hci_req_lock(hdev);
2503
2504         if (!test_bit(HCI_UP, &hdev->flags)) {
2505                 ret = -ENETDOWN;
2506                 goto done;
2507         }
2508
2509         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2510                 ret = -EBUSY;
2511                 goto done;
2512         }
2513
2514         /* Drop queues */
2515         skb_queue_purge(&hdev->rx_q);
2516         skb_queue_purge(&hdev->cmd_q);
2517
2518         hci_dev_lock(hdev);
2519         hci_inquiry_cache_flush(hdev);
2520         hci_conn_hash_flush(hdev);
2521         hci_dev_unlock(hdev);
2522
2523         if (hdev->flush)
2524                 hdev->flush(hdev);
2525
2526         atomic_set(&hdev->cmd_cnt, 1);
2527         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2528
2529         if (!test_bit(HCI_RAW, &hdev->flags))
2530                 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2531
2532 done:
2533         hci_req_unlock(hdev);
2534         hci_dev_put(hdev);
2535         return ret;
2536 }
2537
2538 int hci_dev_reset_stat(__u16 dev)
2539 {
2540         struct hci_dev *hdev;
2541         int ret = 0;
2542
2543         hdev = hci_dev_get(dev);
2544         if (!hdev)
2545                 return -ENODEV;
2546
2547         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2548                 ret = -EBUSY;
2549                 goto done;
2550         }
2551
2552         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2553
2554 done:
2555         hci_dev_put(hdev);
2556         return ret;
2557 }
2558
2559 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2560 {
2561         struct hci_dev *hdev;
2562         struct hci_dev_req dr;
2563         int err = 0;
2564
2565         if (copy_from_user(&dr, arg, sizeof(dr)))
2566                 return -EFAULT;
2567
2568         hdev = hci_dev_get(dr.dev_id);
2569         if (!hdev)
2570                 return -ENODEV;
2571
2572         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2573                 err = -EBUSY;
2574                 goto done;
2575         }
2576
2577         if (hdev->dev_type != HCI_BREDR) {
2578                 err = -EOPNOTSUPP;
2579                 goto done;
2580         }
2581
2582         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2583                 err = -EOPNOTSUPP;
2584                 goto done;
2585         }
2586
2587         switch (cmd) {
2588         case HCISETAUTH:
2589                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2590                                    HCI_INIT_TIMEOUT);
2591                 break;
2592
2593         case HCISETENCRYPT:
2594                 if (!lmp_encrypt_capable(hdev)) {
2595                         err = -EOPNOTSUPP;
2596                         break;
2597                 }
2598
2599                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2600                         /* Auth must be enabled first */
2601                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2602                                            HCI_INIT_TIMEOUT);
2603                         if (err)
2604                                 break;
2605                 }
2606
2607                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2608                                    HCI_INIT_TIMEOUT);
2609                 break;
2610
2611         case HCISETSCAN:
2612                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2613                                    HCI_INIT_TIMEOUT);
2614                 break;
2615
2616         case HCISETLINKPOL:
2617                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2618                                    HCI_INIT_TIMEOUT);
2619                 break;
2620
2621         case HCISETLINKMODE:
2622                 hdev->link_mode = ((__u16) dr.dev_opt) &
2623                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2624                 break;
2625
2626         case HCISETPTYPE:
2627                 hdev->pkt_type = (__u16) dr.dev_opt;
2628                 break;
2629
2630         case HCISETACLMTU:
2631                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2632                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2633                 break;
2634
2635         case HCISETSCOMTU:
2636                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2637                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2638                 break;
2639
2640         default:
2641                 err = -EINVAL;
2642                 break;
2643         }
2644
2645 done:
2646         hci_dev_put(hdev);
2647         return err;
2648 }
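/* Note on HCISETACLMTU/HCISETSCOMTU above: dr.dev_opt carries two 16-bit
 * halves, read back as ((__u16 *)&dr.dev_opt)[1] for the MTU and [0] for
 * the packet count. On a little-endian host that corresponds to a caller
 * packing them as (sketch):
 *
 *      dr.dev_opt = (mtu << 16) | pkts;
 */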
2649
2650 int hci_get_dev_list(void __user *arg)
2651 {
2652         struct hci_dev *hdev;
2653         struct hci_dev_list_req *dl;
2654         struct hci_dev_req *dr;
2655         int n = 0, size, err;
2656         __u16 dev_num;
2657
2658         if (get_user(dev_num, (__u16 __user *) arg))
2659                 return -EFAULT;
2660
2661         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2662                 return -EINVAL;
2663
2664         size = sizeof(*dl) + dev_num * sizeof(*dr);
2665
2666         dl = kzalloc(size, GFP_KERNEL);
2667         if (!dl)
2668                 return -ENOMEM;
2669
2670         dr = dl->dev_req;
2671
2672         read_lock(&hci_dev_list_lock);
2673         list_for_each_entry(hdev, &hci_dev_list, list) {
2674                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2675                         cancel_delayed_work(&hdev->power_off);
2676
2677                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2678                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2679
2680                 (dr + n)->dev_id  = hdev->id;
2681                 (dr + n)->dev_opt = hdev->flags;
2682
2683                 if (++n >= dev_num)
2684                         break;
2685         }
2686         read_unlock(&hci_dev_list_lock);
2687
2688         dl->dev_num = n;
2689         size = sizeof(*dl) + n * sizeof(*dr);
2690
2691         err = copy_to_user(arg, dl, size);
2692         kfree(dl);
2693
2694         return err ? -EFAULT : 0;
2695 }
2696
2697 int hci_get_dev_info(void __user *arg)
2698 {
2699         struct hci_dev *hdev;
2700         struct hci_dev_info di;
2701         int err = 0;
2702
2703         if (copy_from_user(&di, arg, sizeof(di)))
2704                 return -EFAULT;
2705
2706         hdev = hci_dev_get(di.dev_id);
2707         if (!hdev)
2708                 return -ENODEV;
2709
2710         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2711                 cancel_delayed_work_sync(&hdev->power_off);
2712
2713         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2714                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2715
2716         strcpy(di.name, hdev->name);
2717         di.bdaddr   = hdev->bdaddr;
2718         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2719         di.flags    = hdev->flags;
2720         di.pkt_type = hdev->pkt_type;
2721         if (lmp_bredr_capable(hdev)) {
2722                 di.acl_mtu  = hdev->acl_mtu;
2723                 di.acl_pkts = hdev->acl_pkts;
2724                 di.sco_mtu  = hdev->sco_mtu;
2725                 di.sco_pkts = hdev->sco_pkts;
2726         } else {
2727                 di.acl_mtu  = hdev->le_mtu;
2728                 di.acl_pkts = hdev->le_pkts;
2729                 di.sco_mtu  = 0;
2730                 di.sco_pkts = 0;
2731         }
2732         di.link_policy = hdev->link_policy;
2733         di.link_mode   = hdev->link_mode;
2734
2735         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2736         memcpy(&di.features, &hdev->features, sizeof(di.features));
2737
2738         if (copy_to_user(arg, &di, sizeof(di)))
2739                 err = -EFAULT;
2740
2741         hci_dev_put(hdev);
2742
2743         return err;
2744 }
2745
2746 /* ---- Interface to HCI drivers ---- */
2747
2748 static int hci_rfkill_set_block(void *data, bool blocked)
2749 {
2750         struct hci_dev *hdev = data;
2751
2752         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2753
2754         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2755                 return -EBUSY;
2756
2757         if (blocked) {
2758                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2759                 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2760                         hci_dev_do_close(hdev);
2761         } else {
2762                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2763         }
2764
2765         return 0;
2766 }
2767
2768 static const struct rfkill_ops hci_rfkill_ops = {
2769         .set_block = hci_rfkill_set_block,
2770 };
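/* The rfkill switch itself is allocated in hci_register_dev() later in
 * this file, roughly like the sketch below; this is what routes soft-block
 * state changes into hci_rfkill_set_block() above.
 */
#if 0
        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
                                    hdev);
#endif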
2771
2772 static void hci_power_on(struct work_struct *work)
2773 {
2774         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2775         int err;
2776
2777         BT_DBG("%s", hdev->name);
2778
2779         err = hci_dev_do_open(hdev);
2780         if (err < 0) {
2781                 mgmt_set_powered_failed(hdev, err);
2782                 return;
2783         }
2784
2785         /* During the HCI setup phase, a few error conditions are
2786          * ignored and they need to be checked now. If they are still
2787          * valid, it is important to turn the device back off.
2788          */
2789         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2790             (hdev->dev_type == HCI_BREDR &&
2791              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2792              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2793                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2794                 hci_dev_do_close(hdev);
2795         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2796                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2797                                    HCI_AUTO_OFF_TIMEOUT);
2798         }
2799
2800         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2801                 mgmt_index_added(hdev);
2802 }
2803
2804 static void hci_power_off(struct work_struct *work)
2805 {
2806         struct hci_dev *hdev = container_of(work, struct hci_dev,
2807                                             power_off.work);
2808
2809         BT_DBG("%s", hdev->name);
2810
2811         hci_dev_do_close(hdev);
2812 }
2813
2814 static void hci_discov_off(struct work_struct *work)
2815 {
2816         struct hci_dev *hdev;
2817
2818         hdev = container_of(work, struct hci_dev, discov_off.work);
2819
2820         BT_DBG("%s", hdev->name);
2821
2822         mgmt_discoverable_timeout(hdev);
2823 }
2824
2825 void hci_uuids_clear(struct hci_dev *hdev)
2826 {
2827         struct bt_uuid *uuid, *tmp;
2828
2829         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2830                 list_del(&uuid->list);
2831                 kfree(uuid);
2832         }
2833 }
2834
2835 void hci_link_keys_clear(struct hci_dev *hdev)
2836 {
2837         struct list_head *p, *n;
2838
2839         list_for_each_safe(p, n, &hdev->link_keys) {
2840                 struct link_key *key;
2841
2842                 key = list_entry(p, struct link_key, list);
2843
2844                 list_del(p);
2845                 kfree(key);
2846         }
2847 }
2848
2849 void hci_smp_ltks_clear(struct hci_dev *hdev)
2850 {
2851         struct smp_ltk *k, *tmp;
2852
2853         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2854                 list_del(&k->list);
2855                 kfree(k);
2856         }
2857 }
2858
2859 void hci_smp_irks_clear(struct hci_dev *hdev)
2860 {
2861         struct smp_irk *k, *tmp;
2862
2863         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2864                 list_del(&k->list);
2865                 kfree(k);
2866         }
2867 }
2868
2869 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2870 {
2871         struct link_key *k;
2872
2873         list_for_each_entry(k, &hdev->link_keys, list)
2874                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2875                         return k;
2876
2877         return NULL;
2878 }
2879
2880 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2881                                u8 key_type, u8 old_key_type)
2882 {
2883         /* Legacy key */
2884         if (key_type < 0x03)
2885                 return true;
2886
2887         /* Debug keys are insecure so don't store them persistently */
2888         if (key_type == HCI_LK_DEBUG_COMBINATION)
2889                 return false;
2890
2891         /* Changed combination key and there's no previous one */
2892         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2893                 return false;
2894
2895         /* Security mode 3 case */
2896         if (!conn)
2897                 return true;
2898
2899         /* Neither the local nor the remote side requested no-bonding */
2900         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2901                 return true;
2902
2903         /* Local side had dedicated bonding as requirement */
2904         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2905                 return true;
2906
2907         /* Remote side had dedicated bonding as requirement */
2908         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2909                 return true;
2910
2911         /* If none of the above criteria match, then don't store the key
2912          * persistently */
2913         return false;
2914 }
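/* Worked examples of the rules above: a legacy combination key (type 0x00)
 * is always stored; a debug combination key never is; a changed combination
 * key with old_key_type == 0xff (no previous key) is dropped; and for SSP
 * keys the bonding requirements decide, e.g. general bonding (auth
 * requirements 0x04/0x05, i.e. > 0x01) on both sides makes the key persist.
 */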
2915
2916 static bool ltk_type_master(u8 type)
2917 {
2918         if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2919                 return true;
2920
2921         return false;
2922 }
2923
2924 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2925                              bool master)
2926 {
2927         struct smp_ltk *k;
2928
2929         list_for_each_entry(k, &hdev->long_term_keys, list) {
2930                 if (k->ediv != ediv || k->rand != rand)
2931                         continue;
2932
2933                 if (ltk_type_master(k->type) != master)
2934                         continue;
2935
2936                 return k;
2937         }
2938
2939         return NULL;
2940 }
2941
2942 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2943                                      u8 addr_type, bool master)
2944 {
2945         struct smp_ltk *k;
2946
2947         list_for_each_entry(k, &hdev->long_term_keys, list)
2948                 if (addr_type == k->bdaddr_type &&
2949                     bacmp(bdaddr, &k->bdaddr) == 0 &&
2950                     ltk_type_master(k->type) == master)
2951                         return k;
2952
2953         return NULL;
2954 }
2955
2956 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2957 {
2958         struct smp_irk *irk;
2959
2960         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2961                 if (!bacmp(&irk->rpa, rpa))
2962                         return irk;
2963         }
2964
2965         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2966                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2967                         bacpy(&irk->rpa, rpa);
2968                         return irk;
2969                 }
2970         }
2971
2972         return NULL;
2973 }
2974
2975 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2976                                      u8 addr_type)
2977 {
2978         struct smp_irk *irk;
2979
2980         /* Identity Address must be public or static random */
2981         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2982                 return NULL;
2983
2984         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2985                 if (addr_type == irk->addr_type &&
2986                     bacmp(bdaddr, &irk->bdaddr) == 0)
2987                         return irk;
2988         }
2989
2990         return NULL;
2991 }
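/* The (bdaddr->b[5] & 0xc0) != 0xc0 test encodes the Core Specification
 * rule that a static random address has its two most significant bits set
 * to 0b11; bdaddr_t is stored little-endian, so b[5] holds the most
 * significant byte. For example C4:xx:xx:xx:xx:xx qualifies
 * (0xc4 & 0xc0 == 0xc0) while 7F:xx:xx:xx:xx:xx is rejected.
 */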
2992
2993 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
2994                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
2995 {
2996         struct link_key *key, *old_key;
2997         u8 old_key_type;
2998         bool persistent;
2999
3000         old_key = hci_find_link_key(hdev, bdaddr);
3001         if (old_key) {
3002                 old_key_type = old_key->type;
3003                 key = old_key;
3004         } else {
3005                 old_key_type = conn ? conn->key_type : 0xff;
3006                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3007                 if (!key)
3008                         return -ENOMEM;
3009                 list_add(&key->list, &hdev->link_keys);
3010         }
3011
3012         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3013
3014         /* Some buggy controller combinations generate a changed
3015          * combination key for legacy pairing even when there's no
3016          * previous key */
3017         if (type == HCI_LK_CHANGED_COMBINATION &&
3018             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3019                 type = HCI_LK_COMBINATION;
3020                 if (conn)
3021                         conn->key_type = type;
3022         }
3023
3024         bacpy(&key->bdaddr, bdaddr);
3025         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3026         key->pin_len = pin_len;
3027
3028         if (type == HCI_LK_CHANGED_COMBINATION)
3029                 key->type = old_key_type;
3030         else
3031                 key->type = type;
3032
3033         if (!new_key)
3034                 return 0;
3035
3036         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
3037
3038         mgmt_new_link_key(hdev, key, persistent);
3039
3040         if (conn)
3041                 conn->flush_key = !persistent;
3042
3043         return 0;
3044 }
3045
3046 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3047                             u8 addr_type, u8 type, u8 authenticated,
3048                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3049 {
3050         struct smp_ltk *key, *old_key;
3051         bool master = ltk_type_master(type);
3052
3053         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3054         if (old_key)
3055                 key = old_key;
3056         else {
3057                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3058                 if (!key)
3059                         return NULL;
3060                 list_add(&key->list, &hdev->long_term_keys);
3061         }
3062
3063         bacpy(&key->bdaddr, bdaddr);
3064         key->bdaddr_type = addr_type;
3065         memcpy(key->val, tk, sizeof(key->val));
3066         key->authenticated = authenticated;
3067         key->ediv = ediv;
3068         key->rand = rand;
3069         key->enc_size = enc_size;
3070         key->type = type;
3071
3072         return key;
3073 }
3074
3075 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3076                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3077 {
3078         struct smp_irk *irk;
3079
3080         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3081         if (!irk) {
3082                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3083                 if (!irk)
3084                         return NULL;
3085
3086                 bacpy(&irk->bdaddr, bdaddr);
3087                 irk->addr_type = addr_type;
3088
3089                 list_add(&irk->list, &hdev->identity_resolving_keys);
3090         }
3091
3092         memcpy(irk->val, val, 16);
3093         bacpy(&irk->rpa, rpa);
3094
3095         return irk;
3096 }
3097
3098 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3099 {
3100         struct link_key *key;
3101
3102         key = hci_find_link_key(hdev, bdaddr);
3103         if (!key)
3104                 return -ENOENT;
3105
3106         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3107
3108         list_del(&key->list);
3109         kfree(key);
3110
3111         return 0;
3112 }
3113
3114 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3115 {
3116         struct smp_ltk *k, *tmp;
3117         int removed = 0;
3118
3119         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3120                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3121                         continue;
3122
3123                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3124
3125                 list_del(&k->list);
3126                 kfree(k);
3127                 removed++;
3128         }
3129
3130         return removed ? 0 : -ENOENT;
3131 }
3132
3133 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3134 {
3135         struct smp_irk *k, *tmp;
3136
3137         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3138                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3139                         continue;
3140
3141                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3142
3143                 list_del(&k->list);
3144                 kfree(k);
3145         }
3146 }
3147
3148 /* HCI command timer function */
3149 static void hci_cmd_timeout(unsigned long arg)
3150 {
3151         struct hci_dev *hdev = (void *) arg;
3152
3153         if (hdev->sent_cmd) {
3154                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3155                 u16 opcode = __le16_to_cpu(sent->opcode);
3156
3157                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3158         } else {
3159                 BT_ERR("%s command tx timeout", hdev->name);
3160         }
3161
3162         atomic_set(&hdev->cmd_cnt, 1);
3163         queue_work(hdev->workqueue, &hdev->cmd_work);
3164 }
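/* A sketch of the timer's life cycle (from the surrounding code paths):
 * hdev->cmd_timer is set up when the device is allocated, re-armed for
 * HCI_CMD_TIMEOUT whenever a command is handed to the driver, and deleted
 * on close. Forcing cmd_cnt back to 1 here lets the command queue make
 * progress again after a controller failed to answer.
 */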
3165
3166 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3167                                           bdaddr_t *bdaddr)
3168 {
3169         struct oob_data *data;
3170
3171         list_for_each_entry(data, &hdev->remote_oob_data, list)
3172                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3173                         return data;
3174
3175         return NULL;
3176 }
3177
3178 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3179 {
3180         struct oob_data *data;
3181
3182         data = hci_find_remote_oob_data(hdev, bdaddr);
3183         if (!data)
3184                 return -ENOENT;
3185
3186         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3187
3188         list_del(&data->list);
3189         kfree(data);
3190
3191         return 0;
3192 }
3193
3194 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3195 {
3196         struct oob_data *data, *n;
3197
3198         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3199                 list_del(&data->list);
3200                 kfree(data);
3201         }
3202 }
3203
3204 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3205                             u8 *hash, u8 *randomizer)
3206 {
3207         struct oob_data *data;
3208
3209         data = hci_find_remote_oob_data(hdev, bdaddr);
3210         if (!data) {
3211                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3212                 if (!data)
3213                         return -ENOMEM;
3214
3215                 bacpy(&data->bdaddr, bdaddr);
3216                 list_add(&data->list, &hdev->remote_oob_data);
3217         }
3218
3219         memcpy(data->hash192, hash, sizeof(data->hash192));
3220         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3221
3222         memset(data->hash256, 0, sizeof(data->hash256));
3223         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3224
3225         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3226
3227         return 0;
3228 }
3229
3230 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3231                                 u8 *hash192, u8 *randomizer192,
3232                                 u8 *hash256, u8 *randomizer256)
3233 {
3234         struct oob_data *data;
3235
3236         data = hci_find_remote_oob_data(hdev, bdaddr);
3237         if (!data) {
3238                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3239                 if (!data)
3240                         return -ENOMEM;
3241
3242                 bacpy(&data->bdaddr, bdaddr);
3243                 list_add(&data->list, &hdev->remote_oob_data);
3244         }
3245
3246         memcpy(data->hash192, hash192, sizeof(data->hash192));
3247         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3248
3249         memcpy(data->hash256, hash256, sizeof(data->hash256));
3250         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3251
3252         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3253
3254         return 0;
3255 }
3256
3257 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3258                                          bdaddr_t *bdaddr, u8 type)
3259 {
3260         struct bdaddr_list *b;
3261
3262         list_for_each_entry(b, &hdev->blacklist, list) {
3263                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3264                         return b;
3265         }
3266
3267         return NULL;
3268 }
3269
3270 static void hci_blacklist_clear(struct hci_dev *hdev)
3271 {
3272         struct list_head *p, *n;
3273
3274         list_for_each_safe(p, n, &hdev->blacklist) {
3275                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3276
3277                 list_del(p);
3278                 kfree(b);
3279         }
3280 }
3281
3282 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3283 {
3284         struct bdaddr_list *entry;
3285
3286         if (!bacmp(bdaddr, BDADDR_ANY))
3287                 return -EBADF;
3288
3289         if (hci_blacklist_lookup(hdev, bdaddr, type))
3290                 return -EEXIST;
3291
3292         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3293         if (!entry)
3294                 return -ENOMEM;
3295
3296         bacpy(&entry->bdaddr, bdaddr);
3297         entry->bdaddr_type = type;
3298
3299         list_add(&entry->list, &hdev->blacklist);
3300
3301         return mgmt_device_blocked(hdev, bdaddr, type);
3302 }
3303
3304 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3305 {
3306         struct bdaddr_list *entry;
3307
3308         if (!bacmp(bdaddr, BDADDR_ANY)) {
3309                 hci_blacklist_clear(hdev);
3310                 return 0;
3311         }
3312
3313         entry = hci_blacklist_lookup(hdev, bdaddr, type);
3314         if (!entry)
3315                 return -ENOENT;
3316
3317         list_del(&entry->list);
3318         kfree(entry);
3319
3320         return mgmt_device_unblocked(hdev, bdaddr, type);
3321 }
3322
3323 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3324                                           bdaddr_t *bdaddr, u8 type)
3325 {
3326         struct bdaddr_list *b;
3327
3328         list_for_each_entry(b, &hdev->le_white_list, list) {
3329                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3330                         return b;
3331         }
3332
3333         return NULL;
3334 }
3335
3336 void hci_white_list_clear(struct hci_dev *hdev)
3337 {
3338         struct list_head *p, *n;
3339
3340         list_for_each_safe(p, n, &hdev->le_white_list) {
3341                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3342
3343                 list_del(p);
3344                 kfree(b);
3345         }
3346 }
3347
3348 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3349 {
3350         struct bdaddr_list *entry;
3351
3352         if (!bacmp(bdaddr, BDADDR_ANY))
3353                 return -EBADF;
3354
3355         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3356         if (!entry)
3357                 return -ENOMEM;
3358
3359         bacpy(&entry->bdaddr, bdaddr);
3360         entry->bdaddr_type = type;
3361
3362         list_add(&entry->list, &hdev->le_white_list);
3363
3364         return 0;
3365 }
3366
3367 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3368 {
3369 {