/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dev_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
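
/* Usage sketch (assumption: this attribute is registered as "dut_mode"
 * in the controller's debugfs directory, e.g.
 * /sys/kernel/debug/bluetooth/hci0):
 *
 *   # cat /sys/kernel/debug/bluetooth/hci0/dut_mode
 *   N
 *   # echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing "Y" issues HCI_OP_ENABLE_DUT_MODE; writing "N" resets the
 * controller instead, since there is no dedicated command to leave
 * Device Under Test mode.
 */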

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored with reversed
                 * byte order (little endian), so convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}
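
/* Illustrative example of the reversal above: the SDP Serial Port
 * UUID 0x1101 mapped onto the Bluetooth base UUID is stored in
 * uuid->uuid as
 *   fb 34 9b 5f 80 00 00 80 00 10 00 00 01 11 00 00
 * and, after reversal, prints via %pUb as
 *   00001101-0000-1000-8000-00805f9b34fb
 */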

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;
        struct sk_buff *skb;
        __u8 mode;
        int err;

        if (val != 0 && val != 1)
                return -EINVAL;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        hci_req_lock(hdev);
        mode = val;
        skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
                             &mode, HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_lock(hdev);
        hdev->ssp_debug_mode = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->ssp_debug_mode;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
                        ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
                                      const char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_SC, &hdev->dev_flags);

        return count;
}

static const struct file_operations force_sc_support_fops = {
        .open           = simple_open,
        .read           = force_sc_support_read,
        .write          = force_sc_support_write,
        .llseek         = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
                                 size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
        .open           = simple_open,
        .read           = sc_only_mode_read,
        .llseek         = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        /* Require the RPA timeout to be at least 30 seconds and at most
         * 24 hours.
         */
        if (val < 30 || val > (60 * 60 * 24))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->rpa_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->rpa_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
                        rpa_timeout_set, "%llu\n");

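/* Note: sniff intervals are expressed in baseband slots of 0.625 ms,
 * and the specification only allows even values, which is why the
 * setters below reject odd ones (e.g. 0x0320 = 800 slots = 500 ms).
 */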
static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val > hdev->conn_info_max_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_min_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_min_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
                        conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val < hdev->conn_info_min_age)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->conn_info_max_age = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->conn_info_max_age;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
                        conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        bdaddr_t addr;
        u8 addr_type;

        hci_dev_lock(hdev);

        hci_copy_identity_address(hdev, &addr, &addr_type);

        seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
                   16, hdev->irk, &hdev->rpa);

        hci_dev_unlock(hdev);

        return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
        .open           = identity_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->random_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
        .open           = random_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t force_static_address_read(struct file *file,
                                         char __user *user_buf,
                                         size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
                                          const char __user *user_buf,
                                          size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));
        bool enable;

        if (test_bit(HCI_UP, &hdev->flags))
                return -EBUSY;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

        return count;
}

static const struct file_operations force_static_address_fops = {
        .open           = simple_open,
        .read           = force_static_address_read,
        .write          = force_static_address_write,
        .llseek         = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->le_white_list, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
        return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
        .open           = white_list_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
                struct smp_irk *irk = list_entry(p, struct smp_irk, list);
                seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
                           &irk->bdaddr, irk->addr_type,
                           16, irk->val, &irk->rpa);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, identity_resolving_keys_show,
                           inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
        .open           = identity_resolving_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           __le64_to_cpu(ltk->rand), 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

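/* Note: LE connection intervals are in units of 1.25 ms; the range
 * checked below, 0x0006 to 0x0c80, corresponds to 7.5 ms through 4 s.
 */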
static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x01 || val > 0x07)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_adv_channel_map = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_adv_channel_map;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
                        adv_channel_map_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
                            size_t count, loff_t *position)
{
        struct hci_dev *hdev = fp->private_data;
        bool enable;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf)-1));

        if (copy_from_user(buf, user_buffer, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';

        if (strtobool(buf, &enable) < 0)
                return -EINVAL;

        if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

        return count;
}

static const struct file_operations lowpan_debugfs_fops = {
        .open           = simple_open,
        .read           = lowpan_read,
        .write          = lowpan_write,
        .llseek         = default_llseek,
};

static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
        struct hci_dev *hdev = sf->private;
        struct hci_conn_params *p;

        hci_dev_lock(hdev);

        list_for_each_entry(p, &hdev->le_conn_params, list) {
                seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
                           p->auto_connect);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
        return single_open(file, le_auto_conn_show, inode->i_private);
}

static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
                                  size_t count, loff_t *offset)
{
        struct seq_file *sf = file->private_data;
        struct hci_dev *hdev = sf->private;
        u8 auto_connect = 0;
        bdaddr_t addr;
        u8 addr_type;
        char *buf;
        int err = 0;
        int n;

        /* Don't allow partial write */
        if (*offset != 0)
                return -EINVAL;

        if (count < 3)
                return -EINVAL;

        buf = memdup_user(data, count);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        if (memcmp(buf, "add", 3) == 0) {
                n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
                           &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
                           &addr.b[1], &addr.b[0], &addr_type,
                           &auto_connect);

                if (n < 7) {
                        err = -EINVAL;
                        goto done;
                }

                hci_dev_lock(hdev);
                err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
                                          hdev->le_conn_min_interval,
                                          hdev->le_conn_max_interval);
                hci_dev_unlock(hdev);

                if (err)
                        goto done;
        } else if (memcmp(buf, "del", 3) == 0) {
                n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
                           &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
                           &addr.b[1], &addr.b[0], &addr_type);

                if (n < 7) {
                        err = -EINVAL;
                        goto done;
                }

                hci_dev_lock(hdev);
                hci_conn_params_del(hdev, &addr, addr_type);
                hci_dev_unlock(hdev);
        } else if (memcmp(buf, "clr", 3) == 0) {
                hci_dev_lock(hdev);
                hci_conn_params_clear(hdev);
                hci_pend_le_conns_clear(hdev);
                hci_update_background_scan(hdev);
                hci_dev_unlock(hdev);
        } else {
                err = -EINVAL;
        }

done:
        kfree(buf);

        if (err)
                return err;
        else
                return count;
}

static const struct file_operations le_auto_conn_fops = {
        .open           = le_auto_conn_open,
        .read           = seq_read,
        .write          = le_auto_conn_write,
        .llseek         = seq_lseek,
        .release        = single_release,
};
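
/* Usage sketch for the write handler above (assumption: the attribute
 * is registered as "le_auto_conn" in the controller's debugfs
 * directory):
 *
 *   # echo "add 00:11:22:33:44:55 1 2" > le_auto_conn
 *   # echo "del 00:11:22:33:44:55 1" > le_auto_conn
 *   # echo "clr" > le_auto_conn
 *
 * The fields after the address are addr_type and (for "add") the
 * optional auto_connect value; the address is parsed most significant
 * byte first into b[5]..b[0].
 */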

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
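
/* A minimal (hypothetical) caller sketch: send Read BD_ADDR
 * synchronously and take ownership of the returned event skb. Callers
 * in this file take the request lock around the call, as
 * dut_mode_write() above does:
 *
 *      struct sk_buff *skb;
 *
 *      hci_req_lock(hdev);
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *                           HCI_CMD_TIMEOUT);
 *      hci_req_unlock(hdev);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *
 *      ... skb->data holds the command complete parameters,
 *      ... with the status byte first
 *
 *      kfree_skb(skb);
 */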

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Clear LE White List */
        hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to
         * set any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01;      /* IO Capability Request */
                events[6] |= 0x02;      /* IO Capability Response */
                events[6] |= 0x04;      /* User Confirmation Request */
                events[6] |= 0x08;      /* User Passkey Request */
                events[6] |= 0x10;      /* Remote OOB Data Request */
                events[6] |= 0x20;      /* Simple Pairing Complete */
                events[7] |= 0x04;      /* User Passkey Notification */
                events[7] |= 0x08;      /* Keypress Notification */
                events[7] |= 0x10;      /* Remote Host Supported
                                         * Features Notification
                                         */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20;      /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, the host features page should
                 * be available as well. However, some controllers list
                 * max_page as 0 as long as SSP has not been enabled.
                 * To achieve proper debugging output, force max_page to
                 * at least 1 here.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If the Connectionless Slave Broadcast master role is supported,
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40;      /* Triggered Clock Capture */
                events[1] |= 0x80;      /* Synchronization Train Complete */
                events[2] |= 0x10;      /* Slave Page Response Timeout */
                events[2] |= 0x20;      /* CSB Channel Map Change */
        }

        /* If the Connectionless Slave Broadcast slave role is supported,
         * enable all necessary events for it.
         */
1683         if (lmp_csb_slave_capable(hdev)) {
1684                 events[2] |= 0x01;      /* Synchronization Train Received */
1685                 events[2] |= 0x02;      /* CSB Receive */
1686                 events[2] |= 0x04;      /* CSB Timeout */
1687                 events[2] |= 0x08;      /* Truncated Page Complete */
1688         }
1689
1690         /* Enable Authenticated Payload Timeout Expired event if supported */
1691         if (lmp_ping_capable(hdev))
1692                 events[2] |= 0x80;
1693
1694         hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1695 }
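
/* Editor's note: a worked example, not part of the original file. For a
 * controller that is CSB master capable and ping capable (but not CSB
 * slave capable), the mask assembled above is
 *
 *      events[1] == 0x40 | 0x80        == 0xc0
 *      events[2] == 0x10 | 0x20 | 0x80 == 0xb0
 *
 * with all other bytes left at 0x00.
 */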
1696
1697 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1698 {
1699         struct hci_dev *hdev = req->hdev;
1700         u8 p;
1701
1702         /* Some Broadcom based Bluetooth controllers do not support the
1703          * Delete Stored Link Key command. They clearly indicate its
1704          * absence in the bit mask of supported commands.
1705          *
1706          * Check the supported commands and send the command only if it
1707          * is marked as supported. If not, assume that the controller
1708          * does not have actual support for stored link keys, which
1709          * makes this command redundant anyway.
1710          *
1711          * Some controllers claim to support deleting stored link keys,
1712          * but they do not; the quirk lets a driver simply disable the
1713          * command.
1714          */
1715         if (hdev->commands[6] & 0x80 &&
1716             !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1717                 struct hci_cp_delete_stored_link_key cp;
1718
1719                 bacpy(&cp.bdaddr, BDADDR_ANY);
1720                 cp.delete_all = 0x01;
1721                 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1722                             sizeof(cp), &cp);
1723         }
1724
1725         if (hdev->commands[5] & 0x10)
1726                 hci_setup_link_policy(req);
1727
1728         if (lmp_le_capable(hdev))
1729                 hci_set_le_support(req);
1730
1731         /* Read features beyond page 1 if available */
1732         for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1733                 struct hci_cp_read_local_ext_features cp;
1734
1735                 cp.page = p;
1736                 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1737                             sizeof(cp), &cp);
1738         }
1739 }
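
/* Editor's note: a sketch under the assumption that readers want the
 * decoding rule for tests like "hdev->commands[6] & 0x80". The
 * commands[] array mirrors the Read Local Supported Commands bitmask,
 * so octet 6 bit 7 is Delete Stored Link Key and octet 5 bit 4
 * (commands[5] & 0x10) is Write Default Link Policy Settings. A generic
 * helper (hypothetical name, not part of this file) could read:
 */
#if 0
static bool example_hci_cmd_supported(struct hci_dev *hdev,
                                      unsigned int octet, unsigned int bit)
{
        /* Each octet of commands[] holds eight command-supported flags */
        return (hdev->commands[octet] & (1u << bit)) != 0;
}
#endif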
1740
1741 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1742 {
1743         struct hci_dev *hdev = req->hdev;
1744
1745         /* Set event mask page 2 if the HCI command for it is supported */
1746         if (hdev->commands[22] & 0x04)
1747                 hci_set_event_mask_page_2(req);
1748
1749         /* Check for Synchronization Train support */
1750         if (lmp_sync_train_capable(hdev))
1751                 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1752
1753         /* Enable Secure Connections if supported and configured */
1754         if ((lmp_sc_capable(hdev) ||
1755              test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
1756             test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1757                 u8 support = 0x01;
1758                 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1759                             sizeof(support), &support);
1760         }
1761 }
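
/* Editor's note: "hdev->commands[22] & 0x04" above follows the same
 * supported-commands decoding; octet 22 bit 2 is Set Event Mask Page 2,
 * so the mask is only written when the controller can accept it.
 */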
1762
1763 static int __hci_init(struct hci_dev *hdev)
1764 {
1765         int err;
1766
1767         err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1768         if (err < 0)
1769                 return err;
1770
1771         /* The Device Under Test (DUT) mode is special and available for
1772          * all controller types. So just create it early on.
1773          */
1774         if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1775                 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1776                                     &dut_mode_fops);
1777         }
1778
1779         /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1780          * dual-mode BR/EDR/LE type controllers. AMP controllers only
1781          * need the first stage init.
1782          */
1783         if (hdev->dev_type != HCI_BREDR)
1784                 return 0;
1785
1786         err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1787         if (err < 0)
1788                 return err;
1789
1790         err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1791         if (err < 0)
1792                 return err;
1793
1794         err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1795         if (err < 0)
1796                 return err;
1797
1798         /* Only create debugfs entries during the initial setup
1799          * phase and not every time the controller gets powered on.
1800          */
1801         if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1802                 return 0;
1803
1804         debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1805                             &features_fops);
1806         debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1807                            &hdev->manufacturer);
1808         debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1809         debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1810         debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1811                             &blacklist_fops);
1812         debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1813
1814         debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1815                             &conn_info_min_age_fops);
1816         debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1817                             &conn_info_max_age_fops);
1818
1819         if (lmp_bredr_capable(hdev)) {
1820                 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1821                                     hdev, &inquiry_cache_fops);
1822                 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1823                                     hdev, &link_keys_fops);
1824                 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1825                                     hdev, &dev_class_fops);
1826                 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1827                                     hdev, &voice_setting_fops);
1828         }
1829
1830         if (lmp_ssp_capable(hdev)) {
1831                 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1832                                     hdev, &auto_accept_delay_fops);
1833                 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1834                                     hdev, &ssp_debug_mode_fops);
1835                 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1836                                     hdev, &force_sc_support_fops);
1837                 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1838                                     hdev, &sc_only_mode_fops);
1839         }
1840
1841         if (lmp_sniff_capable(hdev)) {
1842                 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1843                                     hdev, &idle_timeout_fops);
1844                 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1845                                     hdev, &sniff_min_interval_fops);
1846                 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1847                                     hdev, &sniff_max_interval_fops);
1848         }
1849
1850         if (lmp_le_capable(hdev)) {
1851                 debugfs_create_file("identity", 0400, hdev->debugfs,
1852                                     hdev, &identity_fops);
1853                 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1854                                     hdev, &rpa_timeout_fops);
1855                 debugfs_create_file("random_address", 0444, hdev->debugfs,
1856                                     hdev, &random_address_fops);
1857                 debugfs_create_file("static_address", 0444, hdev->debugfs,
1858                                     hdev, &static_address_fops);
1859
1860                 /* For controllers with a public address, provide a debug
1861                  * option to force the usage of the configured static
1862                  * address. By default the public address is used.
1863                  */
1864                 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1865                         debugfs_create_file("force_static_address", 0644,
1866                                             hdev->debugfs, hdev,
1867                                             &force_static_address_fops);
1868
1869                 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1870                                   &hdev->le_white_list_size);
1871                 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1872                                     &white_list_fops);
1873                 debugfs_create_file("identity_resolving_keys", 0400,
1874                                     hdev->debugfs, hdev,
1875                                     &identity_resolving_keys_fops);
1876                 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1877                                     hdev, &long_term_keys_fops);
1878                 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1879                                     hdev, &conn_min_interval_fops);
1880                 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1881                                     hdev, &conn_max_interval_fops);
1882                 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1883                                     hdev, &adv_channel_map_fops);
1884                 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1885                                     &lowpan_debugfs_fops);
1886                 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1887                                     &le_auto_conn_fops);
1888                 debugfs_create_u16("discov_interleaved_timeout", 0644,
1889                                    hdev->debugfs,
1890                                    &hdev->discov_interleaved_timeout);
1891         }
1892
1893         return 0;
1894 }
1895
1896 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1897 {
1898         __u8 scan = opt;
1899
1900         BT_DBG("%s %x", req->hdev->name, scan);
1901
1902         /* Inquiry and Page scans */
1903         hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1904 }
1905
1906 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1907 {
1908         __u8 auth = opt;
1909
1910         BT_DBG("%s %x", req->hdev->name, auth);
1911
1912         /* Authentication */
1913         hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1914 }
1915
1916 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1917 {
1918         __u8 encrypt = opt;
1919
1920         BT_DBG("%s %x", req->hdev->name, encrypt);
1921
1922         /* Encryption */
1923         hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1924 }
1925
1926 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1927 {
1928         __le16 policy = cpu_to_le16(opt);
1929
1930         BT_DBG("%s %x", req->hdev->name, policy);
1931
1932         /* Default link policy */
1933         hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1934 }
1935
1936 /* Get HCI device by index.
1937  * Device is held on return. */
1938 struct hci_dev *hci_dev_get(int index)
1939 {
1940         struct hci_dev *hdev = NULL, *d;
1941
1942         BT_DBG("%d", index);
1943
1944         if (index < 0)
1945                 return NULL;
1946
1947         read_lock(&hci_dev_list_lock);
1948         list_for_each_entry(d, &hci_dev_list, list) {
1949                 if (d->id == index) {
1950                         hdev = hci_dev_hold(d);
1951                         break;
1952                 }
1953         }
1954         read_unlock(&hci_dev_list_lock);
1955         return hdev;
1956 }
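
/* Editor's note: an illustrative sketch, not part of the original file.
 * Every successful hci_dev_get() must be balanced by hci_dev_put(),
 * which is why the ioctl helpers below funnel their exits through a
 * "done" label. A minimal (hypothetical) caller:
 */
#if 0
static int example_read_dev_flags(int index, unsigned long *flags)
{
        struct hci_dev *hdev = hci_dev_get(index);

        if (!hdev)
                return -ENODEV;

        *flags = hdev->flags;   /* safe while the reference is held */
        hci_dev_put(hdev);      /* balance hci_dev_get() */
        return 0;
}
#endif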
1957
1958 /* ---- Inquiry support ---- */
1959
1960 bool hci_discovery_active(struct hci_dev *hdev)
1961 {
1962         struct discovery_state *discov = &hdev->discovery;
1963
1964         switch (discov->state) {
1965         case DISCOVERY_FINDING:
1966         case DISCOVERY_RESOLVING:
1967                 return true;
1968
1969         default:
1970                 return false;
1971         }
1972 }
1973
1974 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1975 {
1976         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1977
1978         if (hdev->discovery.state == state)
1979                 return;
1980
1981         switch (state) {
1982         case DISCOVERY_STOPPED:
1983                 hci_update_background_scan(hdev);
1984
1985                 if (hdev->discovery.state != DISCOVERY_STARTING)
1986                         mgmt_discovering(hdev, 0);
1987                 break;
1988         case DISCOVERY_STARTING:
1989                 break;
1990         case DISCOVERY_FINDING:
1991                 mgmt_discovering(hdev, 1);
1992                 break;
1993         case DISCOVERY_RESOLVING:
1994                 break;
1995         case DISCOVERY_STOPPING:
1996                 break;
1997         }
1998
1999         hdev->discovery.state = state;
2000 }
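
/* Editor's note: the switch above implements the discovery state
 * machine STOPPED -> STARTING -> FINDING [-> RESOLVING] -> STOPPING ->
 * STOPPED. mgmt_discovering() is signalled only on the edge into
 * FINDING (1) and into STOPPED (0); a STARTING -> STOPPED transition
 * stays silent because discovery never actually began.
 */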
2001
2002 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2003 {
2004         struct discovery_state *cache = &hdev->discovery;
2005         struct inquiry_entry *p, *n;
2006
2007         list_for_each_entry_safe(p, n, &cache->all, all) {
2008                 list_del(&p->all);
2009                 kfree(p);
2010         }
2011
2012         INIT_LIST_HEAD(&cache->unknown);
2013         INIT_LIST_HEAD(&cache->resolve);
2014 }
2015
2016 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2017                                                bdaddr_t *bdaddr)
2018 {
2019         struct discovery_state *cache = &hdev->discovery;
2020         struct inquiry_entry *e;
2021
2022         BT_DBG("cache %p, %pMR", cache, bdaddr);
2023
2024         list_for_each_entry(e, &cache->all, all) {
2025                 if (!bacmp(&e->data.bdaddr, bdaddr))
2026                         return e;
2027         }
2028
2029         return NULL;
2030 }
2031
2032 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2033                                                        bdaddr_t *bdaddr)
2034 {
2035         struct discovery_state *cache = &hdev->discovery;
2036         struct inquiry_entry *e;
2037
2038         BT_DBG("cache %p, %pMR", cache, bdaddr);
2039
2040         list_for_each_entry(e, &cache->unknown, list) {
2041                 if (!bacmp(&e->data.bdaddr, bdaddr))
2042                         return e;
2043         }
2044
2045         return NULL;
2046 }
2047
2048 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2049                                                        bdaddr_t *bdaddr,
2050                                                        int state)
2051 {
2052         struct discovery_state *cache = &hdev->discovery;
2053         struct inquiry_entry *e;
2054
2055         BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2056
2057         list_for_each_entry(e, &cache->resolve, list) {
2058                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2059                         return e;
2060                 if (!bacmp(&e->data.bdaddr, bdaddr))
2061                         return e;
2062         }
2063
2064         return NULL;
2065 }
2066
2067 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2068                                       struct inquiry_entry *ie)
2069 {
2070         struct discovery_state *cache = &hdev->discovery;
2071         struct list_head *pos = &cache->resolve;
2072         struct inquiry_entry *p;
2073
2074         list_del(&ie->list);
2075
2076         list_for_each_entry(p, &cache->resolve, list) {
2077                 if (p->name_state != NAME_PENDING &&
2078                     abs(p->data.rssi) >= abs(ie->data.rssi))
2079                         break;
2080                 pos = &p->list;
2081         }
2082
2083         list_add(&ie->list, pos);
2084 }
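
/* Editor's note: the insertion walk above keeps the resolve list sorted
 * by ascending abs(rssi) while skipping entries whose name resolution
 * is already in flight (NAME_PENDING), so remote names are resolved for
 * the strongest (typically closest) devices first.
 */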
2085
2086 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2087                               bool name_known, bool *ssp)
2088 {
2089         struct discovery_state *cache = &hdev->discovery;
2090         struct inquiry_entry *ie;
2091
2092         BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2093
2094         hci_remove_remote_oob_data(hdev, &data->bdaddr);
2095
2096         *ssp = data->ssp_mode;
2097
2098         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2099         if (ie) {
2100                 if (ie->data.ssp_mode)
2101                         *ssp = true;
2102
2103                 if (ie->name_state == NAME_NEEDED &&
2104                     data->rssi != ie->data.rssi) {
2105                         ie->data.rssi = data->rssi;
2106                         hci_inquiry_cache_update_resolve(hdev, ie);
2107                 }
2108
2109                 goto update;
2110         }
2111
2112         /* Entry not in the cache. Add new one. */
2113         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2114         if (!ie)
2115                 return false;
2116
2117         list_add(&ie->all, &cache->all);
2118
2119         if (name_known) {
2120                 ie->name_state = NAME_KNOWN;
2121         } else {
2122                 ie->name_state = NAME_NOT_KNOWN;
2123                 list_add(&ie->list, &cache->unknown);
2124         }
2125
2126 update:
2127         if (name_known && ie->name_state != NAME_KNOWN &&
2128             ie->name_state != NAME_PENDING) {
2129                 ie->name_state = NAME_KNOWN;
2130                 list_del(&ie->list);
2131         }
2132
2133         memcpy(&ie->data, data, sizeof(*data));
2134         ie->timestamp = jiffies;
2135         cache->timestamp = jiffies;
2136
2137         if (ie->name_state == NAME_NOT_KNOWN)
2138                 return false;
2139
2140         return true;
2141 }
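
/* Editor's note: the boolean result above tells the caller whether the
 * cached entry already carries a usable name; false means the entry
 * still sits on the unknown list and a remote name request is needed
 * before the device can be reported with a name.
 */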
2142
2143 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2144 {
2145         struct discovery_state *cache = &hdev->discovery;
2146         struct inquiry_info *info = (struct inquiry_info *) buf;
2147         struct inquiry_entry *e;
2148         int copied = 0;
2149
2150         list_for_each_entry(e, &cache->all, all) {
2151                 struct inquiry_data *data = &e->data;
2152
2153                 if (copied >= num)
2154                         break;
2155
2156                 bacpy(&info->bdaddr, &data->bdaddr);
2157                 info->pscan_rep_mode    = data->pscan_rep_mode;
2158                 info->pscan_period_mode = data->pscan_period_mode;
2159                 info->pscan_mode        = data->pscan_mode;
2160                 memcpy(info->dev_class, data->dev_class, 3);
2161                 info->clock_offset      = data->clock_offset;
2162
2163                 info++;
2164                 copied++;
2165         }
2166
2167         BT_DBG("cache %p, copied %d", cache, copied);
2168         return copied;
2169 }
2170
2171 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2172 {
2173         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2174         struct hci_dev *hdev = req->hdev;
2175         struct hci_cp_inquiry cp;
2176
2177         BT_DBG("%s", hdev->name);
2178
2179         if (test_bit(HCI_INQUIRY, &hdev->flags))
2180                 return;
2181
2182         /* Start Inquiry */
2183         memcpy(&cp.lap, &ir->lap, 3);
2184         cp.length  = ir->length;
2185         cp.num_rsp = ir->num_rsp;
2186         hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2187 }
2188
2189 int hci_inquiry(void __user *arg)
2190 {
2191         __u8 __user *ptr = arg;
2192         struct hci_inquiry_req ir;
2193         struct hci_dev *hdev;
2194         int err = 0, do_inquiry = 0, max_rsp;
2195         long timeo;
2196         __u8 *buf;
2197
2198         if (copy_from_user(&ir, ptr, sizeof(ir)))
2199                 return -EFAULT;
2200
2201         hdev = hci_dev_get(ir.dev_id);
2202         if (!hdev)
2203                 return -ENODEV;
2204
2205         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2206                 err = -EBUSY;
2207                 goto done;
2208         }
2209
2210         if (hdev->dev_type != HCI_BREDR) {
2211                 err = -EOPNOTSUPP;
2212                 goto done;
2213         }
2214
2215         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2216                 err = -EOPNOTSUPP;
2217                 goto done;
2218         }
2219
2220         hci_dev_lock(hdev);
2221         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2222             inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2223                 hci_inquiry_cache_flush(hdev);
2224                 do_inquiry = 1;
2225         }
2226         hci_dev_unlock(hdev);
2227
2228         timeo = ir.length * msecs_to_jiffies(2000);
2229
2230         if (do_inquiry) {
2231                 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2232                                    timeo);
2233                 if (err < 0)
2234                         goto done;
2235
2236                 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2237                  * cleared). If it is interrupted by a signal, fail with
2238                  * -EINTR via the done label so that the reference taken by
2239                  * hci_dev_get() above is dropped.
2240                  */
2241                 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2242                                 TASK_INTERRUPTIBLE)) {
2243                         err = -EINTR;
2244                         goto done;
2245                 }
2242         }
2243
2244         /* For an unlimited number of responses, use a buffer of
2245          * 255 entries.
2246          */
2247         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2248
2249         /* inquiry_cache_dump() can't sleep, so allocate a temporary
2250          * buffer here and copy the results to user space afterwards.
2251          */
2252         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2253         if (!buf) {
2254                 err = -ENOMEM;
2255                 goto done;
2256         }
2257
2258         hci_dev_lock(hdev);
2259         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2260         hci_dev_unlock(hdev);
2261
2262         BT_DBG("num_rsp %d", ir.num_rsp);
2263
2264         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2265                 ptr += sizeof(ir);
2266                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2267                                  ir.num_rsp))
2268                         err = -EFAULT;
2269         } else
2270                 err = -EFAULT;
2271
2272         kfree(buf);
2273
2274 done:
2275         hci_dev_put(hdev);
2276         return err;
2277 }
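
/* Editor's note: an illustrative sketch, not part of the original file.
 * hci_inquiry() backs the HCIINQUIRY ioctl on a raw HCI socket. A
 * user-space caller passes a struct hci_inquiry_req followed by room
 * for the inquiry_info results, for example (values hypothetical):
 */
#if 0
        struct {
                struct hci_inquiry_req ir;
                struct inquiry_info    info[8];
        } req = {
                .ir = {
                        .dev_id  = 0,                     /* hci0 */
                        .flags   = IREQ_CACHE_FLUSH,
                        .lap     = { 0x33, 0x8b, 0x9e },  /* GIAC */
                        .length  = 8,                     /* ~10.24 s */
                        .num_rsp = 8,
                },
        };

        err = ioctl(dd, HCIINQUIRY, (unsigned long) &req);
#endif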
2278
2279 static int hci_dev_do_open(struct hci_dev *hdev)
2280 {
2281         int ret = 0;
2282
2283         BT_DBG("%s %p", hdev->name, hdev);
2284
2285         hci_req_lock(hdev);
2286
2287         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2288                 ret = -ENODEV;
2289                 goto done;
2290         }
2291
2292         if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2293                 /* Check for rfkill but allow the HCI setup stage to
2294                  * proceed (which in itself doesn't cause any RF activity).
2295                  */
2296                 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2297                         ret = -ERFKILL;
2298                         goto done;
2299                 }
2300
2301                 /* Check for valid public address or a configured static
2302                  * random address, but let the HCI setup proceed to
2303                  * be able to determine if there is a public address
2304                  * or not.
2305                  *
2306                  * In case of user channel usage, it is not important
2307                  * if a public address or static random address is
2308                  * available.
2309                  *
2310                  * This check is only valid for BR/EDR controllers
2311                  * since AMP controllers do not have an address.
2312                  */
2313                 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2314                     hdev->dev_type == HCI_BREDR &&
2315                     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2316                     !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2317                         ret = -EADDRNOTAVAIL;
2318                         goto done;
2319                 }
2320         }
2321
2322         if (test_bit(HCI_UP, &hdev->flags)) {
2323                 ret = -EALREADY;
2324                 goto done;
2325         }
2326
2327         if (hdev->open(hdev)) {
2328                 ret = -EIO;
2329                 goto done;
2330         }
2331
2332         atomic_set(&hdev->cmd_cnt, 1);
2333         set_bit(HCI_INIT, &hdev->flags);
2334
2335         if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2336                 ret = hdev->setup(hdev);
2337
2338         if (!ret) {
2339                 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2340                         set_bit(HCI_RAW, &hdev->flags);
2341
2342                 if (!test_bit(HCI_RAW, &hdev->flags) &&
2343                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2344                         ret = __hci_init(hdev);
2345         }
2346
2347         clear_bit(HCI_INIT, &hdev->flags);
2348
2349         if (!ret) {
2350                 hci_dev_hold(hdev);
2351                 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2352                 set_bit(HCI_UP, &hdev->flags);
2353                 hci_notify(hdev, HCI_DEV_UP);
2354                 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2355                     !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2356                     hdev->dev_type == HCI_BREDR) {
2357                         hci_dev_lock(hdev);
2358                         mgmt_powered(hdev, 1);
2359                         hci_dev_unlock(hdev);
2360                 }
2361         } else {
2362                 /* Init failed, cleanup */
2363                 flush_work(&hdev->tx_work);
2364                 flush_work(&hdev->cmd_work);
2365                 flush_work(&hdev->rx_work);
2366
2367                 skb_queue_purge(&hdev->cmd_q);
2368                 skb_queue_purge(&hdev->rx_q);
2369
2370                 if (hdev->flush)
2371                         hdev->flush(hdev);
2372
2373                 if (hdev->sent_cmd) {
2374                         kfree_skb(hdev->sent_cmd);
2375                         hdev->sent_cmd = NULL;
2376                 }
2377
2378                 hdev->close(hdev);
2379                 hdev->flags = 0;
2380         }
2381
2382 done:
2383         hci_req_unlock(hdev);
2384         return ret;
2385 }
2386
2387 /* ---- HCI ioctl helpers ---- */
2388
2389 int hci_dev_open(__u16 dev)
2390 {
2391         struct hci_dev *hdev;
2392         int err;
2393
2394         hdev = hci_dev_get(dev);
2395         if (!hdev)
2396                 return -ENODEV;
2397
2398         /* We need to ensure that no other power on/off work is pending
2399          * before proceeding to call hci_dev_do_open. This is
2400          * particularly important if the setup procedure has not yet
2401          * completed.
2402          */
2403         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2404                 cancel_delayed_work(&hdev->power_off);
2405
2406         /* After this call it is guaranteed that the setup procedure
2407          * has finished. This means that error conditions like RFKILL
2408          * or no valid public or static random address apply.
2409          */
2410         flush_workqueue(hdev->req_workqueue);
2411
2412         err = hci_dev_do_open(hdev);
2413
2414         hci_dev_put(hdev);
2415
2416         return err;
2417 }
2418
2419 static int hci_dev_do_close(struct hci_dev *hdev)
2420 {
2421         BT_DBG("%s %p", hdev->name, hdev);
2422
2423         cancel_delayed_work(&hdev->power_off);
2424
2425         hci_req_cancel(hdev, ENODEV);
2426         hci_req_lock(hdev);
2427
2428         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2429                 del_timer_sync(&hdev->cmd_timer);
2430                 hci_req_unlock(hdev);
2431                 return 0;
2432         }
2433
2434         /* Flush RX and TX works */
2435         flush_work(&hdev->tx_work);
2436         flush_work(&hdev->rx_work);
2437
2438         if (hdev->discov_timeout > 0) {
2439                 cancel_delayed_work(&hdev->discov_off);
2440                 hdev->discov_timeout = 0;
2441                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2442                 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2443         }
2444
2445         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2446                 cancel_delayed_work(&hdev->service_cache);
2447
2448         cancel_delayed_work_sync(&hdev->le_scan_disable);
2449
2450         if (test_bit(HCI_MGMT, &hdev->dev_flags))
2451                 cancel_delayed_work_sync(&hdev->rpa_expired);
2452
2453         hci_dev_lock(hdev);
2454         hci_inquiry_cache_flush(hdev);
2455         hci_conn_hash_flush(hdev);
2456         hci_pend_le_conns_clear(hdev);
2457         hci_dev_unlock(hdev);
2458
2459         hci_notify(hdev, HCI_DEV_DOWN);
2460
2461         if (hdev->flush)
2462                 hdev->flush(hdev);
2463
2464         /* Reset device */
2465         skb_queue_purge(&hdev->cmd_q);
2466         atomic_set(&hdev->cmd_cnt, 1);
2467         if (!test_bit(HCI_RAW, &hdev->flags) &&
2468             !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2469             test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2470                 set_bit(HCI_INIT, &hdev->flags);
2471                 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2472                 clear_bit(HCI_INIT, &hdev->flags);
2473         }
2474
2475         /* Flush cmd work */
2476         flush_work(&hdev->cmd_work);
2477
2478         /* Drop queues */
2479         skb_queue_purge(&hdev->rx_q);
2480         skb_queue_purge(&hdev->cmd_q);
2481         skb_queue_purge(&hdev->raw_q);
2482
2483         /* Drop last sent command */
2484         if (hdev->sent_cmd) {
2485                 del_timer_sync(&hdev->cmd_timer);
2486                 kfree_skb(hdev->sent_cmd);
2487                 hdev->sent_cmd = NULL;
2488         }
2489
2490         kfree_skb(hdev->recv_evt);
2491         hdev->recv_evt = NULL;
2492
2493         /* After this point our queues are empty
2494          * and no tasks are scheduled. */
2495         hdev->close(hdev);
2496
2497         /* Clear flags */
2498         hdev->flags = 0;
2499         hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2500
2501         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2502                 if (hdev->dev_type == HCI_BREDR) {
2503                         hci_dev_lock(hdev);
2504                         mgmt_powered(hdev, 0);
2505                         hci_dev_unlock(hdev);
2506                 }
2507         }
2508
2509         /* Controller radio is available but is currently powered down */
2510         hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2511
2512         memset(hdev->eir, 0, sizeof(hdev->eir));
2513         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2514         bacpy(&hdev->random_addr, BDADDR_ANY);
2515
2516         hci_req_unlock(hdev);
2517
2518         hci_dev_put(hdev);
2519         return 0;
2520 }
2521
2522 int hci_dev_close(__u16 dev)
2523 {
2524         struct hci_dev *hdev;
2525         int err;
2526
2527         hdev = hci_dev_get(dev);
2528         if (!hdev)
2529                 return -ENODEV;
2530
2531         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2532                 err = -EBUSY;
2533                 goto done;
2534         }
2535
2536         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2537                 cancel_delayed_work(&hdev->power_off);
2538
2539         err = hci_dev_do_close(hdev);
2540
2541 done:
2542         hci_dev_put(hdev);
2543         return err;
2544 }
2545
2546 int hci_dev_reset(__u16 dev)
2547 {
2548         struct hci_dev *hdev;
2549         int ret = 0;
2550
2551         hdev = hci_dev_get(dev);
2552         if (!hdev)
2553                 return -ENODEV;
2554
2555         hci_req_lock(hdev);
2556
2557         if (!test_bit(HCI_UP, &hdev->flags)) {
2558                 ret = -ENETDOWN;
2559                 goto done;
2560         }
2561
2562         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2563                 ret = -EBUSY;
2564                 goto done;
2565         }
2566
2567         /* Drop queues */
2568         skb_queue_purge(&hdev->rx_q);
2569         skb_queue_purge(&hdev->cmd_q);
2570
2571         hci_dev_lock(hdev);
2572         hci_inquiry_cache_flush(hdev);
2573         hci_conn_hash_flush(hdev);
2574         hci_dev_unlock(hdev);
2575
2576         if (hdev->flush)
2577                 hdev->flush(hdev);
2578
2579         atomic_set(&hdev->cmd_cnt, 1);
2580         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2581
2582         if (!test_bit(HCI_RAW, &hdev->flags))
2583                 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2584
2585 done:
2586         hci_req_unlock(hdev);
2587         hci_dev_put(hdev);
2588         return ret;
2589 }
2590
2591 int hci_dev_reset_stat(__u16 dev)
2592 {
2593         struct hci_dev *hdev;
2594         int ret = 0;
2595
2596         hdev = hci_dev_get(dev);
2597         if (!hdev)
2598                 return -ENODEV;
2599
2600         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2601                 ret = -EBUSY;
2602                 goto done;
2603         }
2604
2605         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2606
2607 done:
2608         hci_dev_put(hdev);
2609         return ret;
2610 }
2611
2612 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2613 {
2614         struct hci_dev *hdev;
2615         struct hci_dev_req dr;
2616         int err = 0;
2617
2618         if (copy_from_user(&dr, arg, sizeof(dr)))
2619                 return -EFAULT;
2620
2621         hdev = hci_dev_get(dr.dev_id);
2622         if (!hdev)
2623                 return -ENODEV;
2624
2625         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2626                 err = -EBUSY;
2627                 goto done;
2628         }
2629
2630         if (hdev->dev_type != HCI_BREDR) {
2631                 err = -EOPNOTSUPP;
2632                 goto done;
2633         }
2634
2635         if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2636                 err = -EOPNOTSUPP;
2637                 goto done;
2638         }
2639
2640         switch (cmd) {
2641         case HCISETAUTH:
2642                 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2643                                    HCI_INIT_TIMEOUT);
2644                 break;
2645
2646         case HCISETENCRYPT:
2647                 if (!lmp_encrypt_capable(hdev)) {
2648                         err = -EOPNOTSUPP;
2649                         break;
2650                 }
2651
2652                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2653                         /* Auth must be enabled first */
2654                         err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2655                                            HCI_INIT_TIMEOUT);
2656                         if (err)
2657                                 break;
2658                 }
2659
2660                 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2661                                    HCI_INIT_TIMEOUT);
2662                 break;
2663
2664         case HCISETSCAN:
2665                 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2666                                    HCI_INIT_TIMEOUT);
2667                 break;
2668
2669         case HCISETLINKPOL:
2670                 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2671                                    HCI_INIT_TIMEOUT);
2672                 break;
2673
2674         case HCISETLINKMODE:
2675                 hdev->link_mode = ((__u16) dr.dev_opt) &
2676                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
2677                 break;
2678
2679         case HCISETPTYPE:
2680                 hdev->pkt_type = (__u16) dr.dev_opt;
2681                 break;
2682
2683         case HCISETACLMTU:
2684                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
2685                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2686                 break;
2687
2688         case HCISETSCOMTU:
2689                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
2690                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2691                 break;
2692
2693         default:
2694                 err = -EINVAL;
2695                 break;
2696         }
2697
2698 done:
2699         hci_dev_put(hdev);
2700         return err;
2701 }
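
/* Editor's note: an illustrative sketch, not part of the original file.
 * HCISETACLMTU and HCISETSCOMTU pack two 16-bit values into the 32-bit
 * dev_opt: the first halfword is the packet count and the second the
 * MTU, matching the pointer arithmetic above (and therefore sharing its
 * host-endianness assumption). On a little-endian host a caller would
 * build:
 */
#if 0
        dr.dev_opt = ((__u32) acl_mtu << 16) | acl_pkts;
#endif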
2702
2703 int hci_get_dev_list(void __user *arg)
2704 {
2705         struct hci_dev *hdev;
2706         struct hci_dev_list_req *dl;
2707         struct hci_dev_req *dr;
2708         int n = 0, size, err;
2709         __u16 dev_num;
2710
2711         if (get_user(dev_num, (__u16 __user *) arg))
2712                 return -EFAULT;
2713
2714         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2715                 return -EINVAL;
2716
2717         size = sizeof(*dl) + dev_num * sizeof(*dr);
2718
2719         dl = kzalloc(size, GFP_KERNEL);
2720         if (!dl)
2721                 return -ENOMEM;
2722
2723         dr = dl->dev_req;
2724
2725         read_lock(&hci_dev_list_lock);
2726         list_for_each_entry(hdev, &hci_dev_list, list) {
2727                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2728                         cancel_delayed_work(&hdev->power_off);
2729
2730                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2731                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2732
2733                 (dr + n)->dev_id  = hdev->id;
2734                 (dr + n)->dev_opt = hdev->flags;
2735
2736                 if (++n >= dev_num)
2737                         break;
2738         }
2739         read_unlock(&hci_dev_list_lock);
2740
2741         dl->dev_num = n;
2742         size = sizeof(*dl) + n * sizeof(*dr);
2743
2744         err = copy_to_user(arg, dl, size);
2745         kfree(dl);
2746
2747         return err ? -EFAULT : 0;
2748 }
2749
2750 int hci_get_dev_info(void __user *arg)
2751 {
2752         struct hci_dev *hdev;
2753         struct hci_dev_info di;
2754         int err = 0;
2755
2756         if (copy_from_user(&di, arg, sizeof(di)))
2757                 return -EFAULT;
2758
2759         hdev = hci_dev_get(di.dev_id);
2760         if (!hdev)
2761                 return -ENODEV;
2762
2763         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2764                 cancel_delayed_work_sync(&hdev->power_off);
2765
2766         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2767                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2768
2769         strcpy(di.name, hdev->name);
2770         di.bdaddr   = hdev->bdaddr;
2771         di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2772         di.flags    = hdev->flags;
2773         di.pkt_type = hdev->pkt_type;
2774         if (lmp_bredr_capable(hdev)) {
2775                 di.acl_mtu  = hdev->acl_mtu;
2776                 di.acl_pkts = hdev->acl_pkts;
2777                 di.sco_mtu  = hdev->sco_mtu;
2778                 di.sco_pkts = hdev->sco_pkts;
2779         } else {
2780                 di.acl_mtu  = hdev->le_mtu;
2781                 di.acl_pkts = hdev->le_pkts;
2782                 di.sco_mtu  = 0;
2783                 di.sco_pkts = 0;
2784         }
2785         di.link_policy = hdev->link_policy;
2786         di.link_mode   = hdev->link_mode;
2787
2788         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2789         memcpy(&di.features, &hdev->features, sizeof(di.features));
2790
2791         if (copy_to_user(arg, &di, sizeof(di)))
2792                 err = -EFAULT;
2793
2794         hci_dev_put(hdev);
2795
2796         return err;
2797 }
2798
2799 /* ---- Interface to HCI drivers ---- */
2800
2801 static int hci_rfkill_set_block(void *data, bool blocked)
2802 {
2803         struct hci_dev *hdev = data;
2804
2805         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2806
2807         if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2808                 return -EBUSY;
2809
2810         if (blocked) {
2811                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2812                 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2813                         hci_dev_do_close(hdev);
2814         } else {
2815                 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2816         }
2817
2818         return 0;
2819 }
2820
2821 static const struct rfkill_ops hci_rfkill_ops = {
2822         .set_block = hci_rfkill_set_block,
2823 };
2824
2825 static void hci_power_on(struct work_struct *work)
2826 {
2827         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2828         int err;
2829
2830         BT_DBG("%s", hdev->name);
2831
2832         err = hci_dev_do_open(hdev);
2833         if (err < 0) {
2834                 mgmt_set_powered_failed(hdev, err);
2835                 return;
2836         }
2837
2838         /* During the HCI setup phase, a few error conditions are
2839          * ignored and they need to be checked now. If they are still
2840          * valid, it is important to turn the device back off.
2841          */
2842         if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2843             (hdev->dev_type == HCI_BREDR &&
2844              !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2845              !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2846                 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2847                 hci_dev_do_close(hdev);
2848         } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2849                 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2850                                    HCI_AUTO_OFF_TIMEOUT);
2851         }
2852
2853         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2854                 mgmt_index_added(hdev);
2855 }
2856
2857 static void hci_power_off(struct work_struct *work)
2858 {
2859         struct hci_dev *hdev = container_of(work, struct hci_dev,
2860                                             power_off.work);
2861
2862         BT_DBG("%s", hdev->name);
2863
2864         hci_dev_do_close(hdev);
2865 }
2866
2867 static void hci_discov_off(struct work_struct *work)
2868 {
2869         struct hci_dev *hdev;
2870
2871         hdev = container_of(work, struct hci_dev, discov_off.work);
2872
2873         BT_DBG("%s", hdev->name);
2874
2875         mgmt_discoverable_timeout(hdev);
2876 }
2877
2878 void hci_uuids_clear(struct hci_dev *hdev)
2879 {
2880         struct bt_uuid *uuid, *tmp;
2881
2882         list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2883                 list_del(&uuid->list);
2884                 kfree(uuid);
2885         }
2886 }
2887
2888 void hci_link_keys_clear(struct hci_dev *hdev)
2889 {
2890         struct list_head *p, *n;
2891
2892         list_for_each_safe(p, n, &hdev->link_keys) {
2893                 struct link_key *key;
2894
2895                 key = list_entry(p, struct link_key, list);
2896
2897                 list_del(p);
2898                 kfree(key);
2899         }
2900 }
2901
2902 void hci_smp_ltks_clear(struct hci_dev *hdev)
2903 {
2904         struct smp_ltk *k, *tmp;
2905
2906         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2907                 list_del(&k->list);
2908                 kfree(k);
2909         }
2910 }
2911
2912 void hci_smp_irks_clear(struct hci_dev *hdev)
2913 {
2914         struct smp_irk *k, *tmp;
2915
2916         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2917                 list_del(&k->list);
2918                 kfree(k);
2919         }
2920 }
2921
2922 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2923 {
2924         struct link_key *k;
2925
2926         list_for_each_entry(k, &hdev->link_keys, list)
2927                 if (bacmp(bdaddr, &k->bdaddr) == 0)
2928                         return k;
2929
2930         return NULL;
2931 }
2932
2933 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2934                                u8 key_type, u8 old_key_type)
2935 {
2936         /* Legacy key */
2937         if (key_type < 0x03)
2938                 return true;
2939
2940         /* Debug keys are insecure so don't store them persistently */
2941         if (key_type == HCI_LK_DEBUG_COMBINATION)
2942                 return false;
2943
2944         /* Changed combination key and there's no previous one */
2945         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2946                 return false;
2947
2948         /* Security mode 3 case */
2949         if (!conn)
2950                 return true;
2951
2952         /* Neither local nor remote side had no-bonding as requirement */
2953         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2954                 return true;
2955
2956         /* Local side had dedicated bonding as requirement */
2957         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2958                 return true;
2959
2960         /* Remote side had dedicated bonding as requirement */
2961         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2962                 return true;
2963
2964         /* If none of the above criteria match, then don't store the key
2965          * persistently */
2966         return false;
2967 }
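
/* Editor's note: a worked example of the policy above. An
 * unauthenticated combination key (type 0x04) created while both sides
 * requested general bonding (auth requirement 0x04) passes the
 * "neither side had no-bonding" test and is stored persistently; the
 * same key from a connection where either side asked for no-bonding
 * (0x00 or 0x01) falls through every rule and is kept only for the
 * lifetime of that connection.
 */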
2968
2969 static bool ltk_type_master(u8 type)
2970 {
2971         if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2972                 return true;
2973
2974         return false;
2975 }
2976
2977 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2978                              bool master)
2979 {
2980         struct smp_ltk *k;
2981
2982         list_for_each_entry(k, &hdev->long_term_keys, list) {
2983                 if (k->ediv != ediv || k->rand != rand)
2984                         continue;
2985
2986                 if (ltk_type_master(k->type) != master)
2987                         continue;
2988
2989                 return k;
2990         }
2991
2992         return NULL;
2993 }
2994
2995 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2996                                      u8 addr_type, bool master)
2997 {
2998         struct smp_ltk *k;
2999
3000         list_for_each_entry(k, &hdev->long_term_keys, list)
3001                 if (addr_type == k->bdaddr_type &&
3002                     bacmp(bdaddr, &k->bdaddr) == 0 &&
3003                     ltk_type_master(k->type) == master)
3004                         return k;
3005
3006         return NULL;
3007 }
3008
3009 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3010 {
3011         struct smp_irk *irk;
3012
3013         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3014                 if (!bacmp(&irk->rpa, rpa))
3015                         return irk;
3016         }
3017
3018         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3019                 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3020                         bacpy(&irk->rpa, rpa);
3021                         return irk;
3022                 }
3023         }
3024
3025         return NULL;
3026 }
3027
3028 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3029                                      u8 addr_type)
3030 {
3031         struct smp_irk *irk;
3032
3033         /* Identity Address must be public or static random */
3034         if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3035                 return NULL;
3036
3037         list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3038                 if (addr_type == irk->addr_type &&
3039                     bacmp(bdaddr, &irk->bdaddr) == 0)
3040                         return irk;
3041         }
3042
3043         return NULL;
3044 }
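
/* Editor's note: the "(bdaddr->b[5] & 0xc0) != 0xc0" test above encodes
 * the rule that a static random address must have its two most
 * significant bits set to 0b11 (b[5] is the most significant byte in
 * bdaddr_t's little-endian layout). For example, c0:11:22:33:44:55
 * qualifies, while 40:11:22:33:44:55 starts with 0b01 and is a
 * resolvable private address, so it is rejected as an Identity Address.
 */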
3045
3046 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
3047                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
3048 {
3049         struct link_key *key, *old_key;
3050         u8 old_key_type;
3051         bool persistent;
3052
3053         old_key = hci_find_link_key(hdev, bdaddr);
3054         if (old_key) {
3055                 old_key_type = old_key->type;
3056                 key = old_key;
3057         } else {
3058                 old_key_type = conn ? conn->key_type : 0xff;
3059                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3060                 if (!key)
3061                         return -ENOMEM;
3062                 list_add(&key->list, &hdev->link_keys);
3063         }
3064
3065         BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3066
3067         /* Some buggy controller combinations generate a changed
3068          * combination key for legacy pairing even when there's no
3069          * previous key */
3070         if (type == HCI_LK_CHANGED_COMBINATION &&
3071             (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3072                 type = HCI_LK_COMBINATION;
3073                 if (conn)
3074                         conn->key_type = type;
3075         }
3076
3077         bacpy(&key->bdaddr, bdaddr);
3078         memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3079         key->pin_len = pin_len;
3080
3081         if (type == HCI_LK_CHANGED_COMBINATION)
3082                 key->type = old_key_type;
3083         else
3084                 key->type = type;
3085
3086         if (!new_key)
3087                 return 0;
3088
3089         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
3090
3091         mgmt_new_link_key(hdev, key, persistent);
3092
3093         if (conn)
3094                 conn->flush_key = !persistent;
3095
3096         return 0;
3097 }
3098
3099 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3100                             u8 addr_type, u8 type, u8 authenticated,
3101                             u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3102 {
3103         struct smp_ltk *key, *old_key;
3104         bool master = ltk_type_master(type);
3105
3106         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3107         if (old_key)
3108                 key = old_key;
3109         else {
3110                 key = kzalloc(sizeof(*key), GFP_KERNEL);
3111                 if (!key)
3112                         return NULL;
3113                 list_add(&key->list, &hdev->long_term_keys);
3114         }
3115
3116         bacpy(&key->bdaddr, bdaddr);
3117         key->bdaddr_type = addr_type;
3118         memcpy(key->val, tk, sizeof(key->val));
3119         key->authenticated = authenticated;
3120         key->ediv = ediv;
3121         key->rand = rand;
3122         key->enc_size = enc_size;
3123         key->type = type;
3124
3125         return key;
3126 }
3127
3128 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3129                             u8 addr_type, u8 val[16], bdaddr_t *rpa)
3130 {
3131         struct smp_irk *irk;
3132
3133         irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3134         if (!irk) {
3135                 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3136                 if (!irk)
3137                         return NULL;
3138
3139                 bacpy(&irk->bdaddr, bdaddr);
3140                 irk->addr_type = addr_type;
3141
3142                 list_add(&irk->list, &hdev->identity_resolving_keys);
3143         }
3144
3145         memcpy(irk->val, val, 16);
3146         bacpy(&irk->rpa, rpa);
3147
3148         return irk;
3149 }
3150
3151 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3152 {
3153         struct link_key *key;
3154
3155         key = hci_find_link_key(hdev, bdaddr);
3156         if (!key)
3157                 return -ENOENT;
3158
3159         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3160
3161         list_del(&key->list);
3162         kfree(key);
3163
3164         return 0;
3165 }
3166
3167 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3168 {
3169         struct smp_ltk *k, *tmp;
3170         int removed = 0;
3171
3172         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3173                 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3174                         continue;
3175
3176                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3177
3178                 list_del(&k->list);
3179                 kfree(k);
3180                 removed++;
3181         }
3182
3183         return removed ? 0 : -ENOENT;
3184 }
3185
3186 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3187 {
3188         struct smp_irk *k, *tmp;
3189
3190         list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3191                 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3192                         continue;
3193
3194                 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3195
3196                 list_del(&k->list);
3197                 kfree(k);
3198         }
3199 }
3200
3201 /* HCI command timer function */
3202 static void hci_cmd_timeout(unsigned long arg)
3203 {
3204         struct hci_dev *hdev = (void *) arg;
3205
3206         if (hdev->sent_cmd) {
3207                 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3208                 u16 opcode = __le16_to_cpu(sent->opcode);
3209
3210                 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3211         } else {
3212                 BT_ERR("%s command tx timeout", hdev->name);
3213         }
3214
3215         atomic_set(&hdev->cmd_cnt, 1);
3216         queue_work(hdev->workqueue, &hdev->cmd_work);
3217 }
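
/* Editor's note: after a command timeout the controller never delivered
 * the Command Complete/Status event that would normally restore a
 * command credit, so the handler above resets cmd_cnt to 1 and requeues
 * cmd_work; otherwise the command queue would stall forever behind the
 * lost command.
 */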
3218
3219 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3220                                           bdaddr_t *bdaddr)
3221 {
3222         struct oob_data *data;
3223
3224         list_for_each_entry(data, &hdev->remote_oob_data, list)
3225                 if (bacmp(bdaddr, &data->bdaddr) == 0)
3226                         return data;
3227
3228         return NULL;
3229 }
3230
3231 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3232 {
3233         struct oob_data *data;
3234
3235         data = hci_find_remote_oob_data(hdev, bdaddr);
3236         if (!data)
3237                 return -ENOENT;
3238
3239         BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3240
3241         list_del(&data->list);
3242         kfree(data);
3243
3244         return 0;
3245 }
3246
3247 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3248 {
3249         struct oob_data *data, *n;
3250
3251         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3252                 list_del(&data->list);
3253                 kfree(data);
3254         }
3255 }
3256
3257 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3258                             u8 *hash, u8 *randomizer)
3259 {
3260         struct oob_data *data;
3261
3262         data = hci_find_remote_oob_data(hdev, bdaddr);
3263         if (!data) {
3264                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3265                 if (!data)
3266                         return -ENOMEM;
3267
3268                 bacpy(&data->bdaddr, bdaddr);
3269                 list_add(&data->list, &hdev->remote_oob_data);
3270         }
3271
3272         memcpy(data->hash192, hash, sizeof(data->hash192));
3273         memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3274
3275         memset(data->hash256, 0, sizeof(data->hash256));
3276         memset(data->randomizer256, 0, sizeof(data->randomizer256));
3277
3278         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3279
3280         return 0;
3281 }
3282
3283 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3284                                 u8 *hash192, u8 *randomizer192,
3285                                 u8 *hash256, u8 *randomizer256)
3286 {
3287         struct oob_data *data;
3288
3289         data = hci_find_remote_oob_data(hdev, bdaddr);
3290         if (!data) {
3291                 data = kmalloc(sizeof(*data), GFP_KERNEL);
3292                 if (!data)
3293                         return -ENOMEM;
3294
3295                 bacpy(&data->bdaddr, bdaddr);
3296                 list_add(&data->list, &hdev->remote_oob_data);
3297         }
3298
3299         memcpy(data->hash192, hash192, sizeof(data->hash192));
3300         memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3301
3302         memcpy(data->hash256, hash256, sizeof(data->hash256));
3303         memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3304
3305         BT_DBG("%s for %pMR", hdev->name, bdaddr);
3306
3307         return 0;
3308 }
3309
3310 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3311                                          bdaddr_t *bdaddr, u8 type)
3312 {
3313         struct bdaddr_list *b;
3314
3315         list_for_each_entry(b, &hdev->blacklist, list) {
3316                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3317                         return b;
3318         }
3319
3320         return NULL;
3321 }
3322
3323 static void hci_blacklist_clear(struct hci_dev *hdev)
3324 {
3325         struct list_head *p, *n;
3326
3327         list_for_each_safe(p, n, &hdev->blacklist) {
3328                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3329
3330                 list_del(p);
3331                 kfree(b);
3332         }
3333 }
3334
3335 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3336 {
3337         struct bdaddr_list *entry;
3338
3339         if (!bacmp(bdaddr, BDADDR_ANY))
3340                 return -EBADF;
3341
3342         if (hci_blacklist_lookup(hdev, bdaddr, type))
3343                 return -EEXIST;
3344
3345         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3346         if (!entry)
3347                 return -ENOMEM;
3348
3349         bacpy(&entry->bdaddr, bdaddr);
3350         entry->bdaddr_type = type;
3351
3352         list_add(&entry->list, &hdev->blacklist);
3353
3354         return mgmt_device_blocked(hdev, bdaddr, type);
3355 }
3356
3357 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3358 {
3359         struct bdaddr_list *entry;
3360
3361         if (!bacmp(bdaddr, BDADDR_ANY)) {
3362                 hci_blacklist_clear(hdev);
3363                 return 0;
3364         }
3365
3366         entry = hci_blacklist_lookup(hdev, bdaddr, type);
3367         if (!entry)
3368                 return -ENOENT;
3369
3370         list_del(&entry->list);
3371         kfree(entry);
3372
3373         return mgmt_device_unblocked(hdev, bdaddr, type);
3374 }
3375
3376 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3377                                           bdaddr_t *bdaddr, u8 type)
3378 {
3379         struct bdaddr_list *b;
3380
3381         list_for_each_entry(b, &hdev->le_white_list, list) {
3382                 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3383                         return b;
3384         }
3385
3386         return NULL;
3387 }
3388
3389 void hci_white_list_clear(struct hci_dev *hdev)
3390 {
3391         struct list_head *p, *n;
3392
3393         list_for_each_safe(p, n, &hdev->le_white_list) {
3394                 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3395
3396                 list_del(p);
3397                 kfree(b);
3398         }
3399 }
3400
3401 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3402 {
3403         struct bdaddr_list *entry;
3404
3405         if (!bacmp(bdaddr, BDADDR_ANY))
3406                 return -EBADF;
3407
3408         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3409         if (!entry)
3410                 return -ENOMEM;
3411
3412         bacpy(&entry->bdaddr, bdaddr);
3413         entry->bdaddr_type = type;
3414
3415         list_add(&entry->list, &hdev->le_white_list);
3416
3417         return 0;
3418 }
3419
3420 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3421 {
3422         struct bdaddr_list *entry;
3423
3424         if (!bacmp(bdaddr, BDADDR_ANY))
3425                 return -EBADF;
3426
3427         entry = hci_white_list_lookup(hdev, bdaddr, type);
3428         if (!entry)
3429                 return -ENOENT;
3430
3431         list_del(&entry->list);
3432         kfree(entry);
3433
3434         return 0;
3435 }
3436
3437 /* This function requires the caller holds hdev->lock */
3438 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3439                                                bdaddr_t *addr, u8 addr_type)
3440 {
3441         struct hci_conn_params *params;
3442
3443         list_for_each_entry(params, &hdev->le_conn_params, list) {
3444                 if (bacmp(&params->addr, addr) == 0 &&
3445                     params->addr_type == addr_type) {
3446                         return params;
3447                 }
3448         }
3449
3450         return NULL;
3451 }
3452
3453 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3454 {
3455         struct hci_conn *conn;
3456
3457         conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3458         if (!conn)
3459                 return false;
3460
3461         if (conn->dst_type != type)
3462                 return false;
3463
3464         if (conn->state != BT_CONNECTED)
3465                 return false;
3466
3467         return true;
3468 }
3469
3470 static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3471 {
3472         if (addr_type == ADDR_LE_DEV_PUBLIC)
3473                 return true;
3474
3475         /* Check for Random Static address type */
3476         if ((addr->b[5] & 0xc0) == 0xc0)
3477                 return true;
3478
3479         return false;
3480 }
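/* Worked example for the mask above: bdaddr_t stores the address
 * little-endian, so b[5] is the most significant byte, and a static
 * random address must have its two top bits set to 11:
 *
 *      b[5] = 0xc3 -> 0xc3 & 0xc0 == 0xc0 -> static random (identity)
 *      b[5] = 0x43 -> 0x43 & 0xc0 == 0x40 -> resolvable private (not identity)
 */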
3481
3482 /* This function requires the caller holds hdev->lock */
3483 int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3484                         u8 auto_connect, u16 conn_min_interval,
3485                         u16 conn_max_interval)
3486 {
3487         struct hci_conn_params *params;
3488
3489         if (!is_identity_address(addr, addr_type))
3490                 return -EINVAL;
3491
3492         params = hci_conn_params_lookup(hdev, addr, addr_type);
3493         if (params)
3494                 goto update;
3495
3496         params = kzalloc(sizeof(*params), GFP_KERNEL);
3497         if (!params) {
3498                 BT_ERR("Out of memory");
3499                 return -ENOMEM;
3500         }
3501
3502         bacpy(&params->addr, addr);
3503         params->addr_type = addr_type;
3504
3505         list_add(&params->list, &hdev->le_conn_params);
3506
3507 update:
3508         params->conn_min_interval = conn_min_interval;
3509         params->conn_max_interval = conn_max_interval;
3510         params->auto_connect = auto_connect;
3511
3512         switch (auto_connect) {
3513         case HCI_AUTO_CONN_DISABLED:
3514         case HCI_AUTO_CONN_LINK_LOSS:
3515                 hci_pend_le_conn_del(hdev, addr, addr_type);
3516                 break;
3517         case HCI_AUTO_CONN_ALWAYS:
3518                 if (!is_connected(hdev, addr, addr_type))
3519                         hci_pend_le_conn_add(hdev, addr, addr_type);
3520                 break;
3521         }
3522
3523         BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3524                "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3525                conn_min_interval, conn_max_interval);
3526
3527         return 0;
3528 }
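/* Minimal usage sketch, assuming the caller takes hdev->lock as required
 * above; the address and interval values are illustrative only:
 *
 *      hci_dev_lock(hdev);
 *      err = hci_conn_params_add(hdev, &bdaddr, ADDR_LE_DEV_PUBLIC,
 *                                HCI_AUTO_CONN_ALWAYS, 0x0028, 0x0038);
 *      hci_dev_unlock(hdev);
 *
 * With HCI_AUTO_CONN_ALWAYS the device is also put on the pending LE
 * connection list (unless already connected), which in turn triggers
 * hci_update_background_scan().
 */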
3529
3530 /* This function requires the caller holds hdev->lock */
3531 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3532 {
3533         struct hci_conn_params *params;
3534
3535         params = hci_conn_params_lookup(hdev, addr, addr_type);
3536         if (!params)
3537                 return;
3538
3539         hci_pend_le_conn_del(hdev, addr, addr_type);
3540
3541         list_del(&params->list);
3542         kfree(params);
3543
3544         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3545 }
3546
3547 /* This function requires the caller holds hdev->lock */
3548 void hci_conn_params_clear(struct hci_dev *hdev)
3549 {
3550         struct hci_conn_params *params, *tmp;
3551
3552         list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3553                 list_del(&params->list);
3554                 kfree(params);
3555         }
3556
3557         BT_DBG("All LE connection parameters were removed");
3558 }
3559
3560 /* This function requires the caller holds hdev->lock */
3561 struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3562                                             bdaddr_t *addr, u8 addr_type)
3563 {
3564         struct bdaddr_list *entry;
3565
3566         list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3567                 if (bacmp(&entry->bdaddr, addr) == 0 &&
3568                     entry->bdaddr_type == addr_type)
3569                         return entry;
3570         }
3571
3572         return NULL;
3573 }
3574
3575 /* This function requires the caller holds hdev->lock */
3576 void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3577 {
3578         struct bdaddr_list *entry;
3579
3580         entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3581         if (entry)
3582                 goto done;
3583
3584         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3585         if (!entry) {
3586                 BT_ERR("Out of memory");
3587                 return;
3588         }
3589
3590         bacpy(&entry->bdaddr, addr);
3591         entry->bdaddr_type = addr_type;
3592
3593         list_add(&entry->list, &hdev->pend_le_conns);
3594
3595         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3596
3597 done:
3598         hci_update_background_scan(hdev);
3599 }
3600
3601 /* This function requires the caller holds hdev->lock */
3602 void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3603 {
3604         struct bdaddr_list *entry;
3605
3606         entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3607         if (!entry)
3608                 goto done;
3609
3610         list_del(&entry->list);
3611         kfree(entry);
3612
3613         BT_DBG("addr %pMR (type %u)", addr, addr_type);
3614
3615 done:
3616         hci_update_background_scan(hdev);
3617 }
3618
3619 /* This function requires the caller holds hdev->lock */
3620 void hci_pend_le_conns_clear(struct hci_dev *hdev)
3621 {
3622         struct bdaddr_list *entry, *tmp;
3623
3624         list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3625                 list_del(&entry->list);
3626                 kfree(entry);
3627         }
3628
3629         BT_DBG("All LE pending connections cleared");
3630 }
3631
3632 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3633 {
3634         if (status) {
3635                 BT_ERR("Failed to start inquiry: status %d", status);
3636
3637                 hci_dev_lock(hdev);
3638                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3639                 hci_dev_unlock(hdev);
3640                 return;
3641         }
3642 }
3643
3644 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3645 {
3646         /* General inquiry access code (GIAC) */
3647         u8 lap[3] = { 0x33, 0x8b, 0x9e };
3648         struct hci_request req;
3649         struct hci_cp_inquiry cp;
3650         int err;
3651
3652         if (status) {
3653                 BT_ERR("Failed to disable LE scanning: status %d", status);
3654                 return;
3655         }
3656
3657         switch (hdev->discovery.type) {
3658         case DISCOV_TYPE_LE:
3659                 hci_dev_lock(hdev);
3660                 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3661                 hci_dev_unlock(hdev);
3662                 break;
3663
3664         case DISCOV_TYPE_INTERLEAVED:
3665                 hci_req_init(&req, hdev);
3666
3667                 memset(&cp, 0, sizeof(cp));
3668                 memcpy(&cp.lap, lap, sizeof(cp.lap));
3669                 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3670                 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3671
3672                 hci_dev_lock(hdev);
3673
3674                 hci_inquiry_cache_flush(hdev);
3675
3676                 err = hci_req_run(&req, inquiry_complete);
3677                 if (err) {
3678                         BT_ERR("Inquiry request failed: err %d", err);
3679                         hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3680                 }
3681
3682                 hci_dev_unlock(hdev);
3683                 break;
3684         }
3685 }
3686
3687 static void le_scan_disable_work(struct work_struct *work)
3688 {
3689         struct hci_dev *hdev = container_of(work, struct hci_dev,
3690                                             le_scan_disable.work);
3691         struct hci_request req;
3692         int err;
3693
3694         BT_DBG("%s", hdev->name);
3695
3696         hci_req_init(&req, hdev);
3697
3698         hci_req_add_le_scan_disable(&req);
3699
3700         err = hci_req_run(&req, le_scan_disable_work_complete);
3701         if (err)
3702                 BT_ERR("Disable LE scanning request failed: err %d", err);
3703 }
3704
3705 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3706 {
3707         struct hci_dev *hdev = req->hdev;
3708
3709         /* If we're advertising or initiating an LE connection, we can't
3710          * go ahead and change the random address at this time. This is
3711          * because the eventual initiator address used for the
3712          * subsequently created connection will be undefined (some
3713          * controllers use the new address and others the one we had
3714          * when the operation started).
3715          *
3716          * In this kind of scenario skip the update and let the random
3717          * address be updated at the next cycle.
3718          */
3719         if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3720             hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3721                 BT_DBG("Deferring random address update");
3722                 return;
3723         }
3724
3725         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3726 }
3727
3728 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3729                               u8 *own_addr_type)
3730 {
3731         struct hci_dev *hdev = req->hdev;
3732         int err;
3733
3734         /* If privacy is enabled, use a resolvable private address. If
3735          * the current RPA has expired or something other than the
3736          * current RPA is in use, generate a new one.
3737          */
3738         if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3739                 int to;
3740
3741                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3742
3743                 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3744                     !bacmp(&hdev->random_addr, &hdev->rpa))
3745                         return 0;
3746
3747                 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3748                 if (err < 0) {
3749                         BT_ERR("%s failed to generate new RPA", hdev->name);
3750                         return err;
3751                 }
3752
3753                 set_random_addr(req, &hdev->rpa);
3754
3755                 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3756                 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3757
3758                 return 0;
3759         }
3760
3761         /* If privacy is required but a resolvable private address is
3762          * not available, use an unresolvable private address. This is
3763          * useful for active scanning and non-connectable advertising.
3764          */
3765         if (require_privacy) {
3766                 bdaddr_t urpa;
3767
3768                 get_random_bytes(&urpa, 6);
3769                 urpa.b[5] &= 0x3f;      /* Clear two most significant bits */
3770
3771                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3772                 set_random_addr(req, &urpa);
3773                 return 0;
3774         }
3775
3776         /* If forcing static address is in use or there is no public
3777          * address, use the static address as random address (but skip
3778          * the HCI command if the current random address is already the
3779          * static one).
3780          */
3781         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3782             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3783                 *own_addr_type = ADDR_LE_DEV_RANDOM;
3784                 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3785                         hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3786                                     &hdev->static_addr);
3787                 return 0;
3788         }
3789
3790         /* Neither privacy nor static address is being used, so use a
3791          * public address.
3792          */
3793         *own_addr_type = ADDR_LE_DEV_PUBLIC;
3794
3795         return 0;
3796 }
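/* Typical caller pattern (sketch): this helper is used while building an
 * LE scan or advertising request, e.g.
 *
 *      struct hci_request req;
 *      u8 own_addr_type;
 *
 *      hci_req_init(&req, hdev);
 *      if (!hci_update_random_address(&req, false, &own_addr_type)) {
 *              ... put own_addr_type into the scan/adv parameters ...
 *              hci_req_run(&req, complete_cb);
 *      }
 *
 * complete_cb stands for whatever hci_req_complete_t callback the caller
 * supplies; the Set Random Address command, if needed, is already queued
 * on the request when this function returns 0.
 */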
3797
3798 /* Copy the Identity Address of the controller.
3799  *
3800  * If the controller has a public BD_ADDR, then by default use that one.
3801  * If this is an LE-only controller without a public address, default to
3802  * the static random address.
3803  *
3804  * For debugging purposes it is possible to force controllers with a
3805  * public address to use the static random address instead.
3806  */
3807 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3808                                u8 *bdaddr_type)
3809 {
3810         if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3811             !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3812                 bacpy(bdaddr, &hdev->static_addr);
3813                 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3814         } else {
3815                 bacpy(bdaddr, &hdev->bdaddr);
3816                 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3817         }
3818 }
3819
3820 /* Alloc HCI device */
3821 struct hci_dev *hci_alloc_dev(void)
3822 {
3823         struct hci_dev *hdev;
3824
3825         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3826         if (!hdev)
3827                 return NULL;
3828
3829         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3830         hdev->esco_type = (ESCO_HV1);
3831         hdev->link_mode = (HCI_LM_ACCEPT);
3832         hdev->num_iac = 0x01;           /* One IAC support is mandatory */
3833         hdev->io_capability = 0x03;     /* No Input No Output */
3834         hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3835         hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3836
3837         hdev->sniff_max_interval = 800;
3838         hdev->sniff_min_interval = 80;
3839
3840         hdev->le_adv_channel_map = 0x07;
3841         hdev->le_scan_interval = 0x0060;
3842         hdev->le_scan_window = 0x0030;
3843         hdev->le_conn_min_interval = 0x0028;
3844         hdev->le_conn_max_interval = 0x0038;
3845
3846         hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3847         hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3848         hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3849         hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3850
3851         mutex_init(&hdev->lock);
3852         mutex_init(&hdev->req_lock);
3853
3854         INIT_LIST_HEAD(&hdev->mgmt_pending);
3855         INIT_LIST_HEAD(&hdev->blacklist);
3856         INIT_LIST_HEAD(&hdev->uuids);
3857         INIT_LIST_HEAD(&hdev->link_keys);
3858         INIT_LIST_HEAD(&hdev->long_term_keys);
3859         INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3860         INIT_LIST_HEAD(&hdev->remote_oob_data);
3861         INIT_LIST_HEAD(&hdev->le_white_list);
3862         INIT_LIST_HEAD(&hdev->le_conn_params);
3863         INIT_LIST_HEAD(&hdev->pend_le_conns);
3864         INIT_LIST_HEAD(&hdev->conn_hash.list);
3865
3866         INIT_WORK(&hdev->rx_work, hci_rx_work);
3867         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3868         INIT_WORK(&hdev->tx_work, hci_tx_work);
3869         INIT_WORK(&hdev->power_on, hci_power_on);
3870
3871         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3872         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3873         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3874
3875         skb_queue_head_init(&hdev->rx_q);
3876         skb_queue_head_init(&hdev->cmd_q);
3877         skb_queue_head_init(&hdev->raw_q);
3878
3879         init_waitqueue_head(&hdev->req_wait_q);
3880
3881         setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
3882
3883         hci_init_sysfs(hdev);
3884         discovery_init(hdev);
3885
3886         return hdev;
3887 }
3888 EXPORT_SYMBOL(hci_alloc_dev);
3889
3890 /* Free HCI device */
3891 void hci_free_dev(struct hci_dev *hdev)
3892 {
3893         /* will free via device release */
3894         put_device(&hdev->dev);
3895 }
3896 EXPORT_SYMBOL(hci_free_dev);
3897
3898 /* Register HCI device */
3899 int hci_register_dev(struct hci_dev *hdev)
3900 {
3901         int id, error;
3902
3903         if (!hdev->open || !hdev->close)
3904                 return -EINVAL;
3905
3906         /* Do not allow HCI_AMP devices to register at index 0,
3907          * so the index can be used as the AMP controller ID.
3908          */
3909         switch (hdev->dev_type) {
3910         case HCI_BREDR:
3911                 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3912                 break;
3913         case HCI_AMP:
3914                 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3915                 break;
3916         default:
3917                 return -EINVAL;
3918         }
3919
3920         if (id < 0)
3921                 return id;
3922
3923         sprintf(hdev->name, "hci%d", id);
3924         hdev->id = id;
3925
3926         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3927
3928         hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3929                                           WQ_MEM_RECLAIM, 1, hdev->name);
3930         if (!hdev->workqueue) {
3931                 error = -ENOMEM;
3932                 goto err;
3933         }
3934
3935         hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3936                                               WQ_MEM_RECLAIM, 1, hdev->name);
3937         if (!hdev->req_workqueue) {
3938                 destroy_workqueue(hdev->workqueue);
3939                 error = -ENOMEM;
3940                 goto err;
3941         }
3942
3943         if (!IS_ERR_OR_NULL(bt_debugfs))
3944                 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3945
3946         dev_set_name(&hdev->dev, "%s", hdev->name);
3947
3948         hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3949                                                CRYPTO_ALG_ASYNC);
3950         if (IS_ERR(hdev->tfm_aes)) {
3951                 BT_ERR("Unable to create crypto context");
3952                 error = PTR_ERR(hdev->tfm_aes);
3953                 hdev->tfm_aes = NULL;
3954                 goto err_wqueue;
3955         }
3956
3957         error = device_add(&hdev->dev);
3958         if (error < 0)
3959                 goto err_tfm;
3960
3961         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3962                                     RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3963                                     hdev);
3964         if (hdev->rfkill) {
3965                 if (rfkill_register(hdev->rfkill) < 0) {
3966                         rfkill_destroy(hdev->rfkill);
3967                         hdev->rfkill = NULL;
3968                 }
3969         }
3970
3971         if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3972                 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3973
3974         set_bit(HCI_SETUP, &hdev->dev_flags);
3975         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3976
3977         if (hdev->dev_type == HCI_BREDR) {
3978                 /* Assume BR/EDR support until proven otherwise (such as
3979                  * through reading supported features during init).
3980                  */
3981                 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3982         }
3983
3984         write_lock(&hci_dev_list_lock);
3985         list_add(&hdev->list, &hci_dev_list);
3986         write_unlock(&hci_dev_list_lock);
3987
3988         hci_notify(hdev, HCI_DEV_REG);
3989         hci_dev_hold(hdev);
3990
3991         queue_work(hdev->req_workqueue, &hdev->power_on);
3992
3993         return id;
3994
3995 err_tfm:
3996         crypto_free_blkcipher(hdev->tfm_aes);
3997 err_wqueue:
3998         destroy_workqueue(hdev->workqueue);
3999         destroy_workqueue(hdev->req_workqueue);
4000 err:
4001         ida_simple_remove(&hci_index_ida, hdev->id);
4002
4003         return error;
4004 }
4005 EXPORT_SYMBOL(hci_register_dev);
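/* Driver-side sketch of the registration contract checked above: a
 * transport driver allocates the device, wires up its mandatory hooks and
 * then registers it (my_open/my_close/my_send and priv are illustrative):
 *
 *      hdev = hci_alloc_dev();
 *      if (!hdev)
 *              return -ENOMEM;
 *      hdev->bus   = HCI_USB;
 *      hdev->open  = my_open;
 *      hdev->close = my_close;
 *      hdev->send  = my_send;
 *      hci_set_drvdata(hdev, priv);
 *      err = hci_register_dev(hdev);
 *      if (err < 0)
 *              hci_free_dev(hdev);
 *
 * Without ->open and ->close the call fails early with -EINVAL.
 */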
4006
4007 /* Unregister HCI device */
4008 void hci_unregister_dev(struct hci_dev *hdev)
4009 {
4010         int i, id;
4011
4012         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4013
4014         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4015
4016         id = hdev->id;
4017
4018         write_lock(&hci_dev_list_lock);
4019         list_del(&hdev->list);
4020         write_unlock(&hci_dev_list_lock);
4021
4022         hci_dev_do_close(hdev);
4023
4024         for (i = 0; i < NUM_REASSEMBLY; i++)
4025                 kfree_skb(hdev->reassembly[i]);
4026
4027         cancel_work_sync(&hdev->power_on);
4028
4029         if (!test_bit(HCI_INIT, &hdev->flags) &&
4030             !test_bit(HCI_SETUP, &hdev->dev_flags)) {
4031                 hci_dev_lock(hdev);
4032                 mgmt_index_removed(hdev);
4033                 hci_dev_unlock(hdev);
4034         }
4035
4036         /* mgmt_index_removed should take care of emptying the
4037          * pending list */
4038         BUG_ON(!list_empty(&hdev->mgmt_pending));
4039
4040         hci_notify(hdev, HCI_DEV_UNREG);
4041
4042         if (hdev->rfkill) {
4043                 rfkill_unregister(hdev->rfkill);
4044                 rfkill_destroy(hdev->rfkill);
4045         }
4046
4047         if (hdev->tfm_aes)
4048                 crypto_free_blkcipher(hdev->tfm_aes);
4049
4050         device_del(&hdev->dev);
4051
4052         debugfs_remove_recursive(hdev->debugfs);
4053
4054         destroy_workqueue(hdev->workqueue);
4055         destroy_workqueue(hdev->req_workqueue);
4056
4057         hci_dev_lock(hdev);
4058         hci_blacklist_clear(hdev);
4059         hci_uuids_clear(hdev);
4060         hci_link_keys_clear(hdev);
4061         hci_smp_ltks_clear(hdev);
4062         hci_smp_irks_clear(hdev);
4063         hci_remote_oob_data_clear(hdev);
4064         hci_white_list_clear(hdev);
4065         hci_conn_params_clear(hdev);
4066         hci_pend_le_conns_clear(hdev);
4067         hci_dev_unlock(hdev);
4068
4069         hci_dev_put(hdev);
4070
4071         ida_simple_remove(&hci_index_ida, id);
4072 }
4073 EXPORT_SYMBOL(hci_unregister_dev);
4074
4075 /* Suspend HCI device */
4076 int hci_suspend_dev(struct hci_dev *hdev)
4077 {
4078         hci_notify(hdev, HCI_DEV_SUSPEND);
4079         return 0;
4080 }
4081 EXPORT_SYMBOL(hci_suspend_dev);
4082
4083 /* Resume HCI device */
4084 int hci_resume_dev(struct hci_dev *hdev)
4085 {
4086         hci_notify(hdev, HCI_DEV_RESUME);
4087         return 0;
4088 }
4089 EXPORT_SYMBOL(hci_resume_dev);
4090
4091 /* Receive frame from HCI drivers */
4092 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4093 {
4094         if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4095                       !test_bit(HCI_INIT, &hdev->flags))) {
4096                 kfree_skb(skb);
4097                 return -ENXIO;
4098         }
4099
4100         /* Incoming skb */
4101         bt_cb(skb)->incoming = 1;
4102
4103         /* Time stamp */
4104         __net_timestamp(skb);
4105
4106         skb_queue_tail(&hdev->rx_q, skb);
4107         queue_work(hdev->workqueue, &hdev->rx_work);
4108
4109         return 0;
4110 }
4111 EXPORT_SYMBOL(hci_recv_frame);
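/* Sketch of how a driver hands a complete frame to the core (buf and
 * count are illustrative driver-side names):
 *
 *      skb = bt_skb_alloc(count, GFP_ATOMIC);
 *      if (!skb)
 *              return -ENOMEM;
 *      memcpy(skb_put(skb, count), buf, count);
 *      bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *      hci_recv_frame(hdev, skb);
 *
 * The core takes ownership of the skb in all cases, including the error
 * path above where it is freed.
 */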
4112
4113 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4114                           int count, __u8 index)
4115 {
4116         int len = 0;
4117         int hlen = 0;
4118         int remain = count;
4119         struct sk_buff *skb;
4120         struct bt_skb_cb *scb;
4121
4122         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4123             index >= NUM_REASSEMBLY)
4124                 return -EILSEQ;
4125
4126         skb = hdev->reassembly[index];
4127
4128         if (!skb) {
4129                 switch (type) {
4130                 case HCI_ACLDATA_PKT:
4131                         len = HCI_MAX_FRAME_SIZE;
4132                         hlen = HCI_ACL_HDR_SIZE;
4133                         break;
4134                 case HCI_EVENT_PKT:
4135                         len = HCI_MAX_EVENT_SIZE;
4136                         hlen = HCI_EVENT_HDR_SIZE;
4137                         break;
4138                 case HCI_SCODATA_PKT:
4139                         len = HCI_MAX_SCO_SIZE;
4140                         hlen = HCI_SCO_HDR_SIZE;
4141                         break;
4142                 }
4143
4144                 skb = bt_skb_alloc(len, GFP_ATOMIC);
4145                 if (!skb)
4146                         return -ENOMEM;
4147
4148                 scb = (void *) skb->cb;
4149                 scb->expect = hlen;
4150                 scb->pkt_type = type;
4151
4152                 hdev->reassembly[index] = skb;
4153         }
4154
4155         while (count) {
4156                 scb = (void *) skb->cb;
4157                 len = min_t(uint, scb->expect, count);
4158
4159                 memcpy(skb_put(skb, len), data, len);
4160
4161                 count -= len;
4162                 data += len;
4163                 scb->expect -= len;
4164                 remain = count;
4165
4166                 switch (type) {
4167                 case HCI_EVENT_PKT:
4168                         if (skb->len == HCI_EVENT_HDR_SIZE) {
4169                                 struct hci_event_hdr *h = hci_event_hdr(skb);
4170                                 scb->expect = h->plen;
4171
4172                                 if (skb_tailroom(skb) < scb->expect) {
4173                                         kfree_skb(skb);
4174                                         hdev->reassembly[index] = NULL;
4175                                         return -ENOMEM;
4176                                 }
4177                         }
4178                         break;
4179
4180                 case HCI_ACLDATA_PKT:
4181                         if (skb->len == HCI_ACL_HDR_SIZE) {
4182                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4183                                 scb->expect = __le16_to_cpu(h->dlen);
4184
4185                                 if (skb_tailroom(skb) < scb->expect) {
4186                                         kfree_skb(skb);
4187                                         hdev->reassembly[index] = NULL;
4188                                         return -ENOMEM;
4189                                 }
4190                         }
4191                         break;
4192
4193                 case HCI_SCODATA_PKT:
4194                         if (skb->len == HCI_SCO_HDR_SIZE) {
4195                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4196                                 scb->expect = h->dlen;
4197
4198                                 if (skb_tailroom(skb) < scb->expect) {
4199                                         kfree_skb(skb);
4200                                         hdev->reassembly[index] = NULL;
4201                                         return -ENOMEM;
4202                                 }
4203                         }
4204                         break;
4205                 }
4206
4207                 if (scb->expect == 0) {
4208                         /* Complete frame */
4209
4210                         bt_cb(skb)->pkt_type = type;
4211                         hci_recv_frame(hdev, skb);
4212
4213                         hdev->reassembly[index] = NULL;
4214                         return remain;
4215                 }
4216         }
4217
4218         return remain;
4219 }
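/* Worked example of the loop above, assuming an HCI event arrives split
 * across two driver chunks: chunk 1 carries the 2-byte event header plus
 * 3 payload bytes of a 6-byte event, chunk 2 the remaining 3 bytes.
 * On chunk 1 the header completes first, scb->expect is reloaded from
 * h->plen (6), the 3 available payload bytes are copied and the function
 * returns remain = 0 with the partial skb parked in hdev->reassembly[].
 * On chunk 2 the final 3 bytes bring scb->expect to 0, the completed
 * frame is fed to hci_recv_frame() and the reassembly slot is cleared.
 */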
4220
4221 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4222 {
4223         int rem = 0;
4224
4225         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4226                 return -EILSEQ;
4227
4228         while (count) {
4229                 rem = hci_reassembly(hdev, type, data, count, type - 1);
4230                 if (rem < 0)
4231                         return rem;
4232
4233                 data += (count - rem);
4234                 count = rem;
4235         }
4236
4237         return rem;
4238 }
4239 EXPORT_SYMBOL(hci_recv_fragment);
4240
4241 #define STREAM_REASSEMBLY 0
4242
4243 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4244 {
4245         int type;
4246         int rem = 0;
4247
4248         while (count) {
4249                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4250
4251                 if (!skb) {
4252                         struct { char type; } *pkt;
4253
4254                         /* Start of the frame */
4255                         pkt = data;
4256                         type = pkt->type;
4257
4258                         data++;
4259                         count--;
4260                 } else
4261                         type = bt_cb(skb)->pkt_type;
4262
4263                 rem = hci_reassembly(hdev, type, data, count,
4264                                      STREAM_REASSEMBLY);
4265                 if (rem < 0)
4266                         return rem;
4267
4268                 data += (count - rem);
4269                 count = rem;
4270         }
4271
4272         return rem;
4273 }
4274 EXPORT_SYMBOL(hci_recv_stream_fragment);
4275
4276 /* ---- Interface to upper protocols ---- */
4277
4278 int hci_register_cb(struct hci_cb *cb)
4279 {
4280         BT_DBG("%p name %s", cb, cb->name);
4281
4282         write_lock(&hci_cb_list_lock);
4283         list_add(&cb->list, &hci_cb_list);
4284         write_unlock(&hci_cb_list_lock);
4285
4286         return 0;
4287 }
4288 EXPORT_SYMBOL(hci_register_cb);
4289
4290 int hci_unregister_cb(struct hci_cb *cb)
4291 {
4292         BT_DBG("%p name %s", cb, cb->name);
4293
4294         write_lock(&hci_cb_list_lock);
4295         list_del(&cb->list);
4296         write_unlock(&hci_cb_list_lock);
4297
4298         return 0;
4299 }
4300 EXPORT_SYMBOL(hci_unregister_cb);
4301
4302 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4303 {
4304         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4305
4306         /* Time stamp */
4307         __net_timestamp(skb);
4308
4309         /* Send copy to monitor */
4310         hci_send_to_monitor(hdev, skb);
4311
4312         if (atomic_read(&hdev->promisc)) {
4313                 /* Send copy to the sockets */
4314                 hci_send_to_sock(hdev, skb);
4315         }
4316
4317         /* Get rid of skb owner prior to sending to the driver. */
4318         skb_orphan(skb);
4319
4320         if (hdev->send(hdev, skb) < 0)
4321                 BT_ERR("%s sending frame failed", hdev->name);
4322 }
4323
4324 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4325 {
4326         skb_queue_head_init(&req->cmd_q);
4327         req->hdev = hdev;
4328         req->err = 0;
4329 }
4330
4331 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4332 {
4333         struct hci_dev *hdev = req->hdev;
4334         struct sk_buff *skb;
4335         unsigned long flags;
4336
4337         BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4338
4339         /* If an error occurred during request building, remove all HCI
4340          * commands queued on the HCI request queue.
4341          */
4342         if (req->err) {
4343                 skb_queue_purge(&req->cmd_q);
4344                 return req->err;
4345         }
4346
4347         /* Do not allow empty requests */
4348         if (skb_queue_empty(&req->cmd_q))
4349                 return -ENODATA;
4350
4351         skb = skb_peek_tail(&req->cmd_q);
4352         bt_cb(skb)->req.complete = complete;
4353
4354         spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4355         skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4356         spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4357
4358         queue_work(hdev->workqueue, &hdev->cmd_work);
4359
4360         return 0;
4361 }
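/* Request-building sketch: the pattern used throughout this file (see
 * le_scan_disable_work() above) is
 *
 *      struct hci_request req;
 *
 *      hci_req_init(&req, hdev);
 *      hci_req_add(&req, opcode, plen, &cp);
 *      err = hci_req_run(&req, complete_cb);
 *
 * where complete_cb is invoked once the last queued command completes.
 * An empty request fails with -ENODATA rather than queueing nothing.
 */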
4362
4363 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4364                                        u32 plen, const void *param)
4365 {
4366         int len = HCI_COMMAND_HDR_SIZE + plen;
4367         struct hci_command_hdr *hdr;
4368         struct sk_buff *skb;
4369
4370         skb = bt_skb_alloc(len, GFP_ATOMIC);
4371         if (!skb)
4372                 return NULL;
4373
4374         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4375         hdr->opcode = cpu_to_le16(opcode);
4376         hdr->plen   = plen;
4377
4378         if (plen)
4379                 memcpy(skb_put(skb, plen), param, plen);
4380
4381         BT_DBG("skb len %d", skb->len);
4382
4383         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4384
4385         return skb;
4386 }
4387
4388 /* Send HCI command */
4389 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4390                  const void *param)
4391 {
4392         struct sk_buff *skb;
4393
4394         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4395
4396         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4397         if (!skb) {
4398                 BT_ERR("%s no memory for command", hdev->name);
4399                 return -ENOMEM;
4400         }
4401
4402         /* Stand-alone HCI commands must be flagged as
4403          * single-command requests.
4404          */
4405         bt_cb(skb)->req.start = true;
4406
4407         skb_queue_tail(&hdev->cmd_q, skb);
4408         queue_work(hdev->workqueue, &hdev->cmd_work);
4409
4410         return 0;
4411 }
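/* Minimal example (sketch): a parameter-less command such as HCI Reset
 * is sent as
 *
 *      hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 * For commands with parameters, plen and param describe the command-
 * specific structure, e.g. one of the struct hci_cp_* types from hci.h.
 */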
4412
4413 /* Queue a command to an asynchronous HCI request */
4414 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4415                     const void *param, u8 event)
4416 {
4417         struct hci_dev *hdev = req->hdev;
4418         struct sk_buff *skb;
4419
4420         BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4421
4422         /* If an error occurred during request building, there is no point in
4423          * queueing the HCI command. We can simply return.
4424          */
4425         if (req->err)
4426                 return;
4427
4428         skb = hci_prepare_cmd(hdev, opcode, plen, param);
4429         if (!skb) {
4430                 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4431                        hdev->name, opcode);
4432                 req->err = -ENOMEM;
4433                 return;
4434         }
4435
4436         if (skb_queue_empty(&req->cmd_q))
4437                 bt_cb(skb)->req.start = true;
4438
4439         bt_cb(skb)->req.event = event;
4440
4441         skb_queue_tail(&req->cmd_q, skb);
4442 }
4443
4444 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4445                  const void *param)
4446 {
4447         hci_req_add_ev(req, opcode, plen, param, 0);
4448 }
4449
4450 /* Get data from the previously sent command */
4451 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4452 {
4453         struct hci_command_hdr *hdr;
4454
4455         if (!hdev->sent_cmd)
4456                 return NULL;
4457
4458         hdr = (void *) hdev->sent_cmd->data;
4459
4460         if (hdr->opcode != cpu_to_le16(opcode))
4461                 return NULL;
4462
4463         BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4464
4465         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4466 }
4467
4468 /* Send ACL data */
4469 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4470 {
4471         struct hci_acl_hdr *hdr;
4472         int len = skb->len;
4473
4474         skb_push(skb, HCI_ACL_HDR_SIZE);
4475         skb_reset_transport_header(skb);
4476         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4477         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4478         hdr->dlen   = cpu_to_le16(len);
4479 }
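/* Worked example for the header built above (values illustrative):
 * hci_handle_pack() masks the handle to 12 bits and folds the packet
 * boundary/broadcast flags into the top four bits, so for handle 0x002a
 * with flags ACL_START (0x02):
 *
 *      hdr->handle = cpu_to_le16(0x002a | (0x02 << 12))  ->  0x202a
 *
 * while hdr->dlen carries the payload length as it was before the push.
 */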
4480
4481 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4482                           struct sk_buff *skb, __u16 flags)
4483 {
4484         struct hci_conn *conn = chan->conn;
4485         struct hci_dev *hdev = conn->hdev;
4486         struct sk_buff *list;
4487
4488         skb->len = skb_headlen(skb);
4489         skb->data_len = 0;
4490
4491         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4492
4493         switch (hdev->dev_type) {
4494         case HCI_BREDR:
4495                 hci_add_acl_hdr(skb, conn->handle, flags);
4496                 break;
4497         case HCI_AMP:
4498                 hci_add_acl_hdr(skb, chan->handle, flags);
4499                 break;
4500         default:
4501                 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4502                 return;
4503         }
4504
4505         list = skb_shinfo(skb)->frag_list;
4506         if (!list) {
4507                 /* Non-fragmented */
4508                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4509
4510                 skb_queue_tail(queue, skb);
4511         } else {
4512                 /* Fragmented */
4513                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4514
4515                 skb_shinfo(skb)->frag_list = NULL;
4516
4517                 /* Queue all fragments atomically */
4518                 spin_lock(&queue->lock);
4519
4520                 __skb_queue_tail(queue, skb);
4521
4522                 flags &= ~ACL_START;
4523                 flags |= ACL_CONT;
4524                 do {
4525                         skb = list; list = list->next;
4526
4527                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4528                         hci_add_acl_hdr(skb, conn->handle, flags);
4529
4530                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4531
4532                         __skb_queue_tail(queue, skb);
4533                 } while (list);
4534
4535                 spin_unlock(&queue->lock);
4536         }
4537 }
4538
4539 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4540 {
4541         struct hci_dev *hdev = chan->conn->hdev;
4542
4543         BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4544
4545         hci_queue_acl(chan, &chan->data_q, skb, flags);
4546
4547         queue_work(hdev->workqueue, &hdev->tx_work);
4548 }
4549
4550 /* Send SCO data */
4551 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4552 {
4553         struct hci_dev *hdev = conn->hdev;
4554         struct hci_sco_hdr hdr;
4555
4556         BT_DBG("%s len %d", hdev->name, skb->len);
4557
4558         hdr.handle = cpu_to_le16(conn->handle);
4559         hdr.dlen   = skb->len;
4560
4561         skb_push(skb, HCI_SCO_HDR_SIZE);
4562         skb_reset_transport_header(skb);
4563         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4564
4565         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4566
4567         skb_queue_tail(&conn->data_q, skb);
4568         queue_work(hdev->workqueue, &hdev->tx_work);
4569 }
4570
4571 /* ---- HCI TX task (outgoing data) ---- */
4572
4573 /* HCI Connection scheduler */
4574 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4575                                      int *quote)
4576 {
4577         struct hci_conn_hash *h = &hdev->conn_hash;
4578         struct hci_conn *conn = NULL, *c;
4579         unsigned int num = 0, min = ~0;
4580
4581         /* We don't have to lock device here. Connections are always
4582          * added and removed with TX task disabled. */
4583
4584         rcu_read_lock();
4585
4586         list_for_each_entry_rcu(c, &h->list, list) {
4587                 if (c->type != type || skb_queue_empty(&c->data_q))
4588                         continue;
4589
4590                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4591                         continue;
4592
4593                 num++;
4594
4595                 if (c->sent < min) {
4596                         min  = c->sent;
4597                         conn = c;
4598                 }
4599
4600                 if (hci_conn_num(hdev, type) == num)
4601                         break;
4602         }
4603
4604         rcu_read_unlock();
4605
4606         if (conn) {
4607                 int cnt, q;
4608
4609                 switch (conn->type) {
4610                 case ACL_LINK:
4611                         cnt = hdev->acl_cnt;
4612                         break;
4613                 case SCO_LINK:
4614                 case ESCO_LINK:
4615                         cnt = hdev->sco_cnt;
4616                         break;
4617                 case LE_LINK:
4618                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4619                         break;
4620                 default:
4621                         cnt = 0;
4622                         BT_ERR("Unknown link type");
4623                 }
4624
4625                 q = cnt / num;
4626                 *quote = q ? q : 1;
4627         } else
4628                 *quote = 0;
4629
4630         BT_DBG("conn %p quote %d", conn, *quote);
4631         return conn;
4632 }
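/* Worked example of the fair-share quota above: with acl_cnt = 9 free
 * controller buffers and three ACL connections that all have queued data,
 * num = 3, the connection with the fewest in-flight packets is picked,
 * and its quote becomes 9 / 3 = 3 frames for this scheduling pass (a
 * minimum of 1 is granted whenever cnt < num).
 */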
4633
4634 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4635 {
4636         struct hci_conn_hash *h = &hdev->conn_hash;
4637         struct hci_conn *c;
4638
4639         BT_ERR("%s link tx timeout", hdev->name);
4640
4641         rcu_read_lock();
4642
4643         /* Kill stalled connections */
4644         list_for_each_entry_rcu(c, &h->list, list) {
4645                 if (c->type == type && c->sent) {
4646                         BT_ERR("%s killing stalled connection %pMR",
4647                                hdev->name, &c->dst);
4648                         hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4649                 }
4650         }
4651
4652         rcu_read_unlock();
4653 }
4654
4655 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4656                                       int *quote)
4657 {
4658         struct hci_conn_hash *h = &hdev->conn_hash;
4659         struct hci_chan *chan = NULL;
4660         unsigned int num = 0, min = ~0, cur_prio = 0;
4661         struct hci_conn *conn;
4662         int cnt, q, conn_num = 0;
4663
4664         BT_DBG("%s", hdev->name);
4665
4666         rcu_read_lock();
4667
4668         list_for_each_entry_rcu(conn, &h->list, list) {
4669                 struct hci_chan *tmp;
4670
4671                 if (conn->type != type)
4672                         continue;
4673
4674                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4675                         continue;
4676
4677                 conn_num++;
4678
4679                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4680                         struct sk_buff *skb;
4681
4682                         if (skb_queue_empty(&tmp->data_q))
4683                                 continue;
4684
4685                         skb = skb_peek(&tmp->data_q);
4686                         if (skb->priority < cur_prio)
4687                                 continue;
4688
4689                         if (skb->priority > cur_prio) {
4690                                 num = 0;
4691                                 min = ~0;
4692                                 cur_prio = skb->priority;
4693                         }
4694
4695                         num++;
4696
4697                         if (conn->sent < min) {
4698                                 min  = conn->sent;
4699                                 chan = tmp;
4700                         }
4701                 }
4702
4703                 if (hci_conn_num(hdev, type) == conn_num)
4704                         break;
4705         }
4706
4707         rcu_read_unlock();
4708
4709         if (!chan)
4710                 return NULL;
4711
4712         switch (chan->conn->type) {
4713         case ACL_LINK:
4714                 cnt = hdev->acl_cnt;
4715                 break;
4716         case AMP_LINK:
4717                 cnt = hdev->block_cnt;
4718                 break;
4719         case SCO_LINK:
4720         case ESCO_LINK:
4721                 cnt = hdev->sco_cnt;
4722                 break;
4723         case LE_LINK:
4724                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4725                 break;
4726         default:
4727                 cnt = 0;
4728                 BT_ERR("Unknown link type");
4729         }
4730
4731         q = cnt / num;
4732         *quote = q ? q : 1;
4733         BT_DBG("chan %p quote %d", chan, *quote);
4734         return chan;
4735 }
4736
4737 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4738 {
4739         struct hci_conn_hash *h = &hdev->conn_hash;
4740         struct hci_conn *conn;
4741         int num = 0;
4742
4743         BT_DBG("%s", hdev->name);
4744
4745         rcu_read_lock();
4746
4747         list_for_each_entry_rcu(conn, &h->list, list) {
4748                 struct hci_chan *chan;
4749
4750                 if (conn->type != type)
4751                         continue;
4752
4753                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4754                         continue;
4755
4756                 num++;
4757
4758                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4759                         struct sk_buff *skb;
4760
4761                         if (chan->sent) {
4762                                 chan->sent = 0;
4763                                 continue;
4764                         }
4765
4766                         if (skb_queue_empty(&chan->data_q))
4767                                 continue;
4768
4769                         skb = skb_peek(&chan->data_q);
4770                         if (skb->priority >= HCI_PRIO_MAX - 1)
4771                                 continue;
4772
4773                         skb->priority = HCI_PRIO_MAX - 1;
4774
4775                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4776                                skb->priority);
4777                 }
4778
4779                 if (hci_conn_num(hdev, type) == num)
4780                         break;
4781         }
4782
4783         rcu_read_unlock();
4784
4785 }
4786
4787 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4788 {
4789         /* Calculate count of blocks used by this packet */
4790         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4791 }
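/* Worked example, assuming an illustrative hdev->block_len of 339: an
 * ACL frame of skb->len = 1021 bytes, i.e. 1017 bytes after the 4-byte
 * ACL header, occupies DIV_ROUND_UP(1017, 339) = 3 controller blocks.
 */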
4792
4793 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4794 {
4795         if (!test_bit(HCI_RAW, &hdev->flags)) {
4796                 /* ACL tx timeout must be longer than maximum
4797                  * link supervision timeout (40.9 seconds) */
4798                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4799                                        HCI_ACL_TX_TIMEOUT))
4800                         hci_link_tx_to(hdev, ACL_LINK);
4801         }
4802 }
4803
4804 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4805 {
4806         unsigned int cnt = hdev->acl_cnt;
4807         struct hci_chan *chan;
4808         struct sk_buff *skb;
4809         int quote;
4810
4811         __check_timeout(hdev, cnt);
4812
4813         while (hdev->acl_cnt &&
4814                (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4815                 u32 priority = (skb_peek(&chan->data_q))->priority;
4816                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4817                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4818                                skb->len, skb->priority);
4819
4820                         /* Stop if priority has changed */
4821                         if (skb->priority < priority)
4822                                 break;
4823
4824                         skb = skb_dequeue(&chan->data_q);
4825
4826                         hci_conn_enter_active_mode(chan->conn,
4827                                                    bt_cb(skb)->force_active);
4828
4829                         hci_send_frame(hdev, skb);
4830                         hdev->acl_last_tx = jiffies;
4831
4832                         hdev->acl_cnt--;
4833                         chan->sent++;
4834                         chan->conn->sent++;
4835                 }
4836         }
4837
4838         if (cnt != hdev->acl_cnt)
4839                 hci_prio_recalculate(hdev, ACL_LINK);
4840 }
4841
4842 static void hci_sched_acl_blk(struct hci_dev *hdev)
4843 {
4844         unsigned int cnt = hdev->block_cnt;
4845         struct hci_chan *chan;
4846         struct sk_buff *skb;
4847         int quote;
4848         u8 type;
4849
4850         __check_timeout(hdev, cnt);
4851
4852         BT_DBG("%s", hdev->name);
4853
4854         if (hdev->dev_type == HCI_AMP)
4855                 type = AMP_LINK;
4856         else
4857                 type = ACL_LINK;
4858
4859         while (hdev->block_cnt > 0 &&
4860                (chan = hci_chan_sent(hdev, type, &quote))) {
4861                 u32 priority = (skb_peek(&chan->data_q))->priority;
4862                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4863                         int blocks;
4864
4865                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4866                                skb->len, skb->priority);
4867
4868                         /* Stop if priority has changed */
4869                         if (skb->priority < priority)
4870                                 break;
4871
4872                         skb = skb_dequeue(&chan->data_q);
4873
4874                         blocks = __get_blocks(hdev, skb);
4875                         if (blocks > hdev->block_cnt)
4876                                 return;
4877
4878                         hci_conn_enter_active_mode(chan->conn,
4879                                                    bt_cb(skb)->force_active);
4880
4881                         hci_send_frame(hdev, skb);
4882                         hdev->acl_last_tx = jiffies;
4883
4884                         hdev->block_cnt -= blocks;
4885                         quote -= blocks;
4886
4887                         chan->sent += blocks;
4888                         chan->conn->sent += blocks;
4889                 }
4890         }
4891
4892         if (cnt != hdev->block_cnt)
4893                 hci_prio_recalculate(hdev, type);
4894 }
4895
4896 static void hci_sched_acl(struct hci_dev *hdev)
4897 {
4898         BT_DBG("%s", hdev->name);
4899
4900         /* No ACL link over BR/EDR controller */
4901         if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4902                 return;
4903
4904         /* No AMP link over AMP controller */
4905         if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4906                 return;
4907
4908         switch (hdev->flow_ctl_mode) {
4909         case HCI_FLOW_CTL_MODE_PACKET_BASED:
4910                 hci_sched_acl_pkt(hdev);
4911                 break;
4912
4913         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4914                 hci_sched_acl_blk(hdev);
4915                 break;
4916         }
4917 }
4918
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

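/* Example (worked case): controllers that lack a dedicated LE buffer
 * pool report le_pkts == 0, so the code above falls back to the shared
 * ACL pool. E.g. with le_pkts == 0, acl_cnt == 8 and three queued LE
 * frames, cnt starts at 8, drops to 5, and acl_cnt is written back as 5,
 * leaving 5 credits shared between ACL and LE traffic.
 */
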
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

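/* Example (worked value, assuming the usual 12+4 bit split performed by
 * hci_handle()/hci_flags()): a wire value of 0x2001 in hdr->handle
 * yields handle 0x001 and flags 0x2 (ACL start fragment):
 *
 *	handle = hci_handle(0x2001);	// 0x2001 & 0x0fff == 0x001
 *	flags  = hci_flags(0x2001);	// 0x2001 >> 12    == 0x2
 */
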
/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

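/* Example (illustrative queue states): bt_cb(skb)->req.start marks the
 * first command of each request, so peeking at cmd_q tells us whether
 * the request currently in flight still has commands pending:
 *
 *	cmd_q: [C2] [C3] [D1 start]	-> false, C1's request continues
 *	cmd_q: [D1 start] [D2]		-> true, next entry opens a request
 *	cmd_q: (empty)			-> true
 */
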
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback will be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

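/* Example (illustrative failure case): suppose request R1 queued commands
 * A, B, C and request R2 queued D. A sits in hdev->sent_cmd while B, C, D
 * sit in cmd_q with req.start set on D. If A completes with a non-zero
 * status, the flush loop above frees B and C, stops when it sees D's
 * req.start marker, and invokes R1's complete callback with the failing
 * status; R2 is left untouched at the head of the queue.
 */
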
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

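/* Example (illustrative sketch): cmd_cnt acts as the command credit
 * granted by the controller; it is consumed here and refilled when the
 * resulting Command Complete/Status event is processed. Callers never
 * touch it directly and simply queue commands, e.g.:
 *
 *	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */
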
void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

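/* Example (illustrative sketch, with a hypothetical completion callback):
 * helpers like the one above only append commands to a request; nothing
 * hits the wire until the request is run:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_scan_disable(&req);
 *	err = hci_req_run(&req, scan_disable_complete);
 */
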
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;

	/* Set require_privacy to true to avoid identification from
	 * unknown peer devices. Since this is passive scanning, no
	 * SCAN_REQ using the local identity should be sent. Mandating
	 * privacy is just an extra precaution.
	 */
	if (hci_update_random_address(req, true, &own_addr_type))
		return;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

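/* Example (worked units): le_scan_interval and le_scan_window are
 * expressed in 0.625 ms baseband slots. With the common defaults of
 * 0x0060 and 0x0030, the controller scans for 48 * 0.625 = 30 ms out of
 * every 96 * 0.625 = 60 ms, i.e. a 50% passive scan duty cycle.
 */
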
static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: "
		       "status 0x%2.2x", status);
}

/* This function controls background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections, we start
 * background scanning; otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
void hci_update_background_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_conn *conn;
	int err;

	hci_req_init(&req, hdev);

	if (list_empty(&hdev->pend_le_conns)) {
		/* If there are no pending LE connections, we should stop
		 * the background scanning.
		 */

		/* If the controller is not scanning, we are done. */
		if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			return;

		hci_req_add_le_scan_disable(&req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If the controller is connecting, we should not start
		 * scanning since some controllers are not able to scan and
		 * connect at the same time.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			return;

		/* If the controller is currently scanning, we stop it to
		 * ensure we don't miss any advertising (due to the
		 * duplicates filter).
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
			hci_req_add_le_scan_disable(&req);

		hci_req_add_le_passive_scan(&req);

		BT_DBG("%s starting background scanning", hdev->name);
	}

	err = hci_req_run(&req, update_background_scan_complete);
	if (err)
		BT_ERR("Failed to run HCI request: err %d", err);
}
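
/* Example (illustrative sketch): since this function walks hdev state and
 * the pend_le_conns list, callers are expected to wrap it in the device
 * lock:
 *
 *	hci_dev_lock(hdev);
 *	hci_update_background_scan(hdev);
 *	hci_dev_unlock(hdev);
 */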