/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}
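
/*
 * This request/wait pattern underpins every synchronous HCI operation:
 * arm req_wait_q, fire the request callback (which queues commands),
 * then sleep until hci_req_complete() or hci_req_cancel() flips
 * req_status, or the timeout expires. Illustrative use (a sketch, not
 * code from this file), with hci_scan_req() below as the callback and
 * assuming the SCAN_PAGE constant from hci.h:
 *
 *      err = hci_request(hdev, hci_scan_req, SCAN_PAGE,
 *                              msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */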

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                tasklet_schedule(&hdev->cmd_task);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Page timeout ~20 secs */
        param = cpu_to_le16(0x8000);
        hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

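/*
 * Note on the two timeout values above: HCI timing parameters are
 * expressed in 0.625 ms baseband slots, so 0x8000 (32768 slots) is
 * about 20.5 s and 0x7d00 (32000 slots) is exactly 20 s, matching the
 * "~20 secs" comments.
 */
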
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
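
/*
 * The reference returned by hci_dev_get() must be balanced with
 * hci_dev_put(); the ioctl helpers below all follow that
 * get/use/put pattern.
 */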

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
                /* Entry not in the cache. Add new one. */
                if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
                        return;
                e->next     = cache->list;
                cache->list = e;
        }

        memcpy(&e->data, data, sizeof(*data));
        e->timestamp = jiffies;
        cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(ir.dev_id)))
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                        inquiry_cache_empty(hdev) ||
                                        ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);
        if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
                goto done;

        /* For an unlimited number of responses, use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer here and
         * copy it to user space afterwards.
         */
        if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
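
/*
 * The HCIINQUIRY path above works in three steps: flush the inquiry
 * cache (or reuse it if it is fresh and IREQ_CACHE_FLUSH is not set),
 * run the inquiry request synchronously, then dump the cache into a
 * kernel buffer and copy it out. The timeout is derived from
 * ir.length, which the spec counts in 1.28 s units; 2000 ms per unit
 * leaves a generous margin.
 */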

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non-BR/EDR controllers as raw devices for now */
        if (hdev->dev_type != HCI_BREDR)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
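
/*
 * Open sequence: take the request lock, honour rfkill, call the
 * driver's open(), then (unless the device is HCI_RAW) run the
 * hci_init_req command batch under HCI_INIT with a single command
 * credit. On init failure everything is torn down again so the
 * driver's close() sees a quiescent device.
 */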

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_blacklist_clear(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}
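
/*
 * Teardown ordering matters here: the RX/TX tasklets die before state
 * is flushed, the best-effort HCI_Reset gets only 250 ms, and the cmd
 * tasklet is killed last because the reset request itself needs it.
 */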

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(dr.dev_id)))
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}
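
/*
 * For HCISETACLMTU/HCISETSCOMTU, dev_opt is reinterpreted as two
 * consecutive __u16 halves in host memory order: the first half is
 * the packet count, the second the MTU. Callers must pack the value
 * with the same layout the kernel was built for.
 */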

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        if (!(dl = kzalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(di.dev_id)))
                return -ENODEV;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
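
/*
 * hci_free_dev() only drops the device reference; the actual kfree
 * happens in the device release callback once the last reference is
 * gone, so it is safe to call while sysfs users still hold the device.
 */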

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
                                                hdev->bus, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        INIT_LIST_HEAD(&hdev->blacklist);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hdev->workqueue = create_singlethread_workqueue(hdev->name);
        if (!hdev->workqueue)
                goto nomem;

        hci_register_sysfs(hdev);

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        hci_notify(hdev, HCI_DEV_REG);

        return id;

nomem:
        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
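
/*
 * Illustrative sketch (not part of this file): the minimal transport
 * driver registration implied by the checks above. open, close and
 * destruct are mandatory; send is what hci_send_frame() calls; flush
 * is optional. All example_* symbols here are hypothetical.
 */
#if 0
static int example_probe(void)
{
        struct hci_dev *hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus      = HCI_VIRTUAL;   /* hypothetical transport type */
        hdev->open     = example_open;
        hdev->close    = example_close;
        hdev->send     = example_send;
        hdev->destruct = example_destruct;
        hdev->owner    = THIS_MODULE;

        if (hci_register_dev(hdev) < 0) {
                hci_free_dev(hdev);
                return -ENODEV;
        }
        return 0;
}
#endif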

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_unregister_sysfs(hdev);

        destroy_workqueue(hdev->workqueue);

        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
        tasklet_schedule(&hdev->rx_task);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                          int count, __u8 index, gfp_t gfp_mask)
{
        int len = 0;
        int hlen = 0;
        int remain = count;
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
                                index >= NUM_REASSEMBLY)
                return -EILSEQ;

        skb = hdev->reassembly[index];

        if (!skb) {
                switch (type) {
                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;
                        break;
                case HCI_EVENT_PKT:
                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;
                        break;
                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;
                        break;
                }

                skb = bt_skb_alloc(len, gfp_mask);
                if (!skb)
                        return -ENOMEM;

                scb = (void *) skb->cb;
                scb->expect = hlen;
                scb->pkt_type = type;

                skb->dev = (void *) hdev;
                hdev->reassembly[index] = skb;
        }

        while (count) {
                scb = (void *) skb->cb;
                len = min(scb->expect, (__u16)count);

                memcpy(skb_put(skb, len), data, len);

                count -= len;
                data += len;
                scb->expect -= len;
                remain = count;

                switch (type) {
                case HCI_EVENT_PKT:
                        if (skb->len == HCI_EVENT_HDR_SIZE) {
                                struct hci_event_hdr *h = hci_event_hdr(skb);
                                scb->expect = h->plen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_ACLDATA_PKT:
                        if (skb->len  == HCI_ACL_HDR_SIZE) {
                                struct hci_acl_hdr *h = hci_acl_hdr(skb);
                                scb->expect = __le16_to_cpu(h->dlen);

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_SCODATA_PKT:
                        if (skb->len == HCI_SCO_HDR_SIZE) {
                                struct hci_sco_hdr *h = hci_sco_hdr(skb);
                                scb->expect = h->dlen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;
                }

                if (scb->expect == 0) {
                        /* Complete frame */

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);

                        hdev->reassembly[index] = NULL;
                        return remain;
                }
        }

        return remain;
}
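
/*
 * hci_reassembly() is a small state machine: for a fresh packet it
 * allocates an skb sized for the worst case and sets scb->expect to
 * the header length; once the header is complete, the real payload
 * length is read from it and expect shrinks toward zero, at which
 * point the frame is handed to hci_recv_frame(). The return value is
 * the number of input bytes not yet consumed.
 */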

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        int rem = 0;

        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                rem = hci_reassembly(hdev, type, data, count,
                                                type - 1, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
        int type;
        int rem = 0;

        while (count) {
                struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

                if (!skb) {
                        struct { char type; } *pkt;

                        /* Start of the frame */
                        pkt = data;
                        type = pkt->type;

                        data++;
                        count--;
                } else
                        type = bt_cb(skb)->pkt_type;

                rem = hci_reassembly(hdev, type, data,
                                        count, STREAM_REASSEMBLY, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
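
/*
 * The stream variant serves UART-style (H:4) transports where packets
 * arrive as a byte stream: the first byte of each frame is the packet
 * type indicator, which selects the header and length rules that
 * hci_reassembly() applies on the single STREAM_REASSEMBLY slot.
 */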

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;

        skb_queue_tail(&hdev->cmd_q, skb);
        tasklet_schedule(&hdev->cmd_task);

        return 0;
}
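
/*
 * Commands are never sent inline: hci_send_cmd() only builds the skb
 * (header plus parameters), tags it HCI_COMMAND_PKT, queues it on
 * cmd_q and kicks cmd_task, which is what enforces the controller's
 * command flow control.
 */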

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

        if (!(list = skb_shinfo(skb)->frag_list)) {
                /* Non-fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
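
/*
 * Fragmented ACL writes arrive as an skb with a frag_list: the head
 * fragment is tagged ACL_START and each continuation ACL_CONT, and the
 * whole chain is queued under the data_q lock so the TX scheduler can
 * never interleave another writer's fragments in between.
 */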

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
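
/*
 * Quota example: with hdev->acl_cnt == 8 free controller buffers and
 * two ACL connections with queued data, the least-recently-served
 * connection (smallest c->sent) wins and may send 8 / 2 = 4 packets
 * this round; the quota never drops below 1 while buffers remain.
 */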

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
                        hci_acl_tx_to(hdev);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);

                        hci_conn_enter_active_mode(conn);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        hci_sched_esco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                hci_conn_enter_active_mode(conn);

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                kfree_skb(hdev->sent_cmd);

                if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        tasklet_schedule(&hdev->cmd_task);
                }
        }
}
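
/*
 * Command flow control: cmd_cnt is the controller's command credit
 * (replenished by the event path, forced back to 1 here if no credit
 * arrives within HZ jiffies, i.e. one second). The command is cloned
 * into sent_cmd first so hci_sent_cmd_data() can match the eventual
 * Command Complete against the parameters that were actually sent.
 */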