Bluetooth: Minor code refactoring
[deliverable/linux.git] / net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
31
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/notifier.h>
44 #include <linux/rfkill.h>
45 #include <linux/timer.h>
46 #include <linux/crypto.h>
47 #include <net/sock.h>
48
49 #include <asm/system.h>
50 #include <linux/uaccess.h>
51 #include <asm/unaligned.h>
52
53 #include <net/bluetooth/bluetooth.h>
54 #include <net/bluetooth/hci_core.h>
55
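/* Delay in milliseconds before an auto-powered-on controller is scheduled
 * to power off again (see hci_power_on()). */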
56 #define AUTO_OFF_TIMEOUT 2000
57
58 bool enable_hs;
59
60 static void hci_rx_work(struct work_struct *work);
61 static void hci_cmd_work(struct work_struct *work);
62 static void hci_tx_work(struct work_struct *work);
63
64 /* HCI device list */
65 LIST_HEAD(hci_dev_list);
66 DEFINE_RWLOCK(hci_dev_list_lock);
67
68 /* HCI callback list */
69 LIST_HEAD(hci_cb_list);
70 DEFINE_RWLOCK(hci_cb_list_lock);
71
72 /* HCI notifier chain */
73 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
74
75 /* ---- HCI notifications ---- */
76
77 int hci_register_notifier(struct notifier_block *nb)
78 {
79 return atomic_notifier_chain_register(&hci_notifier, nb);
80 }
81
82 int hci_unregister_notifier(struct notifier_block *nb)
83 {
84 return atomic_notifier_chain_unregister(&hci_notifier, nb);
85 }
86
87 static void hci_notify(struct hci_dev *hdev, int event)
88 {
89 atomic_notifier_call_chain(&hci_notifier, event, hdev);
90 }
91
92 /* ---- HCI requests ---- */
93
94 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
95 {
96 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97
98 /* If this is the init phase, check whether the completed command
99 * matches the last init command and, if not, just return.
100 */
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
102 return;
103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result;
106 hdev->req_status = HCI_REQ_DONE;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109 }
110
111 static void hci_req_cancel(struct hci_dev *hdev, int err)
112 {
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
119 }
120 }
121
122 /* Execute request and wait for completion. */
123 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
124 unsigned long opt, __u32 timeout)
125 {
126 DECLARE_WAITQUEUE(wait, current);
127 int err = 0;
128
129 BT_DBG("%s start", hdev->name);
130
131 hdev->req_status = HCI_REQ_PEND;
132
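/* The request callback only queues HCI commands; completion is signalled
 * from hci_req_complete() or hci_req_cancel(), which wake req_wait_q once
 * the controller answers or the request is aborted. */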
133 add_wait_queue(&hdev->req_wait_q, &wait);
134 set_current_state(TASK_INTERRUPTIBLE);
135
136 req(hdev, opt);
137 schedule_timeout(timeout);
138
139 remove_wait_queue(&hdev->req_wait_q, &wait);
140
141 if (signal_pending(current))
142 return -EINTR;
143
144 switch (hdev->req_status) {
145 case HCI_REQ_DONE:
146 err = -bt_to_errno(hdev->req_result);
147 break;
148
149 case HCI_REQ_CANCELED:
150 err = -hdev->req_result;
151 break;
152
153 default:
154 err = -ETIMEDOUT;
155 break;
156 }
157
158 hdev->req_status = hdev->req_result = 0;
159
160 BT_DBG("%s end: err %d", hdev->name, err);
161
162 return err;
163 }
164
165 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
166 unsigned long opt, __u32 timeout)
167 {
168 int ret;
169
170 if (!test_bit(HCI_UP, &hdev->flags))
171 return -ENETDOWN;
172
173 /* Serialize all requests */
174 hci_req_lock(hdev);
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
177
178 return ret;
179 }
180
181 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
182 {
183 BT_DBG("%s %ld", hdev->name, opt);
184
185 /* Reset device */
186 set_bit(HCI_RESET, &hdev->flags);
187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
188 }
189
190 static void bredr_init(struct hci_dev *hdev)
191 {
192 struct hci_cp_delete_stored_link_key cp;
193 __le16 param;
194 __u8 flt_type;
195
196 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
197
198 /* Mandatory initialization */
199
200 /* Reset */
201 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
202 set_bit(HCI_RESET, &hdev->flags);
203 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
204 }
205
206 /* Read Local Supported Features */
207 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
208
209 /* Read Local Version */
210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
211
212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
213 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
214
215 /* Read BD Address */
216 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
217
218 /* Read Class of Device */
219 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
220
221 /* Read Local Name */
222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
223
224 /* Read Voice Setting */
225 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
226
227 /* Optional initialization */
228
229 /* Clear Event Filters */
230 flt_type = HCI_FLT_CLEAR_ALL;
231 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
232
233 /* Connection accept timeout ~20 secs */
234 param = cpu_to_le16(0x7d00);
235 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
236
237 bacpy(&cp.bdaddr, BDADDR_ANY);
238 cp.delete_all = 1;
239 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
240 }
241
242 static void amp_init(struct hci_dev *hdev)
243 {
244 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
245
246 /* Reset */
247 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
248
249 /* Read Local Version */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
251 }
252
253 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
254 {
255 struct sk_buff *skb;
256
257 BT_DBG("%s %ld", hdev->name, opt);
258
259 /* Driver initialization */
260
261 /* Special commands */
262 while ((skb = skb_dequeue(&hdev->driver_init))) {
263 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
264 skb->dev = (void *) hdev;
265
266 skb_queue_tail(&hdev->cmd_q, skb);
267 queue_work(hdev->workqueue, &hdev->cmd_work);
268 }
269 skb_queue_purge(&hdev->driver_init);
270
271 switch (hdev->dev_type) {
272 case HCI_BREDR:
273 bredr_init(hdev);
274 break;
275
276 case HCI_AMP:
277 amp_init(hdev);
278 break;
279
280 default:
281 BT_ERR("Unknown device type %d", hdev->dev_type);
282 break;
283 }
284
285 }
286
287 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
288 {
289 BT_DBG("%s", hdev->name);
290
291 /* Read LE buffer size */
292 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
293 }
294
295 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
296 {
297 __u8 scan = opt;
298
299 BT_DBG("%s %x", hdev->name, scan);
300
301 /* Inquiry and Page scans */
302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
303 }
304
305 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
306 {
307 __u8 auth = opt;
308
309 BT_DBG("%s %x", hdev->name, auth);
310
311 /* Authentication */
312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
313 }
314
315 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
316 {
317 __u8 encrypt = opt;
318
319 BT_DBG("%s %x", hdev->name, encrypt);
320
321 /* Encryption */
322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
323 }
324
325 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
326 {
327 __le16 policy = cpu_to_le16(opt);
328
329 BT_DBG("%s %x", hdev->name, policy);
330
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
333 }
334
335 /* Get HCI device by index.
336 * Device is held on return. */
337 struct hci_dev *hci_dev_get(int index)
338 {
339 struct hci_dev *hdev = NULL, *d;
340
341 BT_DBG("%d", index);
342
343 if (index < 0)
344 return NULL;
345
346 read_lock(&hci_dev_list_lock);
347 list_for_each_entry(d, &hci_dev_list, list) {
348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
350 break;
351 }
352 }
353 read_unlock(&hci_dev_list_lock);
354 return hdev;
355 }
356
357 /* ---- Inquiry support ---- */
358
359 bool hci_discovery_active(struct hci_dev *hdev)
360 {
361 struct discovery_state *discov = &hdev->discovery;
362
363 switch (discov->state) {
364 case DISCOVERY_INQUIRY:
365 case DISCOVERY_LE_SCAN:
366 case DISCOVERY_RESOLVING:
367 return true;
368
369 default:
370 return false;
371 }
372 }
373
374 void hci_discovery_set_state(struct hci_dev *hdev, int state)
375 {
376 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
377
378 if (hdev->discovery.state == state)
379 return;
380
381 switch (state) {
382 case DISCOVERY_STOPPED:
383 mgmt_discovering(hdev, 0);
384 break;
385 case DISCOVERY_STARTING:
386 break;
387 case DISCOVERY_INQUIRY:
388 case DISCOVERY_LE_SCAN:
389 mgmt_discovering(hdev, 1);
390 break;
391 case DISCOVERY_RESOLVING:
392 break;
393 case DISCOVERY_STOPPING:
394 break;
395 }
396
397 hdev->discovery.state = state;
398 }
399
400 static void inquiry_cache_flush(struct hci_dev *hdev)
401 {
402 struct discovery_state *cache = &hdev->discovery;
403 struct inquiry_entry *p, *n;
404
405 list_for_each_entry_safe(p, n, &cache->all, all) {
406 list_del(&p->all);
407 kfree(p);
408 }
409
410 INIT_LIST_HEAD(&cache->unknown);
411 INIT_LIST_HEAD(&cache->resolve);
412 cache->state = DISCOVERY_STOPPED;
413 }
414
415 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
416 {
417 struct discovery_state *cache = &hdev->discovery;
418 struct inquiry_entry *e;
419
420 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
421
422 list_for_each_entry(e, &cache->all, all) {
423 if (!bacmp(&e->data.bdaddr, bdaddr))
424 return e;
425 }
426
427 return NULL;
428 }
429
430 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
431 bdaddr_t *bdaddr)
432 {
433 struct discovery_state *cache = &hdev->discovery;
434 struct inquiry_entry *e;
435
436 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
437
438 list_for_each_entry(e, &cache->unknown, list) {
439 if (!bacmp(&e->data.bdaddr, bdaddr))
440 return e;
441 }
442
443 return NULL;
444 }
445
446 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
447 bdaddr_t *bdaddr,
448 int state)
449 {
450 struct discovery_state *cache = &hdev->discovery;
451 struct inquiry_entry *e;
452
453 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
454
455 list_for_each_entry(e, &cache->resolve, list) {
456 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
457 return e;
458 if (!bacmp(&e->data.bdaddr, bdaddr))
459 return e;
460 }
461
462 return NULL;
463 }
464
465 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
466 struct inquiry_entry *ie)
467 {
468 struct discovery_state *cache = &hdev->discovery;
469 struct list_head *pos = &cache->resolve;
470 struct inquiry_entry *p;
471
472 list_del(&ie->list);
473
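/* Re-insert the entry keeping the resolve list ordered by signal strength
 * (smallest |RSSI| first) so the strongest devices get their names resolved
 * first; entries already pending resolution keep their place. */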
474 list_for_each_entry(p, &cache->resolve, list) {
475 if (p->name_state != NAME_PENDING &&
476 abs(p->data.rssi) >= abs(ie->data.rssi))
477 break;
478 pos = &p->list;
479 }
480
481 list_add(&ie->list, pos);
482 }
483
484 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
485 bool name_known)
486 {
487 struct discovery_state *cache = &hdev->discovery;
488 struct inquiry_entry *ie;
489
490 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
491
492 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
493 if (ie) {
494 if (ie->name_state == NAME_NEEDED &&
495 data->rssi != ie->data.rssi) {
496 ie->data.rssi = data->rssi;
497 hci_inquiry_cache_update_resolve(hdev, ie);
498 }
499
500 goto update;
501 }
502
503 /* Entry not in the cache. Add new one. */
504 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
505 if (!ie)
506 return false;
507
508 list_add(&ie->all, &cache->all);
509
510 if (name_known) {
511 ie->name_state = NAME_KNOWN;
512 } else {
513 ie->name_state = NAME_NOT_KNOWN;
514 list_add(&ie->list, &cache->unknown);
515 }
516
517 update:
518 if (name_known && ie->name_state != NAME_KNOWN &&
519 ie->name_state != NAME_PENDING) {
520 ie->name_state = NAME_KNOWN;
521 list_del(&ie->list);
522 }
523
524 memcpy(&ie->data, data, sizeof(*data));
525 ie->timestamp = jiffies;
526 cache->timestamp = jiffies;
527
528 if (ie->name_state == NAME_NOT_KNOWN)
529 return false;
530
531 return true;
532 }
533
534 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
535 {
536 struct discovery_state *cache = &hdev->discovery;
537 struct inquiry_info *info = (struct inquiry_info *) buf;
538 struct inquiry_entry *e;
539 int copied = 0;
540
541 list_for_each_entry(e, &cache->all, all) {
542 struct inquiry_data *data = &e->data;
543
544 if (copied >= num)
545 break;
546
547 bacpy(&info->bdaddr, &data->bdaddr);
548 info->pscan_rep_mode = data->pscan_rep_mode;
549 info->pscan_period_mode = data->pscan_period_mode;
550 info->pscan_mode = data->pscan_mode;
551 memcpy(info->dev_class, data->dev_class, 3);
552 info->clock_offset = data->clock_offset;
553
554 info++;
555 copied++;
556 }
557
558 BT_DBG("cache %p, copied %d", cache, copied);
559 return copied;
560 }
561
562 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
563 {
564 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
565 struct hci_cp_inquiry cp;
566
567 BT_DBG("%s", hdev->name);
568
569 if (test_bit(HCI_INQUIRY, &hdev->flags))
570 return;
571
572 /* Start Inquiry */
573 memcpy(&cp.lap, &ir->lap, 3);
574 cp.length = ir->length;
575 cp.num_rsp = ir->num_rsp;
576 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
577 }
578
579 int hci_inquiry(void __user *arg)
580 {
581 __u8 __user *ptr = arg;
582 struct hci_inquiry_req ir;
583 struct hci_dev *hdev;
584 int err = 0, do_inquiry = 0, max_rsp;
585 long timeo;
586 __u8 *buf;
587
588 if (copy_from_user(&ir, ptr, sizeof(ir)))
589 return -EFAULT;
590
591 hdev = hci_dev_get(ir.dev_id);
592 if (!hdev)
593 return -ENODEV;
594
595 hci_dev_lock(hdev);
596 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
597 inquiry_cache_empty(hdev) ||
598 ir.flags & IREQ_CACHE_FLUSH) {
599 inquiry_cache_flush(hdev);
600 do_inquiry = 1;
601 }
602 hci_dev_unlock(hdev);
603
604 timeo = ir.length * msecs_to_jiffies(2000);
605
606 if (do_inquiry) {
607 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
608 if (err < 0)
609 goto done;
610 }
611
612 /* For an unlimited number of responses we will use a buffer with 255 entries */
613 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
614
615 /* cache_dump can't sleep. Therefore we allocate a temporary buffer and
616 * then copy it to user space.
617 */
618 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
619 if (!buf) {
620 err = -ENOMEM;
621 goto done;
622 }
623
624 hci_dev_lock(hdev);
625 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
626 hci_dev_unlock(hdev);
627
628 BT_DBG("num_rsp %d", ir.num_rsp);
629
630 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
631 ptr += sizeof(ir);
632 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
633 ir.num_rsp))
634 err = -EFAULT;
635 } else
636 err = -EFAULT;
637
638 kfree(buf);
639
640 done:
641 hci_dev_put(hdev);
642 return err;
643 }
644
645 /* ---- HCI ioctl helpers ---- */
646
647 int hci_dev_open(__u16 dev)
648 {
649 struct hci_dev *hdev;
650 int ret = 0;
651
652 hdev = hci_dev_get(dev);
653 if (!hdev)
654 return -ENODEV;
655
656 BT_DBG("%s %p", hdev->name, hdev);
657
658 hci_req_lock(hdev);
659
660 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
661 ret = -ERFKILL;
662 goto done;
663 }
664
665 if (test_bit(HCI_UP, &hdev->flags)) {
666 ret = -EALREADY;
667 goto done;
668 }
669
670 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
671 set_bit(HCI_RAW, &hdev->flags);
672
673 /* Treat all non-BR/EDR controllers as raw devices if
674 enable_hs is not set */
675 if (hdev->dev_type != HCI_BREDR && !enable_hs)
676 set_bit(HCI_RAW, &hdev->flags);
677
678 if (hdev->open(hdev)) {
679 ret = -EIO;
680 goto done;
681 }
682
683 if (!test_bit(HCI_RAW, &hdev->flags)) {
684 atomic_set(&hdev->cmd_cnt, 1);
685 set_bit(HCI_INIT, &hdev->flags);
686 hdev->init_last_cmd = 0;
687
688 ret = __hci_request(hdev, hci_init_req, 0,
689 msecs_to_jiffies(HCI_INIT_TIMEOUT));
690
691 if (lmp_host_le_capable(hdev))
692 ret = __hci_request(hdev, hci_le_init_req, 0,
693 msecs_to_jiffies(HCI_INIT_TIMEOUT));
694
695 clear_bit(HCI_INIT, &hdev->flags);
696 }
697
698 if (!ret) {
699 hci_dev_hold(hdev);
700 set_bit(HCI_UP, &hdev->flags);
701 hci_notify(hdev, HCI_DEV_UP);
702 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
703 hci_dev_lock(hdev);
704 mgmt_powered(hdev, 1);
705 hci_dev_unlock(hdev);
706 }
707 } else {
708 /* Init failed, cleanup */
709 flush_work(&hdev->tx_work);
710 flush_work(&hdev->cmd_work);
711 flush_work(&hdev->rx_work);
712
713 skb_queue_purge(&hdev->cmd_q);
714 skb_queue_purge(&hdev->rx_q);
715
716 if (hdev->flush)
717 hdev->flush(hdev);
718
719 if (hdev->sent_cmd) {
720 kfree_skb(hdev->sent_cmd);
721 hdev->sent_cmd = NULL;
722 }
723
724 hdev->close(hdev);
725 hdev->flags = 0;
726 }
727
728 done:
729 hci_req_unlock(hdev);
730 hci_dev_put(hdev);
731 return ret;
732 }
733
734 static int hci_dev_do_close(struct hci_dev *hdev)
735 {
736 BT_DBG("%s %p", hdev->name, hdev);
737
738 hci_req_cancel(hdev, ENODEV);
739 hci_req_lock(hdev);
740
741 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
742 del_timer_sync(&hdev->cmd_timer);
743 hci_req_unlock(hdev);
744 return 0;
745 }
746
747 /* Flush RX and TX works */
748 flush_work(&hdev->tx_work);
749 flush_work(&hdev->rx_work);
750
751 if (hdev->discov_timeout > 0) {
752 cancel_delayed_work(&hdev->discov_off);
753 hdev->discov_timeout = 0;
754 }
755
756 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
757 cancel_delayed_work(&hdev->power_off);
758
759 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
760 cancel_delayed_work(&hdev->service_cache);
761
762 hci_dev_lock(hdev);
763 inquiry_cache_flush(hdev);
764 hci_conn_hash_flush(hdev);
765 hci_dev_unlock(hdev);
766
767 hci_notify(hdev, HCI_DEV_DOWN);
768
769 if (hdev->flush)
770 hdev->flush(hdev);
771
772 /* Reset device */
773 skb_queue_purge(&hdev->cmd_q);
774 atomic_set(&hdev->cmd_cnt, 1);
775 if (!test_bit(HCI_RAW, &hdev->flags) &&
776 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
777 set_bit(HCI_INIT, &hdev->flags);
778 __hci_request(hdev, hci_reset_req, 0,
779 msecs_to_jiffies(250));
780 clear_bit(HCI_INIT, &hdev->flags);
781 }
782
783 /* flush cmd work */
784 flush_work(&hdev->cmd_work);
785
786 /* Drop queues */
787 skb_queue_purge(&hdev->rx_q);
788 skb_queue_purge(&hdev->cmd_q);
789 skb_queue_purge(&hdev->raw_q);
790
791 /* Drop last sent command */
792 if (hdev->sent_cmd) {
793 del_timer_sync(&hdev->cmd_timer);
794 kfree_skb(hdev->sent_cmd);
795 hdev->sent_cmd = NULL;
796 }
797
798 /* After this point our queues are empty
799 * and no tasks are scheduled. */
800 hdev->close(hdev);
801
802 hci_dev_lock(hdev);
803 mgmt_powered(hdev, 0);
804 hci_dev_unlock(hdev);
805
806 /* Clear flags */
807 hdev->flags = 0;
808
809 hci_req_unlock(hdev);
810
811 hci_dev_put(hdev);
812 return 0;
813 }
814
815 int hci_dev_close(__u16 dev)
816 {
817 struct hci_dev *hdev;
818 int err;
819
820 hdev = hci_dev_get(dev);
821 if (!hdev)
822 return -ENODEV;
823 err = hci_dev_do_close(hdev);
824 hci_dev_put(hdev);
825 return err;
826 }
827
828 int hci_dev_reset(__u16 dev)
829 {
830 struct hci_dev *hdev;
831 int ret = 0;
832
833 hdev = hci_dev_get(dev);
834 if (!hdev)
835 return -ENODEV;
836
837 hci_req_lock(hdev);
838
839 if (!test_bit(HCI_UP, &hdev->flags))
840 goto done;
841
842 /* Drop queues */
843 skb_queue_purge(&hdev->rx_q);
844 skb_queue_purge(&hdev->cmd_q);
845
846 hci_dev_lock(hdev);
847 inquiry_cache_flush(hdev);
848 hci_conn_hash_flush(hdev);
849 hci_dev_unlock(hdev);
850
851 if (hdev->flush)
852 hdev->flush(hdev);
853
854 atomic_set(&hdev->cmd_cnt, 1);
855 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
856
857 if (!test_bit(HCI_RAW, &hdev->flags))
858 ret = __hci_request(hdev, hci_reset_req, 0,
859 msecs_to_jiffies(HCI_INIT_TIMEOUT));
860
861 done:
862 hci_req_unlock(hdev);
863 hci_dev_put(hdev);
864 return ret;
865 }
866
867 int hci_dev_reset_stat(__u16 dev)
868 {
869 struct hci_dev *hdev;
870 int ret = 0;
871
872 hdev = hci_dev_get(dev);
873 if (!hdev)
874 return -ENODEV;
875
876 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
877
878 hci_dev_put(hdev);
879
880 return ret;
881 }
882
883 int hci_dev_cmd(unsigned int cmd, void __user *arg)
884 {
885 struct hci_dev *hdev;
886 struct hci_dev_req dr;
887 int err = 0;
888
889 if (copy_from_user(&dr, arg, sizeof(dr)))
890 return -EFAULT;
891
892 hdev = hci_dev_get(dr.dev_id);
893 if (!hdev)
894 return -ENODEV;
895
896 switch (cmd) {
897 case HCISETAUTH:
898 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
899 msecs_to_jiffies(HCI_INIT_TIMEOUT));
900 break;
901
902 case HCISETENCRYPT:
903 if (!lmp_encrypt_capable(hdev)) {
904 err = -EOPNOTSUPP;
905 break;
906 }
907
908 if (!test_bit(HCI_AUTH, &hdev->flags)) {
909 /* Auth must be enabled first */
910 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
911 msecs_to_jiffies(HCI_INIT_TIMEOUT));
912 if (err)
913 break;
914 }
915
916 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
917 msecs_to_jiffies(HCI_INIT_TIMEOUT));
918 break;
919
920 case HCISETSCAN:
921 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
922 msecs_to_jiffies(HCI_INIT_TIMEOUT));
923 break;
924
925 case HCISETLINKPOL:
926 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
927 msecs_to_jiffies(HCI_INIT_TIMEOUT));
928 break;
929
930 case HCISETLINKMODE:
931 hdev->link_mode = ((__u16) dr.dev_opt) &
932 (HCI_LM_MASTER | HCI_LM_ACCEPT);
933 break;
934
935 case HCISETPTYPE:
936 hdev->pkt_type = (__u16) dr.dev_opt;
937 break;
938
939 case HCISETACLMTU:
940 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
941 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
942 break;
943
944 case HCISETSCOMTU:
945 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
946 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
947 break;
948
949 default:
950 err = -EINVAL;
951 break;
952 }
953
954 hci_dev_put(hdev);
955 return err;
956 }
957
958 int hci_get_dev_list(void __user *arg)
959 {
960 struct hci_dev *hdev;
961 struct hci_dev_list_req *dl;
962 struct hci_dev_req *dr;
963 int n = 0, size, err;
964 __u16 dev_num;
965
966 if (get_user(dev_num, (__u16 __user *) arg))
967 return -EFAULT;
968
969 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
970 return -EINVAL;
971
972 size = sizeof(*dl) + dev_num * sizeof(*dr);
973
974 dl = kzalloc(size, GFP_KERNEL);
975 if (!dl)
976 return -ENOMEM;
977
978 dr = dl->dev_req;
979
980 read_lock(&hci_dev_list_lock);
981 list_for_each_entry(hdev, &hci_dev_list, list) {
982 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
983 cancel_delayed_work(&hdev->power_off);
984
985 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
986 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
987
988 (dr + n)->dev_id = hdev->id;
989 (dr + n)->dev_opt = hdev->flags;
990
991 if (++n >= dev_num)
992 break;
993 }
994 read_unlock(&hci_dev_list_lock);
995
996 dl->dev_num = n;
997 size = sizeof(*dl) + n * sizeof(*dr);
998
999 err = copy_to_user(arg, dl, size);
1000 kfree(dl);
1001
1002 return err ? -EFAULT : 0;
1003 }
1004
1005 int hci_get_dev_info(void __user *arg)
1006 {
1007 struct hci_dev *hdev;
1008 struct hci_dev_info di;
1009 int err = 0;
1010
1011 if (copy_from_user(&di, arg, sizeof(di)))
1012 return -EFAULT;
1013
1014 hdev = hci_dev_get(di.dev_id);
1015 if (!hdev)
1016 return -ENODEV;
1017
1018 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1019 cancel_delayed_work_sync(&hdev->power_off);
1020
1021 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1022 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1023
1024 strcpy(di.name, hdev->name);
1025 di.bdaddr = hdev->bdaddr;
1026 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1027 di.flags = hdev->flags;
1028 di.pkt_type = hdev->pkt_type;
1029 di.acl_mtu = hdev->acl_mtu;
1030 di.acl_pkts = hdev->acl_pkts;
1031 di.sco_mtu = hdev->sco_mtu;
1032 di.sco_pkts = hdev->sco_pkts;
1033 di.link_policy = hdev->link_policy;
1034 di.link_mode = hdev->link_mode;
1035
1036 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1037 memcpy(&di.features, &hdev->features, sizeof(di.features));
1038
1039 if (copy_to_user(arg, &di, sizeof(di)))
1040 err = -EFAULT;
1041
1042 hci_dev_put(hdev);
1043
1044 return err;
1045 }
1046
1047 /* ---- Interface to HCI drivers ---- */
1048
1049 static int hci_rfkill_set_block(void *data, bool blocked)
1050 {
1051 struct hci_dev *hdev = data;
1052
1053 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1054
1055 if (!blocked)
1056 return 0;
1057
1058 hci_dev_do_close(hdev);
1059
1060 return 0;
1061 }
1062
1063 static const struct rfkill_ops hci_rfkill_ops = {
1064 .set_block = hci_rfkill_set_block,
1065 };
1066
1067 /* Alloc HCI device */
1068 struct hci_dev *hci_alloc_dev(void)
1069 {
1070 struct hci_dev *hdev;
1071
1072 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1073 if (!hdev)
1074 return NULL;
1075
1076 hci_init_sysfs(hdev);
1077 skb_queue_head_init(&hdev->driver_init);
1078
1079 return hdev;
1080 }
1081 EXPORT_SYMBOL(hci_alloc_dev);
1082
1083 /* Free HCI device */
1084 void hci_free_dev(struct hci_dev *hdev)
1085 {
1086 skb_queue_purge(&hdev->driver_init);
1087
1088 /* will be freed via device release */
1089 put_device(&hdev->dev);
1090 }
1091 EXPORT_SYMBOL(hci_free_dev);
1092
1093 static void hci_power_on(struct work_struct *work)
1094 {
1095 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1096
1097 BT_DBG("%s", hdev->name);
1098
1099 if (hci_dev_open(hdev->id) < 0)
1100 return;
1101
1102 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1103 schedule_delayed_work(&hdev->power_off,
1104 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1105
1106 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1107 mgmt_index_added(hdev);
1108 }
1109
1110 static void hci_power_off(struct work_struct *work)
1111 {
1112 struct hci_dev *hdev = container_of(work, struct hci_dev,
1113 power_off.work);
1114
1115 BT_DBG("%s", hdev->name);
1116
1117 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1118
1119 hci_dev_close(hdev->id);
1120 }
1121
1122 static void hci_discov_off(struct work_struct *work)
1123 {
1124 struct hci_dev *hdev;
1125 u8 scan = SCAN_PAGE;
1126
1127 hdev = container_of(work, struct hci_dev, discov_off.work);
1128
1129 BT_DBG("%s", hdev->name);
1130
1131 hci_dev_lock(hdev);
1132
1133 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1134
1135 hdev->discov_timeout = 0;
1136
1137 hci_dev_unlock(hdev);
1138 }
1139
1140 int hci_uuids_clear(struct hci_dev *hdev)
1141 {
1142 struct list_head *p, *n;
1143
1144 list_for_each_safe(p, n, &hdev->uuids) {
1145 struct bt_uuid *uuid;
1146
1147 uuid = list_entry(p, struct bt_uuid, list);
1148
1149 list_del(p);
1150 kfree(uuid);
1151 }
1152
1153 return 0;
1154 }
1155
1156 int hci_link_keys_clear(struct hci_dev *hdev)
1157 {
1158 struct list_head *p, *n;
1159
1160 list_for_each_safe(p, n, &hdev->link_keys) {
1161 struct link_key *key;
1162
1163 key = list_entry(p, struct link_key, list);
1164
1165 list_del(p);
1166 kfree(key);
1167 }
1168
1169 return 0;
1170 }
1171
1172 int hci_smp_ltks_clear(struct hci_dev *hdev)
1173 {
1174 struct smp_ltk *k, *tmp;
1175
1176 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1177 list_del(&k->list);
1178 kfree(k);
1179 }
1180
1181 return 0;
1182 }
1183
1184 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1185 {
1186 struct link_key *k;
1187
1188 list_for_each_entry(k, &hdev->link_keys, list)
1189 if (bacmp(bdaddr, &k->bdaddr) == 0)
1190 return k;
1191
1192 return NULL;
1193 }
1194
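/* Decide whether a newly created link key should be stored persistently.
 * Returns 1 if the key should be kept and 0 if it should be dropped once
 * the current connection no longer needs it. */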
1195 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1196 u8 key_type, u8 old_key_type)
1197 {
1198 /* Legacy key */
1199 if (key_type < 0x03)
1200 return 1;
1201
1202 /* Debug keys are insecure so don't store them persistently */
1203 if (key_type == HCI_LK_DEBUG_COMBINATION)
1204 return 0;
1205
1206 /* Changed combination key and there's no previous one */
1207 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1208 return 0;
1209
1210 /* Security mode 3 case */
1211 if (!conn)
1212 return 1;
1213
1214 /* Neither local nor remote side requested no-bonding */
1215 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1216 return 1;
1217
1218 /* Local side had dedicated bonding as requirement */
1219 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1220 return 1;
1221
1222 /* Remote side had dedicated bonding as requirement */
1223 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1224 return 1;
1225
1226 /* If none of the above criteria match, then don't store the key
1227 * persistently */
1228 return 0;
1229 }
1230
1231 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1232 {
1233 struct smp_ltk *k;
1234
1235 list_for_each_entry(k, &hdev->long_term_keys, list) {
1236 if (k->ediv != ediv ||
1237 memcmp(rand, k->rand, sizeof(k->rand)))
1238 continue;
1239
1240 return k;
1241 }
1242
1243 return NULL;
1244 }
1245 EXPORT_SYMBOL(hci_find_ltk);
1246
1247 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1248 u8 addr_type)
1249 {
1250 struct smp_ltk *k;
1251
1252 list_for_each_entry(k, &hdev->long_term_keys, list)
1253 if (addr_type == k->bdaddr_type &&
1254 bacmp(bdaddr, &k->bdaddr) == 0)
1255 return k;
1256
1257 return NULL;
1258 }
1259 EXPORT_SYMBOL(hci_find_ltk_by_addr);
1260
1261 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1262 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1263 {
1264 struct link_key *key, *old_key;
1265 u8 old_key_type, persistent;
1266
1267 old_key = hci_find_link_key(hdev, bdaddr);
1268 if (old_key) {
1269 old_key_type = old_key->type;
1270 key = old_key;
1271 } else {
1272 old_key_type = conn ? conn->key_type : 0xff;
1273 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1274 if (!key)
1275 return -ENOMEM;
1276 list_add(&key->list, &hdev->link_keys);
1277 }
1278
1279 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1280
1281 /* Some buggy controller combinations generate a changed
1282 * combination key for legacy pairing even when there's no
1283 * previous key */
1284 if (type == HCI_LK_CHANGED_COMBINATION &&
1285 (!conn || conn->remote_auth == 0xff) &&
1286 old_key_type == 0xff) {
1287 type = HCI_LK_COMBINATION;
1288 if (conn)
1289 conn->key_type = type;
1290 }
1291
1292 bacpy(&key->bdaddr, bdaddr);
1293 memcpy(key->val, val, 16);
1294 key->pin_len = pin_len;
1295
1296 if (type == HCI_LK_CHANGED_COMBINATION)
1297 key->type = old_key_type;
1298 else
1299 key->type = type;
1300
1301 if (!new_key)
1302 return 0;
1303
1304 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1305
1306 mgmt_new_link_key(hdev, key, persistent);
1307
1308 if (!persistent) {
1309 list_del(&key->list);
1310 kfree(key);
1311 }
1312
1313 return 0;
1314 }
1315
1316 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1317 int new_key, u8 authenticated, u8 tk[16],
1318 u8 enc_size, u16 ediv, u8 rand[8])
1319 {
1320 struct smp_ltk *key, *old_key;
1321
1322 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1323 return 0;
1324
1325 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1326 if (old_key)
1327 key = old_key;
1328 else {
1329 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1330 if (!key)
1331 return -ENOMEM;
1332 list_add(&key->list, &hdev->long_term_keys);
1333 }
1334
1335 bacpy(&key->bdaddr, bdaddr);
1336 key->bdaddr_type = addr_type;
1337 memcpy(key->val, tk, sizeof(key->val));
1338 key->authenticated = authenticated;
1339 key->ediv = ediv;
1340 key->enc_size = enc_size;
1341 key->type = type;
1342 memcpy(key->rand, rand, sizeof(key->rand));
1343
1344 if (!new_key)
1345 return 0;
1346
1347 if (type & HCI_SMP_LTK)
1348 mgmt_new_ltk(hdev, key, 1);
1349
1350 return 0;
1351 }
1352
1353 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1354 {
1355 struct link_key *key;
1356
1357 key = hci_find_link_key(hdev, bdaddr);
1358 if (!key)
1359 return -ENOENT;
1360
1361 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1362
1363 list_del(&key->list);
1364 kfree(key);
1365
1366 return 0;
1367 }
1368
1369 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1370 {
1371 struct smp_ltk *k, *tmp;
1372
1373 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1374 if (bacmp(bdaddr, &k->bdaddr))
1375 continue;
1376
1377 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1378
1379 list_del(&k->list);
1380 kfree(k);
1381 }
1382
1383 return 0;
1384 }
1385
1386 /* HCI command timer function */
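/* Fires when a sent command got no response in time: log the timeout and
 * release the command credit so the next queued command can be sent. */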
1387 static void hci_cmd_timer(unsigned long arg)
1388 {
1389 struct hci_dev *hdev = (void *) arg;
1390
1391 BT_ERR("%s command tx timeout", hdev->name);
1392 atomic_set(&hdev->cmd_cnt, 1);
1393 queue_work(hdev->workqueue, &hdev->cmd_work);
1394 }
1395
1396 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1397 bdaddr_t *bdaddr)
1398 {
1399 struct oob_data *data;
1400
1401 list_for_each_entry(data, &hdev->remote_oob_data, list)
1402 if (bacmp(bdaddr, &data->bdaddr) == 0)
1403 return data;
1404
1405 return NULL;
1406 }
1407
1408 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1409 {
1410 struct oob_data *data;
1411
1412 data = hci_find_remote_oob_data(hdev, bdaddr);
1413 if (!data)
1414 return -ENOENT;
1415
1416 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1417
1418 list_del(&data->list);
1419 kfree(data);
1420
1421 return 0;
1422 }
1423
1424 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1425 {
1426 struct oob_data *data, *n;
1427
1428 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1429 list_del(&data->list);
1430 kfree(data);
1431 }
1432
1433 return 0;
1434 }
1435
1436 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1437 u8 *randomizer)
1438 {
1439 struct oob_data *data;
1440
1441 data = hci_find_remote_oob_data(hdev, bdaddr);
1442
1443 if (!data) {
1444 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1445 if (!data)
1446 return -ENOMEM;
1447
1448 bacpy(&data->bdaddr, bdaddr);
1449 list_add(&data->list, &hdev->remote_oob_data);
1450 }
1451
1452 memcpy(data->hash, hash, sizeof(data->hash));
1453 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1454
1455 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1456
1457 return 0;
1458 }
1459
1460 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1461 bdaddr_t *bdaddr)
1462 {
1463 struct bdaddr_list *b;
1464
1465 list_for_each_entry(b, &hdev->blacklist, list)
1466 if (bacmp(bdaddr, &b->bdaddr) == 0)
1467 return b;
1468
1469 return NULL;
1470 }
1471
1472 int hci_blacklist_clear(struct hci_dev *hdev)
1473 {
1474 struct list_head *p, *n;
1475
1476 list_for_each_safe(p, n, &hdev->blacklist) {
1477 struct bdaddr_list *b;
1478
1479 b = list_entry(p, struct bdaddr_list, list);
1480
1481 list_del(p);
1482 kfree(b);
1483 }
1484
1485 return 0;
1486 }
1487
1488 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1489 {
1490 struct bdaddr_list *entry;
1491
1492 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1493 return -EBADF;
1494
1495 if (hci_blacklist_lookup(hdev, bdaddr))
1496 return -EEXIST;
1497
1498 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1499 if (!entry)
1500 return -ENOMEM;
1501
1502 bacpy(&entry->bdaddr, bdaddr);
1503
1504 list_add(&entry->list, &hdev->blacklist);
1505
1506 return mgmt_device_blocked(hdev, bdaddr);
1507 }
1508
1509 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1510 {
1511 struct bdaddr_list *entry;
1512
1513 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1514 return hci_blacklist_clear(hdev);
1515
1516 entry = hci_blacklist_lookup(hdev, bdaddr);
1517 if (!entry)
1518 return -ENOENT;
1519
1520 list_del(&entry->list);
1521 kfree(entry);
1522
1523 return mgmt_device_unblocked(hdev, bdaddr);
1524 }
1525
1526 static void hci_clear_adv_cache(struct work_struct *work)
1527 {
1528 struct hci_dev *hdev = container_of(work, struct hci_dev,
1529 adv_work.work);
1530
1531 hci_dev_lock(hdev);
1532
1533 hci_adv_entries_clear(hdev);
1534
1535 hci_dev_unlock(hdev);
1536 }
1537
1538 int hci_adv_entries_clear(struct hci_dev *hdev)
1539 {
1540 struct adv_entry *entry, *tmp;
1541
1542 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1543 list_del(&entry->list);
1544 kfree(entry);
1545 }
1546
1547 BT_DBG("%s adv cache cleared", hdev->name);
1548
1549 return 0;
1550 }
1551
1552 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1553 {
1554 struct adv_entry *entry;
1555
1556 list_for_each_entry(entry, &hdev->adv_entries, list)
1557 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1558 return entry;
1559
1560 return NULL;
1561 }
1562
1563 static inline int is_connectable_adv(u8 evt_type)
1564 {
1565 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1566 return 1;
1567
1568 return 0;
1569 }
1570
1571 int hci_add_adv_entry(struct hci_dev *hdev,
1572 struct hci_ev_le_advertising_info *ev)
1573 {
1574 struct adv_entry *entry;
1575
1576 if (!is_connectable_adv(ev->evt_type))
1577 return -EINVAL;
1578
1579 /* Only new entries should be added to adv_entries. So, if
1580 * bdaddr was found, don't add it. */
1581 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1582 return 0;
1583
1584 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1585 if (!entry)
1586 return -ENOMEM;
1587
1588 bacpy(&entry->bdaddr, &ev->bdaddr);
1589 entry->bdaddr_type = ev->bdaddr_type;
1590
1591 list_add(&entry->list, &hdev->adv_entries);
1592
1593 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1594 batostr(&entry->bdaddr), entry->bdaddr_type);
1595
1596 return 0;
1597 }
1598
1599 /* Register HCI device */
1600 int hci_register_dev(struct hci_dev *hdev)
1601 {
1602 struct list_head *head = &hci_dev_list, *p;
1603 int i, id, error;
1604
1605 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1606
1607 if (!hdev->open || !hdev->close)
1608 return -EINVAL;
1609
1610 /* Do not allow HCI_AMP devices to register at index 0,
1611 * so the index can be used as the AMP controller ID.
1612 */
1613 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1614
1615 write_lock(&hci_dev_list_lock);
1616
1617 /* Find first available device id */
1618 list_for_each(p, &hci_dev_list) {
1619 if (list_entry(p, struct hci_dev, list)->id != id)
1620 break;
1621 head = p; id++;
1622 }
1623
1624 sprintf(hdev->name, "hci%d", id);
1625 hdev->id = id;
1626 list_add_tail(&hdev->list, head);
1627
1628 mutex_init(&hdev->lock);
1629
1630 hdev->flags = 0;
1631 hdev->dev_flags = 0;
1632 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1633 hdev->esco_type = (ESCO_HV1);
1634 hdev->link_mode = (HCI_LM_ACCEPT);
1635 hdev->io_capability = 0x03; /* No Input No Output */
1636
1637 hdev->idle_timeout = 0;
1638 hdev->sniff_max_interval = 800;
1639 hdev->sniff_min_interval = 80;
1640
1641 INIT_WORK(&hdev->rx_work, hci_rx_work);
1642 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1643 INIT_WORK(&hdev->tx_work, hci_tx_work);
1644
1645
1646 skb_queue_head_init(&hdev->rx_q);
1647 skb_queue_head_init(&hdev->cmd_q);
1648 skb_queue_head_init(&hdev->raw_q);
1649
1650 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1651
1652 for (i = 0; i < NUM_REASSEMBLY; i++)
1653 hdev->reassembly[i] = NULL;
1654
1655 init_waitqueue_head(&hdev->req_wait_q);
1656 mutex_init(&hdev->req_lock);
1657
1658 discovery_init(hdev);
1659
1660 hci_conn_hash_init(hdev);
1661
1662 INIT_LIST_HEAD(&hdev->mgmt_pending);
1663
1664 INIT_LIST_HEAD(&hdev->blacklist);
1665
1666 INIT_LIST_HEAD(&hdev->uuids);
1667
1668 INIT_LIST_HEAD(&hdev->link_keys);
1669 INIT_LIST_HEAD(&hdev->long_term_keys);
1670
1671 INIT_LIST_HEAD(&hdev->remote_oob_data);
1672
1673 INIT_LIST_HEAD(&hdev->adv_entries);
1674
1675 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1676 INIT_WORK(&hdev->power_on, hci_power_on);
1677 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1678
1679 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1680
1681 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1682
1683 atomic_set(&hdev->promisc, 0);
1684
1685 write_unlock(&hci_dev_list_lock);
1686
1687 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1688 WQ_MEM_RECLAIM, 1);
1689 if (!hdev->workqueue) {
1690 error = -ENOMEM;
1691 goto err;
1692 }
1693
1694 error = hci_add_sysfs(hdev);
1695 if (error < 0)
1696 goto err_wqueue;
1697
1698 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1699 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1700 if (hdev->rfkill) {
1701 if (rfkill_register(hdev->rfkill) < 0) {
1702 rfkill_destroy(hdev->rfkill);
1703 hdev->rfkill = NULL;
1704 }
1705 }
1706
1707 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1708 set_bit(HCI_SETUP, &hdev->dev_flags);
1709 schedule_work(&hdev->power_on);
1710
1711 hci_notify(hdev, HCI_DEV_REG);
1712 hci_dev_hold(hdev);
1713
1714 return id;
1715
1716 err_wqueue:
1717 destroy_workqueue(hdev->workqueue);
1718 err:
1719 write_lock(&hci_dev_list_lock);
1720 list_del(&hdev->list);
1721 write_unlock(&hci_dev_list_lock);
1722
1723 return error;
1724 }
1725 EXPORT_SYMBOL(hci_register_dev);
1726
1727 /* Unregister HCI device */
1728 void hci_unregister_dev(struct hci_dev *hdev)
1729 {
1730 int i;
1731
1732 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1733
1734 write_lock(&hci_dev_list_lock);
1735 list_del(&hdev->list);
1736 write_unlock(&hci_dev_list_lock);
1737
1738 hci_dev_do_close(hdev);
1739
1740 for (i = 0; i < NUM_REASSEMBLY; i++)
1741 kfree_skb(hdev->reassembly[i]);
1742
1743 if (!test_bit(HCI_INIT, &hdev->flags) &&
1744 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1745 hci_dev_lock(hdev);
1746 mgmt_index_removed(hdev);
1747 hci_dev_unlock(hdev);
1748 }
1749
1750 /* mgmt_index_removed should take care of emptying the
1751 * pending list */
1752 BUG_ON(!list_empty(&hdev->mgmt_pending));
1753
1754 hci_notify(hdev, HCI_DEV_UNREG);
1755
1756 if (hdev->rfkill) {
1757 rfkill_unregister(hdev->rfkill);
1758 rfkill_destroy(hdev->rfkill);
1759 }
1760
1761 hci_del_sysfs(hdev);
1762
1763 cancel_delayed_work_sync(&hdev->adv_work);
1764
1765 destroy_workqueue(hdev->workqueue);
1766
1767 hci_dev_lock(hdev);
1768 hci_blacklist_clear(hdev);
1769 hci_uuids_clear(hdev);
1770 hci_link_keys_clear(hdev);
1771 hci_smp_ltks_clear(hdev);
1772 hci_remote_oob_data_clear(hdev);
1773 hci_adv_entries_clear(hdev);
1774 hci_dev_unlock(hdev);
1775
1776 hci_dev_put(hdev);
1777 }
1778 EXPORT_SYMBOL(hci_unregister_dev);
1779
1780 /* Suspend HCI device */
1781 int hci_suspend_dev(struct hci_dev *hdev)
1782 {
1783 hci_notify(hdev, HCI_DEV_SUSPEND);
1784 return 0;
1785 }
1786 EXPORT_SYMBOL(hci_suspend_dev);
1787
1788 /* Resume HCI device */
1789 int hci_resume_dev(struct hci_dev *hdev)
1790 {
1791 hci_notify(hdev, HCI_DEV_RESUME);
1792 return 0;
1793 }
1794 EXPORT_SYMBOL(hci_resume_dev);
1795
1796 /* Receive frame from HCI drivers */
1797 int hci_recv_frame(struct sk_buff *skb)
1798 {
1799 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1800 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1801 && !test_bit(HCI_INIT, &hdev->flags))) {
1802 kfree_skb(skb);
1803 return -ENXIO;
1804 }
1805
1806 /* Incoming skb */
1807 bt_cb(skb)->incoming = 1;
1808
1809 /* Time stamp */
1810 __net_timestamp(skb);
1811
1812 skb_queue_tail(&hdev->rx_q, skb);
1813 queue_work(hdev->workqueue, &hdev->rx_work);
1814
1815 return 0;
1816 }
1817 EXPORT_SYMBOL(hci_recv_frame);
1818
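/* Reassemble one HCI packet of the given type from a driver-provided byte
 * buffer. On the first fragment an skb sized for the packet type is
 * allocated; data is then copied in until the expected length is reached,
 * at which point the complete frame is handed to hci_recv_frame(). The
 * return value is the number of input bytes left unconsumed (or a negative
 * error). */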
1819 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1820 int count, __u8 index)
1821 {
1822 int len = 0;
1823 int hlen = 0;
1824 int remain = count;
1825 struct sk_buff *skb;
1826 struct bt_skb_cb *scb;
1827
1828 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1829 index >= NUM_REASSEMBLY)
1830 return -EILSEQ;
1831
1832 skb = hdev->reassembly[index];
1833
1834 if (!skb) {
1835 switch (type) {
1836 case HCI_ACLDATA_PKT:
1837 len = HCI_MAX_FRAME_SIZE;
1838 hlen = HCI_ACL_HDR_SIZE;
1839 break;
1840 case HCI_EVENT_PKT:
1841 len = HCI_MAX_EVENT_SIZE;
1842 hlen = HCI_EVENT_HDR_SIZE;
1843 break;
1844 case HCI_SCODATA_PKT:
1845 len = HCI_MAX_SCO_SIZE;
1846 hlen = HCI_SCO_HDR_SIZE;
1847 break;
1848 }
1849
1850 skb = bt_skb_alloc(len, GFP_ATOMIC);
1851 if (!skb)
1852 return -ENOMEM;
1853
1854 scb = (void *) skb->cb;
1855 scb->expect = hlen;
1856 scb->pkt_type = type;
1857
1858 skb->dev = (void *) hdev;
1859 hdev->reassembly[index] = skb;
1860 }
1861
1862 while (count) {
1863 scb = (void *) skb->cb;
1864 len = min(scb->expect, (__u16)count);
1865
1866 memcpy(skb_put(skb, len), data, len);
1867
1868 count -= len;
1869 data += len;
1870 scb->expect -= len;
1871 remain = count;
1872
1873 switch (type) {
1874 case HCI_EVENT_PKT:
1875 if (skb->len == HCI_EVENT_HDR_SIZE) {
1876 struct hci_event_hdr *h = hci_event_hdr(skb);
1877 scb->expect = h->plen;
1878
1879 if (skb_tailroom(skb) < scb->expect) {
1880 kfree_skb(skb);
1881 hdev->reassembly[index] = NULL;
1882 return -ENOMEM;
1883 }
1884 }
1885 break;
1886
1887 case HCI_ACLDATA_PKT:
1888 if (skb->len == HCI_ACL_HDR_SIZE) {
1889 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1890 scb->expect = __le16_to_cpu(h->dlen);
1891
1892 if (skb_tailroom(skb) < scb->expect) {
1893 kfree_skb(skb);
1894 hdev->reassembly[index] = NULL;
1895 return -ENOMEM;
1896 }
1897 }
1898 break;
1899
1900 case HCI_SCODATA_PKT:
1901 if (skb->len == HCI_SCO_HDR_SIZE) {
1902 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1903 scb->expect = h->dlen;
1904
1905 if (skb_tailroom(skb) < scb->expect) {
1906 kfree_skb(skb);
1907 hdev->reassembly[index] = NULL;
1908 return -ENOMEM;
1909 }
1910 }
1911 break;
1912 }
1913
1914 if (scb->expect == 0) {
1915 /* Complete frame */
1916
1917 bt_cb(skb)->pkt_type = type;
1918 hci_recv_frame(skb);
1919
1920 hdev->reassembly[index] = NULL;
1921 return remain;
1922 }
1923 }
1924
1925 return remain;
1926 }
1927
1928 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1929 {
1930 int rem = 0;
1931
1932 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1933 return -EILSEQ;
1934
1935 while (count) {
1936 rem = hci_reassembly(hdev, type, data, count, type - 1);
1937 if (rem < 0)
1938 return rem;
1939
1940 data += (count - rem);
1941 count = rem;
1942 }
1943
1944 return rem;
1945 }
1946 EXPORT_SYMBOL(hci_recv_fragment);
1947
1948 #define STREAM_REASSEMBLY 0
1949
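/* Reassemble packets from a raw byte stream in which every frame starts
 * with a one-byte packet type indicator (e.g. UART-style transports); the
 * type byte selects the reassembly rules for the rest of the frame. */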
1950 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1951 {
1952 int type;
1953 int rem = 0;
1954
1955 while (count) {
1956 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1957
1958 if (!skb) {
1959 struct { char type; } *pkt;
1960
1961 /* Start of the frame */
1962 pkt = data;
1963 type = pkt->type;
1964
1965 data++;
1966 count--;
1967 } else
1968 type = bt_cb(skb)->pkt_type;
1969
1970 rem = hci_reassembly(hdev, type, data, count,
1971 STREAM_REASSEMBLY);
1972 if (rem < 0)
1973 return rem;
1974
1975 data += (count - rem);
1976 count = rem;
1977 }
1978
1979 return rem;
1980 }
1981 EXPORT_SYMBOL(hci_recv_stream_fragment);
1982
1983 /* ---- Interface to upper protocols ---- */
1984
1985 int hci_register_cb(struct hci_cb *cb)
1986 {
1987 BT_DBG("%p name %s", cb, cb->name);
1988
1989 write_lock(&hci_cb_list_lock);
1990 list_add(&cb->list, &hci_cb_list);
1991 write_unlock(&hci_cb_list_lock);
1992
1993 return 0;
1994 }
1995 EXPORT_SYMBOL(hci_register_cb);
1996
1997 int hci_unregister_cb(struct hci_cb *cb)
1998 {
1999 BT_DBG("%p name %s", cb, cb->name);
2000
2001 write_lock(&hci_cb_list_lock);
2002 list_del(&cb->list);
2003 write_unlock(&hci_cb_list_lock);
2004
2005 return 0;
2006 }
2007 EXPORT_SYMBOL(hci_unregister_cb);
2008
2009 static int hci_send_frame(struct sk_buff *skb)
2010 {
2011 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2012
2013 if (!hdev) {
2014 kfree_skb(skb);
2015 return -ENODEV;
2016 }
2017
2018 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2019
2020 if (atomic_read(&hdev->promisc)) {
2021 /* Time stamp */
2022 __net_timestamp(skb);
2023
2024 hci_send_to_sock(hdev, skb, NULL);
2025 }
2026
2027 /* Get rid of skb owner, prior to sending to the driver. */
2028 skb_orphan(skb);
2029
2030 return hdev->send(skb);
2031 }
2032
2033 /* Send HCI command */
2034 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2035 {
2036 int len = HCI_COMMAND_HDR_SIZE + plen;
2037 struct hci_command_hdr *hdr;
2038 struct sk_buff *skb;
2039
2040 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2041
2042 skb = bt_skb_alloc(len, GFP_ATOMIC);
2043 if (!skb) {
2044 BT_ERR("%s no memory for command", hdev->name);
2045 return -ENOMEM;
2046 }
2047
2048 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2049 hdr->opcode = cpu_to_le16(opcode);
2050 hdr->plen = plen;
2051
2052 if (plen)
2053 memcpy(skb_put(skb, plen), param, plen);
2054
2055 BT_DBG("skb len %d", skb->len);
2056
2057 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2058 skb->dev = (void *) hdev;
2059
2060 if (test_bit(HCI_INIT, &hdev->flags))
2061 hdev->init_last_cmd = opcode;
2062
2063 skb_queue_tail(&hdev->cmd_q, skb);
2064 queue_work(hdev->workqueue, &hdev->cmd_work);
2065
2066 return 0;
2067 }
2068
2069 /* Get data from the previously sent command */
2070 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2071 {
2072 struct hci_command_hdr *hdr;
2073
2074 if (!hdev->sent_cmd)
2075 return NULL;
2076
2077 hdr = (void *) hdev->sent_cmd->data;
2078
2079 if (hdr->opcode != cpu_to_le16(opcode))
2080 return NULL;
2081
2082 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2083
2084 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2085 }
2086
2087 /* Send ACL data */
2088 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2089 {
2090 struct hci_acl_hdr *hdr;
2091 int len = skb->len;
2092
2093 skb_push(skb, HCI_ACL_HDR_SIZE);
2094 skb_reset_transport_header(skb);
2095 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2096 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2097 hdr->dlen = cpu_to_le16(len);
2098 }
2099
2100 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2101 struct sk_buff *skb, __u16 flags)
2102 {
2103 struct hci_dev *hdev = conn->hdev;
2104 struct sk_buff *list;
2105
2106 list = skb_shinfo(skb)->frag_list;
2107 if (!list) {
2108 /* Non fragmented */
2109 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2110
2111 skb_queue_tail(queue, skb);
2112 } else {
2113 /* Fragmented */
2114 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2115
2116 skb_shinfo(skb)->frag_list = NULL;
2117
2118 /* Queue all fragments atomically */
2119 spin_lock(&queue->lock);
2120
2121 __skb_queue_tail(queue, skb);
2122
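/* The first fragment keeps the caller's boundary flags; all following
 * fragments are marked as continuations. */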
2123 flags &= ~ACL_START;
2124 flags |= ACL_CONT;
2125 do {
2126 skb = list; list = list->next;
2127
2128 skb->dev = (void *) hdev;
2129 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2130 hci_add_acl_hdr(skb, conn->handle, flags);
2131
2132 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2133
2134 __skb_queue_tail(queue, skb);
2135 } while (list);
2136
2137 spin_unlock(&queue->lock);
2138 }
2139 }
2140
2141 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2142 {
2143 struct hci_conn *conn = chan->conn;
2144 struct hci_dev *hdev = conn->hdev;
2145
2146 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2147
2148 skb->dev = (void *) hdev;
2149 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2150 hci_add_acl_hdr(skb, conn->handle, flags);
2151
2152 hci_queue_acl(conn, &chan->data_q, skb, flags);
2153
2154 queue_work(hdev->workqueue, &hdev->tx_work);
2155 }
2156 EXPORT_SYMBOL(hci_send_acl);
2157
2158 /* Send SCO data */
2159 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2160 {
2161 struct hci_dev *hdev = conn->hdev;
2162 struct hci_sco_hdr hdr;
2163
2164 BT_DBG("%s len %d", hdev->name, skb->len);
2165
2166 hdr.handle = cpu_to_le16(conn->handle);
2167 hdr.dlen = skb->len;
2168
2169 skb_push(skb, HCI_SCO_HDR_SIZE);
2170 skb_reset_transport_header(skb);
2171 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2172
2173 skb->dev = (void *) hdev;
2174 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2175
2176 skb_queue_tail(&conn->data_q, skb);
2177 queue_work(hdev->workqueue, &hdev->tx_work);
2178 }
2179 EXPORT_SYMBOL(hci_send_sco);
2180
2181 /* ---- HCI TX task (outgoing data) ---- */
2182
2183 /* HCI Connection scheduler */
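/* Pick the connection of the given type that has queued data and the fewest
 * packets already in flight, and give it an equal share (at least one) of
 * the free controller buffer slots. */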
2184 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2185 {
2186 struct hci_conn_hash *h = &hdev->conn_hash;
2187 struct hci_conn *conn = NULL, *c;
2188 int num = 0, min = ~0;
2189
2190 /* We don't have to lock device here. Connections are always
2191 * added and removed with TX task disabled. */
2192
2193 rcu_read_lock();
2194
2195 list_for_each_entry_rcu(c, &h->list, list) {
2196 if (c->type != type || skb_queue_empty(&c->data_q))
2197 continue;
2198
2199 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2200 continue;
2201
2202 num++;
2203
2204 if (c->sent < min) {
2205 min = c->sent;
2206 conn = c;
2207 }
2208
2209 if (hci_conn_num(hdev, type) == num)
2210 break;
2211 }
2212
2213 rcu_read_unlock();
2214
2215 if (conn) {
2216 int cnt, q;
2217
2218 switch (conn->type) {
2219 case ACL_LINK:
2220 cnt = hdev->acl_cnt;
2221 break;
2222 case SCO_LINK:
2223 case ESCO_LINK:
2224 cnt = hdev->sco_cnt;
2225 break;
2226 case LE_LINK:
2227 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2228 break;
2229 default:
2230 cnt = 0;
2231 BT_ERR("Unknown link type");
2232 }
2233
2234 q = cnt / num;
2235 *quote = q ? q : 1;
2236 } else
2237 *quote = 0;
2238
2239 BT_DBG("conn %p quote %d", conn, *quote);
2240 return conn;
2241 }
2242
2243 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2244 {
2245 struct hci_conn_hash *h = &hdev->conn_hash;
2246 struct hci_conn *c;
2247
2248 BT_ERR("%s link tx timeout", hdev->name);
2249
2250 rcu_read_lock();
2251
2252 /* Kill stalled connections */
2253 list_for_each_entry_rcu(c, &h->list, list) {
2254 if (c->type == type && c->sent) {
2255 BT_ERR("%s killing stalled connection %s",
2256 hdev->name, batostr(&c->dst));
2257 hci_acl_disconn(c, 0x13);
2258 }
2259 }
2260
2261 rcu_read_unlock();
2262 }
2263
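/* Like hci_low_sent(), but selects an HCI channel: only channels whose head
 * skb carries the current highest priority compete, and among those the one
 * on the least busy connection wins the quota. */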
2264 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2265 int *quote)
2266 {
2267 struct hci_conn_hash *h = &hdev->conn_hash;
2268 struct hci_chan *chan = NULL;
2269 int num = 0, min = ~0, cur_prio = 0;
2270 struct hci_conn *conn;
2271 int cnt, q, conn_num = 0;
2272
2273 BT_DBG("%s", hdev->name);
2274
2275 rcu_read_lock();
2276
2277 list_for_each_entry_rcu(conn, &h->list, list) {
2278 struct hci_chan *tmp;
2279
2280 if (conn->type != type)
2281 continue;
2282
2283 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2284 continue;
2285
2286 conn_num++;
2287
2288 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2289 struct sk_buff *skb;
2290
2291 if (skb_queue_empty(&tmp->data_q))
2292 continue;
2293
2294 skb = skb_peek(&tmp->data_q);
2295 if (skb->priority < cur_prio)
2296 continue;
2297
2298 if (skb->priority > cur_prio) {
2299 num = 0;
2300 min = ~0;
2301 cur_prio = skb->priority;
2302 }
2303
2304 num++;
2305
2306 if (conn->sent < min) {
2307 min = conn->sent;
2308 chan = tmp;
2309 }
2310 }
2311
2312 if (hci_conn_num(hdev, type) == conn_num)
2313 break;
2314 }
2315
2316 rcu_read_unlock();
2317
2318 if (!chan)
2319 return NULL;
2320
2321 switch (chan->conn->type) {
2322 case ACL_LINK:
2323 cnt = hdev->acl_cnt;
2324 break;
2325 case SCO_LINK:
2326 case ESCO_LINK:
2327 cnt = hdev->sco_cnt;
2328 break;
2329 case LE_LINK:
2330 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2331 break;
2332 default:
2333 cnt = 0;
2334 BT_ERR("Unknown link type");
2335 }
2336
2337 q = cnt / num;
2338 *quote = q ? q : 1;
2339 BT_DBG("chan %p quote %d", chan, *quote);
2340 return chan;
2341 }
2342
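/* Priority recalculation: channels that were not serviced in the last round
 * get the head of their queue promoted to HCI_PRIO_MAX - 1 so they cannot
 * be starved indefinitely by higher priority traffic. */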
2343 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2344 {
2345 struct hci_conn_hash *h = &hdev->conn_hash;
2346 struct hci_conn *conn;
2347 int num = 0;
2348
2349 BT_DBG("%s", hdev->name);
2350
2351 rcu_read_lock();
2352
2353 list_for_each_entry_rcu(conn, &h->list, list) {
2354 struct hci_chan *chan;
2355
2356 if (conn->type != type)
2357 continue;
2358
2359 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2360 continue;
2361
2362 num++;
2363
2364 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2365 struct sk_buff *skb;
2366
2367 if (chan->sent) {
2368 chan->sent = 0;
2369 continue;
2370 }
2371
2372 if (skb_queue_empty(&chan->data_q))
2373 continue;
2374
2375 skb = skb_peek(&chan->data_q);
2376 if (skb->priority >= HCI_PRIO_MAX - 1)
2377 continue;
2378
2379 skb->priority = HCI_PRIO_MAX - 1;
2380
2381 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2382 skb->priority);
2383 }
2384
2385 if (hci_conn_num(hdev, type) == num)
2386 break;
2387 }
2388
2389 rcu_read_unlock();
2390
2391 }
2392
2393 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2394 {
2395 /* Calculate count of blocks used by this packet */
2396 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2397 }
2398
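/* Trigger ACL link TX timeout handling when the controller has reported no
 * free buffers for longer than HCI_ACL_TX_TIMEOUT (skipped for raw devices). */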
2399 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2400 {
2401 if (!test_bit(HCI_RAW, &hdev->flags)) {
2402 /* ACL tx timeout must be longer than maximum
2403 * link supervision timeout (40.9 seconds) */
2404 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2405 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2406 hci_link_tx_to(hdev, ACL_LINK);
2407 }
2408 }
2409
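/* ACL scheduling, packet-based flow control: drain channel queues while ACL
 * buffers are available, stopping a channel early if the priority of its
 * queued packets drops. */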
2410 static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2411 {
2412 unsigned int cnt = hdev->acl_cnt;
2413 struct hci_chan *chan;
2414 struct sk_buff *skb;
2415 int quote;
2416
2417 __check_timeout(hdev, cnt);
2418
2419 while (hdev->acl_cnt &&
2420 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2421 u32 priority = (skb_peek(&chan->data_q))->priority;
2422 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2423 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2424 skb->len, skb->priority);
2425
2426 /* Stop if priority has changed */
2427 if (skb->priority < priority)
2428 break;
2429
2430 skb = skb_dequeue(&chan->data_q);
2431
2432 hci_conn_enter_active_mode(chan->conn,
2433 bt_cb(skb)->force_active);
2434
2435 hci_send_frame(skb);
2436 hdev->acl_last_tx = jiffies;
2437
2438 hdev->acl_cnt--;
2439 chan->sent++;
2440 chan->conn->sent++;
2441 }
2442 }
2443
2444 if (cnt != hdev->acl_cnt)
2445 hci_prio_recalculate(hdev, ACL_LINK);
2446 }
2447
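/* ACL scheduling, block-based flow control: like hci_sched_acl_pkt(), but
 * packets are accounted in controller data blocks instead of whole buffers. */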
2448 static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2449 {
2450 unsigned int cnt = hdev->block_cnt;
2451 struct hci_chan *chan;
2452 struct sk_buff *skb;
2453 int quote;
2454
2455 __check_timeout(hdev, cnt);
2456
2457 while (hdev->block_cnt > 0 &&
2458 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2459 u32 priority = (skb_peek(&chan->data_q))->priority;
2460 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2461 int blocks;
2462
2463 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2464 skb->len, skb->priority);
2465
2466 /* Stop if priority has changed */
2467 if (skb->priority < priority)
2468 break;
2469
2470 skb = skb_dequeue(&chan->data_q);
2471
2472 blocks = __get_blocks(hdev, skb);
2473 if (blocks > hdev->block_cnt)
2474 return;
2475
2476 hci_conn_enter_active_mode(chan->conn,
2477 bt_cb(skb)->force_active);
2478
2479 hci_send_frame(skb);
2480 hdev->acl_last_tx = jiffies;
2481
2482 hdev->block_cnt -= blocks;
2483 quote -= blocks;
2484
2485 chan->sent += blocks;
2486 chan->conn->sent += blocks;
2487 }
2488 }
2489
2490 if (cnt != hdev->block_cnt)
2491 hci_prio_recalculate(hdev, ACL_LINK);
2492 }
2493
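/* Schedule ACL traffic according to the controller's flow control mode */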
2494 static inline void hci_sched_acl(struct hci_dev *hdev)
2495 {
2496 BT_DBG("%s", hdev->name);
2497
2498 if (!hci_conn_num(hdev, ACL_LINK))
2499 return;
2500
2501 switch (hdev->flow_ctl_mode) {
2502 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2503 hci_sched_acl_pkt(hdev);
2504 break;
2505
2506 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2507 hci_sched_acl_blk(hdev);
2508 break;
2509 }
2510 }
2511
2512 /* Schedule SCO */
2513 static inline void hci_sched_sco(struct hci_dev *hdev)
2514 {
2515 struct hci_conn *conn;
2516 struct sk_buff *skb;
2517 int quote;
2518
2519 BT_DBG("%s", hdev->name);
2520
2521 if (!hci_conn_num(hdev, SCO_LINK))
2522 return;
2523
2524 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2525 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2526 BT_DBG("skb %p len %d", skb, skb->len);
2527 hci_send_frame(skb);
2528
2529 conn->sent++;
2530 if (conn->sent == ~0)
2531 conn->sent = 0;
2532 }
2533 }
2534 }
2535
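/* Schedule eSCO */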
2536 static inline void hci_sched_esco(struct hci_dev *hdev)
2537 {
2538 struct hci_conn *conn;
2539 struct sk_buff *skb;
2540 int quote;
2541
2542 BT_DBG("%s", hdev->name);
2543
2544 if (!hci_conn_num(hdev, ESCO_LINK))
2545 return;
2546
2547 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2548 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2549 BT_DBG("skb %p len %d", skb, skb->len);
2550 hci_send_frame(skb);
2551
2552 conn->sent++;
2553 if (conn->sent == ~0)
2554 conn->sent = 0;
2555 }
2556 }
2557 }
2558
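/* Schedule LE: controllers without a separate LE buffer pool share the ACL
 * buffer count. */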
2559 static inline void hci_sched_le(struct hci_dev *hdev)
2560 {
2561 struct hci_chan *chan;
2562 struct sk_buff *skb;
2563 int quote, cnt, tmp;
2564
2565 BT_DBG("%s", hdev->name);
2566
2567 if (!hci_conn_num(hdev, LE_LINK))
2568 return;
2569
2570 if (!test_bit(HCI_RAW, &hdev->flags)) {
2571 /* LE tx timeout must be longer than maximum
2572 * link supervision timeout (40.9 seconds) */
2573 if (!hdev->le_cnt && hdev->le_pkts &&
2574 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2575 hci_link_tx_to(hdev, LE_LINK);
2576 }
2577
2578 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2579 tmp = cnt;
2580 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2581 u32 priority = (skb_peek(&chan->data_q))->priority;
2582 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2583 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2584 skb->len, skb->priority);
2585
2586 /* Stop if priority has changed */
2587 if (skb->priority < priority)
2588 break;
2589
2590 skb = skb_dequeue(&chan->data_q);
2591
2592 hci_send_frame(skb);
2593 hdev->le_last_tx = jiffies;
2594
2595 cnt--;
2596 chan->sent++;
2597 chan->conn->sent++;
2598 }
2599 }
2600
2601 if (hdev->le_pkts)
2602 hdev->le_cnt = cnt;
2603 else
2604 hdev->acl_cnt = cnt;
2605
2606 if (cnt != tmp)
2607 hci_prio_recalculate(hdev, LE_LINK);
2608 }
2609
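/* TX work: runs on the hdev workqueue and pushes queued frames of every
 * link type, plus raw packets, down to the driver. */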
2610 static void hci_tx_work(struct work_struct *work)
2611 {
2612 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2613 struct sk_buff *skb;
2614
2615 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2616 hdev->sco_cnt, hdev->le_cnt);
2617
2618 	/* Schedule queues and send pending frames to the HCI driver */
2619
2620 hci_sched_acl(hdev);
2621
2622 hci_sched_sco(hdev);
2623
2624 hci_sched_esco(hdev);
2625
2626 hci_sched_le(hdev);
2627
2628 /* Send next queued raw (unknown type) packet */
2629 while ((skb = skb_dequeue(&hdev->raw_q)))
2630 hci_send_frame(skb);
2631 }
2632
2633 /* ----- HCI RX task (incoming data processing) ----- */
2634
2635 /* ACL data packet */
2636 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2637 {
2638 struct hci_acl_hdr *hdr = (void *) skb->data;
2639 struct hci_conn *conn;
2640 __u16 handle, flags;
2641
2642 skb_pull(skb, HCI_ACL_HDR_SIZE);
2643
2644 handle = __le16_to_cpu(hdr->handle);
2645 flags = hci_flags(handle);
2646 handle = hci_handle(handle);
2647
2648 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2649
2650 hdev->stat.acl_rx++;
2651
2652 hci_dev_lock(hdev);
2653 conn = hci_conn_hash_lookup_handle(hdev, handle);
2654 hci_dev_unlock(hdev);
2655
2656 if (conn) {
2657 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2658
2659 /* Send to upper protocol */
2660 l2cap_recv_acldata(conn, skb, flags);
2661 return;
2662 } else {
2663 BT_ERR("%s ACL packet for unknown connection handle %d",
2664 hdev->name, handle);
2665 }
2666
2667 kfree_skb(skb);
2668 }
2669
2670 /* SCO data packet */
2671 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2672 {
2673 struct hci_sco_hdr *hdr = (void *) skb->data;
2674 struct hci_conn *conn;
2675 __u16 handle;
2676
2677 skb_pull(skb, HCI_SCO_HDR_SIZE);
2678
2679 handle = __le16_to_cpu(hdr->handle);
2680
2681 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2682
2683 hdev->stat.sco_rx++;
2684
2685 hci_dev_lock(hdev);
2686 conn = hci_conn_hash_lookup_handle(hdev, handle);
2687 hci_dev_unlock(hdev);
2688
2689 if (conn) {
2690 /* Send to upper protocol */
2691 sco_recv_scodata(conn, skb);
2692 return;
2693 } else {
2694 BT_ERR("%s SCO packet for unknown connection handle %d",
2695 hdev->name, handle);
2696 }
2697
2698 kfree_skb(skb);
2699 }
2700
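/* RX work: dispatch frames queued by the driver to the HCI sockets and to
 * the event, ACL and SCO handlers. */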
2701 static void hci_rx_work(struct work_struct *work)
2702 {
2703 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2704 struct sk_buff *skb;
2705
2706 BT_DBG("%s", hdev->name);
2707
2708 while ((skb = skb_dequeue(&hdev->rx_q))) {
2709 if (atomic_read(&hdev->promisc)) {
2710 /* Send copy to the sockets */
2711 hci_send_to_sock(hdev, skb, NULL);
2712 }
2713
2714 if (test_bit(HCI_RAW, &hdev->flags)) {
2715 kfree_skb(skb);
2716 continue;
2717 }
2718
2719 if (test_bit(HCI_INIT, &hdev->flags)) {
2720 			/* Don't process data packets in this state. */
2721 switch (bt_cb(skb)->pkt_type) {
2722 case HCI_ACLDATA_PKT:
2723 case HCI_SCODATA_PKT:
2724 kfree_skb(skb);
2725 continue;
2726 }
2727 }
2728
2729 /* Process frame */
2730 switch (bt_cb(skb)->pkt_type) {
2731 case HCI_EVENT_PKT:
2732 BT_DBG("%s Event packet", hdev->name);
2733 hci_event_packet(hdev, skb);
2734 break;
2735
2736 case HCI_ACLDATA_PKT:
2737 BT_DBG("%s ACL data packet", hdev->name);
2738 hci_acldata_packet(hdev, skb);
2739 break;
2740
2741 case HCI_SCODATA_PKT:
2742 BT_DBG("%s SCO data packet", hdev->name);
2743 hci_scodata_packet(hdev, skb);
2744 break;
2745
2746 default:
2747 kfree_skb(skb);
2748 break;
2749 }
2750 }
2751 }
2752
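/* Command work: send the next queued HCI command if the controller has
 * command credits left, keeping a clone in sent_cmd so the reply can be
 * matched and managing the command timeout timer. */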
2753 static void hci_cmd_work(struct work_struct *work)
2754 {
2755 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2756 struct sk_buff *skb;
2757
2758 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2759
2760 /* Send queued commands */
2761 if (atomic_read(&hdev->cmd_cnt)) {
2762 skb = skb_dequeue(&hdev->cmd_q);
2763 if (!skb)
2764 return;
2765
2766 kfree_skb(hdev->sent_cmd);
2767
2768 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2769 if (hdev->sent_cmd) {
2770 atomic_dec(&hdev->cmd_cnt);
2771 hci_send_frame(skb);
2772 if (test_bit(HCI_RESET, &hdev->flags))
2773 del_timer(&hdev->cmd_timer);
2774 else
2775 mod_timer(&hdev->cmd_timer,
2776 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2777 } else {
2778 skb_queue_head(&hdev->cmd_q, skb);
2779 queue_work(hdev->workqueue, &hdev->cmd_work);
2780 }
2781 }
2782 }
2783
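/* Start a general inquiry unless one is already in progress; the inquiry
 * cache is flushed first so stale results are not reported. */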
2784 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2785 {
2786 /* General inquiry access code (GIAC) */
2787 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2788 struct hci_cp_inquiry cp;
2789
2790 BT_DBG("%s", hdev->name);
2791
2792 if (test_bit(HCI_INQUIRY, &hdev->flags))
2793 return -EINPROGRESS;
2794
2795 inquiry_cache_flush(hdev);
2796
2797 memset(&cp, 0, sizeof(cp));
2798 memcpy(&cp.lap, lap, sizeof(cp.lap));
2799 cp.length = length;
2800
2801 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2802 }
2803
2804 int hci_cancel_inquiry(struct hci_dev *hdev)
2805 {
2806 BT_DBG("%s", hdev->name);
2807
2808 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2809 return -EPERM;
2810
2811 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2812 }
2813
2814 module_param(enable_hs, bool, 0644);
2815 MODULE_PARM_DESC(enable_hs, "Enable High Speed");