1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30
31 #include <linux/rfkill.h>
32
33 #include <net/bluetooth/bluetooth.h>
34 #include <net/bluetooth/hci_core.h>
35
36 static void hci_rx_work(struct work_struct *work);
37 static void hci_cmd_work(struct work_struct *work);
38 static void hci_tx_work(struct work_struct *work);
39
40 /* HCI device list */
41 LIST_HEAD(hci_dev_list);
42 DEFINE_RWLOCK(hci_dev_list_lock);
43
44 /* HCI callback list */
45 LIST_HEAD(hci_cb_list);
46 DEFINE_RWLOCK(hci_cb_list_lock);
47
48 /* HCI ID Numbering */
49 static DEFINE_IDA(hci_index_ida);
50
51 /* ---- HCI notifications ---- */
52
53 static void hci_notify(struct hci_dev *hdev, int event)
54 {
55 hci_sock_dev_event(hdev, event);
56 }
57
58 /* ---- HCI requests ---- */
59
60 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
61 {
62 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
63
64 /* If this is the init phase, check if the completed command matches the
65  * last init command; if not, handle the CSR reset quirk below or return.
66  */
67 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
68 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
69 u16 opcode = __le16_to_cpu(sent->opcode);
70 struct sk_buff *skb;
71
72 /* Some CSR based controllers generate a spontaneous
73 * reset complete event during init and any pending
74 * command will never be completed. In such a case we
75 * need to resend whatever was the last sent
76 * command.
77 */
78
79 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
80 return;
81
82 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
83 if (skb) {
84 skb_queue_head(&hdev->cmd_q, skb);
85 queue_work(hdev->workqueue, &hdev->cmd_work);
86 }
87
88 return;
89 }
90
91 if (hdev->req_status == HCI_REQ_PEND) {
92 hdev->req_result = result;
93 hdev->req_status = HCI_REQ_DONE;
94 wake_up_interruptible(&hdev->req_wait_q);
95 }
96 }
97
98 static void hci_req_cancel(struct hci_dev *hdev, int err)
99 {
100 BT_DBG("%s err 0x%2.2x", hdev->name, err);
101
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = err;
104 hdev->req_status = HCI_REQ_CANCELED;
105 wake_up_interruptible(&hdev->req_wait_q);
106 }
107 }
108
109 /* Execute request and wait for completion. */
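/* The req callback queues one or more HCI commands; completion is
 * signalled from hci_req_complete() through req_wait_q, otherwise the
 * call ends with -ETIMEDOUT, or -EINTR if a signal is pending.
 */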
110 static int __hci_request(struct hci_dev *hdev,
111 void (*req)(struct hci_dev *hdev, unsigned long opt),
112 unsigned long opt, __u32 timeout)
113 {
114 DECLARE_WAITQUEUE(wait, current);
115 int err = 0;
116
117 BT_DBG("%s start", hdev->name);
118
119 hdev->req_status = HCI_REQ_PEND;
120
121 add_wait_queue(&hdev->req_wait_q, &wait);
122 set_current_state(TASK_INTERRUPTIBLE);
123
124 req(hdev, opt);
125 schedule_timeout(timeout);
126
127 remove_wait_queue(&hdev->req_wait_q, &wait);
128
129 if (signal_pending(current))
130 return -EINTR;
131
132 switch (hdev->req_status) {
133 case HCI_REQ_DONE:
134 err = -bt_to_errno(hdev->req_result);
135 break;
136
137 case HCI_REQ_CANCELED:
138 err = -hdev->req_result;
139 break;
140
141 default:
142 err = -ETIMEDOUT;
143 break;
144 }
145
146 hdev->req_status = hdev->req_result = 0;
147
148 BT_DBG("%s end: err %d", hdev->name, err);
149
150 return err;
151 }
152
153 static int hci_request(struct hci_dev *hdev,
154 void (*req)(struct hci_dev *hdev, unsigned long opt),
155 unsigned long opt, __u32 timeout)
156 {
157 int ret;
158
159 if (!test_bit(HCI_UP, &hdev->flags))
160 return -ENETDOWN;
161
162 /* Serialize all requests */
163 hci_req_lock(hdev);
164 ret = __hci_request(hdev, req, opt, timeout);
165 hci_req_unlock(hdev);
166
167 return ret;
168 }
169
170 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
171 {
172 BT_DBG("%s %ld", hdev->name, opt);
173
174 /* Reset device */
175 set_bit(HCI_RESET, &hdev->flags);
176 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
177 }
178
179 static void bredr_init(struct hci_dev *hdev)
180 {
181 struct hci_cp_delete_stored_link_key cp;
182 __le16 param;
183 __u8 flt_type;
184
185 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
186
187 /* Mandatory initialization */
188
189 /* Read Local Supported Features */
190 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
191
192 /* Read Local Version */
193 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
194
195 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
196 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
197
198 /* Read BD Address */
199 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
200
201 /* Read Class of Device */
202 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
203
204 /* Read Local Name */
205 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
206
207 /* Read Voice Setting */
208 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
209
210 /* Optional initialization */
211
212 /* Clear Event Filters */
213 flt_type = HCI_FLT_CLEAR_ALL;
214 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
215
216 /* Connection accept timeout ~20 secs */
217 param = __constant_cpu_to_le16(0x7d00);
218 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
219
220 bacpy(&cp.bdaddr, BDADDR_ANY);
221 cp.delete_all = 1;
222 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
223 }
224
225 static void amp_init(struct hci_dev *hdev)
226 {
227 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
228
229 /* Read Local Version */
230 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
231
232 /* Read Local AMP Info */
233 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
234 }
235
236 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
237 {
238 struct sk_buff *skb;
239
240 BT_DBG("%s %ld", hdev->name, opt);
241
242 /* Driver initialization */
243
244 /* Special commands */
245 while ((skb = skb_dequeue(&hdev->driver_init))) {
246 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
247 skb->dev = (void *) hdev;
248
249 skb_queue_tail(&hdev->cmd_q, skb);
250 queue_work(hdev->workqueue, &hdev->cmd_work);
251 }
252 skb_queue_purge(&hdev->driver_init);
253
254 /* Reset */
255 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
256 hci_reset_req(hdev, 0);
257
258 switch (hdev->dev_type) {
259 case HCI_BREDR:
260 bredr_init(hdev);
261 break;
262
263 case HCI_AMP:
264 amp_init(hdev);
265 break;
266
267 default:
268 BT_ERR("Unknown device type %d", hdev->dev_type);
269 break;
270 }
271
272 }
273
274 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
275 {
276 BT_DBG("%s", hdev->name);
277
278 /* Read LE buffer size */
279 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
280 }
281
282 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
283 {
284 __u8 scan = opt;
285
286 BT_DBG("%s %x", hdev->name, scan);
287
288 /* Inquiry and Page scans */
289 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
290 }
291
292 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
293 {
294 __u8 auth = opt;
295
296 BT_DBG("%s %x", hdev->name, auth);
297
298 /* Authentication */
299 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
300 }
301
302 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
303 {
304 __u8 encrypt = opt;
305
306 BT_DBG("%s %x", hdev->name, encrypt);
307
308 /* Encryption */
309 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
310 }
311
312 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
313 {
314 __le16 policy = cpu_to_le16(opt);
315
316 BT_DBG("%s %x", hdev->name, policy);
317
318 /* Default link policy */
319 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
320 }
321
322 /* Get HCI device by index.
323 * Device is held on return. */
324 struct hci_dev *hci_dev_get(int index)
325 {
326 struct hci_dev *hdev = NULL, *d;
327
328 BT_DBG("%d", index);
329
330 if (index < 0)
331 return NULL;
332
333 read_lock(&hci_dev_list_lock);
334 list_for_each_entry(d, &hci_dev_list, list) {
335 if (d->id == index) {
336 hdev = hci_dev_hold(d);
337 break;
338 }
339 }
340 read_unlock(&hci_dev_list_lock);
341 return hdev;
342 }
343
344 /* ---- Inquiry support ---- */
345
346 bool hci_discovery_active(struct hci_dev *hdev)
347 {
348 struct discovery_state *discov = &hdev->discovery;
349
350 switch (discov->state) {
351 case DISCOVERY_FINDING:
352 case DISCOVERY_RESOLVING:
353 return true;
354
355 default:
356 return false;
357 }
358 }
359
360 void hci_discovery_set_state(struct hci_dev *hdev, int state)
361 {
362 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
363
364 if (hdev->discovery.state == state)
365 return;
366
367 switch (state) {
368 case DISCOVERY_STOPPED:
369 if (hdev->discovery.state != DISCOVERY_STARTING)
370 mgmt_discovering(hdev, 0);
371 break;
372 case DISCOVERY_STARTING:
373 break;
374 case DISCOVERY_FINDING:
375 mgmt_discovering(hdev, 1);
376 break;
377 case DISCOVERY_RESOLVING:
378 break;
379 case DISCOVERY_STOPPING:
380 break;
381 }
382
383 hdev->discovery.state = state;
384 }
385
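/* Free every entry in the inquiry cache and reset the unknown and
 * resolve sub-lists.
 */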
386 static void inquiry_cache_flush(struct hci_dev *hdev)
387 {
388 struct discovery_state *cache = &hdev->discovery;
389 struct inquiry_entry *p, *n;
390
391 list_for_each_entry_safe(p, n, &cache->all, all) {
392 list_del(&p->all);
393 kfree(p);
394 }
395
396 INIT_LIST_HEAD(&cache->unknown);
397 INIT_LIST_HEAD(&cache->resolve);
398 }
399
400 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
401 bdaddr_t *bdaddr)
402 {
403 struct discovery_state *cache = &hdev->discovery;
404 struct inquiry_entry *e;
405
406 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
407
408 list_for_each_entry(e, &cache->all, all) {
409 if (!bacmp(&e->data.bdaddr, bdaddr))
410 return e;
411 }
412
413 return NULL;
414 }
415
416 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
417 bdaddr_t *bdaddr)
418 {
419 struct discovery_state *cache = &hdev->discovery;
420 struct inquiry_entry *e;
421
422 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
423
424 list_for_each_entry(e, &cache->unknown, list) {
425 if (!bacmp(&e->data.bdaddr, bdaddr))
426 return e;
427 }
428
429 return NULL;
430 }
431
432 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
433 bdaddr_t *bdaddr,
434 int state)
435 {
436 struct discovery_state *cache = &hdev->discovery;
437 struct inquiry_entry *e;
438
439 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
440
441 list_for_each_entry(e, &cache->resolve, list) {
442 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
443 return e;
444 if (!bacmp(&e->data.bdaddr, bdaddr))
445 return e;
446 }
447
448 return NULL;
449 }
450
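/* Re-position the entry in the resolve list so that entries with the
 * strongest RSSI get their names resolved first; entries whose
 * resolution is already pending keep their place at the head.
 */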
451 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
452 struct inquiry_entry *ie)
453 {
454 struct discovery_state *cache = &hdev->discovery;
455 struct list_head *pos = &cache->resolve;
456 struct inquiry_entry *p;
457
458 list_del(&ie->list);
459
460 list_for_each_entry(p, &cache->resolve, list) {
461 if (p->name_state != NAME_PENDING &&
462 abs(p->data.rssi) >= abs(ie->data.rssi))
463 break;
464 pos = &p->list;
465 }
466
467 list_add(&ie->list, pos);
468 }
469
470 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
471 bool name_known, bool *ssp)
472 {
473 struct discovery_state *cache = &hdev->discovery;
474 struct inquiry_entry *ie;
475
476 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
477
478 if (ssp)
479 *ssp = data->ssp_mode;
480
481 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
482 if (ie) {
483 if (ie->data.ssp_mode && ssp)
484 *ssp = true;
485
486 if (ie->name_state == NAME_NEEDED &&
487 data->rssi != ie->data.rssi) {
488 ie->data.rssi = data->rssi;
489 hci_inquiry_cache_update_resolve(hdev, ie);
490 }
491
492 goto update;
493 }
494
495 /* Entry not in the cache. Add new one. */
496 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
497 if (!ie)
498 return false;
499
500 list_add(&ie->all, &cache->all);
501
502 if (name_known) {
503 ie->name_state = NAME_KNOWN;
504 } else {
505 ie->name_state = NAME_NOT_KNOWN;
506 list_add(&ie->list, &cache->unknown);
507 }
508
509 update:
510 if (name_known && ie->name_state != NAME_KNOWN &&
511 ie->name_state != NAME_PENDING) {
512 ie->name_state = NAME_KNOWN;
513 list_del(&ie->list);
514 }
515
516 memcpy(&ie->data, data, sizeof(*data));
517 ie->timestamp = jiffies;
518 cache->timestamp = jiffies;
519
520 if (ie->name_state == NAME_NOT_KNOWN)
521 return false;
522
523 return true;
524 }
525
526 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
527 {
528 struct discovery_state *cache = &hdev->discovery;
529 struct inquiry_info *info = (struct inquiry_info *) buf;
530 struct inquiry_entry *e;
531 int copied = 0;
532
533 list_for_each_entry(e, &cache->all, all) {
534 struct inquiry_data *data = &e->data;
535
536 if (copied >= num)
537 break;
538
539 bacpy(&info->bdaddr, &data->bdaddr);
540 info->pscan_rep_mode = data->pscan_rep_mode;
541 info->pscan_period_mode = data->pscan_period_mode;
542 info->pscan_mode = data->pscan_mode;
543 memcpy(info->dev_class, data->dev_class, 3);
544 info->clock_offset = data->clock_offset;
545
546 info++;
547 copied++;
548 }
549
550 BT_DBG("cache %p, copied %d", cache, copied);
551 return copied;
552 }
553
554 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
555 {
556 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
557 struct hci_cp_inquiry cp;
558
559 BT_DBG("%s", hdev->name);
560
561 if (test_bit(HCI_INQUIRY, &hdev->flags))
562 return;
563
564 /* Start Inquiry */
565 memcpy(&cp.lap, &ir->lap, 3);
566 cp.length = ir->length;
567 cp.num_rsp = ir->num_rsp;
568 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
569 }
570
571 int hci_inquiry(void __user *arg)
572 {
573 __u8 __user *ptr = arg;
574 struct hci_inquiry_req ir;
575 struct hci_dev *hdev;
576 int err = 0, do_inquiry = 0, max_rsp;
577 long timeo;
578 __u8 *buf;
579
580 if (copy_from_user(&ir, ptr, sizeof(ir)))
581 return -EFAULT;
582
583 hdev = hci_dev_get(ir.dev_id);
584 if (!hdev)
585 return -ENODEV;
586
587 hci_dev_lock(hdev);
588 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
589 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
590 inquiry_cache_flush(hdev);
591 do_inquiry = 1;
592 }
593 hci_dev_unlock(hdev);
594
595 timeo = ir.length * msecs_to_jiffies(2000);
596
597 if (do_inquiry) {
598 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
599 if (err < 0)
600 goto done;
601 }
602
603 /* For an unlimited number of responses we will use a buffer with
604  * 255 entries
605  */
606 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
607
608 /* cache_dump can't sleep. Therefore we allocate a temporary buffer and
609  * then copy it to user space.
610  */
611 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
612 if (!buf) {
613 err = -ENOMEM;
614 goto done;
615 }
616
617 hci_dev_lock(hdev);
618 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
619 hci_dev_unlock(hdev);
620
621 BT_DBG("num_rsp %d", ir.num_rsp);
622
623 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
624 ptr += sizeof(ir);
625 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
626 ir.num_rsp))
627 err = -EFAULT;
628 } else
629 err = -EFAULT;
630
631 kfree(buf);
632
633 done:
634 hci_dev_put(hdev);
635 return err;
636 }
637
638 /* ---- HCI ioctl helpers ---- */
639
640 int hci_dev_open(__u16 dev)
641 {
642 struct hci_dev *hdev;
643 int ret = 0;
644
645 hdev = hci_dev_get(dev);
646 if (!hdev)
647 return -ENODEV;
648
649 BT_DBG("%s %p", hdev->name, hdev);
650
651 hci_req_lock(hdev);
652
653 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
654 ret = -ENODEV;
655 goto done;
656 }
657
658 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
659 ret = -ERFKILL;
660 goto done;
661 }
662
663 if (test_bit(HCI_UP, &hdev->flags)) {
664 ret = -EALREADY;
665 goto done;
666 }
667
668 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
669 set_bit(HCI_RAW, &hdev->flags);
670
671 /* Treat all non BR/EDR controllers as raw devices if
672 enable_hs is not set */
673 if (hdev->dev_type != HCI_BREDR && !enable_hs)
674 set_bit(HCI_RAW, &hdev->flags);
675
676 if (hdev->open(hdev)) {
677 ret = -EIO;
678 goto done;
679 }
680
681 if (!test_bit(HCI_RAW, &hdev->flags)) {
682 atomic_set(&hdev->cmd_cnt, 1);
683 set_bit(HCI_INIT, &hdev->flags);
684 hdev->init_last_cmd = 0;
685
686 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
687
688 if (lmp_host_le_capable(hdev))
689 ret = __hci_request(hdev, hci_le_init_req, 0,
690 HCI_INIT_TIMEOUT);
691
692 clear_bit(HCI_INIT, &hdev->flags);
693 }
694
695 if (!ret) {
696 hci_dev_hold(hdev);
697 set_bit(HCI_UP, &hdev->flags);
698 hci_notify(hdev, HCI_DEV_UP);
699 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
700 mgmt_valid_hdev(hdev)) {
701 hci_dev_lock(hdev);
702 mgmt_powered(hdev, 1);
703 hci_dev_unlock(hdev);
704 }
705 } else {
706 /* Init failed, cleanup */
707 flush_work(&hdev->tx_work);
708 flush_work(&hdev->cmd_work);
709 flush_work(&hdev->rx_work);
710
711 skb_queue_purge(&hdev->cmd_q);
712 skb_queue_purge(&hdev->rx_q);
713
714 if (hdev->flush)
715 hdev->flush(hdev);
716
717 if (hdev->sent_cmd) {
718 kfree_skb(hdev->sent_cmd);
719 hdev->sent_cmd = NULL;
720 }
721
722 hdev->close(hdev);
723 hdev->flags = 0;
724 }
725
726 done:
727 hci_req_unlock(hdev);
728 hci_dev_put(hdev);
729 return ret;
730 }
731
732 static int hci_dev_do_close(struct hci_dev *hdev)
733 {
734 BT_DBG("%s %p", hdev->name, hdev);
735
736 cancel_work_sync(&hdev->le_scan);
737
738 cancel_delayed_work(&hdev->power_off);
739
740 hci_req_cancel(hdev, ENODEV);
741 hci_req_lock(hdev);
742
743 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
744 del_timer_sync(&hdev->cmd_timer);
745 hci_req_unlock(hdev);
746 return 0;
747 }
748
749 /* Flush RX and TX works */
750 flush_work(&hdev->tx_work);
751 flush_work(&hdev->rx_work);
752
753 if (hdev->discov_timeout > 0) {
754 cancel_delayed_work(&hdev->discov_off);
755 hdev->discov_timeout = 0;
756 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
757 }
758
759 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
760 cancel_delayed_work(&hdev->service_cache);
761
762 cancel_delayed_work_sync(&hdev->le_scan_disable);
763
764 hci_dev_lock(hdev);
765 inquiry_cache_flush(hdev);
766 hci_conn_hash_flush(hdev);
767 hci_dev_unlock(hdev);
768
769 hci_notify(hdev, HCI_DEV_DOWN);
770
771 if (hdev->flush)
772 hdev->flush(hdev);
773
774 /* Reset device */
775 skb_queue_purge(&hdev->cmd_q);
776 atomic_set(&hdev->cmd_cnt, 1);
777 if (!test_bit(HCI_RAW, &hdev->flags) &&
778 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
779 set_bit(HCI_INIT, &hdev->flags);
780 __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
781 clear_bit(HCI_INIT, &hdev->flags);
782 }
783
784 /* flush cmd work */
785 flush_work(&hdev->cmd_work);
786
787 /* Drop queues */
788 skb_queue_purge(&hdev->rx_q);
789 skb_queue_purge(&hdev->cmd_q);
790 skb_queue_purge(&hdev->raw_q);
791
792 /* Drop last sent command */
793 if (hdev->sent_cmd) {
794 del_timer_sync(&hdev->cmd_timer);
795 kfree_skb(hdev->sent_cmd);
796 hdev->sent_cmd = NULL;
797 }
798
799 /* After this point our queues are empty
800 * and no tasks are scheduled. */
801 hdev->close(hdev);
802
803 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
804 mgmt_valid_hdev(hdev)) {
805 hci_dev_lock(hdev);
806 mgmt_powered(hdev, 0);
807 hci_dev_unlock(hdev);
808 }
809
810 /* Clear flags */
811 hdev->flags = 0;
812
813 memset(hdev->eir, 0, sizeof(hdev->eir));
814 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
815
816 hci_req_unlock(hdev);
817
818 hci_dev_put(hdev);
819 return 0;
820 }
821
822 int hci_dev_close(__u16 dev)
823 {
824 struct hci_dev *hdev;
825 int err;
826
827 hdev = hci_dev_get(dev);
828 if (!hdev)
829 return -ENODEV;
830
831 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
832 cancel_delayed_work(&hdev->power_off);
833
834 err = hci_dev_do_close(hdev);
835
836 hci_dev_put(hdev);
837 return err;
838 }
839
840 int hci_dev_reset(__u16 dev)
841 {
842 struct hci_dev *hdev;
843 int ret = 0;
844
845 hdev = hci_dev_get(dev);
846 if (!hdev)
847 return -ENODEV;
848
849 hci_req_lock(hdev);
850
851 if (!test_bit(HCI_UP, &hdev->flags))
852 goto done;
853
854 /* Drop queues */
855 skb_queue_purge(&hdev->rx_q);
856 skb_queue_purge(&hdev->cmd_q);
857
858 hci_dev_lock(hdev);
859 inquiry_cache_flush(hdev);
860 hci_conn_hash_flush(hdev);
861 hci_dev_unlock(hdev);
862
863 if (hdev->flush)
864 hdev->flush(hdev);
865
866 atomic_set(&hdev->cmd_cnt, 1);
867 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
868
869 if (!test_bit(HCI_RAW, &hdev->flags))
870 ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
871
872 done:
873 hci_req_unlock(hdev);
874 hci_dev_put(hdev);
875 return ret;
876 }
877
878 int hci_dev_reset_stat(__u16 dev)
879 {
880 struct hci_dev *hdev;
881 int ret = 0;
882
883 hdev = hci_dev_get(dev);
884 if (!hdev)
885 return -ENODEV;
886
887 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
888
889 hci_dev_put(hdev);
890
891 return ret;
892 }
893
894 int hci_dev_cmd(unsigned int cmd, void __user *arg)
895 {
896 struct hci_dev *hdev;
897 struct hci_dev_req dr;
898 int err = 0;
899
900 if (copy_from_user(&dr, arg, sizeof(dr)))
901 return -EFAULT;
902
903 hdev = hci_dev_get(dr.dev_id);
904 if (!hdev)
905 return -ENODEV;
906
907 switch (cmd) {
908 case HCISETAUTH:
909 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
910 HCI_INIT_TIMEOUT);
911 break;
912
913 case HCISETENCRYPT:
914 if (!lmp_encrypt_capable(hdev)) {
915 err = -EOPNOTSUPP;
916 break;
917 }
918
919 if (!test_bit(HCI_AUTH, &hdev->flags)) {
920 /* Auth must be enabled first */
921 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
922 HCI_INIT_TIMEOUT);
923 if (err)
924 break;
925 }
926
927 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
928 HCI_INIT_TIMEOUT);
929 break;
930
931 case HCISETSCAN:
932 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
933 HCI_INIT_TIMEOUT);
934 break;
935
936 case HCISETLINKPOL:
937 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
938 HCI_INIT_TIMEOUT);
939 break;
940
941 case HCISETLINKMODE:
942 hdev->link_mode = ((__u16) dr.dev_opt) &
943 (HCI_LM_MASTER | HCI_LM_ACCEPT);
944 break;
945
946 case HCISETPTYPE:
947 hdev->pkt_type = (__u16) dr.dev_opt;
948 break;
949
950 case HCISETACLMTU:
951 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
952 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
953 break;
954
955 case HCISETSCOMTU:
956 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
957 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
958 break;
959
960 default:
961 err = -EINVAL;
962 break;
963 }
964
965 hci_dev_put(hdev);
966 return err;
967 }
968
969 int hci_get_dev_list(void __user *arg)
970 {
971 struct hci_dev *hdev;
972 struct hci_dev_list_req *dl;
973 struct hci_dev_req *dr;
974 int n = 0, size, err;
975 __u16 dev_num;
976
977 if (get_user(dev_num, (__u16 __user *) arg))
978 return -EFAULT;
979
980 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
981 return -EINVAL;
982
983 size = sizeof(*dl) + dev_num * sizeof(*dr);
984
985 dl = kzalloc(size, GFP_KERNEL);
986 if (!dl)
987 return -ENOMEM;
988
989 dr = dl->dev_req;
990
991 read_lock(&hci_dev_list_lock);
992 list_for_each_entry(hdev, &hci_dev_list, list) {
993 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
994 cancel_delayed_work(&hdev->power_off);
995
996 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
997 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
998
999 (dr + n)->dev_id = hdev->id;
1000 (dr + n)->dev_opt = hdev->flags;
1001
1002 if (++n >= dev_num)
1003 break;
1004 }
1005 read_unlock(&hci_dev_list_lock);
1006
1007 dl->dev_num = n;
1008 size = sizeof(*dl) + n * sizeof(*dr);
1009
1010 err = copy_to_user(arg, dl, size);
1011 kfree(dl);
1012
1013 return err ? -EFAULT : 0;
1014 }
1015
1016 int hci_get_dev_info(void __user *arg)
1017 {
1018 struct hci_dev *hdev;
1019 struct hci_dev_info di;
1020 int err = 0;
1021
1022 if (copy_from_user(&di, arg, sizeof(di)))
1023 return -EFAULT;
1024
1025 hdev = hci_dev_get(di.dev_id);
1026 if (!hdev)
1027 return -ENODEV;
1028
1029 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1030 cancel_delayed_work_sync(&hdev->power_off);
1031
1032 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1033 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1034
1035 strcpy(di.name, hdev->name);
1036 di.bdaddr = hdev->bdaddr;
1037 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1038 di.flags = hdev->flags;
1039 di.pkt_type = hdev->pkt_type;
1040 di.acl_mtu = hdev->acl_mtu;
1041 di.acl_pkts = hdev->acl_pkts;
1042 di.sco_mtu = hdev->sco_mtu;
1043 di.sco_pkts = hdev->sco_pkts;
1044 di.link_policy = hdev->link_policy;
1045 di.link_mode = hdev->link_mode;
1046
1047 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1048 memcpy(&di.features, &hdev->features, sizeof(di.features));
1049
1050 if (copy_to_user(arg, &di, sizeof(di)))
1051 err = -EFAULT;
1052
1053 hci_dev_put(hdev);
1054
1055 return err;
1056 }
1057
1058 /* ---- Interface to HCI drivers ---- */
1059
1060 static int hci_rfkill_set_block(void *data, bool blocked)
1061 {
1062 struct hci_dev *hdev = data;
1063
1064 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1065
1066 if (!blocked)
1067 return 0;
1068
1069 hci_dev_do_close(hdev);
1070
1071 return 0;
1072 }
1073
1074 static const struct rfkill_ops hci_rfkill_ops = {
1075 .set_block = hci_rfkill_set_block,
1076 };
1077
1078 static void hci_power_on(struct work_struct *work)
1079 {
1080 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1081
1082 BT_DBG("%s", hdev->name);
1083
1084 if (hci_dev_open(hdev->id) < 0)
1085 return;
1086
1087 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1088 schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);
1089
1090 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1091 mgmt_index_added(hdev);
1092 }
1093
1094 static void hci_power_off(struct work_struct *work)
1095 {
1096 struct hci_dev *hdev = container_of(work, struct hci_dev,
1097 power_off.work);
1098
1099 BT_DBG("%s", hdev->name);
1100
1101 hci_dev_do_close(hdev);
1102 }
1103
1104 static void hci_discov_off(struct work_struct *work)
1105 {
1106 struct hci_dev *hdev;
1107 u8 scan = SCAN_PAGE;
1108
1109 hdev = container_of(work, struct hci_dev, discov_off.work);
1110
1111 BT_DBG("%s", hdev->name);
1112
1113 hci_dev_lock(hdev);
1114
1115 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1116
1117 hdev->discov_timeout = 0;
1118
1119 hci_dev_unlock(hdev);
1120 }
1121
1122 int hci_uuids_clear(struct hci_dev *hdev)
1123 {
1124 struct list_head *p, *n;
1125
1126 list_for_each_safe(p, n, &hdev->uuids) {
1127 struct bt_uuid *uuid;
1128
1129 uuid = list_entry(p, struct bt_uuid, list);
1130
1131 list_del(p);
1132 kfree(uuid);
1133 }
1134
1135 return 0;
1136 }
1137
1138 int hci_link_keys_clear(struct hci_dev *hdev)
1139 {
1140 struct list_head *p, *n;
1141
1142 list_for_each_safe(p, n, &hdev->link_keys) {
1143 struct link_key *key;
1144
1145 key = list_entry(p, struct link_key, list);
1146
1147 list_del(p);
1148 kfree(key);
1149 }
1150
1151 return 0;
1152 }
1153
1154 int hci_smp_ltks_clear(struct hci_dev *hdev)
1155 {
1156 struct smp_ltk *k, *tmp;
1157
1158 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1159 list_del(&k->list);
1160 kfree(k);
1161 }
1162
1163 return 0;
1164 }
1165
1166 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1167 {
1168 struct link_key *k;
1169
1170 list_for_each_entry(k, &hdev->link_keys, list)
1171 if (bacmp(bdaddr, &k->bdaddr) == 0)
1172 return k;
1173
1174 return NULL;
1175 }
1176
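/* Decide whether a new link key should be stored persistently, based on
 * the key type and on the bonding requirements (auth_type) of the local
 * and remote side. Legacy keys are always stored, debug keys never.
 */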
1177 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1178 u8 key_type, u8 old_key_type)
1179 {
1180 /* Legacy key */
1181 if (key_type < 0x03)
1182 return true;
1183
1184 /* Debug keys are insecure so don't store them persistently */
1185 if (key_type == HCI_LK_DEBUG_COMBINATION)
1186 return false;
1187
1188 /* Changed combination key and there's no previous one */
1189 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1190 return false;
1191
1192 /* Security mode 3 case */
1193 if (!conn)
1194 return true;
1195
1196 /* Neither local nor remote side had no-bonding as requirement */
1197 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1198 return true;
1199
1200 /* Local side had dedicated bonding as requirement */
1201 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1202 return true;
1203
1204 /* Remote side had dedicated bonding as requirement */
1205 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1206 return true;
1207
1208 /* If none of the above criteria match, then don't store the key
1209 * persistently */
1210 return false;
1211 }
1212
1213 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1214 {
1215 struct smp_ltk *k;
1216
1217 list_for_each_entry(k, &hdev->long_term_keys, list) {
1218 if (k->ediv != ediv ||
1219 memcmp(rand, k->rand, sizeof(k->rand)))
1220 continue;
1221
1222 return k;
1223 }
1224
1225 return NULL;
1226 }
1227
1228 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1229 u8 addr_type)
1230 {
1231 struct smp_ltk *k;
1232
1233 list_for_each_entry(k, &hdev->long_term_keys, list)
1234 if (addr_type == k->bdaddr_type &&
1235 bacmp(bdaddr, &k->bdaddr) == 0)
1236 return k;
1237
1238 return NULL;
1239 }
1240
1241 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1242 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1243 {
1244 struct link_key *key, *old_key;
1245 u8 old_key_type;
1246 bool persistent;
1247
1248 old_key = hci_find_link_key(hdev, bdaddr);
1249 if (old_key) {
1250 old_key_type = old_key->type;
1251 key = old_key;
1252 } else {
1253 old_key_type = conn ? conn->key_type : 0xff;
1254 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1255 if (!key)
1256 return -ENOMEM;
1257 list_add(&key->list, &hdev->link_keys);
1258 }
1259
1260 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1261
1262 /* Some buggy controller combinations generate a changed
1263 * combination key for legacy pairing even when there's no
1264 * previous key */
1265 if (type == HCI_LK_CHANGED_COMBINATION &&
1266 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
1267 type = HCI_LK_COMBINATION;
1268 if (conn)
1269 conn->key_type = type;
1270 }
1271
1272 bacpy(&key->bdaddr, bdaddr);
1273 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
1274 key->pin_len = pin_len;
1275
1276 if (type == HCI_LK_CHANGED_COMBINATION)
1277 key->type = old_key_type;
1278 else
1279 key->type = type;
1280
1281 if (!new_key)
1282 return 0;
1283
1284 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1285
1286 mgmt_new_link_key(hdev, key, persistent);
1287
1288 if (conn)
1289 conn->flush_key = !persistent;
1290
1291 return 0;
1292 }
1293
1294 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1295 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
1296 ediv, u8 rand[8])
1297 {
1298 struct smp_ltk *key, *old_key;
1299
1300 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1301 return 0;
1302
1303 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1304 if (old_key)
1305 key = old_key;
1306 else {
1307 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1308 if (!key)
1309 return -ENOMEM;
1310 list_add(&key->list, &hdev->long_term_keys);
1311 }
1312
1313 bacpy(&key->bdaddr, bdaddr);
1314 key->bdaddr_type = addr_type;
1315 memcpy(key->val, tk, sizeof(key->val));
1316 key->authenticated = authenticated;
1317 key->ediv = ediv;
1318 key->enc_size = enc_size;
1319 key->type = type;
1320 memcpy(key->rand, rand, sizeof(key->rand));
1321
1322 if (!new_key)
1323 return 0;
1324
1325 if (type & HCI_SMP_LTK)
1326 mgmt_new_ltk(hdev, key, 1);
1327
1328 return 0;
1329 }
1330
1331 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1332 {
1333 struct link_key *key;
1334
1335 key = hci_find_link_key(hdev, bdaddr);
1336 if (!key)
1337 return -ENOENT;
1338
1339 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1340
1341 list_del(&key->list);
1342 kfree(key);
1343
1344 return 0;
1345 }
1346
1347 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1348 {
1349 struct smp_ltk *k, *tmp;
1350
1351 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1352 if (bacmp(bdaddr, &k->bdaddr))
1353 continue;
1354
1355 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1356
1357 list_del(&k->list);
1358 kfree(k);
1359 }
1360
1361 return 0;
1362 }
1363
1364 /* HCI command timer function */
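/* Fires when the controller does not answer the last sent command in
 * time; logs the stalled opcode and unblocks the command queue so the
 * next command can be sent.
 */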
1365 static void hci_cmd_timeout(unsigned long arg)
1366 {
1367 struct hci_dev *hdev = (void *) arg;
1368
1369 if (hdev->sent_cmd) {
1370 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1371 u16 opcode = __le16_to_cpu(sent->opcode);
1372
1373 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1374 } else {
1375 BT_ERR("%s command tx timeout", hdev->name);
1376 }
1377
1378 atomic_set(&hdev->cmd_cnt, 1);
1379 queue_work(hdev->workqueue, &hdev->cmd_work);
1380 }
1381
1382 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1383 bdaddr_t *bdaddr)
1384 {
1385 struct oob_data *data;
1386
1387 list_for_each_entry(data, &hdev->remote_oob_data, list)
1388 if (bacmp(bdaddr, &data->bdaddr) == 0)
1389 return data;
1390
1391 return NULL;
1392 }
1393
1394 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1395 {
1396 struct oob_data *data;
1397
1398 data = hci_find_remote_oob_data(hdev, bdaddr);
1399 if (!data)
1400 return -ENOENT;
1401
1402 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1403
1404 list_del(&data->list);
1405 kfree(data);
1406
1407 return 0;
1408 }
1409
1410 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1411 {
1412 struct oob_data *data, *n;
1413
1414 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1415 list_del(&data->list);
1416 kfree(data);
1417 }
1418
1419 return 0;
1420 }
1421
1422 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1423 u8 *randomizer)
1424 {
1425 struct oob_data *data;
1426
1427 data = hci_find_remote_oob_data(hdev, bdaddr);
1428
1429 if (!data) {
1430 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1431 if (!data)
1432 return -ENOMEM;
1433
1434 bacpy(&data->bdaddr, bdaddr);
1435 list_add(&data->list, &hdev->remote_oob_data);
1436 }
1437
1438 memcpy(data->hash, hash, sizeof(data->hash));
1439 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1440
1441 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1442
1443 return 0;
1444 }
1445
1446 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1447 {
1448 struct bdaddr_list *b;
1449
1450 list_for_each_entry(b, &hdev->blacklist, list)
1451 if (bacmp(bdaddr, &b->bdaddr) == 0)
1452 return b;
1453
1454 return NULL;
1455 }
1456
1457 int hci_blacklist_clear(struct hci_dev *hdev)
1458 {
1459 struct list_head *p, *n;
1460
1461 list_for_each_safe(p, n, &hdev->blacklist) {
1462 struct bdaddr_list *b;
1463
1464 b = list_entry(p, struct bdaddr_list, list);
1465
1466 list_del(p);
1467 kfree(b);
1468 }
1469
1470 return 0;
1471 }
1472
1473 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1474 {
1475 struct bdaddr_list *entry;
1476
1477 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1478 return -EBADF;
1479
1480 if (hci_blacklist_lookup(hdev, bdaddr))
1481 return -EEXIST;
1482
1483 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1484 if (!entry)
1485 return -ENOMEM;
1486
1487 bacpy(&entry->bdaddr, bdaddr);
1488
1489 list_add(&entry->list, &hdev->blacklist);
1490
1491 return mgmt_device_blocked(hdev, bdaddr, type);
1492 }
1493
1494 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1495 {
1496 struct bdaddr_list *entry;
1497
1498 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1499 return hci_blacklist_clear(hdev);
1500
1501 entry = hci_blacklist_lookup(hdev, bdaddr);
1502 if (!entry)
1503 return -ENOENT;
1504
1505 list_del(&entry->list);
1506 kfree(entry);
1507
1508 return mgmt_device_unblocked(hdev, bdaddr, type);
1509 }
1510
1511 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1512 {
1513 struct le_scan_params *param = (struct le_scan_params *) opt;
1514 struct hci_cp_le_set_scan_param cp;
1515
1516 memset(&cp, 0, sizeof(cp));
1517 cp.type = param->type;
1518 cp.interval = cpu_to_le16(param->interval);
1519 cp.window = cpu_to_le16(param->window);
1520
1521 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1522 }
1523
1524 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1525 {
1526 struct hci_cp_le_set_scan_enable cp;
1527
1528 memset(&cp, 0, sizeof(cp));
1529 cp.enable = 1;
1530 cp.filter_dup = 1;
1531
1532 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1533 }
1534
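/* Set the LE scan parameters, enable scanning and, on success, schedule
 * le_scan_disable to stop the scan after the requested timeout. Both
 * HCI requests run serialized under the req lock.
 */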
1535 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1536 u16 window, int timeout)
1537 {
1538 long timeo = msecs_to_jiffies(3000);
1539 struct le_scan_params param;
1540 int err;
1541
1542 BT_DBG("%s", hdev->name);
1543
1544 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1545 return -EINPROGRESS;
1546
1547 param.type = type;
1548 param.interval = interval;
1549 param.window = window;
1550
1551 hci_req_lock(hdev);
1552
1553 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1554 timeo);
1555 if (!err)
1556 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1557
1558 hci_req_unlock(hdev);
1559
1560 if (err < 0)
1561 return err;
1562
1563 schedule_delayed_work(&hdev->le_scan_disable,
1564 msecs_to_jiffies(timeout));
1565
1566 return 0;
1567 }
1568
1569 int hci_cancel_le_scan(struct hci_dev *hdev)
1570 {
1571 BT_DBG("%s", hdev->name);
1572
1573 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1574 return -EALREADY;
1575
1576 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1577 struct hci_cp_le_set_scan_enable cp;
1578
1579 /* Send HCI command to disable LE Scan */
1580 memset(&cp, 0, sizeof(cp));
1581 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1582 }
1583
1584 return 0;
1585 }
1586
1587 static void le_scan_disable_work(struct work_struct *work)
1588 {
1589 struct hci_dev *hdev = container_of(work, struct hci_dev,
1590 le_scan_disable.work);
1591 struct hci_cp_le_set_scan_enable cp;
1592
1593 BT_DBG("%s", hdev->name);
1594
1595 memset(&cp, 0, sizeof(cp));
1596
1597 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1598 }
1599
1600 static void le_scan_work(struct work_struct *work)
1601 {
1602 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1603 struct le_scan_params *param = &hdev->le_scan_params;
1604
1605 BT_DBG("%s", hdev->name);
1606
1607 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1608 param->timeout);
1609 }
1610
1611 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1612 int timeout)
1613 {
1614 struct le_scan_params *param = &hdev->le_scan_params;
1615
1616 BT_DBG("%s", hdev->name);
1617
1618 if (work_busy(&hdev->le_scan))
1619 return -EINPROGRESS;
1620
1621 param->type = type;
1622 param->interval = interval;
1623 param->window = window;
1624 param->timeout = timeout;
1625
1626 queue_work(system_long_wq, &hdev->le_scan);
1627
1628 return 0;
1629 }
1630
1631 /* Alloc HCI device */
1632 struct hci_dev *hci_alloc_dev(void)
1633 {
1634 struct hci_dev *hdev;
1635
1636 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1637 if (!hdev)
1638 return NULL;
1639
1640 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1641 hdev->esco_type = (ESCO_HV1);
1642 hdev->link_mode = (HCI_LM_ACCEPT);
1643 hdev->io_capability = 0x03; /* No Input No Output */
1644
1645 hdev->sniff_max_interval = 800;
1646 hdev->sniff_min_interval = 80;
1647
1648 mutex_init(&hdev->lock);
1649 mutex_init(&hdev->req_lock);
1650
1651 INIT_LIST_HEAD(&hdev->mgmt_pending);
1652 INIT_LIST_HEAD(&hdev->blacklist);
1653 INIT_LIST_HEAD(&hdev->uuids);
1654 INIT_LIST_HEAD(&hdev->link_keys);
1655 INIT_LIST_HEAD(&hdev->long_term_keys);
1656 INIT_LIST_HEAD(&hdev->remote_oob_data);
1657
1658 INIT_WORK(&hdev->rx_work, hci_rx_work);
1659 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1660 INIT_WORK(&hdev->tx_work, hci_tx_work);
1661 INIT_WORK(&hdev->power_on, hci_power_on);
1662 INIT_WORK(&hdev->le_scan, le_scan_work);
1663
1664 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1665 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1666 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1667
1668 skb_queue_head_init(&hdev->driver_init);
1669 skb_queue_head_init(&hdev->rx_q);
1670 skb_queue_head_init(&hdev->cmd_q);
1671 skb_queue_head_init(&hdev->raw_q);
1672
1673 init_waitqueue_head(&hdev->req_wait_q);
1674
1675 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
1676
1677 hci_init_sysfs(hdev);
1678 discovery_init(hdev);
1679 hci_conn_hash_init(hdev);
1680
1681 return hdev;
1682 }
1683 EXPORT_SYMBOL(hci_alloc_dev);
1684
1685 /* Free HCI device */
1686 void hci_free_dev(struct hci_dev *hdev)
1687 {
1688 skb_queue_purge(&hdev->driver_init);
1689
1690 /* will be freed via device release */
1691 put_device(&hdev->dev);
1692 }
1693 EXPORT_SYMBOL(hci_free_dev);
1694
1695 /* Register HCI device */
1696 int hci_register_dev(struct hci_dev *hdev)
1697 {
1698 int id, error;
1699
1700 if (!hdev->open || !hdev->close)
1701 return -EINVAL;
1702
1703 /* Do not allow HCI_AMP devices to register at index 0,
1704 * so the index can be used as the AMP controller ID.
1705 */
1706 switch (hdev->dev_type) {
1707 case HCI_BREDR:
1708 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1709 break;
1710 case HCI_AMP:
1711 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1712 break;
1713 default:
1714 return -EINVAL;
1715 }
1716
1717 if (id < 0)
1718 return id;
1719
1720 sprintf(hdev->name, "hci%d", id);
1721 hdev->id = id;
1722
1723 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1724
1725 write_lock(&hci_dev_list_lock);
1726 list_add(&hdev->list, &hci_dev_list);
1727 write_unlock(&hci_dev_list_lock);
1728
1729 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1730 WQ_MEM_RECLAIM, 1);
1731 if (!hdev->workqueue) {
1732 error = -ENOMEM;
1733 goto err;
1734 }
1735
1736 error = hci_add_sysfs(hdev);
1737 if (error < 0)
1738 goto err_wqueue;
1739
1740 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1741 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1742 hdev);
1743 if (hdev->rfkill) {
1744 if (rfkill_register(hdev->rfkill) < 0) {
1745 rfkill_destroy(hdev->rfkill);
1746 hdev->rfkill = NULL;
1747 }
1748 }
1749
1750 set_bit(HCI_SETUP, &hdev->dev_flags);
1751
1752 if (hdev->dev_type != HCI_AMP)
1753 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1754
1755 schedule_work(&hdev->power_on);
1756
1757 hci_notify(hdev, HCI_DEV_REG);
1758 hci_dev_hold(hdev);
1759
1760 return id;
1761
1762 err_wqueue:
1763 destroy_workqueue(hdev->workqueue);
1764 err:
1765 ida_simple_remove(&hci_index_ida, hdev->id);
1766 write_lock(&hci_dev_list_lock);
1767 list_del(&hdev->list);
1768 write_unlock(&hci_dev_list_lock);
1769
1770 return error;
1771 }
1772 EXPORT_SYMBOL(hci_register_dev);
1773
1774 /* Unregister HCI device */
1775 void hci_unregister_dev(struct hci_dev *hdev)
1776 {
1777 int i, id;
1778
1779 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1780
1781 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1782
1783 id = hdev->id;
1784
1785 write_lock(&hci_dev_list_lock);
1786 list_del(&hdev->list);
1787 write_unlock(&hci_dev_list_lock);
1788
1789 hci_dev_do_close(hdev);
1790
1791 for (i = 0; i < NUM_REASSEMBLY; i++)
1792 kfree_skb(hdev->reassembly[i]);
1793
1794 if (!test_bit(HCI_INIT, &hdev->flags) &&
1795 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1796 hci_dev_lock(hdev);
1797 mgmt_index_removed(hdev);
1798 hci_dev_unlock(hdev);
1799 }
1800
1801 /* mgmt_index_removed should take care of emptying the
1802 * pending list */
1803 BUG_ON(!list_empty(&hdev->mgmt_pending));
1804
1805 hci_notify(hdev, HCI_DEV_UNREG);
1806
1807 if (hdev->rfkill) {
1808 rfkill_unregister(hdev->rfkill);
1809 rfkill_destroy(hdev->rfkill);
1810 }
1811
1812 hci_del_sysfs(hdev);
1813
1814 destroy_workqueue(hdev->workqueue);
1815
1816 hci_dev_lock(hdev);
1817 hci_blacklist_clear(hdev);
1818 hci_uuids_clear(hdev);
1819 hci_link_keys_clear(hdev);
1820 hci_smp_ltks_clear(hdev);
1821 hci_remote_oob_data_clear(hdev);
1822 hci_dev_unlock(hdev);
1823
1824 hci_dev_put(hdev);
1825
1826 ida_simple_remove(&hci_index_ida, id);
1827 }
1828 EXPORT_SYMBOL(hci_unregister_dev);
1829
1830 /* Suspend HCI device */
1831 int hci_suspend_dev(struct hci_dev *hdev)
1832 {
1833 hci_notify(hdev, HCI_DEV_SUSPEND);
1834 return 0;
1835 }
1836 EXPORT_SYMBOL(hci_suspend_dev);
1837
1838 /* Resume HCI device */
1839 int hci_resume_dev(struct hci_dev *hdev)
1840 {
1841 hci_notify(hdev, HCI_DEV_RESUME);
1842 return 0;
1843 }
1844 EXPORT_SYMBOL(hci_resume_dev);
1845
1846 /* Receive frame from HCI drivers */
1847 int hci_recv_frame(struct sk_buff *skb)
1848 {
1849 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1850 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1851 && !test_bit(HCI_INIT, &hdev->flags))) {
1852 kfree_skb(skb);
1853 return -ENXIO;
1854 }
1855
1856 /* Incoming skb */
1857 bt_cb(skb)->incoming = 1;
1858
1859 /* Time stamp */
1860 __net_timestamp(skb);
1861
1862 skb_queue_tail(&hdev->rx_q, skb);
1863 queue_work(hdev->workqueue, &hdev->rx_work);
1864
1865 return 0;
1866 }
1867 EXPORT_SYMBOL(hci_recv_frame);
1868
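/* Reassemble a possibly fragmented HCI packet of the given type into
 * hdev->reassembly[index]. Once the header is complete the expected
 * payload length is taken from it; when the full packet has been
 * collected it is handed to hci_recv_frame(). Returns the number of
 * input bytes not yet consumed, or a negative error.
 */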
1869 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1870 int count, __u8 index)
1871 {
1872 int len = 0;
1873 int hlen = 0;
1874 int remain = count;
1875 struct sk_buff *skb;
1876 struct bt_skb_cb *scb;
1877
1878 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1879 index >= NUM_REASSEMBLY)
1880 return -EILSEQ;
1881
1882 skb = hdev->reassembly[index];
1883
1884 if (!skb) {
1885 switch (type) {
1886 case HCI_ACLDATA_PKT:
1887 len = HCI_MAX_FRAME_SIZE;
1888 hlen = HCI_ACL_HDR_SIZE;
1889 break;
1890 case HCI_EVENT_PKT:
1891 len = HCI_MAX_EVENT_SIZE;
1892 hlen = HCI_EVENT_HDR_SIZE;
1893 break;
1894 case HCI_SCODATA_PKT:
1895 len = HCI_MAX_SCO_SIZE;
1896 hlen = HCI_SCO_HDR_SIZE;
1897 break;
1898 }
1899
1900 skb = bt_skb_alloc(len, GFP_ATOMIC);
1901 if (!skb)
1902 return -ENOMEM;
1903
1904 scb = (void *) skb->cb;
1905 scb->expect = hlen;
1906 scb->pkt_type = type;
1907
1908 skb->dev = (void *) hdev;
1909 hdev->reassembly[index] = skb;
1910 }
1911
1912 while (count) {
1913 scb = (void *) skb->cb;
1914 len = min_t(uint, scb->expect, count);
1915
1916 memcpy(skb_put(skb, len), data, len);
1917
1918 count -= len;
1919 data += len;
1920 scb->expect -= len;
1921 remain = count;
1922
1923 switch (type) {
1924 case HCI_EVENT_PKT:
1925 if (skb->len == HCI_EVENT_HDR_SIZE) {
1926 struct hci_event_hdr *h = hci_event_hdr(skb);
1927 scb->expect = h->plen;
1928
1929 if (skb_tailroom(skb) < scb->expect) {
1930 kfree_skb(skb);
1931 hdev->reassembly[index] = NULL;
1932 return -ENOMEM;
1933 }
1934 }
1935 break;
1936
1937 case HCI_ACLDATA_PKT:
1938 if (skb->len == HCI_ACL_HDR_SIZE) {
1939 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1940 scb->expect = __le16_to_cpu(h->dlen);
1941
1942 if (skb_tailroom(skb) < scb->expect) {
1943 kfree_skb(skb);
1944 hdev->reassembly[index] = NULL;
1945 return -ENOMEM;
1946 }
1947 }
1948 break;
1949
1950 case HCI_SCODATA_PKT:
1951 if (skb->len == HCI_SCO_HDR_SIZE) {
1952 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1953 scb->expect = h->dlen;
1954
1955 if (skb_tailroom(skb) < scb->expect) {
1956 kfree_skb(skb);
1957 hdev->reassembly[index] = NULL;
1958 return -ENOMEM;
1959 }
1960 }
1961 break;
1962 }
1963
1964 if (scb->expect == 0) {
1965 /* Complete frame */
1966
1967 bt_cb(skb)->pkt_type = type;
1968 hci_recv_frame(skb);
1969
1970 hdev->reassembly[index] = NULL;
1971 return remain;
1972 }
1973 }
1974
1975 return remain;
1976 }
1977
1978 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1979 {
1980 int rem = 0;
1981
1982 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1983 return -EILSEQ;
1984
1985 while (count) {
1986 rem = hci_reassembly(hdev, type, data, count, type - 1);
1987 if (rem < 0)
1988 return rem;
1989
1990 data += (count - rem);
1991 count = rem;
1992 }
1993
1994 return rem;
1995 }
1996 EXPORT_SYMBOL(hci_recv_fragment);
1997
1998 #define STREAM_REASSEMBLY 0
1999
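/* Reassemble HCI packets from a raw byte stream in which every packet
 * is preceded by a one-byte packet type indicator (e.g. UART-style
 * transports). The stream shares reassembly slot STREAM_REASSEMBLY.
 */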
2000 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2001 {
2002 int type;
2003 int rem = 0;
2004
2005 while (count) {
2006 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2007
2008 if (!skb) {
2009 struct { char type; } *pkt;
2010
2011 /* Start of the frame */
2012 pkt = data;
2013 type = pkt->type;
2014
2015 data++;
2016 count--;
2017 } else
2018 type = bt_cb(skb)->pkt_type;
2019
2020 rem = hci_reassembly(hdev, type, data, count,
2021 STREAM_REASSEMBLY);
2022 if (rem < 0)
2023 return rem;
2024
2025 data += (count - rem);
2026 count = rem;
2027 }
2028
2029 return rem;
2030 }
2031 EXPORT_SYMBOL(hci_recv_stream_fragment);
2032
2033 /* ---- Interface to upper protocols ---- */
2034
2035 int hci_register_cb(struct hci_cb *cb)
2036 {
2037 BT_DBG("%p name %s", cb, cb->name);
2038
2039 write_lock(&hci_cb_list_lock);
2040 list_add(&cb->list, &hci_cb_list);
2041 write_unlock(&hci_cb_list_lock);
2042
2043 return 0;
2044 }
2045 EXPORT_SYMBOL(hci_register_cb);
2046
2047 int hci_unregister_cb(struct hci_cb *cb)
2048 {
2049 BT_DBG("%p name %s", cb, cb->name);
2050
2051 write_lock(&hci_cb_list_lock);
2052 list_del(&cb->list);
2053 write_unlock(&hci_cb_list_lock);
2054
2055 return 0;
2056 }
2057 EXPORT_SYMBOL(hci_unregister_cb);
2058
2059 static int hci_send_frame(struct sk_buff *skb)
2060 {
2061 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2062
2063 if (!hdev) {
2064 kfree_skb(skb);
2065 return -ENODEV;
2066 }
2067
2068 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2069
2070 /* Time stamp */
2071 __net_timestamp(skb);
2072
2073 /* Send copy to monitor */
2074 hci_send_to_monitor(hdev, skb);
2075
2076 if (atomic_read(&hdev->promisc)) {
2077 /* Send copy to the sockets */
2078 hci_send_to_sock(hdev, skb);
2079 }
2080
2081 /* Get rid of skb owner, prior to sending to the driver. */
2082 skb_orphan(skb);
2083
2084 return hdev->send(skb);
2085 }
2086
2087 /* Send HCI command */
2088 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2089 {
2090 int len = HCI_COMMAND_HDR_SIZE + plen;
2091 struct hci_command_hdr *hdr;
2092 struct sk_buff *skb;
2093
2094 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2095
2096 skb = bt_skb_alloc(len, GFP_ATOMIC);
2097 if (!skb) {
2098 BT_ERR("%s no memory for command", hdev->name);
2099 return -ENOMEM;
2100 }
2101
2102 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2103 hdr->opcode = cpu_to_le16(opcode);
2104 hdr->plen = plen;
2105
2106 if (plen)
2107 memcpy(skb_put(skb, plen), param, plen);
2108
2109 BT_DBG("skb len %d", skb->len);
2110
2111 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2112 skb->dev = (void *) hdev;
2113
2114 if (test_bit(HCI_INIT, &hdev->flags))
2115 hdev->init_last_cmd = opcode;
2116
2117 skb_queue_tail(&hdev->cmd_q, skb);
2118 queue_work(hdev->workqueue, &hdev->cmd_work);
2119
2120 return 0;
2121 }
2122
2123 /* Get data from the previously sent command */
2124 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2125 {
2126 struct hci_command_hdr *hdr;
2127
2128 if (!hdev->sent_cmd)
2129 return NULL;
2130
2131 hdr = (void *) hdev->sent_cmd->data;
2132
2133 if (hdr->opcode != cpu_to_le16(opcode))
2134 return NULL;
2135
2136 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2137
2138 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2139 }
2140
2141 /* Send ACL data */
2142 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2143 {
2144 struct hci_acl_hdr *hdr;
2145 int len = skb->len;
2146
2147 skb_push(skb, HCI_ACL_HDR_SIZE);
2148 skb_reset_transport_header(skb);
2149 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2150 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2151 hdr->dlen = cpu_to_le16(len);
2152 }
2153
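/* Add the ACL header and queue the frame on the given queue. If the skb
 * carries a frag_list, every fragment gets its own ACL header with the
 * ACL_CONT flag and all fragments are queued atomically under the queue
 * lock.
 */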
2154 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2155 struct sk_buff *skb, __u16 flags)
2156 {
2157 struct hci_dev *hdev = conn->hdev;
2158 struct sk_buff *list;
2159
2160 skb->len = skb_headlen(skb);
2161 skb->data_len = 0;
2162
2163 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2164 hci_add_acl_hdr(skb, conn->handle, flags);
2165
2166 list = skb_shinfo(skb)->frag_list;
2167 if (!list) {
2168 /* Non fragmented */
2169 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2170
2171 skb_queue_tail(queue, skb);
2172 } else {
2173 /* Fragmented */
2174 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2175
2176 skb_shinfo(skb)->frag_list = NULL;
2177
2178 /* Queue all fragments atomically */
2179 spin_lock(&queue->lock);
2180
2181 __skb_queue_tail(queue, skb);
2182
2183 flags &= ~ACL_START;
2184 flags |= ACL_CONT;
2185 do {
2186 skb = list; list = list->next;
2187
2188 skb->dev = (void *) hdev;
2189 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2190 hci_add_acl_hdr(skb, conn->handle, flags);
2191
2192 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2193
2194 __skb_queue_tail(queue, skb);
2195 } while (list);
2196
2197 spin_unlock(&queue->lock);
2198 }
2199 }
2200
2201 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2202 {
2203 struct hci_conn *conn = chan->conn;
2204 struct hci_dev *hdev = conn->hdev;
2205
2206 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
2207
2208 skb->dev = (void *) hdev;
2209
2210 hci_queue_acl(conn, &chan->data_q, skb, flags);
2211
2212 queue_work(hdev->workqueue, &hdev->tx_work);
2213 }
2214
2215 /* Send SCO data */
2216 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2217 {
2218 struct hci_dev *hdev = conn->hdev;
2219 struct hci_sco_hdr hdr;
2220
2221 BT_DBG("%s len %d", hdev->name, skb->len);
2222
2223 hdr.handle = cpu_to_le16(conn->handle);
2224 hdr.dlen = skb->len;
2225
2226 skb_push(skb, HCI_SCO_HDR_SIZE);
2227 skb_reset_transport_header(skb);
2228 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2229
2230 skb->dev = (void *) hdev;
2231 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2232
2233 skb_queue_tail(&conn->data_q, skb);
2234 queue_work(hdev->workqueue, &hdev->tx_work);
2235 }
2236
2237 /* ---- HCI TX task (outgoing data) ---- */
2238
2239 /* HCI Connection scheduler */
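/* Find the connection of the given link type with queued data and the
 * fewest packets in flight, and compute how many packets (the quote) it
 * may send based on the free controller buffers.
 */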
2240 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2241 int *quote)
2242 {
2243 struct hci_conn_hash *h = &hdev->conn_hash;
2244 struct hci_conn *conn = NULL, *c;
2245 unsigned int num = 0, min = ~0;
2246
2247 /* We don't have to lock device here. Connections are always
2248 * added and removed with TX task disabled. */
2249
2250 rcu_read_lock();
2251
2252 list_for_each_entry_rcu(c, &h->list, list) {
2253 if (c->type != type || skb_queue_empty(&c->data_q))
2254 continue;
2255
2256 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2257 continue;
2258
2259 num++;
2260
2261 if (c->sent < min) {
2262 min = c->sent;
2263 conn = c;
2264 }
2265
2266 if (hci_conn_num(hdev, type) == num)
2267 break;
2268 }
2269
2270 rcu_read_unlock();
2271
2272 if (conn) {
2273 int cnt, q;
2274
2275 switch (conn->type) {
2276 case ACL_LINK:
2277 cnt = hdev->acl_cnt;
2278 break;
2279 case SCO_LINK:
2280 case ESCO_LINK:
2281 cnt = hdev->sco_cnt;
2282 break;
2283 case LE_LINK:
2284 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2285 break;
2286 default:
2287 cnt = 0;
2288 BT_ERR("Unknown link type");
2289 }
2290
2291 q = cnt / num;
2292 *quote = q ? q : 1;
2293 } else
2294 *quote = 0;
2295
2296 BT_DBG("conn %p quote %d", conn, *quote);
2297 return conn;
2298 }
2299
2300 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2301 {
2302 struct hci_conn_hash *h = &hdev->conn_hash;
2303 struct hci_conn *c;
2304
2305 BT_ERR("%s link tx timeout", hdev->name);
2306
2307 rcu_read_lock();
2308
2309 /* Kill stalled connections */
2310 list_for_each_entry_rcu(c, &h->list, list) {
2311 if (c->type == type && c->sent) {
2312 BT_ERR("%s killing stalled connection %s",
2313 hdev->name, batostr(&c->dst));
2314 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
2315 }
2316 }
2317
2318 rcu_read_unlock();
2319 }
2320
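/* Channel-level scheduler: among all channels of the given link type with
 * queued data, consider only those whose head skb has the highest pending
 * priority and pick the one whose connection has the fewest packets in
 * flight.  The TX quota is the free buffer count split across the channels
 * competing at that priority, with a minimum of one.
 */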
2321 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2322 int *quote)
2323 {
2324 struct hci_conn_hash *h = &hdev->conn_hash;
2325 struct hci_chan *chan = NULL;
2326 unsigned int num = 0, min = ~0, cur_prio = 0;
2327 struct hci_conn *conn;
2328 int cnt, q, conn_num = 0;
2329
2330 BT_DBG("%s", hdev->name);
2331
2332 rcu_read_lock();
2333
2334 list_for_each_entry_rcu(conn, &h->list, list) {
2335 struct hci_chan *tmp;
2336
2337 if (conn->type != type)
2338 continue;
2339
2340 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2341 continue;
2342
2343 conn_num++;
2344
2345 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2346 struct sk_buff *skb;
2347
2348 if (skb_queue_empty(&tmp->data_q))
2349 continue;
2350
2351 skb = skb_peek(&tmp->data_q);
2352 if (skb->priority < cur_prio)
2353 continue;
2354
2355 if (skb->priority > cur_prio) {
2356 num = 0;
2357 min = ~0;
2358 cur_prio = skb->priority;
2359 }
2360
2361 num++;
2362
2363 if (conn->sent < min) {
2364 min = conn->sent;
2365 chan = tmp;
2366 }
2367 }
2368
2369 if (hci_conn_num(hdev, type) == conn_num)
2370 break;
2371 }
2372
2373 rcu_read_unlock();
2374
2375 if (!chan)
2376 return NULL;
2377
2378 switch (chan->conn->type) {
2379 case ACL_LINK:
2380 cnt = hdev->acl_cnt;
2381 break;
2382 case SCO_LINK:
2383 case ESCO_LINK:
2384 cnt = hdev->sco_cnt;
2385 break;
2386 case LE_LINK:
2387 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2388 break;
2389 default:
2390 cnt = 0;
2391 BT_ERR("Unknown link type");
2392 }
2393
2394 q = cnt / num;
2395 *quote = q ? q : 1;
2396 BT_DBG("chan %p quote %d", chan, *quote);
2397 return chan;
2398 }
2399
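/* After a TX round, clear the sent counter of every channel that was
 * serviced and promote the head skb of every channel that was not, so that
 * lower-priority traffic cannot be starved indefinitely.
 */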
2400 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2401 {
2402 struct hci_conn_hash *h = &hdev->conn_hash;
2403 struct hci_conn *conn;
2404 int num = 0;
2405
2406 BT_DBG("%s", hdev->name);
2407
2408 rcu_read_lock();
2409
2410 list_for_each_entry_rcu(conn, &h->list, list) {
2411 struct hci_chan *chan;
2412
2413 if (conn->type != type)
2414 continue;
2415
2416 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2417 continue;
2418
2419 num++;
2420
2421 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2422 struct sk_buff *skb;
2423
2424 if (chan->sent) {
2425 chan->sent = 0;
2426 continue;
2427 }
2428
2429 if (skb_queue_empty(&chan->data_q))
2430 continue;
2431
2432 skb = skb_peek(&chan->data_q);
2433 if (skb->priority >= HCI_PRIO_MAX - 1)
2434 continue;
2435
2436 skb->priority = HCI_PRIO_MAX - 1;
2437
2438 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2439 skb->priority);
2440 }
2441
2442 if (hci_conn_num(hdev, type) == num)
2443 break;
2444 }
2445
2446 rcu_read_unlock();
2448 }
2449
2450 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2451 {
2452 /* Calculate count of blocks used by this packet */
2453 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2454 }
2455
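/* Detect a stalled controller: if no ACL buffer credits have been returned
 * for longer than HCI_ACL_TX_TIMEOUT after the last transmission, tear down
 * the connections that still have unacknowledged packets.
 */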
2456 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2457 {
2458 if (!test_bit(HCI_RAW, &hdev->flags)) {
2459 /* ACL tx timeout must be longer than maximum
2460 * link supervision timeout (40.9 seconds) */
2461 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2462 HCI_ACL_TX_TIMEOUT))
2463 hci_link_tx_to(hdev, ACL_LINK);
2464 }
2465 }
2466
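/* Packet-based ACL scheduling (HCI_FLOW_CTL_MODE_PACKET_BASED): keep sending
 * frames from the selected channel until its quota or the controller's free
 * packet count runs out, or until a lower-priority frame reaches the head of
 * its queue.
 */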
2467 static void hci_sched_acl_pkt(struct hci_dev *hdev)
2468 {
2469 unsigned int cnt = hdev->acl_cnt;
2470 struct hci_chan *chan;
2471 struct sk_buff *skb;
2472 int quote;
2473
2474 __check_timeout(hdev, cnt);
2475
2476 while (hdev->acl_cnt &&
2477 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2478 u32 priority = (skb_peek(&chan->data_q))->priority;
2479 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2480 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2481 skb->len, skb->priority);
2482
2483 /* Stop if priority has changed */
2484 if (skb->priority < priority)
2485 break;
2486
2487 skb = skb_dequeue(&chan->data_q);
2488
2489 hci_conn_enter_active_mode(chan->conn,
2490 bt_cb(skb)->force_active);
2491
2492 hci_send_frame(skb);
2493 hdev->acl_last_tx = jiffies;
2494
2495 hdev->acl_cnt--;
2496 chan->sent++;
2497 chan->conn->sent++;
2498 }
2499 }
2500
2501 if (cnt != hdev->acl_cnt)
2502 hci_prio_recalculate(hdev, ACL_LINK);
2503 }
2504
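/* Block-based ACL scheduling (HCI_FLOW_CTL_MODE_BLOCK_BASED): every frame
 * consumes one or more controller data blocks, so both the channel quota and
 * the global block budget are decremented by the block count of each frame
 * sent.
 */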
2505 static void hci_sched_acl_blk(struct hci_dev *hdev)
2506 {
2507 unsigned int cnt = hdev->block_cnt;
2508 struct hci_chan *chan;
2509 struct sk_buff *skb;
2510 int quote;
2511
2512 __check_timeout(hdev, cnt);
2513
2514 while (hdev->block_cnt > 0 &&
2515 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2516 u32 priority = (skb_peek(&chan->data_q))->priority;
2517 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2518 int blocks;
2519
2520 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2521 skb->len, skb->priority);
2522
2523 /* Stop if priority has changed */
2524 if (skb->priority < priority)
2525 break;
2526
2527 skb = skb_dequeue(&chan->data_q);
2528
2529 blocks = __get_blocks(hdev, skb);
2530 if (blocks > hdev->block_cnt)
2531 return;
2532
2533 hci_conn_enter_active_mode(chan->conn,
2534 bt_cb(skb)->force_active);
2535
2536 hci_send_frame(skb);
2537 hdev->acl_last_tx = jiffies;
2538
2539 hdev->block_cnt -= blocks;
2540 quote -= blocks;
2541
2542 chan->sent += blocks;
2543 chan->conn->sent += blocks;
2544 }
2545 }
2546
2547 if (cnt != hdev->block_cnt)
2548 hci_prio_recalculate(hdev, ACL_LINK);
2549 }
2550
2551 static void hci_sched_acl(struct hci_dev *hdev)
2552 {
2553 BT_DBG("%s", hdev->name);
2554
2555 if (!hci_conn_num(hdev, ACL_LINK))
2556 return;
2557
2558 switch (hdev->flow_ctl_mode) {
2559 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2560 hci_sched_acl_pkt(hdev);
2561 break;
2562
2563 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2564 hci_sched_acl_blk(hdev);
2565 break;
2566 }
2567 }
2568
2569 /* Schedule SCO */
2570 static void hci_sched_sco(struct hci_dev *hdev)
2571 {
2572 struct hci_conn *conn;
2573 struct sk_buff *skb;
2574 int quote;
2575
2576 BT_DBG("%s", hdev->name);
2577
2578 if (!hci_conn_num(hdev, SCO_LINK))
2579 return;
2580
2581 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2582 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2583 BT_DBG("skb %p len %d", skb, skb->len);
2584 hci_send_frame(skb);
2585
2586 conn->sent++;
2587 if (conn->sent == ~0)
2588 conn->sent = 0;
2589 }
2590 }
2591 }
2592
2593 static void hci_sched_esco(struct hci_dev *hdev)
2594 {
2595 struct hci_conn *conn;
2596 struct sk_buff *skb;
2597 int quote;
2598
2599 BT_DBG("%s", hdev->name);
2600
2601 if (!hci_conn_num(hdev, ESCO_LINK))
2602 return;
2603
2604 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2605 &quote))) {
2606 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2607 BT_DBG("skb %p len %d", skb, skb->len);
2608 hci_send_frame(skb);
2609
2610 conn->sent++;
2611 if (conn->sent == ~0)
2612 conn->sent = 0;
2613 }
2614 }
2615 }
2616
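/* LE scheduling: controllers without a dedicated LE buffer pool share the
 * ACL buffers, so the budget comes from le_cnt when le_pkts is non-zero and
 * from acl_cnt otherwise, and the remainder is written back after the round.
 */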
2617 static void hci_sched_le(struct hci_dev *hdev)
2618 {
2619 struct hci_chan *chan;
2620 struct sk_buff *skb;
2621 int quote, cnt, tmp;
2622
2623 BT_DBG("%s", hdev->name);
2624
2625 if (!hci_conn_num(hdev, LE_LINK))
2626 return;
2627
2628 if (!test_bit(HCI_RAW, &hdev->flags)) {
2629 /* LE tx timeout must be longer than maximum
2630 * link supervision timeout (40.9 seconds) */
2631 if (!hdev->le_cnt && hdev->le_pkts &&
2632 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2633 hci_link_tx_to(hdev, LE_LINK);
2634 }
2635
2636 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2637 tmp = cnt;
2638 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2639 u32 priority = (skb_peek(&chan->data_q))->priority;
2640 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2641 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2642 skb->len, skb->priority);
2643
2644 /* Stop if priority has changed */
2645 if (skb->priority < priority)
2646 break;
2647
2648 skb = skb_dequeue(&chan->data_q);
2649
2650 hci_send_frame(skb);
2651 hdev->le_last_tx = jiffies;
2652
2653 cnt--;
2654 chan->sent++;
2655 chan->conn->sent++;
2656 }
2657 }
2658
2659 if (hdev->le_pkts)
2660 hdev->le_cnt = cnt;
2661 else
2662 hdev->acl_cnt = cnt;
2663
2664 if (cnt != tmp)
2665 hci_prio_recalculate(hdev, LE_LINK);
2666 }
2667
2668 static void hci_tx_work(struct work_struct *work)
2669 {
2670 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2671 struct sk_buff *skb;
2672
2673 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2674 hdev->sco_cnt, hdev->le_cnt);
2675
2676 /* Schedule queues and send stuff to HCI driver */
2677
2678 hci_sched_acl(hdev);
2679
2680 hci_sched_sco(hdev);
2681
2682 hci_sched_esco(hdev);
2683
2684 hci_sched_le(hdev);
2685
2686 /* Send next queued raw (unknown type) packet */
2687 while ((skb = skb_dequeue(&hdev->raw_q)))
2688 hci_send_frame(skb);
2689 }
2690
2691 /* ----- HCI RX task (incoming data processing) ----- */
2692
2693 /* ACL data packet */
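/* Strip the ACL header, split the handle field into connection handle and
 * packet boundary/broadcast flags, and hand the payload to L2CAP.  Packets
 * for an unknown handle are dropped.
 */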
2694 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2695 {
2696 struct hci_acl_hdr *hdr = (void *) skb->data;
2697 struct hci_conn *conn;
2698 __u16 handle, flags;
2699
2700 skb_pull(skb, HCI_ACL_HDR_SIZE);
2701
2702 handle = __le16_to_cpu(hdr->handle);
2703 flags = hci_flags(handle);
2704 handle = hci_handle(handle);
2705
2706 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
2707 handle, flags);
2708
2709 hdev->stat.acl_rx++;
2710
2711 hci_dev_lock(hdev);
2712 conn = hci_conn_hash_lookup_handle(hdev, handle);
2713 hci_dev_unlock(hdev);
2714
2715 if (conn) {
2716 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2717
2718 hci_dev_lock(hdev);
2719 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2720 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2721 mgmt_device_connected(hdev, &conn->dst, conn->type,
2722 conn->dst_type, 0, NULL, 0,
2723 conn->dev_class);
2724 hci_dev_unlock(hdev);
2725
2726 /* Send to upper protocol */
2727 l2cap_recv_acldata(conn, skb, flags);
2728 return;
2729 } else {
2730 BT_ERR("%s ACL packet for unknown connection handle %d",
2731 hdev->name, handle);
2732 }
2733
2734 kfree_skb(skb);
2735 }
2736
2737 /* SCO data packet */
2738 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2739 {
2740 struct hci_sco_hdr *hdr = (void *) skb->data;
2741 struct hci_conn *conn;
2742 __u16 handle;
2743
2744 skb_pull(skb, HCI_SCO_HDR_SIZE);
2745
2746 handle = __le16_to_cpu(hdr->handle);
2747
2748 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
2749
2750 hdev->stat.sco_rx++;
2751
2752 hci_dev_lock(hdev);
2753 conn = hci_conn_hash_lookup_handle(hdev, handle);
2754 hci_dev_unlock(hdev);
2755
2756 if (conn) {
2757 /* Send to upper protocol */
2758 sco_recv_scodata(conn, skb);
2759 return;
2760 } else {
2761 BT_ERR("%s SCO packet for unknown connection handle %d",
2762 hdev->name, handle);
2763 }
2764
2765 kfree_skb(skb);
2766 }
2767
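/* RX work: every received frame is copied to the monitor socket and, in
 * promiscuous mode, to raw HCI sockets.  Frames are discarded while the
 * device is in raw mode, data packets are discarded during init, and the
 * rest are dispatched by packet type.
 */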
2768 static void hci_rx_work(struct work_struct *work)
2769 {
2770 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2771 struct sk_buff *skb;
2772
2773 BT_DBG("%s", hdev->name);
2774
2775 while ((skb = skb_dequeue(&hdev->rx_q))) {
2776 /* Send copy to monitor */
2777 hci_send_to_monitor(hdev, skb);
2778
2779 if (atomic_read(&hdev->promisc)) {
2780 /* Send copy to the sockets */
2781 hci_send_to_sock(hdev, skb);
2782 }
2783
2784 if (test_bit(HCI_RAW, &hdev->flags)) {
2785 kfree_skb(skb);
2786 continue;
2787 }
2788
2789 if (test_bit(HCI_INIT, &hdev->flags)) {
2790 			/* Don't process data packets in this state. */
2791 switch (bt_cb(skb)->pkt_type) {
2792 case HCI_ACLDATA_PKT:
2793 case HCI_SCODATA_PKT:
2794 kfree_skb(skb);
2795 continue;
2796 }
2797 }
2798
2799 /* Process frame */
2800 switch (bt_cb(skb)->pkt_type) {
2801 case HCI_EVENT_PKT:
2802 BT_DBG("%s Event packet", hdev->name);
2803 hci_event_packet(hdev, skb);
2804 break;
2805
2806 case HCI_ACLDATA_PKT:
2807 BT_DBG("%s ACL data packet", hdev->name);
2808 hci_acldata_packet(hdev, skb);
2809 break;
2810
2811 case HCI_SCODATA_PKT:
2812 BT_DBG("%s SCO data packet", hdev->name);
2813 hci_scodata_packet(hdev, skb);
2814 break;
2815
2816 default:
2817 kfree_skb(skb);
2818 break;
2819 }
2820 }
2821 }
2822
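/* Command scheduler: cmd_cnt tracks how many commands the controller will
 * currently accept.  A clone of each transmitted command is kept in sent_cmd
 * for completion handling, and cmd_timer fires if the controller never
 * responds.
 */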
2823 static void hci_cmd_work(struct work_struct *work)
2824 {
2825 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2826 struct sk_buff *skb;
2827
2828 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2829 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
2830
2831 /* Send queued commands */
2832 if (atomic_read(&hdev->cmd_cnt)) {
2833 skb = skb_dequeue(&hdev->cmd_q);
2834 if (!skb)
2835 return;
2836
2837 kfree_skb(hdev->sent_cmd);
2838
2839 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2840 if (hdev->sent_cmd) {
2841 atomic_dec(&hdev->cmd_cnt);
2842 hci_send_frame(skb);
2843 if (test_bit(HCI_RESET, &hdev->flags))
2844 del_timer(&hdev->cmd_timer);
2845 else
2846 mod_timer(&hdev->cmd_timer,
2847 jiffies + HCI_CMD_TIMEOUT);
2848 } else {
2849 skb_queue_head(&hdev->cmd_q, skb);
2850 queue_work(hdev->workqueue, &hdev->cmd_work);
2851 }
2852 }
2853 }
2854
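/* Start a general inquiry using the GIAC access code for the requested
 * inquiry length, unless one is already in progress; the inquiry cache is
 * flushed first so stale results are not reported again.
 */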
2855 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2856 {
2857 /* General inquiry access code (GIAC) */
2858 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2859 struct hci_cp_inquiry cp;
2860
2861 BT_DBG("%s", hdev->name);
2862
2863 if (test_bit(HCI_INQUIRY, &hdev->flags))
2864 return -EINPROGRESS;
2865
2866 inquiry_cache_flush(hdev);
2867
2868 memset(&cp, 0, sizeof(cp));
2869 memcpy(&cp.lap, lap, sizeof(cp.lap));
2870 cp.length = length;
2871
2872 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2873 }
2874
2875 int hci_cancel_inquiry(struct hci_dev *hdev)
2876 {
2877 BT_DBG("%s", hdev->name);
2878
2879 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2880 return -EALREADY;
2881
2882 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2883 }
2884
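/* Map a BDADDR_LE_* address type, as used by the management interface, to
 * the corresponding HCI ADDR_LE_DEV_* value, defaulting to the random
 * address type.
 */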
2885 u8 bdaddr_to_le(u8 bdaddr_type)
2886 {
2887 switch (bdaddr_type) {
2888 case BDADDR_LE_PUBLIC:
2889 return ADDR_LE_DEV_PUBLIC;
2890
2891 default:
2892 /* Fallback to LE Random address type */
2893 return ADDR_LE_DEV_RANDOM;
2894 }
2895 }