Bluetooth: Use struct delayed_work for HCI command timeout
net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38
39 #include "smp.h"
40
41 static void hci_rx_work(struct work_struct *work);
42 static void hci_cmd_work(struct work_struct *work);
43 static void hci_tx_work(struct work_struct *work);
44
45 /* HCI device list */
46 LIST_HEAD(hci_dev_list);
47 DEFINE_RWLOCK(hci_dev_list_lock);
48
49 /* HCI callback list */
50 LIST_HEAD(hci_cb_list);
51 DEFINE_RWLOCK(hci_cb_list_lock);
52
53 /* HCI ID Numbering */
54 static DEFINE_IDA(hci_index_ida);
55
56 /* ---- HCI notifications ---- */
57
58 static void hci_notify(struct hci_dev *hdev, int event)
59 {
60 hci_sock_dev_event(hdev, event);
61 }
62
63 /* ---- HCI debugfs entries ---- */
64
65 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
66 size_t count, loff_t *ppos)
67 {
68 struct hci_dev *hdev = file->private_data;
69 char buf[3];
70
71 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
72 buf[1] = '\n';
73 buf[2] = '\0';
74 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
75 }
76
77 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
78 size_t count, loff_t *ppos)
79 {
80 struct hci_dev *hdev = file->private_data;
81 struct sk_buff *skb;
82 char buf[32];
83 size_t buf_size = min(count, (sizeof(buf)-1));
84 bool enable;
85 int err;
86
87 if (!test_bit(HCI_UP, &hdev->flags))
88 return -ENETDOWN;
89
90 if (copy_from_user(buf, user_buf, buf_size))
91 return -EFAULT;
92
93 buf[buf_size] = '\0';
94 if (strtobool(buf, &enable))
95 return -EINVAL;
96
97 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
98 return -EALREADY;
99
100 hci_req_lock(hdev);
101 if (enable)
102 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
103 HCI_CMD_TIMEOUT);
104 else
105 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
106 HCI_CMD_TIMEOUT);
107 hci_req_unlock(hdev);
108
109 if (IS_ERR(skb))
110 return PTR_ERR(skb);
111
112 err = -bt_to_errno(skb->data[0]);
113 kfree_skb(skb);
114
115 if (err < 0)
116 return err;
117
118 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
119
120 return count;
121 }
122
123 static const struct file_operations dut_mode_fops = {
124 .open = simple_open,
125 .read = dut_mode_read,
126 .write = dut_mode_write,
127 .llseek = default_llseek,
128 };
129
130 static int features_show(struct seq_file *f, void *ptr)
131 {
132 struct hci_dev *hdev = f->private;
133 u8 p;
134
135 hci_dev_lock(hdev);
136 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
137 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
138 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
139 hdev->features[p][0], hdev->features[p][1],
140 hdev->features[p][2], hdev->features[p][3],
141 hdev->features[p][4], hdev->features[p][5],
142 hdev->features[p][6], hdev->features[p][7]);
143 }
144 if (lmp_le_capable(hdev))
145 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
146 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
147 hdev->le_features[0], hdev->le_features[1],
148 hdev->le_features[2], hdev->le_features[3],
149 hdev->le_features[4], hdev->le_features[5],
150 hdev->le_features[6], hdev->le_features[7]);
151 hci_dev_unlock(hdev);
152
153 return 0;
154 }
155
156 static int features_open(struct inode *inode, struct file *file)
157 {
158 return single_open(file, features_show, inode->i_private);
159 }
160
161 static const struct file_operations features_fops = {
162 .open = features_open,
163 .read = seq_read,
164 .llseek = seq_lseek,
165 .release = single_release,
166 };
167
168 static int blacklist_show(struct seq_file *f, void *p)
169 {
170 struct hci_dev *hdev = f->private;
171 struct bdaddr_list *b;
172
173 hci_dev_lock(hdev);
174 list_for_each_entry(b, &hdev->blacklist, list)
175 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
176 hci_dev_unlock(hdev);
177
178 return 0;
179 }
180
181 static int blacklist_open(struct inode *inode, struct file *file)
182 {
183 return single_open(file, blacklist_show, inode->i_private);
184 }
185
186 static const struct file_operations blacklist_fops = {
187 .open = blacklist_open,
188 .read = seq_read,
189 .llseek = seq_lseek,
190 .release = single_release,
191 };
192
193 static int uuids_show(struct seq_file *f, void *p)
194 {
195 struct hci_dev *hdev = f->private;
196 struct bt_uuid *uuid;
197
198 hci_dev_lock(hdev);
199 list_for_each_entry(uuid, &hdev->uuids, list) {
200 u8 i, val[16];
201
202 /* The Bluetooth UUID values are stored in big endian,
203 * but with reversed byte order. So convert them into
204 * the right order for the %pUb modifier.
205 */
206 for (i = 0; i < 16; i++)
207 val[i] = uuid->uuid[15 - i];
208
209 seq_printf(f, "%pUb\n", val);
210 }
211 hci_dev_unlock(hdev);
212
213 return 0;
214 }
215
216 static int uuids_open(struct inode *inode, struct file *file)
217 {
218 return single_open(file, uuids_show, inode->i_private);
219 }
220
221 static const struct file_operations uuids_fops = {
222 .open = uuids_open,
223 .read = seq_read,
224 .llseek = seq_lseek,
225 .release = single_release,
226 };
227
228 static int inquiry_cache_show(struct seq_file *f, void *p)
229 {
230 struct hci_dev *hdev = f->private;
231 struct discovery_state *cache = &hdev->discovery;
232 struct inquiry_entry *e;
233
234 hci_dev_lock(hdev);
235
236 list_for_each_entry(e, &cache->all, all) {
237 struct inquiry_data *data = &e->data;
238 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
239 &data->bdaddr,
240 data->pscan_rep_mode, data->pscan_period_mode,
241 data->pscan_mode, data->dev_class[2],
242 data->dev_class[1], data->dev_class[0],
243 __le16_to_cpu(data->clock_offset),
244 data->rssi, data->ssp_mode, e->timestamp);
245 }
246
247 hci_dev_unlock(hdev);
248
249 return 0;
250 }
251
252 static int inquiry_cache_open(struct inode *inode, struct file *file)
253 {
254 return single_open(file, inquiry_cache_show, inode->i_private);
255 }
256
257 static const struct file_operations inquiry_cache_fops = {
258 .open = inquiry_cache_open,
259 .read = seq_read,
260 .llseek = seq_lseek,
261 .release = single_release,
262 };
263
264 static int link_keys_show(struct seq_file *f, void *ptr)
265 {
266 struct hci_dev *hdev = f->private;
267 struct list_head *p, *n;
268
269 hci_dev_lock(hdev);
270 list_for_each_safe(p, n, &hdev->link_keys) {
271 struct link_key *key = list_entry(p, struct link_key, list);
272 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
273 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
274 }
275 hci_dev_unlock(hdev);
276
277 return 0;
278 }
279
280 static int link_keys_open(struct inode *inode, struct file *file)
281 {
282 return single_open(file, link_keys_show, inode->i_private);
283 }
284
285 static const struct file_operations link_keys_fops = {
286 .open = link_keys_open,
287 .read = seq_read,
288 .llseek = seq_lseek,
289 .release = single_release,
290 };
291
292 static int dev_class_show(struct seq_file *f, void *ptr)
293 {
294 struct hci_dev *hdev = f->private;
295
296 hci_dev_lock(hdev);
297 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
298 hdev->dev_class[1], hdev->dev_class[0]);
299 hci_dev_unlock(hdev);
300
301 return 0;
302 }
303
304 static int dev_class_open(struct inode *inode, struct file *file)
305 {
306 return single_open(file, dev_class_show, inode->i_private);
307 }
308
309 static const struct file_operations dev_class_fops = {
310 .open = dev_class_open,
311 .read = seq_read,
312 .llseek = seq_lseek,
313 .release = single_release,
314 };
315
316 static int voice_setting_get(void *data, u64 *val)
317 {
318 struct hci_dev *hdev = data;
319
320 hci_dev_lock(hdev);
321 *val = hdev->voice_setting;
322 hci_dev_unlock(hdev);
323
324 return 0;
325 }
326
327 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
328 NULL, "0x%4.4llx\n");
329
330 static int auto_accept_delay_set(void *data, u64 val)
331 {
332 struct hci_dev *hdev = data;
333
334 hci_dev_lock(hdev);
335 hdev->auto_accept_delay = val;
336 hci_dev_unlock(hdev);
337
338 return 0;
339 }
340
341 static int auto_accept_delay_get(void *data, u64 *val)
342 {
343 struct hci_dev *hdev = data;
344
345 hci_dev_lock(hdev);
346 *val = hdev->auto_accept_delay;
347 hci_dev_unlock(hdev);
348
349 return 0;
350 }
351
352 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
353 auto_accept_delay_set, "%llu\n");
354
355 static int ssp_debug_mode_set(void *data, u64 val)
356 {
357 struct hci_dev *hdev = data;
358 struct sk_buff *skb;
359 __u8 mode;
360 int err;
361
362 if (val != 0 && val != 1)
363 return -EINVAL;
364
365 if (!test_bit(HCI_UP, &hdev->flags))
366 return -ENETDOWN;
367
368 hci_req_lock(hdev);
369 mode = val;
370 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
371 &mode, HCI_CMD_TIMEOUT);
372 hci_req_unlock(hdev);
373
374 if (IS_ERR(skb))
375 return PTR_ERR(skb);
376
377 err = -bt_to_errno(skb->data[0]);
378 kfree_skb(skb);
379
380 if (err < 0)
381 return err;
382
383 hci_dev_lock(hdev);
384 hdev->ssp_debug_mode = val;
385 hci_dev_unlock(hdev);
386
387 return 0;
388 }
389
390 static int ssp_debug_mode_get(void *data, u64 *val)
391 {
392 struct hci_dev *hdev = data;
393
394 hci_dev_lock(hdev);
395 *val = hdev->ssp_debug_mode;
396 hci_dev_unlock(hdev);
397
398 return 0;
399 }
400
401 DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
402 ssp_debug_mode_set, "%llu\n");
403
404 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
405 size_t count, loff_t *ppos)
406 {
407 struct hci_dev *hdev = file->private_data;
408 char buf[3];
409
410 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
411 buf[1] = '\n';
412 buf[2] = '\0';
413 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
414 }
415
416 static ssize_t force_sc_support_write(struct file *file,
417 const char __user *user_buf,
418 size_t count, loff_t *ppos)
419 {
420 struct hci_dev *hdev = file->private_data;
421 char buf[32];
422 size_t buf_size = min(count, (sizeof(buf)-1));
423 bool enable;
424
425 if (test_bit(HCI_UP, &hdev->flags))
426 return -EBUSY;
427
428 if (copy_from_user(buf, user_buf, buf_size))
429 return -EFAULT;
430
431 buf[buf_size] = '\0';
432 if (strtobool(buf, &enable))
433 return -EINVAL;
434
435 if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
436 return -EALREADY;
437
438 change_bit(HCI_FORCE_SC, &hdev->dev_flags);
439
440 return count;
441 }
442
443 static const struct file_operations force_sc_support_fops = {
444 .open = simple_open,
445 .read = force_sc_support_read,
446 .write = force_sc_support_write,
447 .llseek = default_llseek,
448 };
449
450 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
451 size_t count, loff_t *ppos)
452 {
453 struct hci_dev *hdev = file->private_data;
454 char buf[3];
455
456 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
457 buf[1] = '\n';
458 buf[2] = '\0';
459 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
460 }
461
462 static const struct file_operations sc_only_mode_fops = {
463 .open = simple_open,
464 .read = sc_only_mode_read,
465 .llseek = default_llseek,
466 };
467
468 static int idle_timeout_set(void *data, u64 val)
469 {
470 struct hci_dev *hdev = data;
471
472 if (val != 0 && (val < 500 || val > 3600000))
473 return -EINVAL;
474
475 hci_dev_lock(hdev);
476 hdev->idle_timeout = val;
477 hci_dev_unlock(hdev);
478
479 return 0;
480 }
481
482 static int idle_timeout_get(void *data, u64 *val)
483 {
484 struct hci_dev *hdev = data;
485
486 hci_dev_lock(hdev);
487 *val = hdev->idle_timeout;
488 hci_dev_unlock(hdev);
489
490 return 0;
491 }
492
493 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
494 idle_timeout_set, "%llu\n");
495
496 static int rpa_timeout_set(void *data, u64 val)
497 {
498 struct hci_dev *hdev = data;
499
500 /* Require the RPA timeout to be at least 30 seconds and at most
501 * 24 hours.
502 */
503 if (val < 30 || val > (60 * 60 * 24))
504 return -EINVAL;
505
506 hci_dev_lock(hdev);
507 hdev->rpa_timeout = val;
508 hci_dev_unlock(hdev);
509
510 return 0;
511 }
512
513 static int rpa_timeout_get(void *data, u64 *val)
514 {
515 struct hci_dev *hdev = data;
516
517 hci_dev_lock(hdev);
518 *val = hdev->rpa_timeout;
519 hci_dev_unlock(hdev);
520
521 return 0;
522 }
523
524 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
525 rpa_timeout_set, "%llu\n");
526
527 static int sniff_min_interval_set(void *data, u64 val)
528 {
529 struct hci_dev *hdev = data;
530
531 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
532 return -EINVAL;
533
534 hci_dev_lock(hdev);
535 hdev->sniff_min_interval = val;
536 hci_dev_unlock(hdev);
537
538 return 0;
539 }
540
541 static int sniff_min_interval_get(void *data, u64 *val)
542 {
543 struct hci_dev *hdev = data;
544
545 hci_dev_lock(hdev);
546 *val = hdev->sniff_min_interval;
547 hci_dev_unlock(hdev);
548
549 return 0;
550 }
551
552 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
553 sniff_min_interval_set, "%llu\n");
554
555 static int sniff_max_interval_set(void *data, u64 val)
556 {
557 struct hci_dev *hdev = data;
558
559 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
560 return -EINVAL;
561
562 hci_dev_lock(hdev);
563 hdev->sniff_max_interval = val;
564 hci_dev_unlock(hdev);
565
566 return 0;
567 }
568
569 static int sniff_max_interval_get(void *data, u64 *val)
570 {
571 struct hci_dev *hdev = data;
572
573 hci_dev_lock(hdev);
574 *val = hdev->sniff_max_interval;
575 hci_dev_unlock(hdev);
576
577 return 0;
578 }
579
580 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
581 sniff_max_interval_set, "%llu\n");
582
583 static int conn_info_min_age_set(void *data, u64 val)
584 {
585 struct hci_dev *hdev = data;
586
587 if (val == 0 || val > hdev->conn_info_max_age)
588 return -EINVAL;
589
590 hci_dev_lock(hdev);
591 hdev->conn_info_min_age = val;
592 hci_dev_unlock(hdev);
593
594 return 0;
595 }
596
597 static int conn_info_min_age_get(void *data, u64 *val)
598 {
599 struct hci_dev *hdev = data;
600
601 hci_dev_lock(hdev);
602 *val = hdev->conn_info_min_age;
603 hci_dev_unlock(hdev);
604
605 return 0;
606 }
607
608 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
609 conn_info_min_age_set, "%llu\n");
610
611 static int conn_info_max_age_set(void *data, u64 val)
612 {
613 struct hci_dev *hdev = data;
614
615 if (val == 0 || val < hdev->conn_info_min_age)
616 return -EINVAL;
617
618 hci_dev_lock(hdev);
619 hdev->conn_info_max_age = val;
620 hci_dev_unlock(hdev);
621
622 return 0;
623 }
624
625 static int conn_info_max_age_get(void *data, u64 *val)
626 {
627 struct hci_dev *hdev = data;
628
629 hci_dev_lock(hdev);
630 *val = hdev->conn_info_max_age;
631 hci_dev_unlock(hdev);
632
633 return 0;
634 }
635
636 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
637 conn_info_max_age_set, "%llu\n");
638
639 static int identity_show(struct seq_file *f, void *p)
640 {
641 struct hci_dev *hdev = f->private;
642 bdaddr_t addr;
643 u8 addr_type;
644
645 hci_dev_lock(hdev);
646
647 hci_copy_identity_address(hdev, &addr, &addr_type);
648
649 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
650 16, hdev->irk, &hdev->rpa);
651
652 hci_dev_unlock(hdev);
653
654 return 0;
655 }
656
657 static int identity_open(struct inode *inode, struct file *file)
658 {
659 return single_open(file, identity_show, inode->i_private);
660 }
661
662 static const struct file_operations identity_fops = {
663 .open = identity_open,
664 .read = seq_read,
665 .llseek = seq_lseek,
666 .release = single_release,
667 };
668
669 static int random_address_show(struct seq_file *f, void *p)
670 {
671 struct hci_dev *hdev = f->private;
672
673 hci_dev_lock(hdev);
674 seq_printf(f, "%pMR\n", &hdev->random_addr);
675 hci_dev_unlock(hdev);
676
677 return 0;
678 }
679
680 static int random_address_open(struct inode *inode, struct file *file)
681 {
682 return single_open(file, random_address_show, inode->i_private);
683 }
684
685 static const struct file_operations random_address_fops = {
686 .open = random_address_open,
687 .read = seq_read,
688 .llseek = seq_lseek,
689 .release = single_release,
690 };
691
692 static int static_address_show(struct seq_file *f, void *p)
693 {
694 struct hci_dev *hdev = f->private;
695
696 hci_dev_lock(hdev);
697 seq_printf(f, "%pMR\n", &hdev->static_addr);
698 hci_dev_unlock(hdev);
699
700 return 0;
701 }
702
703 static int static_address_open(struct inode *inode, struct file *file)
704 {
705 return single_open(file, static_address_show, inode->i_private);
706 }
707
708 static const struct file_operations static_address_fops = {
709 .open = static_address_open,
710 .read = seq_read,
711 .llseek = seq_lseek,
712 .release = single_release,
713 };
714
715 static ssize_t force_static_address_read(struct file *file,
716 char __user *user_buf,
717 size_t count, loff_t *ppos)
718 {
719 struct hci_dev *hdev = file->private_data;
720 char buf[3];
721
722 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
723 buf[1] = '\n';
724 buf[2] = '\0';
725 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
726 }
727
728 static ssize_t force_static_address_write(struct file *file,
729 const char __user *user_buf,
730 size_t count, loff_t *ppos)
731 {
732 struct hci_dev *hdev = file->private_data;
733 char buf[32];
734 size_t buf_size = min(count, (sizeof(buf)-1));
735 bool enable;
736
737 if (test_bit(HCI_UP, &hdev->flags))
738 return -EBUSY;
739
740 if (copy_from_user(buf, user_buf, buf_size))
741 return -EFAULT;
742
743 buf[buf_size] = '\0';
744 if (strtobool(buf, &enable))
745 return -EINVAL;
746
747 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
748 return -EALREADY;
749
750 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);
751
752 return count;
753 }
754
755 static const struct file_operations force_static_address_fops = {
756 .open = simple_open,
757 .read = force_static_address_read,
758 .write = force_static_address_write,
759 .llseek = default_llseek,
760 };
761
762 static int white_list_show(struct seq_file *f, void *ptr)
763 {
764 struct hci_dev *hdev = f->private;
765 struct bdaddr_list *b;
766
767 hci_dev_lock(hdev);
768 list_for_each_entry(b, &hdev->le_white_list, list)
769 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
770 hci_dev_unlock(hdev);
771
772 return 0;
773 }
774
775 static int white_list_open(struct inode *inode, struct file *file)
776 {
777 return single_open(file, white_list_show, inode->i_private);
778 }
779
780 static const struct file_operations white_list_fops = {
781 .open = white_list_open,
782 .read = seq_read,
783 .llseek = seq_lseek,
784 .release = single_release,
785 };
786
787 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
788 {
789 struct hci_dev *hdev = f->private;
790 struct list_head *p, *n;
791
792 hci_dev_lock(hdev);
793 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
794 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
795 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
796 &irk->bdaddr, irk->addr_type,
797 16, irk->val, &irk->rpa);
798 }
799 hci_dev_unlock(hdev);
800
801 return 0;
802 }
803
804 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
805 {
806 return single_open(file, identity_resolving_keys_show,
807 inode->i_private);
808 }
809
810 static const struct file_operations identity_resolving_keys_fops = {
811 .open = identity_resolving_keys_open,
812 .read = seq_read,
813 .llseek = seq_lseek,
814 .release = single_release,
815 };
816
817 static int long_term_keys_show(struct seq_file *f, void *ptr)
818 {
819 struct hci_dev *hdev = f->private;
820 struct list_head *p, *n;
821
822 hci_dev_lock(hdev);
823 list_for_each_safe(p, n, &hdev->long_term_keys) {
824 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
825 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
826 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
827 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
828 __le64_to_cpu(ltk->rand), 16, ltk->val);
829 }
830 hci_dev_unlock(hdev);
831
832 return 0;
833 }
834
835 static int long_term_keys_open(struct inode *inode, struct file *file)
836 {
837 return single_open(file, long_term_keys_show, inode->i_private);
838 }
839
840 static const struct file_operations long_term_keys_fops = {
841 .open = long_term_keys_open,
842 .read = seq_read,
843 .llseek = seq_lseek,
844 .release = single_release,
845 };
846
847 static int conn_min_interval_set(void *data, u64 val)
848 {
849 struct hci_dev *hdev = data;
850
851 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
852 return -EINVAL;
853
854 hci_dev_lock(hdev);
855 hdev->le_conn_min_interval = val;
856 hci_dev_unlock(hdev);
857
858 return 0;
859 }
860
861 static int conn_min_interval_get(void *data, u64 *val)
862 {
863 struct hci_dev *hdev = data;
864
865 hci_dev_lock(hdev);
866 *val = hdev->le_conn_min_interval;
867 hci_dev_unlock(hdev);
868
869 return 0;
870 }
871
872 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
873 conn_min_interval_set, "%llu\n");
874
875 static int conn_max_interval_set(void *data, u64 val)
876 {
877 struct hci_dev *hdev = data;
878
879 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
880 return -EINVAL;
881
882 hci_dev_lock(hdev);
883 hdev->le_conn_max_interval = val;
884 hci_dev_unlock(hdev);
885
886 return 0;
887 }
888
889 static int conn_max_interval_get(void *data, u64 *val)
890 {
891 struct hci_dev *hdev = data;
892
893 hci_dev_lock(hdev);
894 *val = hdev->le_conn_max_interval;
895 hci_dev_unlock(hdev);
896
897 return 0;
898 }
899
900 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
901 conn_max_interval_set, "%llu\n");
902
903 static int adv_channel_map_set(void *data, u64 val)
904 {
905 struct hci_dev *hdev = data;
906
907 if (val < 0x01 || val > 0x07)
908 return -EINVAL;
909
910 hci_dev_lock(hdev);
911 hdev->le_adv_channel_map = val;
912 hci_dev_unlock(hdev);
913
914 return 0;
915 }
916
917 static int adv_channel_map_get(void *data, u64 *val)
918 {
919 struct hci_dev *hdev = data;
920
921 hci_dev_lock(hdev);
922 *val = hdev->le_adv_channel_map;
923 hci_dev_unlock(hdev);
924
925 return 0;
926 }
927
928 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
929 adv_channel_map_set, "%llu\n");
930
931 static ssize_t lowpan_read(struct file *file, char __user *user_buf,
932 size_t count, loff_t *ppos)
933 {
934 struct hci_dev *hdev = file->private_data;
935 char buf[3];
936
937 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
938 buf[1] = '\n';
939 buf[2] = '\0';
940 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
941 }
942
943 static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
944 size_t count, loff_t *position)
945 {
946 struct hci_dev *hdev = fp->private_data;
947 bool enable;
948 char buf[32];
949 size_t buf_size = min(count, (sizeof(buf)-1));
950
951 if (copy_from_user(buf, user_buffer, buf_size))
952 return -EFAULT;
953
954 buf[buf_size] = '\0';
955
956 if (strtobool(buf, &enable) < 0)
957 return -EINVAL;
958
959 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
960 return -EALREADY;
961
962 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
963
964 return count;
965 }
966
967 static const struct file_operations lowpan_debugfs_fops = {
968 .open = simple_open,
969 .read = lowpan_read,
970 .write = lowpan_write,
971 .llseek = default_llseek,
972 };
973
974 static int le_auto_conn_show(struct seq_file *sf, void *ptr)
975 {
976 struct hci_dev *hdev = sf->private;
977 struct hci_conn_params *p;
978
979 hci_dev_lock(hdev);
980
981 list_for_each_entry(p, &hdev->le_conn_params, list) {
982 seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
983 p->auto_connect);
984 }
985
986 hci_dev_unlock(hdev);
987
988 return 0;
989 }
990
991 static int le_auto_conn_open(struct inode *inode, struct file *file)
992 {
993 return single_open(file, le_auto_conn_show, inode->i_private);
994 }
995
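/* Accepts writes of the form "add <bdaddr> <addr_type> [<auto_connect>]",
 * "del <bdaddr> <addr_type>" or "clr", with the address given as
 * colon-separated hex bytes, e.g. "add 00:11:22:33:44:55 1 2" (example
 * address only). Partial writes and unknown keywords are rejected.
 */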
996 static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
997 size_t count, loff_t *offset)
998 {
999 struct seq_file *sf = file->private_data;
1000 struct hci_dev *hdev = sf->private;
1001 u8 auto_connect = 0;
1002 bdaddr_t addr;
1003 u8 addr_type;
1004 char *buf;
1005 int err = 0;
1006 int n;
1007
1008 /* Don't allow partial write */
1009 if (*offset != 0)
1010 return -EINVAL;
1011
1012 if (count < 3)
1013 return -EINVAL;
1014
1015 buf = memdup_user(data, count);
1016 if (IS_ERR(buf))
1017 return PTR_ERR(buf);
1018
1019 if (memcmp(buf, "add", 3) == 0) {
1020 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
1021 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
1022 &addr.b[1], &addr.b[0], &addr_type,
1023 &auto_connect);
1024
1025 if (n < 7) {
1026 err = -EINVAL;
1027 goto done;
1028 }
1029
1030 hci_dev_lock(hdev);
1031 err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
1032 hdev->le_conn_min_interval,
1033 hdev->le_conn_max_interval);
1034 hci_dev_unlock(hdev);
1035
1036 if (err)
1037 goto done;
1038 } else if (memcmp(buf, "del", 3) == 0) {
1039 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
1040 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
1041 &addr.b[1], &addr.b[0], &addr_type);
1042
1043 if (n < 7) {
1044 err = -EINVAL;
1045 goto done;
1046 }
1047
1048 hci_dev_lock(hdev);
1049 hci_conn_params_del(hdev, &addr, addr_type);
1050 hci_dev_unlock(hdev);
1051 } else if (memcmp(buf, "clr", 3) == 0) {
1052 hci_dev_lock(hdev);
1053 hci_conn_params_clear(hdev);
1054 hci_pend_le_conns_clear(hdev);
1055 hci_update_background_scan(hdev);
1056 hci_dev_unlock(hdev);
1057 } else {
1058 err = -EINVAL;
1059 }
1060
1061 done:
1062 kfree(buf);
1063
1064 if (err)
1065 return err;
1066 else
1067 return count;
1068 }
1069
1070 static const struct file_operations le_auto_conn_fops = {
1071 .open = le_auto_conn_open,
1072 .read = seq_read,
1073 .write = le_auto_conn_write,
1074 .llseek = seq_lseek,
1075 .release = single_release,
1076 };
1077
1078 /* ---- HCI requests ---- */
1079
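/* Completion callback shared by the synchronous request helpers below:
 * record the result and wake up the thread sleeping on req_wait_q.
 */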
1080 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1081 {
1082 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1083
1084 if (hdev->req_status == HCI_REQ_PEND) {
1085 hdev->req_result = result;
1086 hdev->req_status = HCI_REQ_DONE;
1087 wake_up_interruptible(&hdev->req_wait_q);
1088 }
1089 }
1090
1091 static void hci_req_cancel(struct hci_dev *hdev, int err)
1092 {
1093 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1094
1095 if (hdev->req_status == HCI_REQ_PEND) {
1096 hdev->req_result = err;
1097 hdev->req_status = HCI_REQ_CANCELED;
1098 wake_up_interruptible(&hdev->req_wait_q);
1099 }
1100 }
1101
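/* Take the event skb stashed in hdev->recv_evt and validate it: it must
 * either be the specific event that was asked for, or a Command Complete
 * event carrying the expected opcode. On any mismatch the skb is freed
 * and ERR_PTR(-ENODATA) is returned.
 */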
1102 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1103 u8 event)
1104 {
1105 struct hci_ev_cmd_complete *ev;
1106 struct hci_event_hdr *hdr;
1107 struct sk_buff *skb;
1108
1109 hci_dev_lock(hdev);
1110
1111 skb = hdev->recv_evt;
1112 hdev->recv_evt = NULL;
1113
1114 hci_dev_unlock(hdev);
1115
1116 if (!skb)
1117 return ERR_PTR(-ENODATA);
1118
1119 if (skb->len < sizeof(*hdr)) {
1120 BT_ERR("Too short HCI event");
1121 goto failed;
1122 }
1123
1124 hdr = (void *) skb->data;
1125 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1126
1127 if (event) {
1128 if (hdr->evt != event)
1129 goto failed;
1130 return skb;
1131 }
1132
1133 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1134 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1135 goto failed;
1136 }
1137
1138 if (skb->len < sizeof(*ev)) {
1139 BT_ERR("Too short cmd_complete event");
1140 goto failed;
1141 }
1142
1143 ev = (void *) skb->data;
1144 skb_pull(skb, sizeof(*ev));
1145
1146 if (opcode == __le16_to_cpu(ev->opcode))
1147 return skb;
1148
1149 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1150 __le16_to_cpu(ev->opcode));
1151
1152 failed:
1153 kfree_skb(skb);
1154 return ERR_PTR(-ENODATA);
1155 }
1156
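/* Send a single HCI command and sleep (interruptibly, up to @timeout)
 * until hci_req_sync_complete() reports a result, then return the
 * matching event skb via hci_get_cmd_complete(). Callers in this file
 * serialize these requests by taking hci_req_lock() around the call.
 */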
1157 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1158 const void *param, u8 event, u32 timeout)
1159 {
1160 DECLARE_WAITQUEUE(wait, current);
1161 struct hci_request req;
1162 int err = 0;
1163
1164 BT_DBG("%s", hdev->name);
1165
1166 hci_req_init(&req, hdev);
1167
1168 hci_req_add_ev(&req, opcode, plen, param, event);
1169
1170 hdev->req_status = HCI_REQ_PEND;
1171
1172 err = hci_req_run(&req, hci_req_sync_complete);
1173 if (err < 0)
1174 return ERR_PTR(err);
1175
1176 add_wait_queue(&hdev->req_wait_q, &wait);
1177 set_current_state(TASK_INTERRUPTIBLE);
1178
1179 schedule_timeout(timeout);
1180
1181 remove_wait_queue(&hdev->req_wait_q, &wait);
1182
1183 if (signal_pending(current))
1184 return ERR_PTR(-EINTR);
1185
1186 switch (hdev->req_status) {
1187 case HCI_REQ_DONE:
1188 err = -bt_to_errno(hdev->req_result);
1189 break;
1190
1191 case HCI_REQ_CANCELED:
1192 err = -hdev->req_result;
1193 break;
1194
1195 default:
1196 err = -ETIMEDOUT;
1197 break;
1198 }
1199
1200 hdev->req_status = hdev->req_result = 0;
1201
1202 BT_DBG("%s end: err %d", hdev->name, err);
1203
1204 if (err < 0)
1205 return ERR_PTR(err);
1206
1207 return hci_get_cmd_complete(hdev, opcode, event);
1208 }
1209 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1210
1211 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1212 const void *param, u32 timeout)
1213 {
1214 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1215 }
1216 EXPORT_SYMBOL(__hci_cmd_sync);
1217
1218 /* Execute request and wait for completion. */
1219 static int __hci_req_sync(struct hci_dev *hdev,
1220 void (*func)(struct hci_request *req,
1221 unsigned long opt),
1222 unsigned long opt, __u32 timeout)
1223 {
1224 struct hci_request req;
1225 DECLARE_WAITQUEUE(wait, current);
1226 int err = 0;
1227
1228 BT_DBG("%s start", hdev->name);
1229
1230 hci_req_init(&req, hdev);
1231
1232 hdev->req_status = HCI_REQ_PEND;
1233
1234 func(&req, opt);
1235
1236 err = hci_req_run(&req, hci_req_sync_complete);
1237 if (err < 0) {
1238 hdev->req_status = 0;
1239
1240 /* ENODATA means the HCI request command queue is empty.
1241 * This can happen when a request with conditionals doesn't
1242 * trigger any commands to be sent. This is normal behavior
1243 * and should not trigger an error return.
1244 */
1245 if (err == -ENODATA)
1246 return 0;
1247
1248 return err;
1249 }
1250
1251 add_wait_queue(&hdev->req_wait_q, &wait);
1252 set_current_state(TASK_INTERRUPTIBLE);
1253
1254 schedule_timeout(timeout);
1255
1256 remove_wait_queue(&hdev->req_wait_q, &wait);
1257
1258 if (signal_pending(current))
1259 return -EINTR;
1260
1261 switch (hdev->req_status) {
1262 case HCI_REQ_DONE:
1263 err = -bt_to_errno(hdev->req_result);
1264 break;
1265
1266 case HCI_REQ_CANCELED:
1267 err = -hdev->req_result;
1268 break;
1269
1270 default:
1271 err = -ETIMEDOUT;
1272 break;
1273 }
1274
1275 hdev->req_status = hdev->req_result = 0;
1276
1277 BT_DBG("%s end: err %d", hdev->name, err);
1278
1279 return err;
1280 }
1281
1282 static int hci_req_sync(struct hci_dev *hdev,
1283 void (*req)(struct hci_request *req,
1284 unsigned long opt),
1285 unsigned long opt, __u32 timeout)
1286 {
1287 int ret;
1288
1289 if (!test_bit(HCI_UP, &hdev->flags))
1290 return -ENETDOWN;
1291
1292 /* Serialize all requests */
1293 hci_req_lock(hdev);
1294 ret = __hci_req_sync(hdev, req, opt, timeout);
1295 hci_req_unlock(hdev);
1296
1297 return ret;
1298 }
1299
1300 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1301 {
1302 BT_DBG("%s %ld", req->hdev->name, opt);
1303
1304 /* Reset device */
1305 set_bit(HCI_RESET, &req->hdev->flags);
1306 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1307 }
1308
1309 static void bredr_init(struct hci_request *req)
1310 {
1311 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1312
1313 /* Read Local Supported Features */
1314 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1315
1316 /* Read Local Version */
1317 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1318
1319 /* Read BD Address */
1320 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1321 }
1322
1323 static void amp_init(struct hci_request *req)
1324 {
1325 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1326
1327 /* Read Local Version */
1328 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1329
1330 /* Read Local Supported Commands */
1331 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1332
1333 /* Read Local Supported Features */
1334 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1335
1336 /* Read Local AMP Info */
1337 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1338
1339 /* Read Data Blk size */
1340 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1341
1342 /* Read Flow Control Mode */
1343 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1344
1345 /* Read Location Data */
1346 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1347 }
1348
1349 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1350 {
1351 struct hci_dev *hdev = req->hdev;
1352
1353 BT_DBG("%s %ld", hdev->name, opt);
1354
1355 /* Reset */
1356 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1357 hci_reset_req(req, 0);
1358
1359 switch (hdev->dev_type) {
1360 case HCI_BREDR:
1361 bredr_init(req);
1362 break;
1363
1364 case HCI_AMP:
1365 amp_init(req);
1366 break;
1367
1368 default:
1369 BT_ERR("Unknown device type %d", hdev->dev_type);
1370 break;
1371 }
1372 }
1373
1374 static void bredr_setup(struct hci_request *req)
1375 {
1376 struct hci_dev *hdev = req->hdev;
1377
1378 __le16 param;
1379 __u8 flt_type;
1380
1381 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1382 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1383
1384 /* Read Class of Device */
1385 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1386
1387 /* Read Local Name */
1388 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1389
1390 /* Read Voice Setting */
1391 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1392
1393 /* Read Number of Supported IAC */
1394 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1395
1396 /* Read Current IAC LAP */
1397 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1398
1399 /* Clear Event Filters */
1400 flt_type = HCI_FLT_CLEAR_ALL;
1401 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1402
1403 /* Connection accept timeout ~20 secs */
1404 param = cpu_to_le16(0x7d00);
1405 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
1406
1407 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1408 * but it does not support page scan related HCI commands.
1409 */
1410 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1411 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1412 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1413 }
1414 }
1415
1416 static void le_setup(struct hci_request *req)
1417 {
1418 struct hci_dev *hdev = req->hdev;
1419
1420 /* Read LE Buffer Size */
1421 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1422
1423 /* Read LE Local Supported Features */
1424 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1425
1426 /* Read LE Supported States */
1427 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1428
1429 /* Read LE Advertising Channel TX Power */
1430 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1431
1432 /* Read LE White List Size */
1433 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1434
1435 /* Clear LE White List */
1436 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1437
1438 /* LE-only controllers have LE implicitly enabled */
1439 if (!lmp_bredr_capable(hdev))
1440 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1441 }
1442
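/* Pick the inquiry mode to configure: 0x02 = inquiry with extended
 * results, 0x01 = inquiry results with RSSI, 0x00 = standard results.
 * The hard-coded manufacturer/revision checks cover controllers that
 * appear to handle RSSI results without advertising the feature bit.
 */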
1443 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1444 {
1445 if (lmp_ext_inq_capable(hdev))
1446 return 0x02;
1447
1448 if (lmp_inq_rssi_capable(hdev))
1449 return 0x01;
1450
1451 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1452 hdev->lmp_subver == 0x0757)
1453 return 0x01;
1454
1455 if (hdev->manufacturer == 15) {
1456 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1457 return 0x01;
1458 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1459 return 0x01;
1460 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1461 return 0x01;
1462 }
1463
1464 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1465 hdev->lmp_subver == 0x1805)
1466 return 0x01;
1467
1468 return 0x00;
1469 }
1470
1471 static void hci_setup_inquiry_mode(struct hci_request *req)
1472 {
1473 u8 mode;
1474
1475 mode = hci_get_inquiry_mode(req->hdev);
1476
1477 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1478 }
1479
1480 static void hci_setup_event_mask(struct hci_request *req)
1481 {
1482 struct hci_dev *hdev = req->hdev;
1483
1484 /* The second byte is 0xff instead of 0x9f (two reserved bits
1485 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1486 * command otherwise.
1487 */
1488 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1489
1490 * CSR 1.1 dongles do not accept any bitfield so don't try to set
1491 * any event mask for pre 1.2 devices.
1492 */
1493 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1494 return;
1495
1496 if (lmp_bredr_capable(hdev)) {
1497 events[4] |= 0x01; /* Flow Specification Complete */
1498 events[4] |= 0x02; /* Inquiry Result with RSSI */
1499 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1500 events[5] |= 0x08; /* Synchronous Connection Complete */
1501 events[5] |= 0x10; /* Synchronous Connection Changed */
1502 } else {
1503 /* Use a different default for LE-only devices */
1504 memset(events, 0, sizeof(events));
1505 events[0] |= 0x10; /* Disconnection Complete */
1506 events[0] |= 0x80; /* Encryption Change */
1507 events[1] |= 0x08; /* Read Remote Version Information Complete */
1508 events[1] |= 0x20; /* Command Complete */
1509 events[1] |= 0x40; /* Command Status */
1510 events[1] |= 0x80; /* Hardware Error */
1511 events[2] |= 0x04; /* Number of Completed Packets */
1512 events[3] |= 0x02; /* Data Buffer Overflow */
1513 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1514 }
1515
1516 if (lmp_inq_rssi_capable(hdev))
1517 events[4] |= 0x02; /* Inquiry Result with RSSI */
1518
1519 if (lmp_sniffsubr_capable(hdev))
1520 events[5] |= 0x20; /* Sniff Subrating */
1521
1522 if (lmp_pause_enc_capable(hdev))
1523 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1524
1525 if (lmp_ext_inq_capable(hdev))
1526 events[5] |= 0x40; /* Extended Inquiry Result */
1527
1528 if (lmp_no_flush_capable(hdev))
1529 events[7] |= 0x01; /* Enhanced Flush Complete */
1530
1531 if (lmp_lsto_capable(hdev))
1532 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1533
1534 if (lmp_ssp_capable(hdev)) {
1535 events[6] |= 0x01; /* IO Capability Request */
1536 events[6] |= 0x02; /* IO Capability Response */
1537 events[6] |= 0x04; /* User Confirmation Request */
1538 events[6] |= 0x08; /* User Passkey Request */
1539 events[6] |= 0x10; /* Remote OOB Data Request */
1540 events[6] |= 0x20; /* Simple Pairing Complete */
1541 events[7] |= 0x04; /* User Passkey Notification */
1542 events[7] |= 0x08; /* Keypress Notification */
1543 events[7] |= 0x10; /* Remote Host Supported
1544 * Features Notification
1545 */
1546 }
1547
1548 if (lmp_le_capable(hdev))
1549 events[7] |= 0x20; /* LE Meta-Event */
1550
1551 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1552
1553 if (lmp_le_capable(hdev)) {
1554 memset(events, 0, sizeof(events));
1555 events[0] = 0x1f;
1556 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1557 sizeof(events), events);
1558 }
1559 }
1560
1561 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1562 {
1563 struct hci_dev *hdev = req->hdev;
1564
1565 if (lmp_bredr_capable(hdev))
1566 bredr_setup(req);
1567 else
1568 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1569
1570 if (lmp_le_capable(hdev))
1571 le_setup(req);
1572
1573 hci_setup_event_mask(req);
1574
1575 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1576 * local supported commands HCI command.
1577 */
1578 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1579 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1580
1581 if (lmp_ssp_capable(hdev)) {
1582 /* When SSP is available, then the host features page
1583 * should also be available as well. However some
1584 * controllers list the max_page as 0 as long as SSP
1585 * has not been enabled. To achieve proper debugging
1586 * output, force the minimum max_page to 1 at least.
1587 */
1588 hdev->max_page = 0x01;
1589
1590 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1591 u8 mode = 0x01;
1592 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1593 sizeof(mode), &mode);
1594 } else {
1595 struct hci_cp_write_eir cp;
1596
1597 memset(hdev->eir, 0, sizeof(hdev->eir));
1598 memset(&cp, 0, sizeof(cp));
1599
1600 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1601 }
1602 }
1603
1604 if (lmp_inq_rssi_capable(hdev))
1605 hci_setup_inquiry_mode(req);
1606
1607 if (lmp_inq_tx_pwr_capable(hdev))
1608 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1609
1610 if (lmp_ext_feat_capable(hdev)) {
1611 struct hci_cp_read_local_ext_features cp;
1612
1613 cp.page = 0x01;
1614 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1615 sizeof(cp), &cp);
1616 }
1617
1618 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1619 u8 enable = 1;
1620 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1621 &enable);
1622 }
1623 }
1624
1625 static void hci_setup_link_policy(struct hci_request *req)
1626 {
1627 struct hci_dev *hdev = req->hdev;
1628 struct hci_cp_write_def_link_policy cp;
1629 u16 link_policy = 0;
1630
1631 if (lmp_rswitch_capable(hdev))
1632 link_policy |= HCI_LP_RSWITCH;
1633 if (lmp_hold_capable(hdev))
1634 link_policy |= HCI_LP_HOLD;
1635 if (lmp_sniff_capable(hdev))
1636 link_policy |= HCI_LP_SNIFF;
1637 if (lmp_park_capable(hdev))
1638 link_policy |= HCI_LP_PARK;
1639
1640 cp.policy = cpu_to_le16(link_policy);
1641 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1642 }
1643
1644 static void hci_set_le_support(struct hci_request *req)
1645 {
1646 struct hci_dev *hdev = req->hdev;
1647 struct hci_cp_write_le_host_supported cp;
1648
1649 /* LE-only devices do not support explicit enablement */
1650 if (!lmp_bredr_capable(hdev))
1651 return;
1652
1653 memset(&cp, 0, sizeof(cp));
1654
1655 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1656 cp.le = 0x01;
1657 cp.simul = lmp_le_br_capable(hdev);
1658 }
1659
1660 if (cp.le != lmp_host_le_capable(hdev))
1661 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1662 &cp);
1663 }
1664
1665 static void hci_set_event_mask_page_2(struct hci_request *req)
1666 {
1667 struct hci_dev *hdev = req->hdev;
1668 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1669
1670 /* If Connectionless Slave Broadcast master role is supported
1671 * enable all necessary events for it.
1672 */
1673 if (lmp_csb_master_capable(hdev)) {
1674 events[1] |= 0x40; /* Triggered Clock Capture */
1675 events[1] |= 0x80; /* Synchronization Train Complete */
1676 events[2] |= 0x10; /* Slave Page Response Timeout */
1677 events[2] |= 0x20; /* CSB Channel Map Change */
1678 }
1679
1680 /* If Connectionless Slave Broadcast slave role is supported
1681 * enable all necessary events for it.
1682 */
1683 if (lmp_csb_slave_capable(hdev)) {
1684 events[2] |= 0x01; /* Synchronization Train Received */
1685 events[2] |= 0x02; /* CSB Receive */
1686 events[2] |= 0x04; /* CSB Timeout */
1687 events[2] |= 0x08; /* Truncated Page Complete */
1688 }
1689
1690 /* Enable Authenticated Payload Timeout Expired event if supported */
1691 if (lmp_ping_capable(hdev))
1692 events[2] |= 0x80;
1693
1694 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1695 }
1696
1697 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1698 {
1699 struct hci_dev *hdev = req->hdev;
1700 u8 p;
1701
1702 /* Some Broadcom based Bluetooth controllers do not support the
1703 * Delete Stored Link Key command. They are clearly indicating its
1704 * absence in the bit mask of supported commands.
1705 *
1706 * Check the supported commands and only if the command is marked
1707 * as supported send it. If not supported assume that the controller
1708 * does not have actual support for stored link keys which makes this
1709 * command redundant anyway.
1710 *
1711 * Some controllers indicate that they support handling deleting
1712 * stored link keys, but they don't. The quirk lets a driver
1713 * just disable this command.
1714 */
1715 if (hdev->commands[6] & 0x80 &&
1716 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1717 struct hci_cp_delete_stored_link_key cp;
1718
1719 bacpy(&cp.bdaddr, BDADDR_ANY);
1720 cp.delete_all = 0x01;
1721 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1722 sizeof(cp), &cp);
1723 }
1724
1725 if (hdev->commands[5] & 0x10)
1726 hci_setup_link_policy(req);
1727
1728 if (lmp_le_capable(hdev))
1729 hci_set_le_support(req);
1730
1731 /* Read features beyond page 1 if available */
1732 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1733 struct hci_cp_read_local_ext_features cp;
1734
1735 cp.page = p;
1736 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1737 sizeof(cp), &cp);
1738 }
1739 }
1740
1741 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1742 {
1743 struct hci_dev *hdev = req->hdev;
1744
1745 /* Set event mask page 2 if the HCI command for it is supported */
1746 if (hdev->commands[22] & 0x04)
1747 hci_set_event_mask_page_2(req);
1748
1749 /* Check for Synchronization Train support */
1750 if (lmp_sync_train_capable(hdev))
1751 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1752
1753 /* Enable Secure Connections if supported and configured */
1754 if ((lmp_sc_capable(hdev) ||
1755 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
1756 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1757 u8 support = 0x01;
1758 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1759 sizeof(support), &support);
1760 }
1761 }
1762
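/* Staged controller bring-up: init1 runs for every controller type,
 * while init2-init4 are only run for BR/EDR/LE controllers (AMP
 * controllers stop after the first stage). Debugfs entries are only
 * created while HCI_SETUP is set, i.e. during the initial setup phase.
 */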
1763 static int __hci_init(struct hci_dev *hdev)
1764 {
1765 int err;
1766
1767 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1768 if (err < 0)
1769 return err;
1770
1771 /* The Device Under Test (DUT) mode is special and available for
1772 * all controller types. So just create it early on.
1773 */
1774 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1775 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1776 &dut_mode_fops);
1777 }
1778
1779 /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and dual-mode
1780 * BR/EDR/LE controllers. AMP controllers only need the
1781 * first stage init.
1782 */
1783 if (hdev->dev_type != HCI_BREDR)
1784 return 0;
1785
1786 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1787 if (err < 0)
1788 return err;
1789
1790 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1791 if (err < 0)
1792 return err;
1793
1794 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1795 if (err < 0)
1796 return err;
1797
1798 /* Only create debugfs entries during the initial setup
1799 * phase and not every time the controller gets powered on.
1800 */
1801 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1802 return 0;
1803
1804 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1805 &features_fops);
1806 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1807 &hdev->manufacturer);
1808 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1809 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1810 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1811 &blacklist_fops);
1812 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1813
1814 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1815 &conn_info_min_age_fops);
1816 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1817 &conn_info_max_age_fops);
1818
1819 if (lmp_bredr_capable(hdev)) {
1820 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1821 hdev, &inquiry_cache_fops);
1822 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1823 hdev, &link_keys_fops);
1824 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1825 hdev, &dev_class_fops);
1826 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1827 hdev, &voice_setting_fops);
1828 }
1829
1830 if (lmp_ssp_capable(hdev)) {
1831 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1832 hdev, &auto_accept_delay_fops);
1833 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1834 hdev, &ssp_debug_mode_fops);
1835 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1836 hdev, &force_sc_support_fops);
1837 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1838 hdev, &sc_only_mode_fops);
1839 }
1840
1841 if (lmp_sniff_capable(hdev)) {
1842 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1843 hdev, &idle_timeout_fops);
1844 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1845 hdev, &sniff_min_interval_fops);
1846 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1847 hdev, &sniff_max_interval_fops);
1848 }
1849
1850 if (lmp_le_capable(hdev)) {
1851 debugfs_create_file("identity", 0400, hdev->debugfs,
1852 hdev, &identity_fops);
1853 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1854 hdev, &rpa_timeout_fops);
1855 debugfs_create_file("random_address", 0444, hdev->debugfs,
1856 hdev, &random_address_fops);
1857 debugfs_create_file("static_address", 0444, hdev->debugfs,
1858 hdev, &static_address_fops);
1859
1860 /* For controllers with a public address, provide a debug
1861 * option to force the usage of the configured static
1862 * address. By default the public address is used.
1863 */
1864 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1865 debugfs_create_file("force_static_address", 0644,
1866 hdev->debugfs, hdev,
1867 &force_static_address_fops);
1868
1869 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1870 &hdev->le_white_list_size);
1871 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1872 &white_list_fops);
1873 debugfs_create_file("identity_resolving_keys", 0400,
1874 hdev->debugfs, hdev,
1875 &identity_resolving_keys_fops);
1876 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1877 hdev, &long_term_keys_fops);
1878 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1879 hdev, &conn_min_interval_fops);
1880 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1881 hdev, &conn_max_interval_fops);
1882 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1883 hdev, &adv_channel_map_fops);
1884 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1885 &lowpan_debugfs_fops);
1886 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1887 &le_auto_conn_fops);
1888 debugfs_create_u16("discov_interleaved_timeout", 0644,
1889 hdev->debugfs,
1890 &hdev->discov_interleaved_timeout);
1891 }
1892
1893 return 0;
1894 }
1895
1896 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1897 {
1898 __u8 scan = opt;
1899
1900 BT_DBG("%s %x", req->hdev->name, scan);
1901
1902 /* Inquiry and Page scans */
1903 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1904 }
1905
1906 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1907 {
1908 __u8 auth = opt;
1909
1910 BT_DBG("%s %x", req->hdev->name, auth);
1911
1912 /* Authentication */
1913 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1914 }
1915
1916 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1917 {
1918 __u8 encrypt = opt;
1919
1920 BT_DBG("%s %x", req->hdev->name, encrypt);
1921
1922 /* Encryption */
1923 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1924 }
1925
1926 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1927 {
1928 __le16 policy = cpu_to_le16(opt);
1929
1930 BT_DBG("%s %x", req->hdev->name, policy);
1931
1932 /* Default link policy */
1933 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1934 }
1935
1936 /* Get HCI device by index.
1937 * Device is held on return. */
1938 struct hci_dev *hci_dev_get(int index)
1939 {
1940 struct hci_dev *hdev = NULL, *d;
1941
1942 BT_DBG("%d", index);
1943
1944 if (index < 0)
1945 return NULL;
1946
1947 read_lock(&hci_dev_list_lock);
1948 list_for_each_entry(d, &hci_dev_list, list) {
1949 if (d->id == index) {
1950 hdev = hci_dev_hold(d);
1951 break;
1952 }
1953 }
1954 read_unlock(&hci_dev_list_lock);
1955 return hdev;
1956 }
1957
1958 /* ---- Inquiry support ---- */
1959
1960 bool hci_discovery_active(struct hci_dev *hdev)
1961 {
1962 struct discovery_state *discov = &hdev->discovery;
1963
1964 switch (discov->state) {
1965 case DISCOVERY_FINDING:
1966 case DISCOVERY_RESOLVING:
1967 return true;
1968
1969 default:
1970 return false;
1971 }
1972 }
1973
1974 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1975 {
1976 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1977
1978 if (hdev->discovery.state == state)
1979 return;
1980
1981 switch (state) {
1982 case DISCOVERY_STOPPED:
1983 hci_update_background_scan(hdev);
1984
1985 if (hdev->discovery.state != DISCOVERY_STARTING)
1986 mgmt_discovering(hdev, 0);
1987 break;
1988 case DISCOVERY_STARTING:
1989 break;
1990 case DISCOVERY_FINDING:
1991 mgmt_discovering(hdev, 1);
1992 break;
1993 case DISCOVERY_RESOLVING:
1994 break;
1995 case DISCOVERY_STOPPING:
1996 break;
1997 }
1998
1999 hdev->discovery.state = state;
2000 }
2001
2002 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2003 {
2004 struct discovery_state *cache = &hdev->discovery;
2005 struct inquiry_entry *p, *n;
2006
2007 list_for_each_entry_safe(p, n, &cache->all, all) {
2008 list_del(&p->all);
2009 kfree(p);
2010 }
2011
2012 INIT_LIST_HEAD(&cache->unknown);
2013 INIT_LIST_HEAD(&cache->resolve);
2014 }
2015
2016 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2017 bdaddr_t *bdaddr)
2018 {
2019 struct discovery_state *cache = &hdev->discovery;
2020 struct inquiry_entry *e;
2021
2022 BT_DBG("cache %p, %pMR", cache, bdaddr);
2023
2024 list_for_each_entry(e, &cache->all, all) {
2025 if (!bacmp(&e->data.bdaddr, bdaddr))
2026 return e;
2027 }
2028
2029 return NULL;
2030 }
2031
2032 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2033 bdaddr_t *bdaddr)
2034 {
2035 struct discovery_state *cache = &hdev->discovery;
2036 struct inquiry_entry *e;
2037
2038 BT_DBG("cache %p, %pMR", cache, bdaddr);
2039
2040 list_for_each_entry(e, &cache->unknown, list) {
2041 if (!bacmp(&e->data.bdaddr, bdaddr))
2042 return e;
2043 }
2044
2045 return NULL;
2046 }
2047
2048 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2049 bdaddr_t *bdaddr,
2050 int state)
2051 {
2052 struct discovery_state *cache = &hdev->discovery;
2053 struct inquiry_entry *e;
2054
2055 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2056
2057 list_for_each_entry(e, &cache->resolve, list) {
2058 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2059 return e;
2060 if (!bacmp(&e->data.bdaddr, bdaddr))
2061 return e;
2062 }
2063
2064 return NULL;
2065 }
2066
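/* Re-insert @ie into the resolve list so that entries waiting for name
 * resolution stay ordered by signal strength (smallest RSSI magnitude
 * first), skipping over entries whose resolution is already pending.
 */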
2067 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2068 struct inquiry_entry *ie)
2069 {
2070 struct discovery_state *cache = &hdev->discovery;
2071 struct list_head *pos = &cache->resolve;
2072 struct inquiry_entry *p;
2073
2074 list_del(&ie->list);
2075
2076 list_for_each_entry(p, &cache->resolve, list) {
2077 if (p->name_state != NAME_PENDING &&
2078 abs(p->data.rssi) >= abs(ie->data.rssi))
2079 break;
2080 pos = &p->list;
2081 }
2082
2083 list_add(&ie->list, pos);
2084 }
2085
2086 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2087 bool name_known, bool *ssp)
2088 {
2089 struct discovery_state *cache = &hdev->discovery;
2090 struct inquiry_entry *ie;
2091
2092 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2093
2094 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2095
2096 *ssp = data->ssp_mode;
2097
2098 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2099 if (ie) {
2100 if (ie->data.ssp_mode)
2101 *ssp = true;
2102
2103 if (ie->name_state == NAME_NEEDED &&
2104 data->rssi != ie->data.rssi) {
2105 ie->data.rssi = data->rssi;
2106 hci_inquiry_cache_update_resolve(hdev, ie);
2107 }
2108
2109 goto update;
2110 }
2111
2112 /* Entry not in the cache. Add new one. */
2113 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2114 if (!ie)
2115 return false;
2116
2117 list_add(&ie->all, &cache->all);
2118
2119 if (name_known) {
2120 ie->name_state = NAME_KNOWN;
2121 } else {
2122 ie->name_state = NAME_NOT_KNOWN;
2123 list_add(&ie->list, &cache->unknown);
2124 }
2125
2126 update:
2127 if (name_known && ie->name_state != NAME_KNOWN &&
2128 ie->name_state != NAME_PENDING) {
2129 ie->name_state = NAME_KNOWN;
2130 list_del(&ie->list);
2131 }
2132
2133 memcpy(&ie->data, data, sizeof(*data));
2134 ie->timestamp = jiffies;
2135 cache->timestamp = jiffies;
2136
2137 if (ie->name_state == NAME_NOT_KNOWN)
2138 return false;
2139
2140 return true;
2141 }
2142
2143 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2144 {
2145 struct discovery_state *cache = &hdev->discovery;
2146 struct inquiry_info *info = (struct inquiry_info *) buf;
2147 struct inquiry_entry *e;
2148 int copied = 0;
2149
2150 list_for_each_entry(e, &cache->all, all) {
2151 struct inquiry_data *data = &e->data;
2152
2153 if (copied >= num)
2154 break;
2155
2156 bacpy(&info->bdaddr, &data->bdaddr);
2157 info->pscan_rep_mode = data->pscan_rep_mode;
2158 info->pscan_period_mode = data->pscan_period_mode;
2159 info->pscan_mode = data->pscan_mode;
2160 memcpy(info->dev_class, data->dev_class, 3);
2161 info->clock_offset = data->clock_offset;
2162
2163 info++;
2164 copied++;
2165 }
2166
2167 BT_DBG("cache %p, copied %d", cache, copied);
2168 return copied;
2169 }
2170
2171 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2172 {
2173 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2174 struct hci_dev *hdev = req->hdev;
2175 struct hci_cp_inquiry cp;
2176
2177 BT_DBG("%s", hdev->name);
2178
2179 if (test_bit(HCI_INQUIRY, &hdev->flags))
2180 return;
2181
2182 /* Start Inquiry */
2183 memcpy(&cp.lap, &ir->lap, 3);
2184 cp.length = ir->length;
2185 cp.num_rsp = ir->num_rsp;
2186 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2187 }
2188
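/* wait_on_bit() action function: reschedule and report whether a signal
 * is pending, so the HCI_INQUIRY wait in hci_inquiry() below can be
 * interrupted by a signal.
 */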
2189 static int wait_inquiry(void *word)
2190 {
2191 schedule();
2192 return signal_pending(current);
2193 }
2194
2195 int hci_inquiry(void __user *arg)
2196 {
2197 __u8 __user *ptr = arg;
2198 struct hci_inquiry_req ir;
2199 struct hci_dev *hdev;
2200 int err = 0, do_inquiry = 0, max_rsp;
2201 long timeo;
2202 __u8 *buf;
2203
2204 if (copy_from_user(&ir, ptr, sizeof(ir)))
2205 return -EFAULT;
2206
2207 hdev = hci_dev_get(ir.dev_id);
2208 if (!hdev)
2209 return -ENODEV;
2210
2211 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2212 err = -EBUSY;
2213 goto done;
2214 }
2215
2216 if (hdev->dev_type != HCI_BREDR) {
2217 err = -EOPNOTSUPP;
2218 goto done;
2219 }
2220
2221 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2222 err = -EOPNOTSUPP;
2223 goto done;
2224 }
2225
2226 hci_dev_lock(hdev);
2227 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2228 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2229 hci_inquiry_cache_flush(hdev);
2230 do_inquiry = 1;
2231 }
2232 hci_dev_unlock(hdev);
2233
2234 timeo = ir.length * msecs_to_jiffies(2000);
2235
2236 if (do_inquiry) {
2237 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2238 timeo);
2239 if (err < 0)
2240 goto done;
2241
2242 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2243 * cleared). If it is interrupted by a signal, return -EINTR.
2244 */
2245 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2246 TASK_INTERRUPTIBLE))
2247 return -EINTR;
2248 }
2249
2250 	/* For an unlimited number of responses we use a buffer with
2251 	 * 255 entries
2252 */
2253 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2254
2255 	/* cache_dump can't sleep, so allocate a temporary buffer and then
2256 	 * copy it to user space.
2257 */
2258 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2259 if (!buf) {
2260 err = -ENOMEM;
2261 goto done;
2262 }
2263
2264 hci_dev_lock(hdev);
2265 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2266 hci_dev_unlock(hdev);
2267
2268 BT_DBG("num_rsp %d", ir.num_rsp);
2269
2270 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2271 ptr += sizeof(ir);
2272 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2273 ir.num_rsp))
2274 err = -EFAULT;
2275 } else
2276 err = -EFAULT;
2277
2278 kfree(buf);
2279
2280 done:
2281 hci_dev_put(hdev);
2282 return err;
2283 }
2284
2285 static int hci_dev_do_open(struct hci_dev *hdev)
2286 {
2287 int ret = 0;
2288
2289 BT_DBG("%s %p", hdev->name, hdev);
2290
2291 hci_req_lock(hdev);
2292
2293 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2294 ret = -ENODEV;
2295 goto done;
2296 }
2297
2298 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2299 /* Check for rfkill but allow the HCI setup stage to
2300 * proceed (which in itself doesn't cause any RF activity).
2301 */
2302 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2303 ret = -ERFKILL;
2304 goto done;
2305 }
2306
2307 /* Check for valid public address or a configured static
2308 		 * random address, but let the HCI setup proceed to
2309 * be able to determine if there is a public address
2310 * or not.
2311 *
2312 * In case of user channel usage, it is not important
2313 * if a public address or static random address is
2314 * available.
2315 *
2316 * This check is only valid for BR/EDR controllers
2317 * since AMP controllers do not have an address.
2318 */
2319 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2320 hdev->dev_type == HCI_BREDR &&
2321 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2322 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2323 ret = -EADDRNOTAVAIL;
2324 goto done;
2325 }
2326 }
2327
2328 if (test_bit(HCI_UP, &hdev->flags)) {
2329 ret = -EALREADY;
2330 goto done;
2331 }
2332
2333 if (hdev->open(hdev)) {
2334 ret = -EIO;
2335 goto done;
2336 }
2337
2338 atomic_set(&hdev->cmd_cnt, 1);
2339 set_bit(HCI_INIT, &hdev->flags);
2340
2341 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
2342 ret = hdev->setup(hdev);
2343
2344 if (!ret) {
2345 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
2346 set_bit(HCI_RAW, &hdev->flags);
2347
2348 if (!test_bit(HCI_RAW, &hdev->flags) &&
2349 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2350 ret = __hci_init(hdev);
2351 }
2352
2353 clear_bit(HCI_INIT, &hdev->flags);
2354
2355 if (!ret) {
2356 hci_dev_hold(hdev);
2357 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2358 set_bit(HCI_UP, &hdev->flags);
2359 hci_notify(hdev, HCI_DEV_UP);
2360 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2361 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2362 hdev->dev_type == HCI_BREDR) {
2363 hci_dev_lock(hdev);
2364 mgmt_powered(hdev, 1);
2365 hci_dev_unlock(hdev);
2366 }
2367 } else {
2368 /* Init failed, cleanup */
2369 flush_work(&hdev->tx_work);
2370 flush_work(&hdev->cmd_work);
2371 flush_work(&hdev->rx_work);
2372
2373 skb_queue_purge(&hdev->cmd_q);
2374 skb_queue_purge(&hdev->rx_q);
2375
2376 if (hdev->flush)
2377 hdev->flush(hdev);
2378
2379 if (hdev->sent_cmd) {
2380 kfree_skb(hdev->sent_cmd);
2381 hdev->sent_cmd = NULL;
2382 }
2383
2384 hdev->close(hdev);
2385 hdev->flags = 0;
2386 }
2387
2388 done:
2389 hci_req_unlock(hdev);
2390 return ret;
2391 }
2392
2393 /* ---- HCI ioctl helpers ---- */
2394
2395 int hci_dev_open(__u16 dev)
2396 {
2397 struct hci_dev *hdev;
2398 int err;
2399
2400 hdev = hci_dev_get(dev);
2401 if (!hdev)
2402 return -ENODEV;
2403
2404 /* We need to ensure that no other power on/off work is pending
2405 * before proceeding to call hci_dev_do_open. This is
2406 * particularly important if the setup procedure has not yet
2407 * completed.
2408 */
2409 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2410 cancel_delayed_work(&hdev->power_off);
2411
2412 /* After this call it is guaranteed that the setup procedure
2413 * has finished. This means that error conditions like RFKILL
2414 * or no valid public or static random address apply.
2415 */
2416 flush_workqueue(hdev->req_workqueue);
2417
2418 err = hci_dev_do_open(hdev);
2419
2420 hci_dev_put(hdev);
2421
2422 return err;
2423 }
2424
2425 static int hci_dev_do_close(struct hci_dev *hdev)
2426 {
2427 BT_DBG("%s %p", hdev->name, hdev);
2428
2429 cancel_delayed_work(&hdev->power_off);
2430
2431 hci_req_cancel(hdev, ENODEV);
2432 hci_req_lock(hdev);
2433
2434 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2435 cancel_delayed_work_sync(&hdev->cmd_timer);
2436 hci_req_unlock(hdev);
2437 return 0;
2438 }
2439
2440 /* Flush RX and TX works */
2441 flush_work(&hdev->tx_work);
2442 flush_work(&hdev->rx_work);
2443
2444 if (hdev->discov_timeout > 0) {
2445 cancel_delayed_work(&hdev->discov_off);
2446 hdev->discov_timeout = 0;
2447 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2448 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2449 }
2450
2451 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2452 cancel_delayed_work(&hdev->service_cache);
2453
2454 cancel_delayed_work_sync(&hdev->le_scan_disable);
2455
2456 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2457 cancel_delayed_work_sync(&hdev->rpa_expired);
2458
2459 hci_dev_lock(hdev);
2460 hci_inquiry_cache_flush(hdev);
2461 hci_conn_hash_flush(hdev);
2462 hci_pend_le_conns_clear(hdev);
2463 hci_dev_unlock(hdev);
2464
2465 hci_notify(hdev, HCI_DEV_DOWN);
2466
2467 if (hdev->flush)
2468 hdev->flush(hdev);
2469
2470 /* Reset device */
2471 skb_queue_purge(&hdev->cmd_q);
2472 atomic_set(&hdev->cmd_cnt, 1);
2473 if (!test_bit(HCI_RAW, &hdev->flags) &&
2474 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2475 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2476 set_bit(HCI_INIT, &hdev->flags);
2477 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2478 clear_bit(HCI_INIT, &hdev->flags);
2479 }
2480
2481 /* flush cmd work */
2482 flush_work(&hdev->cmd_work);
2483
2484 /* Drop queues */
2485 skb_queue_purge(&hdev->rx_q);
2486 skb_queue_purge(&hdev->cmd_q);
2487 skb_queue_purge(&hdev->raw_q);
2488
2489 /* Drop last sent command */
2490 if (hdev->sent_cmd) {
2491 cancel_delayed_work_sync(&hdev->cmd_timer);
2492 kfree_skb(hdev->sent_cmd);
2493 hdev->sent_cmd = NULL;
2494 }
2495
2496 kfree_skb(hdev->recv_evt);
2497 hdev->recv_evt = NULL;
2498
2499 /* After this point our queues are empty
2500 * and no tasks are scheduled. */
2501 hdev->close(hdev);
2502
2503 /* Clear flags */
2504 hdev->flags = 0;
2505 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2506
2507 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2508 if (hdev->dev_type == HCI_BREDR) {
2509 hci_dev_lock(hdev);
2510 mgmt_powered(hdev, 0);
2511 hci_dev_unlock(hdev);
2512 }
2513 }
2514
2515 /* Controller radio is available but is currently powered down */
2516 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2517
2518 memset(hdev->eir, 0, sizeof(hdev->eir));
2519 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2520 bacpy(&hdev->random_addr, BDADDR_ANY);
2521
2522 hci_req_unlock(hdev);
2523
2524 hci_dev_put(hdev);
2525 return 0;
2526 }
2527
2528 int hci_dev_close(__u16 dev)
2529 {
2530 struct hci_dev *hdev;
2531 int err;
2532
2533 hdev = hci_dev_get(dev);
2534 if (!hdev)
2535 return -ENODEV;
2536
2537 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2538 err = -EBUSY;
2539 goto done;
2540 }
2541
2542 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2543 cancel_delayed_work(&hdev->power_off);
2544
2545 err = hci_dev_do_close(hdev);
2546
2547 done:
2548 hci_dev_put(hdev);
2549 return err;
2550 }
2551
2552 int hci_dev_reset(__u16 dev)
2553 {
2554 struct hci_dev *hdev;
2555 int ret = 0;
2556
2557 hdev = hci_dev_get(dev);
2558 if (!hdev)
2559 return -ENODEV;
2560
2561 hci_req_lock(hdev);
2562
2563 if (!test_bit(HCI_UP, &hdev->flags)) {
2564 ret = -ENETDOWN;
2565 goto done;
2566 }
2567
2568 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2569 ret = -EBUSY;
2570 goto done;
2571 }
2572
2573 /* Drop queues */
2574 skb_queue_purge(&hdev->rx_q);
2575 skb_queue_purge(&hdev->cmd_q);
2576
2577 hci_dev_lock(hdev);
2578 hci_inquiry_cache_flush(hdev);
2579 hci_conn_hash_flush(hdev);
2580 hci_dev_unlock(hdev);
2581
2582 if (hdev->flush)
2583 hdev->flush(hdev);
2584
2585 atomic_set(&hdev->cmd_cnt, 1);
2586 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2587
2588 if (!test_bit(HCI_RAW, &hdev->flags))
2589 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2590
2591 done:
2592 hci_req_unlock(hdev);
2593 hci_dev_put(hdev);
2594 return ret;
2595 }
2596
2597 int hci_dev_reset_stat(__u16 dev)
2598 {
2599 struct hci_dev *hdev;
2600 int ret = 0;
2601
2602 hdev = hci_dev_get(dev);
2603 if (!hdev)
2604 return -ENODEV;
2605
2606 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2607 ret = -EBUSY;
2608 goto done;
2609 }
2610
2611 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2612
2613 done:
2614 hci_dev_put(hdev);
2615 return ret;
2616 }
2617
2618 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2619 {
2620 struct hci_dev *hdev;
2621 struct hci_dev_req dr;
2622 int err = 0;
2623
2624 if (copy_from_user(&dr, arg, sizeof(dr)))
2625 return -EFAULT;
2626
2627 hdev = hci_dev_get(dr.dev_id);
2628 if (!hdev)
2629 return -ENODEV;
2630
2631 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2632 err = -EBUSY;
2633 goto done;
2634 }
2635
2636 if (hdev->dev_type != HCI_BREDR) {
2637 err = -EOPNOTSUPP;
2638 goto done;
2639 }
2640
2641 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2642 err = -EOPNOTSUPP;
2643 goto done;
2644 }
2645
2646 switch (cmd) {
2647 case HCISETAUTH:
2648 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2649 HCI_INIT_TIMEOUT);
2650 break;
2651
2652 case HCISETENCRYPT:
2653 if (!lmp_encrypt_capable(hdev)) {
2654 err = -EOPNOTSUPP;
2655 break;
2656 }
2657
2658 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2659 /* Auth must be enabled first */
2660 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2661 HCI_INIT_TIMEOUT);
2662 if (err)
2663 break;
2664 }
2665
2666 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2667 HCI_INIT_TIMEOUT);
2668 break;
2669
2670 case HCISETSCAN:
2671 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2672 HCI_INIT_TIMEOUT);
2673 break;
2674
2675 case HCISETLINKPOL:
2676 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2677 HCI_INIT_TIMEOUT);
2678 break;
2679
2680 case HCISETLINKMODE:
2681 hdev->link_mode = ((__u16) dr.dev_opt) &
2682 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2683 break;
2684
2685 case HCISETPTYPE:
2686 hdev->pkt_type = (__u16) dr.dev_opt;
2687 break;
2688
2689 case HCISETACLMTU:
2690 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2691 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2692 break;
2693
2694 case HCISETSCOMTU:
2695 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2696 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2697 break;
2698
2699 default:
2700 err = -EINVAL;
2701 break;
2702 }
2703
2704 done:
2705 hci_dev_put(hdev);
2706 return err;
2707 }
2708
2709 int hci_get_dev_list(void __user *arg)
2710 {
2711 struct hci_dev *hdev;
2712 struct hci_dev_list_req *dl;
2713 struct hci_dev_req *dr;
2714 int n = 0, size, err;
2715 __u16 dev_num;
2716
2717 if (get_user(dev_num, (__u16 __user *) arg))
2718 return -EFAULT;
2719
2720 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2721 return -EINVAL;
2722
2723 size = sizeof(*dl) + dev_num * sizeof(*dr);
2724
2725 dl = kzalloc(size, GFP_KERNEL);
2726 if (!dl)
2727 return -ENOMEM;
2728
2729 dr = dl->dev_req;
2730
2731 read_lock(&hci_dev_list_lock);
2732 list_for_each_entry(hdev, &hci_dev_list, list) {
2733 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2734 cancel_delayed_work(&hdev->power_off);
2735
2736 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2737 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2738
2739 (dr + n)->dev_id = hdev->id;
2740 (dr + n)->dev_opt = hdev->flags;
2741
2742 if (++n >= dev_num)
2743 break;
2744 }
2745 read_unlock(&hci_dev_list_lock);
2746
2747 dl->dev_num = n;
2748 size = sizeof(*dl) + n * sizeof(*dr);
2749
2750 err = copy_to_user(arg, dl, size);
2751 kfree(dl);
2752
2753 return err ? -EFAULT : 0;
2754 }
2755
2756 int hci_get_dev_info(void __user *arg)
2757 {
2758 struct hci_dev *hdev;
2759 struct hci_dev_info di;
2760 int err = 0;
2761
2762 if (copy_from_user(&di, arg, sizeof(di)))
2763 return -EFAULT;
2764
2765 hdev = hci_dev_get(di.dev_id);
2766 if (!hdev)
2767 return -ENODEV;
2768
2769 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2770 cancel_delayed_work_sync(&hdev->power_off);
2771
2772 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2773 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2774
2775 strcpy(di.name, hdev->name);
2776 di.bdaddr = hdev->bdaddr;
2777 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2778 di.flags = hdev->flags;
2779 di.pkt_type = hdev->pkt_type;
2780 if (lmp_bredr_capable(hdev)) {
2781 di.acl_mtu = hdev->acl_mtu;
2782 di.acl_pkts = hdev->acl_pkts;
2783 di.sco_mtu = hdev->sco_mtu;
2784 di.sco_pkts = hdev->sco_pkts;
2785 } else {
2786 di.acl_mtu = hdev->le_mtu;
2787 di.acl_pkts = hdev->le_pkts;
2788 di.sco_mtu = 0;
2789 di.sco_pkts = 0;
2790 }
2791 di.link_policy = hdev->link_policy;
2792 di.link_mode = hdev->link_mode;
2793
2794 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2795 memcpy(&di.features, &hdev->features, sizeof(di.features));
2796
2797 if (copy_to_user(arg, &di, sizeof(di)))
2798 err = -EFAULT;
2799
2800 hci_dev_put(hdev);
2801
2802 return err;
2803 }
2804
2805 /* ---- Interface to HCI drivers ---- */
2806
2807 static int hci_rfkill_set_block(void *data, bool blocked)
2808 {
2809 struct hci_dev *hdev = data;
2810
2811 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2812
2813 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2814 return -EBUSY;
2815
2816 if (blocked) {
2817 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2818 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2819 hci_dev_do_close(hdev);
2820 } else {
2821 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2822 }
2823
2824 return 0;
2825 }
2826
2827 static const struct rfkill_ops hci_rfkill_ops = {
2828 .set_block = hci_rfkill_set_block,
2829 };
2830
2831 static void hci_power_on(struct work_struct *work)
2832 {
2833 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2834 int err;
2835
2836 BT_DBG("%s", hdev->name);
2837
2838 err = hci_dev_do_open(hdev);
2839 if (err < 0) {
2840 mgmt_set_powered_failed(hdev, err);
2841 return;
2842 }
2843
2844 /* During the HCI setup phase, a few error conditions are
2845 * ignored and they need to be checked now. If they are still
2846 * valid, it is important to turn the device back off.
2847 */
2848 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2849 (hdev->dev_type == HCI_BREDR &&
2850 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2851 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2852 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2853 hci_dev_do_close(hdev);
2854 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2855 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2856 HCI_AUTO_OFF_TIMEOUT);
2857 }
2858
2859 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
2860 mgmt_index_added(hdev);
2861 }
2862
2863 static void hci_power_off(struct work_struct *work)
2864 {
2865 struct hci_dev *hdev = container_of(work, struct hci_dev,
2866 power_off.work);
2867
2868 BT_DBG("%s", hdev->name);
2869
2870 hci_dev_do_close(hdev);
2871 }
2872
2873 static void hci_discov_off(struct work_struct *work)
2874 {
2875 struct hci_dev *hdev;
2876
2877 hdev = container_of(work, struct hci_dev, discov_off.work);
2878
2879 BT_DBG("%s", hdev->name);
2880
2881 mgmt_discoverable_timeout(hdev);
2882 }
2883
2884 void hci_uuids_clear(struct hci_dev *hdev)
2885 {
2886 struct bt_uuid *uuid, *tmp;
2887
2888 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2889 list_del(&uuid->list);
2890 kfree(uuid);
2891 }
2892 }
2893
2894 void hci_link_keys_clear(struct hci_dev *hdev)
2895 {
2896 struct list_head *p, *n;
2897
2898 list_for_each_safe(p, n, &hdev->link_keys) {
2899 struct link_key *key;
2900
2901 key = list_entry(p, struct link_key, list);
2902
2903 list_del(p);
2904 kfree(key);
2905 }
2906 }
2907
2908 void hci_smp_ltks_clear(struct hci_dev *hdev)
2909 {
2910 struct smp_ltk *k, *tmp;
2911
2912 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2913 list_del(&k->list);
2914 kfree(k);
2915 }
2916 }
2917
2918 void hci_smp_irks_clear(struct hci_dev *hdev)
2919 {
2920 struct smp_irk *k, *tmp;
2921
2922 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2923 list_del(&k->list);
2924 kfree(k);
2925 }
2926 }
2927
2928 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2929 {
2930 struct link_key *k;
2931
2932 list_for_each_entry(k, &hdev->link_keys, list)
2933 if (bacmp(bdaddr, &k->bdaddr) == 0)
2934 return k;
2935
2936 return NULL;
2937 }
2938
2939 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2940 u8 key_type, u8 old_key_type)
2941 {
2942 /* Legacy key */
2943 if (key_type < 0x03)
2944 return true;
2945
2946 /* Debug keys are insecure so don't store them persistently */
2947 if (key_type == HCI_LK_DEBUG_COMBINATION)
2948 return false;
2949
2950 /* Changed combination key and there's no previous one */
2951 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2952 return false;
2953
2954 /* Security mode 3 case */
2955 if (!conn)
2956 return true;
2957
2958 	/* Neither the local nor the remote side had no-bonding as a requirement */
2959 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2960 return true;
2961
2962 /* Local side had dedicated bonding as requirement */
2963 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2964 return true;
2965
2966 /* Remote side had dedicated bonding as requirement */
2967 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2968 return true;
2969
2970 /* If none of the above criteria match, then don't store the key
2971 * persistently */
2972 return false;
2973 }
2974
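/* STK and LTK values come in master (initiator) and slave (responder)
 * variants; the helpers below treat HCI_SMP_STK and HCI_SMP_LTK as the
 * master types so lookups can be restricted to one role.
 */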
2975 static bool ltk_type_master(u8 type)
2976 {
2977 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2978 return true;
2979
2980 return false;
2981 }
2982
2983 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2984 bool master)
2985 {
2986 struct smp_ltk *k;
2987
2988 list_for_each_entry(k, &hdev->long_term_keys, list) {
2989 if (k->ediv != ediv || k->rand != rand)
2990 continue;
2991
2992 if (ltk_type_master(k->type) != master)
2993 continue;
2994
2995 return k;
2996 }
2997
2998 return NULL;
2999 }
3000
3001 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3002 u8 addr_type, bool master)
3003 {
3004 struct smp_ltk *k;
3005
3006 list_for_each_entry(k, &hdev->long_term_keys, list)
3007 if (addr_type == k->bdaddr_type &&
3008 bacmp(bdaddr, &k->bdaddr) == 0 &&
3009 ltk_type_master(k->type) == master)
3010 return k;
3011
3012 return NULL;
3013 }
3014
3015 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3016 {
3017 struct smp_irk *irk;
3018
3019 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3020 if (!bacmp(&irk->rpa, rpa))
3021 return irk;
3022 }
3023
3024 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3025 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3026 bacpy(&irk->rpa, rpa);
3027 return irk;
3028 }
3029 }
3030
3031 return NULL;
3032 }
3033
3034 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3035 u8 addr_type)
3036 {
3037 struct smp_irk *irk;
3038
3039 /* Identity Address must be public or static random */
3040 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3041 return NULL;
3042
3043 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3044 if (addr_type == irk->addr_type &&
3045 bacmp(bdaddr, &irk->bdaddr) == 0)
3046 return irk;
3047 }
3048
3049 return NULL;
3050 }
3051
3052 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
3053 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
3054 {
3055 struct link_key *key, *old_key;
3056 u8 old_key_type;
3057 bool persistent;
3058
3059 old_key = hci_find_link_key(hdev, bdaddr);
3060 if (old_key) {
3061 old_key_type = old_key->type;
3062 key = old_key;
3063 } else {
3064 old_key_type = conn ? conn->key_type : 0xff;
3065 key = kzalloc(sizeof(*key), GFP_KERNEL);
3066 if (!key)
3067 return -ENOMEM;
3068 list_add(&key->list, &hdev->link_keys);
3069 }
3070
3071 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3072
3073 /* Some buggy controller combinations generate a changed
3074 * combination key for legacy pairing even when there's no
3075 * previous key */
3076 if (type == HCI_LK_CHANGED_COMBINATION &&
3077 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3078 type = HCI_LK_COMBINATION;
3079 if (conn)
3080 conn->key_type = type;
3081 }
3082
3083 bacpy(&key->bdaddr, bdaddr);
3084 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3085 key->pin_len = pin_len;
3086
3087 if (type == HCI_LK_CHANGED_COMBINATION)
3088 key->type = old_key_type;
3089 else
3090 key->type = type;
3091
3092 if (!new_key)
3093 return 0;
3094
3095 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
3096
3097 mgmt_new_link_key(hdev, key, persistent);
3098
3099 if (conn)
3100 conn->flush_key = !persistent;
3101
3102 return 0;
3103 }
3104
3105 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3106 u8 addr_type, u8 type, u8 authenticated,
3107 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3108 {
3109 struct smp_ltk *key, *old_key;
3110 bool master = ltk_type_master(type);
3111
3112 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3113 if (old_key)
3114 key = old_key;
3115 else {
3116 key = kzalloc(sizeof(*key), GFP_KERNEL);
3117 if (!key)
3118 return NULL;
3119 list_add(&key->list, &hdev->long_term_keys);
3120 }
3121
3122 bacpy(&key->bdaddr, bdaddr);
3123 key->bdaddr_type = addr_type;
3124 memcpy(key->val, tk, sizeof(key->val));
3125 key->authenticated = authenticated;
3126 key->ediv = ediv;
3127 key->rand = rand;
3128 key->enc_size = enc_size;
3129 key->type = type;
3130
3131 return key;
3132 }
3133
3134 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3135 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3136 {
3137 struct smp_irk *irk;
3138
3139 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3140 if (!irk) {
3141 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3142 if (!irk)
3143 return NULL;
3144
3145 bacpy(&irk->bdaddr, bdaddr);
3146 irk->addr_type = addr_type;
3147
3148 list_add(&irk->list, &hdev->identity_resolving_keys);
3149 }
3150
3151 memcpy(irk->val, val, 16);
3152 bacpy(&irk->rpa, rpa);
3153
3154 return irk;
3155 }
3156
3157 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3158 {
3159 struct link_key *key;
3160
3161 key = hci_find_link_key(hdev, bdaddr);
3162 if (!key)
3163 return -ENOENT;
3164
3165 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3166
3167 list_del(&key->list);
3168 kfree(key);
3169
3170 return 0;
3171 }
3172
3173 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3174 {
3175 struct smp_ltk *k, *tmp;
3176 int removed = 0;
3177
3178 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3179 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3180 continue;
3181
3182 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3183
3184 list_del(&k->list);
3185 kfree(k);
3186 removed++;
3187 }
3188
3189 return removed ? 0 : -ENOENT;
3190 }
3191
3192 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3193 {
3194 struct smp_irk *k, *tmp;
3195
3196 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3197 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3198 continue;
3199
3200 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3201
3202 list_del(&k->list);
3203 kfree(k);
3204 }
3205 }
3206
3207 /* HCI command timer function */
3208 static void hci_cmd_timeout(struct work_struct *work)
3209 {
3210 struct hci_dev *hdev = container_of(work, struct hci_dev,
3211 cmd_timer.work);
3212
3213 if (hdev->sent_cmd) {
3214 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3215 u16 opcode = __le16_to_cpu(sent->opcode);
3216
3217 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3218 } else {
3219 BT_ERR("%s command tx timeout", hdev->name);
3220 }
3221
3222 atomic_set(&hdev->cmd_cnt, 1);
3223 queue_work(hdev->workqueue, &hdev->cmd_work);
3224 }
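
/* Note: cmd_timer is a delayed work item. It is armed when a command is
 * handed to the driver (typically via schedule_delayed_work(&hdev->cmd_timer,
 * HCI_CMD_TIMEOUT) from the command work, or an equivalent
 * queue_delayed_work() call) and cancelled with cancel_delayed_work_sync()
 * in hci_dev_do_close() and when the last sent command is dropped.
 */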
3225
3226 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3227 bdaddr_t *bdaddr)
3228 {
3229 struct oob_data *data;
3230
3231 list_for_each_entry(data, &hdev->remote_oob_data, list)
3232 if (bacmp(bdaddr, &data->bdaddr) == 0)
3233 return data;
3234
3235 return NULL;
3236 }
3237
3238 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3239 {
3240 struct oob_data *data;
3241
3242 data = hci_find_remote_oob_data(hdev, bdaddr);
3243 if (!data)
3244 return -ENOENT;
3245
3246 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3247
3248 list_del(&data->list);
3249 kfree(data);
3250
3251 return 0;
3252 }
3253
3254 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3255 {
3256 struct oob_data *data, *n;
3257
3258 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3259 list_del(&data->list);
3260 kfree(data);
3261 }
3262 }
3263
3264 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3265 u8 *hash, u8 *randomizer)
3266 {
3267 struct oob_data *data;
3268
3269 data = hci_find_remote_oob_data(hdev, bdaddr);
3270 if (!data) {
3271 data = kmalloc(sizeof(*data), GFP_KERNEL);
3272 if (!data)
3273 return -ENOMEM;
3274
3275 bacpy(&data->bdaddr, bdaddr);
3276 list_add(&data->list, &hdev->remote_oob_data);
3277 }
3278
3279 memcpy(data->hash192, hash, sizeof(data->hash192));
3280 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3281
3282 memset(data->hash256, 0, sizeof(data->hash256));
3283 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3284
3285 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3286
3287 return 0;
3288 }
3289
3290 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3291 u8 *hash192, u8 *randomizer192,
3292 u8 *hash256, u8 *randomizer256)
3293 {
3294 struct oob_data *data;
3295
3296 data = hci_find_remote_oob_data(hdev, bdaddr);
3297 if (!data) {
3298 data = kmalloc(sizeof(*data), GFP_KERNEL);
3299 if (!data)
3300 return -ENOMEM;
3301
3302 bacpy(&data->bdaddr, bdaddr);
3303 list_add(&data->list, &hdev->remote_oob_data);
3304 }
3305
3306 memcpy(data->hash192, hash192, sizeof(data->hash192));
3307 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3308
3309 memcpy(data->hash256, hash256, sizeof(data->hash256));
3310 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3311
3312 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3313
3314 return 0;
3315 }
3316
3317 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3318 bdaddr_t *bdaddr, u8 type)
3319 {
3320 struct bdaddr_list *b;
3321
3322 list_for_each_entry(b, &hdev->blacklist, list) {
3323 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3324 return b;
3325 }
3326
3327 return NULL;
3328 }
3329
3330 static void hci_blacklist_clear(struct hci_dev *hdev)
3331 {
3332 struct list_head *p, *n;
3333
3334 list_for_each_safe(p, n, &hdev->blacklist) {
3335 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3336
3337 list_del(p);
3338 kfree(b);
3339 }
3340 }
3341
3342 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3343 {
3344 struct bdaddr_list *entry;
3345
3346 if (!bacmp(bdaddr, BDADDR_ANY))
3347 return -EBADF;
3348
3349 if (hci_blacklist_lookup(hdev, bdaddr, type))
3350 return -EEXIST;
3351
3352 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3353 if (!entry)
3354 return -ENOMEM;
3355
3356 bacpy(&entry->bdaddr, bdaddr);
3357 entry->bdaddr_type = type;
3358
3359 list_add(&entry->list, &hdev->blacklist);
3360
3361 return mgmt_device_blocked(hdev, bdaddr, type);
3362 }
3363
3364 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3365 {
3366 struct bdaddr_list *entry;
3367
3368 if (!bacmp(bdaddr, BDADDR_ANY)) {
3369 hci_blacklist_clear(hdev);
3370 return 0;
3371 }
3372
3373 entry = hci_blacklist_lookup(hdev, bdaddr, type);
3374 if (!entry)
3375 return -ENOENT;
3376
3377 list_del(&entry->list);
3378 kfree(entry);
3379
3380 return mgmt_device_unblocked(hdev, bdaddr, type);
3381 }
3382
3383 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3384 bdaddr_t *bdaddr, u8 type)
3385 {
3386 struct bdaddr_list *b;
3387
3388 list_for_each_entry(b, &hdev->le_white_list, list) {
3389 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3390 return b;
3391 }
3392
3393 return NULL;
3394 }
3395
3396 void hci_white_list_clear(struct hci_dev *hdev)
3397 {
3398 struct list_head *p, *n;
3399
3400 list_for_each_safe(p, n, &hdev->le_white_list) {
3401 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3402
3403 list_del(p);
3404 kfree(b);
3405 }
3406 }
3407
3408 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3409 {
3410 struct bdaddr_list *entry;
3411
3412 if (!bacmp(bdaddr, BDADDR_ANY))
3413 return -EBADF;
3414
3415 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3416 if (!entry)
3417 return -ENOMEM;
3418
3419 bacpy(&entry->bdaddr, bdaddr);
3420 entry->bdaddr_type = type;
3421
3422 list_add(&entry->list, &hdev->le_white_list);
3423
3424 return 0;
3425 }
3426
3427 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3428 {
3429 struct bdaddr_list *entry;
3430
3431 if (!bacmp(bdaddr, BDADDR_ANY))
3432 return -EBADF;
3433
3434 entry = hci_white_list_lookup(hdev, bdaddr, type);
3435 if (!entry)
3436 return -ENOENT;
3437
3438 list_del(&entry->list);
3439 kfree(entry);
3440
3441 return 0;
3442 }
3443
3444 /* This function requires the caller holds hdev->lock */
3445 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3446 bdaddr_t *addr, u8 addr_type)
3447 {
3448 struct hci_conn_params *params;
3449
3450 list_for_each_entry(params, &hdev->le_conn_params, list) {
3451 if (bacmp(&params->addr, addr) == 0 &&
3452 params->addr_type == addr_type) {
3453 return params;
3454 }
3455 }
3456
3457 return NULL;
3458 }
3459
3460 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3461 {
3462 struct hci_conn *conn;
3463
3464 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3465 if (!conn)
3466 return false;
3467
3468 if (conn->dst_type != type)
3469 return false;
3470
3471 if (conn->state != BT_CONNECTED)
3472 return false;
3473
3474 return true;
3475 }
3476
3477 static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3478 {
3479 if (addr_type == ADDR_LE_DEV_PUBLIC)
3480 return true;
3481
3482 /* Check for Random Static address type */
3483 if ((addr->b[5] & 0xc0) == 0xc0)
3484 return true;
3485
3486 return false;
3487 }
3488
3489 /* This function requires the caller holds hdev->lock */
3490 int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3491 u8 auto_connect, u16 conn_min_interval,
3492 u16 conn_max_interval)
3493 {
3494 struct hci_conn_params *params;
3495
3496 if (!is_identity_address(addr, addr_type))
3497 return -EINVAL;
3498
3499 params = hci_conn_params_lookup(hdev, addr, addr_type);
3500 if (params)
3501 goto update;
3502
3503 params = kzalloc(sizeof(*params), GFP_KERNEL);
3504 if (!params) {
3505 BT_ERR("Out of memory");
3506 return -ENOMEM;
3507 }
3508
3509 bacpy(&params->addr, addr);
3510 params->addr_type = addr_type;
3511
3512 list_add(&params->list, &hdev->le_conn_params);
3513
3514 update:
3515 params->conn_min_interval = conn_min_interval;
3516 params->conn_max_interval = conn_max_interval;
3517 params->auto_connect = auto_connect;
3518
3519 switch (auto_connect) {
3520 case HCI_AUTO_CONN_DISABLED:
3521 case HCI_AUTO_CONN_LINK_LOSS:
3522 hci_pend_le_conn_del(hdev, addr, addr_type);
3523 break;
3524 case HCI_AUTO_CONN_ALWAYS:
3525 if (!is_connected(hdev, addr, addr_type))
3526 hci_pend_le_conn_add(hdev, addr, addr_type);
3527 break;
3528 }
3529
3530 BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
3531 "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
3532 conn_min_interval, conn_max_interval);
3533
3534 return 0;
3535 }
3536
3537 /* This function requires the caller holds hdev->lock */
3538 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3539 {
3540 struct hci_conn_params *params;
3541
3542 params = hci_conn_params_lookup(hdev, addr, addr_type);
3543 if (!params)
3544 return;
3545
3546 hci_pend_le_conn_del(hdev, addr, addr_type);
3547
3548 list_del(&params->list);
3549 kfree(params);
3550
3551 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3552 }
3553
3554 /* This function requires the caller holds hdev->lock */
3555 void hci_conn_params_clear(struct hci_dev *hdev)
3556 {
3557 struct hci_conn_params *params, *tmp;
3558
3559 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3560 list_del(&params->list);
3561 kfree(params);
3562 }
3563
3564 BT_DBG("All LE connection parameters were removed");
3565 }
3566
3567 /* This function requires the caller holds hdev->lock */
3568 struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3569 bdaddr_t *addr, u8 addr_type)
3570 {
3571 struct bdaddr_list *entry;
3572
3573 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3574 if (bacmp(&entry->bdaddr, addr) == 0 &&
3575 entry->bdaddr_type == addr_type)
3576 return entry;
3577 }
3578
3579 return NULL;
3580 }
3581
3582 /* This function requires the caller holds hdev->lock */
3583 void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3584 {
3585 struct bdaddr_list *entry;
3586
3587 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3588 if (entry)
3589 goto done;
3590
3591 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3592 if (!entry) {
3593 BT_ERR("Out of memory");
3594 return;
3595 }
3596
3597 bacpy(&entry->bdaddr, addr);
3598 entry->bdaddr_type = addr_type;
3599
3600 list_add(&entry->list, &hdev->pend_le_conns);
3601
3602 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3603
3604 done:
3605 hci_update_background_scan(hdev);
3606 }
3607
3608 /* This function requires the caller holds hdev->lock */
3609 void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3610 {
3611 struct bdaddr_list *entry;
3612
3613 entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
3614 if (!entry)
3615 goto done;
3616
3617 list_del(&entry->list);
3618 kfree(entry);
3619
3620 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3621
3622 done:
3623 hci_update_background_scan(hdev);
3624 }
3625
3626 /* This function requires the caller holds hdev->lock */
3627 void hci_pend_le_conns_clear(struct hci_dev *hdev)
3628 {
3629 struct bdaddr_list *entry, *tmp;
3630
3631 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3632 list_del(&entry->list);
3633 kfree(entry);
3634 }
3635
3636 BT_DBG("All LE pending connections cleared");
3637 }
3638
3639 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3640 {
3641 if (status) {
3642 BT_ERR("Failed to start inquiry: status %d", status);
3643
3644 hci_dev_lock(hdev);
3645 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3646 hci_dev_unlock(hdev);
3647 return;
3648 }
3649 }
3650
3651 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3652 {
3653 /* General inquiry access code (GIAC) */
3654 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3655 struct hci_request req;
3656 struct hci_cp_inquiry cp;
3657 int err;
3658
3659 if (status) {
3660 BT_ERR("Failed to disable LE scanning: status %d", status);
3661 return;
3662 }
3663
3664 switch (hdev->discovery.type) {
3665 case DISCOV_TYPE_LE:
3666 hci_dev_lock(hdev);
3667 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3668 hci_dev_unlock(hdev);
3669 break;
3670
3671 case DISCOV_TYPE_INTERLEAVED:
3672 hci_req_init(&req, hdev);
3673
3674 memset(&cp, 0, sizeof(cp));
3675 memcpy(&cp.lap, lap, sizeof(cp.lap));
3676 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3677 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3678
3679 hci_dev_lock(hdev);
3680
3681 hci_inquiry_cache_flush(hdev);
3682
3683 err = hci_req_run(&req, inquiry_complete);
3684 if (err) {
3685 BT_ERR("Inquiry request failed: err %d", err);
3686 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3687 }
3688
3689 hci_dev_unlock(hdev);
3690 break;
3691 }
3692 }
3693
3694 static void le_scan_disable_work(struct work_struct *work)
3695 {
3696 struct hci_dev *hdev = container_of(work, struct hci_dev,
3697 le_scan_disable.work);
3698 struct hci_request req;
3699 int err;
3700
3701 BT_DBG("%s", hdev->name);
3702
3703 hci_req_init(&req, hdev);
3704
3705 hci_req_add_le_scan_disable(&req);
3706
3707 err = hci_req_run(&req, le_scan_disable_work_complete);
3708 if (err)
3709 BT_ERR("Disable LE scanning request failed: err %d", err);
3710 }
3711
3712 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3713 {
3714 struct hci_dev *hdev = req->hdev;
3715
3716 /* If we're advertising or initiating an LE connection we can't
3717 * go ahead and change the random address at this time. This is
3718 * because the eventual initiator address used for the
3719 * subsequently created connection will be undefined (some
3720 * controllers use the new address and others the one we had
3721 * when the operation started).
3722 *
3723 * In this kind of scenario skip the update and let the random
3724 * address be updated at the next cycle.
3725 */
3726 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3727 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3728 BT_DBG("Deferring random address update");
3729 return;
3730 }
3731
3732 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3733 }
3734
3735 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3736 u8 *own_addr_type)
3737 {
3738 struct hci_dev *hdev = req->hdev;
3739 int err;
3740
3741 /* If privacy is enabled use a resolvable private address. If
3742 	 * the current RPA has expired or something other than the
3743 	 * current RPA is in use, then generate a new one.
3744 */
3745 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3746 int to;
3747
3748 *own_addr_type = ADDR_LE_DEV_RANDOM;
3749
3750 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3751 !bacmp(&hdev->random_addr, &hdev->rpa))
3752 return 0;
3753
3754 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3755 if (err < 0) {
3756 BT_ERR("%s failed to generate new RPA", hdev->name);
3757 return err;
3758 }
3759
3760 set_random_addr(req, &hdev->rpa);
3761
3762 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3763 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3764
3765 return 0;
3766 }
3767
3768 	/* If privacy is required but a resolvable private address cannot
3769 	 * be used, fall back to a non-resolvable private address. This is
3770 	 * useful for active scanning and non-connectable advertising.
3771 */
3772 if (require_privacy) {
3773 bdaddr_t urpa;
3774
3775 get_random_bytes(&urpa, 6);
3776 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3777
3778 *own_addr_type = ADDR_LE_DEV_RANDOM;
3779 set_random_addr(req, &urpa);
3780 return 0;
3781 }
3782
3783 /* If forcing static address is in use or there is no public
3784 	 * address, use the static address as the random address (but skip
3785 	 * the HCI command if the current random address is already the
3786 	 * static one).
3787 */
3788 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3789 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3790 *own_addr_type = ADDR_LE_DEV_RANDOM;
3791 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3792 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3793 &hdev->static_addr);
3794 return 0;
3795 }
3796
3797 /* Neither privacy nor static address is being used so use a
3798 * public address.
3799 */
3800 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3801
3802 return 0;
3803 }
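
/* Typical usage (sketch): a request builder that needs an own-address type
 * for an LE command calls this helper first and copies the result into the
 * command parameters, for example:
 *
 *	u8 own_addr_type;
 *
 *	if (hci_update_random_address(req, false, &own_addr_type) < 0)
 *		return;
 *	cp.own_address_type = own_addr_type;
 *
 * where "cp" stands in for whatever advertising or scan parameter structure
 * the caller happens to be filling in.
 */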
3804
3805 /* Copy the Identity Address of the controller.
3806 *
3807 * If the controller has a public BD_ADDR, then by default use that one.
3808  * If this is an LE-only controller without a public address, default to
3809 * the static random address.
3810 *
3811 * For debugging purposes it is possible to force controllers with a
3812 * public address to use the static random address instead.
3813 */
3814 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3815 u8 *bdaddr_type)
3816 {
3817 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3818 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3819 bacpy(bdaddr, &hdev->static_addr);
3820 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3821 } else {
3822 bacpy(bdaddr, &hdev->bdaddr);
3823 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3824 }
3825 }
3826
3827 /* Alloc HCI device */
3828 struct hci_dev *hci_alloc_dev(void)
3829 {
3830 struct hci_dev *hdev;
3831
3832 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3833 if (!hdev)
3834 return NULL;
3835
3836 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3837 hdev->esco_type = (ESCO_HV1);
3838 hdev->link_mode = (HCI_LM_ACCEPT);
3839 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3840 hdev->io_capability = 0x03; /* No Input No Output */
3841 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3842 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3843
3844 hdev->sniff_max_interval = 800;
3845 hdev->sniff_min_interval = 80;
3846
3847 hdev->le_adv_channel_map = 0x07;
3848 hdev->le_scan_interval = 0x0060;
3849 hdev->le_scan_window = 0x0030;
3850 hdev->le_conn_min_interval = 0x0028;
3851 hdev->le_conn_max_interval = 0x0038;
3852
3853 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3854 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3855 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3856 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3857
3858 mutex_init(&hdev->lock);
3859 mutex_init(&hdev->req_lock);
3860
3861 INIT_LIST_HEAD(&hdev->mgmt_pending);
3862 INIT_LIST_HEAD(&hdev->blacklist);
3863 INIT_LIST_HEAD(&hdev->uuids);
3864 INIT_LIST_HEAD(&hdev->link_keys);
3865 INIT_LIST_HEAD(&hdev->long_term_keys);
3866 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3867 INIT_LIST_HEAD(&hdev->remote_oob_data);
3868 INIT_LIST_HEAD(&hdev->le_white_list);
3869 INIT_LIST_HEAD(&hdev->le_conn_params);
3870 INIT_LIST_HEAD(&hdev->pend_le_conns);
3871 INIT_LIST_HEAD(&hdev->conn_hash.list);
3872
3873 INIT_WORK(&hdev->rx_work, hci_rx_work);
3874 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3875 INIT_WORK(&hdev->tx_work, hci_tx_work);
3876 INIT_WORK(&hdev->power_on, hci_power_on);
3877
3878 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3879 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3880 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3881
3882 skb_queue_head_init(&hdev->rx_q);
3883 skb_queue_head_init(&hdev->cmd_q);
3884 skb_queue_head_init(&hdev->raw_q);
3885
3886 init_waitqueue_head(&hdev->req_wait_q);
3887
3888 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3889
3890 hci_init_sysfs(hdev);
3891 discovery_init(hdev);
3892
3893 return hdev;
3894 }
3895 EXPORT_SYMBOL(hci_alloc_dev);
3896
3897 /* Free HCI device */
3898 void hci_free_dev(struct hci_dev *hdev)
3899 {
3900 	/* will be freed via device release */
3901 put_device(&hdev->dev);
3902 }
3903 EXPORT_SYMBOL(hci_free_dev);
3904
3905 /* Register HCI device */
3906 int hci_register_dev(struct hci_dev *hdev)
3907 {
3908 int id, error;
3909
3910 if (!hdev->open || !hdev->close)
3911 return -EINVAL;
3912
3913 /* Do not allow HCI_AMP devices to register at index 0,
3914 * so the index can be used as the AMP controller ID.
3915 */
3916 switch (hdev->dev_type) {
3917 case HCI_BREDR:
3918 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3919 break;
3920 case HCI_AMP:
3921 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3922 break;
3923 default:
3924 return -EINVAL;
3925 }
3926
3927 if (id < 0)
3928 return id;
3929
3930 sprintf(hdev->name, "hci%d", id);
3931 hdev->id = id;
3932
3933 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3934
3935 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3936 WQ_MEM_RECLAIM, 1, hdev->name);
3937 if (!hdev->workqueue) {
3938 error = -ENOMEM;
3939 goto err;
3940 }
3941
3942 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3943 WQ_MEM_RECLAIM, 1, hdev->name);
3944 if (!hdev->req_workqueue) {
3945 destroy_workqueue(hdev->workqueue);
3946 error = -ENOMEM;
3947 goto err;
3948 }
3949
3950 if (!IS_ERR_OR_NULL(bt_debugfs))
3951 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3952
3953 dev_set_name(&hdev->dev, "%s", hdev->name);
3954
3955 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3956 CRYPTO_ALG_ASYNC);
3957 if (IS_ERR(hdev->tfm_aes)) {
3958 BT_ERR("Unable to create crypto context");
3959 error = PTR_ERR(hdev->tfm_aes);
3960 hdev->tfm_aes = NULL;
3961 goto err_wqueue;
3962 }
3963
3964 error = device_add(&hdev->dev);
3965 if (error < 0)
3966 goto err_tfm;
3967
3968 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3969 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3970 hdev);
3971 if (hdev->rfkill) {
3972 if (rfkill_register(hdev->rfkill) < 0) {
3973 rfkill_destroy(hdev->rfkill);
3974 hdev->rfkill = NULL;
3975 }
3976 }
3977
3978 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3979 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3980
3981 set_bit(HCI_SETUP, &hdev->dev_flags);
3982 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3983
3984 if (hdev->dev_type == HCI_BREDR) {
3985 /* Assume BR/EDR support until proven otherwise (such as
3986 		 * through reading supported features during init).
3987 */
3988 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3989 }
3990
3991 write_lock(&hci_dev_list_lock);
3992 list_add(&hdev->list, &hci_dev_list);
3993 write_unlock(&hci_dev_list_lock);
3994
3995 hci_notify(hdev, HCI_DEV_REG);
3996 hci_dev_hold(hdev);
3997
3998 queue_work(hdev->req_workqueue, &hdev->power_on);
3999
4000 return id;
4001
4002 err_tfm:
4003 crypto_free_blkcipher(hdev->tfm_aes);
4004 err_wqueue:
4005 destroy_workqueue(hdev->workqueue);
4006 destroy_workqueue(hdev->req_workqueue);
4007 err:
4008 ida_simple_remove(&hci_index_ida, hdev->id);
4009
4010 return error;
4011 }
4012 EXPORT_SYMBOL(hci_register_dev);
4013
4014 /* Unregister HCI device */
4015 void hci_unregister_dev(struct hci_dev *hdev)
4016 {
4017 int i, id;
4018
4019 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4020
4021 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4022
4023 id = hdev->id;
4024
4025 write_lock(&hci_dev_list_lock);
4026 list_del(&hdev->list);
4027 write_unlock(&hci_dev_list_lock);
4028
4029 hci_dev_do_close(hdev);
4030
4031 for (i = 0; i < NUM_REASSEMBLY; i++)
4032 kfree_skb(hdev->reassembly[i]);
4033
4034 cancel_work_sync(&hdev->power_on);
4035
4036 if (!test_bit(HCI_INIT, &hdev->flags) &&
4037 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
4038 hci_dev_lock(hdev);
4039 mgmt_index_removed(hdev);
4040 hci_dev_unlock(hdev);
4041 }
4042
4043 /* mgmt_index_removed should take care of emptying the
4044 * pending list */
4045 BUG_ON(!list_empty(&hdev->mgmt_pending));
4046
4047 hci_notify(hdev, HCI_DEV_UNREG);
4048
4049 if (hdev->rfkill) {
4050 rfkill_unregister(hdev->rfkill);
4051 rfkill_destroy(hdev->rfkill);
4052 }
4053
4054 if (hdev->tfm_aes)
4055 crypto_free_blkcipher(hdev->tfm_aes);
4056
4057 device_del(&hdev->dev);
4058
4059 debugfs_remove_recursive(hdev->debugfs);
4060
4061 destroy_workqueue(hdev->workqueue);
4062 destroy_workqueue(hdev->req_workqueue);
4063
4064 hci_dev_lock(hdev);
4065 hci_blacklist_clear(hdev);
4066 hci_uuids_clear(hdev);
4067 hci_link_keys_clear(hdev);
4068 hci_smp_ltks_clear(hdev);
4069 hci_smp_irks_clear(hdev);
4070 hci_remote_oob_data_clear(hdev);
4071 hci_white_list_clear(hdev);
4072 hci_conn_params_clear(hdev);
4073 hci_pend_le_conns_clear(hdev);
4074 hci_dev_unlock(hdev);
4075
4076 hci_dev_put(hdev);
4077
4078 ida_simple_remove(&hci_index_ida, id);
4079 }
4080 EXPORT_SYMBOL(hci_unregister_dev);
4081
4082 /* Suspend HCI device */
4083 int hci_suspend_dev(struct hci_dev *hdev)
4084 {
4085 hci_notify(hdev, HCI_DEV_SUSPEND);
4086 return 0;
4087 }
4088 EXPORT_SYMBOL(hci_suspend_dev);
4089
4090 /* Resume HCI device */
4091 int hci_resume_dev(struct hci_dev *hdev)
4092 {
4093 hci_notify(hdev, HCI_DEV_RESUME);
4094 return 0;
4095 }
4096 EXPORT_SYMBOL(hci_resume_dev);
4097
4098 /* Receive frame from HCI drivers */
4099 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4100 {
4101 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4102 && !test_bit(HCI_INIT, &hdev->flags))) {
4103 kfree_skb(skb);
4104 return -ENXIO;
4105 }
4106
4107 /* Incoming skb */
4108 bt_cb(skb)->incoming = 1;
4109
4110 /* Time stamp */
4111 __net_timestamp(skb);
4112
4113 skb_queue_tail(&hdev->rx_q, skb);
4114 queue_work(hdev->workqueue, &hdev->rx_work);
4115
4116 return 0;
4117 }
4118 EXPORT_SYMBOL(hci_recv_frame);
4119
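/* Reassemble a possibly fragmented packet of @type into
 * hdev->reassembly[@index]. A fresh skb first collects the packet header;
 * once the header is complete the expected length is taken from its length
 * field, and a finished frame is handed to hci_recv_frame(). Returns the
 * number of input bytes left unconsumed, or a negative error.
 */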
4120 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4121 int count, __u8 index)
4122 {
4123 int len = 0;
4124 int hlen = 0;
4125 int remain = count;
4126 struct sk_buff *skb;
4127 struct bt_skb_cb *scb;
4128
4129 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4130 index >= NUM_REASSEMBLY)
4131 return -EILSEQ;
4132
4133 skb = hdev->reassembly[index];
4134
4135 if (!skb) {
4136 switch (type) {
4137 case HCI_ACLDATA_PKT:
4138 len = HCI_MAX_FRAME_SIZE;
4139 hlen = HCI_ACL_HDR_SIZE;
4140 break;
4141 case HCI_EVENT_PKT:
4142 len = HCI_MAX_EVENT_SIZE;
4143 hlen = HCI_EVENT_HDR_SIZE;
4144 break;
4145 case HCI_SCODATA_PKT:
4146 len = HCI_MAX_SCO_SIZE;
4147 hlen = HCI_SCO_HDR_SIZE;
4148 break;
4149 }
4150
4151 skb = bt_skb_alloc(len, GFP_ATOMIC);
4152 if (!skb)
4153 return -ENOMEM;
4154
4155 scb = (void *) skb->cb;
4156 scb->expect = hlen;
4157 scb->pkt_type = type;
4158
4159 hdev->reassembly[index] = skb;
4160 }
4161
4162 while (count) {
4163 scb = (void *) skb->cb;
4164 len = min_t(uint, scb->expect, count);
4165
4166 memcpy(skb_put(skb, len), data, len);
4167
4168 count -= len;
4169 data += len;
4170 scb->expect -= len;
4171 remain = count;
4172
4173 switch (type) {
4174 case HCI_EVENT_PKT:
4175 if (skb->len == HCI_EVENT_HDR_SIZE) {
4176 struct hci_event_hdr *h = hci_event_hdr(skb);
4177 scb->expect = h->plen;
4178
4179 if (skb_tailroom(skb) < scb->expect) {
4180 kfree_skb(skb);
4181 hdev->reassembly[index] = NULL;
4182 return -ENOMEM;
4183 }
4184 }
4185 break;
4186
4187 case HCI_ACLDATA_PKT:
4188 if (skb->len == HCI_ACL_HDR_SIZE) {
4189 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4190 scb->expect = __le16_to_cpu(h->dlen);
4191
4192 if (skb_tailroom(skb) < scb->expect) {
4193 kfree_skb(skb);
4194 hdev->reassembly[index] = NULL;
4195 return -ENOMEM;
4196 }
4197 }
4198 break;
4199
4200 case HCI_SCODATA_PKT:
4201 if (skb->len == HCI_SCO_HDR_SIZE) {
4202 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4203 scb->expect = h->dlen;
4204
4205 if (skb_tailroom(skb) < scb->expect) {
4206 kfree_skb(skb);
4207 hdev->reassembly[index] = NULL;
4208 return -ENOMEM;
4209 }
4210 }
4211 break;
4212 }
4213
4214 if (scb->expect == 0) {
4215 /* Complete frame */
4216
4217 bt_cb(skb)->pkt_type = type;
4218 hci_recv_frame(hdev, skb);
4219
4220 hdev->reassembly[index] = NULL;
4221 return remain;
4222 }
4223 }
4224
4225 return remain;
4226 }
4227
4228 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4229 {
4230 int rem = 0;
4231
4232 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4233 return -EILSEQ;
4234
4235 while (count) {
4236 rem = hci_reassembly(hdev, type, data, count, type - 1);
4237 if (rem < 0)
4238 return rem;
4239
4240 data += (count - rem);
4241 count = rem;
4242 }
4243
4244 return rem;
4245 }
4246 EXPORT_SYMBOL(hci_recv_fragment);
4247
4248 #define STREAM_REASSEMBLY 0
4249
4250 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4251 {
4252 int type;
4253 int rem = 0;
4254
4255 while (count) {
4256 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4257
4258 if (!skb) {
4259 struct { char type; } *pkt;
4260
4261 /* Start of the frame */
4262 pkt = data;
4263 type = pkt->type;
4264
4265 data++;
4266 count--;
4267 } else
4268 type = bt_cb(skb)->pkt_type;
4269
4270 rem = hci_reassembly(hdev, type, data, count,
4271 STREAM_REASSEMBLY);
4272 if (rem < 0)
4273 return rem;
4274
4275 data += (count - rem);
4276 count = rem;
4277 }
4278
4279 return rem;
4280 }
4281 EXPORT_SYMBOL(hci_recv_stream_fragment);
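
/* Example (illustrative sketch): byte-stream transports (H:4 style UART,
 * where each packet is prefixed with its type indicator byte) can pass
 * the raw stream straight through; buf and len are hypothetical.
 *
 *	ret = hci_recv_stream_fragment(hdev, buf, len);
 *	if (ret < 0)
 *		BT_ERR("stream reassembly failed: %d", ret);
 */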
4282
4283 /* ---- Interface to upper protocols ---- */
4284
4285 int hci_register_cb(struct hci_cb *cb)
4286 {
4287 BT_DBG("%p name %s", cb, cb->name);
4288
4289 write_lock(&hci_cb_list_lock);
4290 list_add(&cb->list, &hci_cb_list);
4291 write_unlock(&hci_cb_list_lock);
4292
4293 return 0;
4294 }
4295 EXPORT_SYMBOL(hci_register_cb);
4296
4297 int hci_unregister_cb(struct hci_cb *cb)
4298 {
4299 BT_DBG("%p name %s", cb, cb->name);
4300
4301 write_lock(&hci_cb_list_lock);
4302 list_del(&cb->list);
4303 write_unlock(&hci_cb_list_lock);
4304
4305 return 0;
4306 }
4307 EXPORT_SYMBOL(hci_unregister_cb);
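
/* Example (illustrative sketch): an upper protocol registers its
 * callbacks once at init time and removes them on exit; my_proto_cb is
 * hypothetical and the protocol-specific confirm hooks are omitted.
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name = "my_proto",
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	...
 *	hci_unregister_cb(&my_proto_cb);
 */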
4308
4309 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4310 {
4311 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4312
4313 /* Time stamp */
4314 __net_timestamp(skb);
4315
4316 /* Send copy to monitor */
4317 hci_send_to_monitor(hdev, skb);
4318
4319 if (atomic_read(&hdev->promisc)) {
4320 /* Send copy to the sockets */
4321 hci_send_to_sock(hdev, skb);
4322 }
4323
4324 /* Get rid of skb owner, prior to sending to the driver. */
4325 skb_orphan(skb);
4326
4327 if (hdev->send(hdev, skb) < 0)
4328 BT_ERR("%s sending frame failed", hdev->name);
4329 }
4330
4331 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4332 {
4333 skb_queue_head_init(&req->cmd_q);
4334 req->hdev = hdev;
4335 req->err = 0;
4336 }
4337
4338 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4339 {
4340 struct hci_dev *hdev = req->hdev;
4341 struct sk_buff *skb;
4342 unsigned long flags;
4343
4344 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4345
4346 /* If an error occurred during request building, remove all HCI
4347 * commands queued on the HCI request queue.
4348 */
4349 if (req->err) {
4350 skb_queue_purge(&req->cmd_q);
4351 return req->err;
4352 }
4353
4354 /* Do not allow empty requests */
4355 if (skb_queue_empty(&req->cmd_q))
4356 return -ENODATA;
4357
4358 skb = skb_peek_tail(&req->cmd_q);
4359 bt_cb(skb)->req.complete = complete;
4360
4361 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4362 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4363 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4364
4365 queue_work(hdev->workqueue, &hdev->cmd_work);
4366
4367 return 0;
4368 }
4369
4370 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4371 u32 plen, const void *param)
4372 {
4373 int len = HCI_COMMAND_HDR_SIZE + plen;
4374 struct hci_command_hdr *hdr;
4375 struct sk_buff *skb;
4376
4377 skb = bt_skb_alloc(len, GFP_ATOMIC);
4378 if (!skb)
4379 return NULL;
4380
4381 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4382 hdr->opcode = cpu_to_le16(opcode);
4383 hdr->plen = plen;
4384
4385 if (plen)
4386 memcpy(skb_put(skb, plen), param, plen);
4387
4388 BT_DBG("skb len %d", skb->len);
4389
4390 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4391
4392 return skb;
4393 }
4394
4395 /* Send HCI command */
4396 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4397 const void *param)
4398 {
4399 struct sk_buff *skb;
4400
4401 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4402
4403 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4404 if (!skb) {
4405 BT_ERR("%s no memory for command", hdev->name);
4406 return -ENOMEM;
4407 }
4408
4409 /* Stand-alone HCI commands must be flagged as
4410 * single-command requests.
4411 */
4412 bt_cb(skb)->req.start = true;
4413
4414 skb_queue_tail(&hdev->cmd_q, skb);
4415 queue_work(hdev->workqueue, &hdev->cmd_work);
4416
4417 return 0;
4418 }
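
/* Example (illustrative sketch): sending a stand-alone command with a
 * parameter block; the values chosen here are arbitrary.
 *
 *	struct hci_cp_write_le_host_supported cp;
 *
 *	cp.le = 0x01;
 *	cp.simul = 0x00;
 *	hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
 *
 * The skb is queued on hdev->cmd_q and transmitted from hci_cmd_work()
 * once the controller has command credits available.
 */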
4419
4420 /* Queue a command to an asynchronous HCI request */
4421 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4422 const void *param, u8 event)
4423 {
4424 struct hci_dev *hdev = req->hdev;
4425 struct sk_buff *skb;
4426
4427 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4428
4429 /* If an error occurred during request building, there is no point in
4430 * queueing the HCI command. We can simply return.
4431 */
4432 if (req->err)
4433 return;
4434
4435 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4436 if (!skb) {
4437 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4438 hdev->name, opcode);
4439 req->err = -ENOMEM;
4440 return;
4441 }
4442
4443 if (skb_queue_empty(&req->cmd_q))
4444 bt_cb(skb)->req.start = true;
4445
4446 bt_cb(skb)->req.event = event;
4447
4448 skb_queue_tail(&req->cmd_q, skb);
4449 }
4450
4451 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4452 const void *param)
4453 {
4454 hci_req_add_ev(req, opcode, plen, param, 0);
4455 }
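
/* Example (illustrative sketch): building and running an asynchronous
 * request; my_complete_cb is a hypothetical hci_req_complete_t callback.
 *
 *	struct hci_request req;
 *	struct hci_cp_le_set_scan_enable cp;
 *
 *	hci_req_init(&req, hdev);
 *	memset(&cp, 0, sizeof(cp));
 *	cp.enable = LE_SCAN_DISABLE;
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete_cb);
 *
 * hci_req_run() returns -ENODATA for an empty request and otherwise
 * splices the queued commands onto hdev->cmd_q in one go.
 */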
4456
4457 /* Get data from the previously sent command */
4458 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4459 {
4460 struct hci_command_hdr *hdr;
4461
4462 if (!hdev->sent_cmd)
4463 return NULL;
4464
4465 hdr = (void *) hdev->sent_cmd->data;
4466
4467 if (hdr->opcode != cpu_to_le16(opcode))
4468 return NULL;
4469
4470 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4471
4472 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4473 }
4474
4475 /* Send ACL data */
4476 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4477 {
4478 struct hci_acl_hdr *hdr;
4479 int len = skb->len;
4480
4481 skb_push(skb, HCI_ACL_HDR_SIZE);
4482 skb_reset_transport_header(skb);
4483 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4484 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4485 hdr->dlen = cpu_to_le16(len);
4486 }
4487
4488 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4489 struct sk_buff *skb, __u16 flags)
4490 {
4491 struct hci_conn *conn = chan->conn;
4492 struct hci_dev *hdev = conn->hdev;
4493 struct sk_buff *list;
4494
4495 skb->len = skb_headlen(skb);
4496 skb->data_len = 0;
4497
4498 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4499
4500 switch (hdev->dev_type) {
4501 case HCI_BREDR:
4502 hci_add_acl_hdr(skb, conn->handle, flags);
4503 break;
4504 case HCI_AMP:
4505 hci_add_acl_hdr(skb, chan->handle, flags);
4506 break;
4507 default:
4508 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4509 return;
4510 }
4511
4512 list = skb_shinfo(skb)->frag_list;
4513 if (!list) {
4514 /* Non-fragmented */
4515 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4516
4517 skb_queue_tail(queue, skb);
4518 } else {
4519 /* Fragmented */
4520 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4521
4522 skb_shinfo(skb)->frag_list = NULL;
4523
4524 /* Queue all fragments atomically */
4525 spin_lock(&queue->lock);
4526
4527 __skb_queue_tail(queue, skb);
4528
4529 flags &= ~ACL_START;
4530 flags |= ACL_CONT;
4531 do {
4532 skb = list; list = list->next;
4533
4534 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4535 hci_add_acl_hdr(skb, conn->handle, flags);
4536
4537 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4538
4539 __skb_queue_tail(queue, skb);
4540 } while (list);
4541
4542 spin_unlock(&queue->lock);
4543 }
4544 }
4545
4546 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4547 {
4548 struct hci_dev *hdev = chan->conn->hdev;
4549
4550 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4551
4552 hci_queue_acl(chan, &chan->data_q, skb, flags);
4553
4554 queue_work(hdev->workqueue, &hdev->tx_work);
4555 }
4556
4557 /* Send SCO data */
4558 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4559 {
4560 struct hci_dev *hdev = conn->hdev;
4561 struct hci_sco_hdr hdr;
4562
4563 BT_DBG("%s len %d", hdev->name, skb->len);
4564
4565 hdr.handle = cpu_to_le16(conn->handle);
4566 hdr.dlen = skb->len;
4567
4568 skb_push(skb, HCI_SCO_HDR_SIZE);
4569 skb_reset_transport_header(skb);
4570 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4571
4572 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4573
4574 skb_queue_tail(&conn->data_q, skb);
4575 queue_work(hdev->workqueue, &hdev->tx_work);
4576 }
4577
4578 /* ---- HCI TX task (outgoing data) ---- */
4579
4580 /* HCI Connection scheduler */
4581 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4582 int *quote)
4583 {
4584 struct hci_conn_hash *h = &hdev->conn_hash;
4585 struct hci_conn *conn = NULL, *c;
4586 unsigned int num = 0, min = ~0;
4587
4588 /* We don't have to lock the device here. Connections are always
4589 	 * added and removed with the TX task disabled. */
4590
4591 rcu_read_lock();
4592
4593 list_for_each_entry_rcu(c, &h->list, list) {
4594 if (c->type != type || skb_queue_empty(&c->data_q))
4595 continue;
4596
4597 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4598 continue;
4599
4600 num++;
4601
4602 if (c->sent < min) {
4603 min = c->sent;
4604 conn = c;
4605 }
4606
4607 if (hci_conn_num(hdev, type) == num)
4608 break;
4609 }
4610
4611 rcu_read_unlock();
4612
4613 if (conn) {
4614 int cnt, q;
4615
4616 switch (conn->type) {
4617 case ACL_LINK:
4618 cnt = hdev->acl_cnt;
4619 break;
4620 case SCO_LINK:
4621 case ESCO_LINK:
4622 cnt = hdev->sco_cnt;
4623 break;
4624 case LE_LINK:
4625 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4626 break;
4627 default:
4628 cnt = 0;
4629 BT_ERR("Unknown link type");
4630 }
4631
4632 q = cnt / num;
4633 *quote = q ? q : 1;
4634 } else
4635 *quote = 0;
4636
4637 BT_DBG("conn %p quote %d", conn, *quote);
4638 return conn;
4639 }
4640
4641 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4642 {
4643 struct hci_conn_hash *h = &hdev->conn_hash;
4644 struct hci_conn *c;
4645
4646 BT_ERR("%s link tx timeout", hdev->name);
4647
4648 rcu_read_lock();
4649
4650 /* Kill stalled connections */
4651 list_for_each_entry_rcu(c, &h->list, list) {
4652 if (c->type == type && c->sent) {
4653 BT_ERR("%s killing stalled connection %pMR",
4654 hdev->name, &c->dst);
4655 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4656 }
4657 }
4658
4659 rcu_read_unlock();
4660 }
4661
4662 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4663 int *quote)
4664 {
4665 struct hci_conn_hash *h = &hdev->conn_hash;
4666 struct hci_chan *chan = NULL;
4667 unsigned int num = 0, min = ~0, cur_prio = 0;
4668 struct hci_conn *conn;
4669 int cnt, q, conn_num = 0;
4670
4671 BT_DBG("%s", hdev->name);
4672
4673 rcu_read_lock();
4674
4675 list_for_each_entry_rcu(conn, &h->list, list) {
4676 struct hci_chan *tmp;
4677
4678 if (conn->type != type)
4679 continue;
4680
4681 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4682 continue;
4683
4684 conn_num++;
4685
4686 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4687 struct sk_buff *skb;
4688
4689 if (skb_queue_empty(&tmp->data_q))
4690 continue;
4691
4692 skb = skb_peek(&tmp->data_q);
4693 if (skb->priority < cur_prio)
4694 continue;
4695
4696 if (skb->priority > cur_prio) {
4697 num = 0;
4698 min = ~0;
4699 cur_prio = skb->priority;
4700 }
4701
4702 num++;
4703
4704 if (conn->sent < min) {
4705 min = conn->sent;
4706 chan = tmp;
4707 }
4708 }
4709
4710 if (hci_conn_num(hdev, type) == conn_num)
4711 break;
4712 }
4713
4714 rcu_read_unlock();
4715
4716 if (!chan)
4717 return NULL;
4718
4719 switch (chan->conn->type) {
4720 case ACL_LINK:
4721 cnt = hdev->acl_cnt;
4722 break;
4723 case AMP_LINK:
4724 cnt = hdev->block_cnt;
4725 break;
4726 case SCO_LINK:
4727 case ESCO_LINK:
4728 cnt = hdev->sco_cnt;
4729 break;
4730 case LE_LINK:
4731 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4732 break;
4733 default:
4734 cnt = 0;
4735 BT_ERR("Unknown link type");
4736 }
4737
4738 q = cnt / num;
4739 *quote = q ? q : 1;
4740 BT_DBG("chan %p quote %d", chan, *quote);
4741 return chan;
4742 }
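
/* Example: if three ACL channels are waiting at the same priority and
 * hdev->acl_cnt is 8, hci_chan_sent() picks the channel on the
 * connection with the fewest outstanding packets and hands it a quote
 * of 8 / 3 = 2 packets; a zero quotient is rounded up to 1 so progress
 * is always possible.
 */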
4743
4744 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4745 {
4746 struct hci_conn_hash *h = &hdev->conn_hash;
4747 struct hci_conn *conn;
4748 int num = 0;
4749
4750 BT_DBG("%s", hdev->name);
4751
4752 rcu_read_lock();
4753
4754 list_for_each_entry_rcu(conn, &h->list, list) {
4755 struct hci_chan *chan;
4756
4757 if (conn->type != type)
4758 continue;
4759
4760 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4761 continue;
4762
4763 num++;
4764
4765 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4766 struct sk_buff *skb;
4767
4768 if (chan->sent) {
4769 chan->sent = 0;
4770 continue;
4771 }
4772
4773 if (skb_queue_empty(&chan->data_q))
4774 continue;
4775
4776 skb = skb_peek(&chan->data_q);
4777 if (skb->priority >= HCI_PRIO_MAX - 1)
4778 continue;
4779
4780 skb->priority = HCI_PRIO_MAX - 1;
4781
4782 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4783 skb->priority);
4784 }
4785
4786 if (hci_conn_num(hdev, type) == num)
4787 break;
4788 }
4789
4790 rcu_read_unlock();
4791
4792 }
4793
4794 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4795 {
4796 /* Calculate count of blocks used by this packet */
4797 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4798 }
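
/* Example: with hdev->block_len of 64 bytes, a 340 byte ACL frame
 * (336 bytes of payload after the 4 byte ACL header) consumes
 * DIV_ROUND_UP(336, 64) = 6 controller data blocks.
 */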
4799
4800 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4801 {
4802 if (!test_bit(HCI_RAW, &hdev->flags)) {
4803 /* ACL tx timeout must be longer than the maximum
4804 	 * link supervision timeout (40.9 seconds) */
4805 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4806 HCI_ACL_TX_TIMEOUT))
4807 hci_link_tx_to(hdev, ACL_LINK);
4808 }
4809 }
4810
4811 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4812 {
4813 unsigned int cnt = hdev->acl_cnt;
4814 struct hci_chan *chan;
4815 struct sk_buff *skb;
4816 int quote;
4817
4818 __check_timeout(hdev, cnt);
4819
4820 while (hdev->acl_cnt &&
4821 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4822 u32 priority = (skb_peek(&chan->data_q))->priority;
4823 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4824 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4825 skb->len, skb->priority);
4826
4827 /* Stop if priority has changed */
4828 if (skb->priority < priority)
4829 break;
4830
4831 skb = skb_dequeue(&chan->data_q);
4832
4833 hci_conn_enter_active_mode(chan->conn,
4834 bt_cb(skb)->force_active);
4835
4836 hci_send_frame(hdev, skb);
4837 hdev->acl_last_tx = jiffies;
4838
4839 hdev->acl_cnt--;
4840 chan->sent++;
4841 chan->conn->sent++;
4842 }
4843 }
4844
4845 if (cnt != hdev->acl_cnt)
4846 hci_prio_recalculate(hdev, ACL_LINK);
4847 }
4848
4849 static void hci_sched_acl_blk(struct hci_dev *hdev)
4850 {
4851 unsigned int cnt = hdev->block_cnt;
4852 struct hci_chan *chan;
4853 struct sk_buff *skb;
4854 int quote;
4855 u8 type;
4856
4857 __check_timeout(hdev, cnt);
4858
4859 BT_DBG("%s", hdev->name);
4860
4861 if (hdev->dev_type == HCI_AMP)
4862 type = AMP_LINK;
4863 else
4864 type = ACL_LINK;
4865
4866 while (hdev->block_cnt > 0 &&
4867 (chan = hci_chan_sent(hdev, type, &quote))) {
4868 u32 priority = (skb_peek(&chan->data_q))->priority;
4869 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4870 int blocks;
4871
4872 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4873 skb->len, skb->priority);
4874
4875 /* Stop if priority has changed */
4876 if (skb->priority < priority)
4877 break;
4878
4879 skb = skb_dequeue(&chan->data_q);
4880
4881 blocks = __get_blocks(hdev, skb);
4882 if (blocks > hdev->block_cnt)
4883 return;
4884
4885 hci_conn_enter_active_mode(chan->conn,
4886 bt_cb(skb)->force_active);
4887
4888 hci_send_frame(hdev, skb);
4889 hdev->acl_last_tx = jiffies;
4890
4891 hdev->block_cnt -= blocks;
4892 quote -= blocks;
4893
4894 chan->sent += blocks;
4895 chan->conn->sent += blocks;
4896 }
4897 }
4898
4899 if (cnt != hdev->block_cnt)
4900 hci_prio_recalculate(hdev, type);
4901 }
4902
4903 static void hci_sched_acl(struct hci_dev *hdev)
4904 {
4905 BT_DBG("%s", hdev->name);
4906
4907 /* No ACL link over BR/EDR controller */
4908 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4909 return;
4910
4911 /* No AMP link over AMP controller */
4912 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4913 return;
4914
4915 switch (hdev->flow_ctl_mode) {
4916 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4917 hci_sched_acl_pkt(hdev);
4918 break;
4919
4920 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4921 hci_sched_acl_blk(hdev);
4922 break;
4923 }
4924 }
4925
4926 /* Schedule SCO */
4927 static void hci_sched_sco(struct hci_dev *hdev)
4928 {
4929 struct hci_conn *conn;
4930 struct sk_buff *skb;
4931 int quote;
4932
4933 BT_DBG("%s", hdev->name);
4934
4935 if (!hci_conn_num(hdev, SCO_LINK))
4936 return;
4937
4938 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4939 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4940 BT_DBG("skb %p len %d", skb, skb->len);
4941 hci_send_frame(hdev, skb);
4942
4943 conn->sent++;
4944 if (conn->sent == ~0)
4945 conn->sent = 0;
4946 }
4947 }
4948 }
4949
4950 static void hci_sched_esco(struct hci_dev *hdev)
4951 {
4952 struct hci_conn *conn;
4953 struct sk_buff *skb;
4954 int quote;
4955
4956 BT_DBG("%s", hdev->name);
4957
4958 if (!hci_conn_num(hdev, ESCO_LINK))
4959 return;
4960
4961 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4962 &quote))) {
4963 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4964 BT_DBG("skb %p len %d", skb, skb->len);
4965 hci_send_frame(hdev, skb);
4966
4967 conn->sent++;
4968 if (conn->sent == ~0)
4969 conn->sent = 0;
4970 }
4971 }
4972 }
4973
4974 static void hci_sched_le(struct hci_dev *hdev)
4975 {
4976 struct hci_chan *chan;
4977 struct sk_buff *skb;
4978 int quote, cnt, tmp;
4979
4980 BT_DBG("%s", hdev->name);
4981
4982 if (!hci_conn_num(hdev, LE_LINK))
4983 return;
4984
4985 if (!test_bit(HCI_RAW, &hdev->flags)) {
4986 /* LE tx timeout must be longer than the maximum
4987 	 * link supervision timeout (40.9 seconds) */
4988 if (!hdev->le_cnt && hdev->le_pkts &&
4989 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4990 hci_link_tx_to(hdev, LE_LINK);
4991 }
4992
4993 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4994 tmp = cnt;
4995 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4996 u32 priority = (skb_peek(&chan->data_q))->priority;
4997 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4998 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4999 skb->len, skb->priority);
5000
5001 /* Stop if priority has changed */
5002 if (skb->priority < priority)
5003 break;
5004
5005 skb = skb_dequeue(&chan->data_q);
5006
5007 hci_send_frame(hdev, skb);
5008 hdev->le_last_tx = jiffies;
5009
5010 cnt--;
5011 chan->sent++;
5012 chan->conn->sent++;
5013 }
5014 }
5015
5016 if (hdev->le_pkts)
5017 hdev->le_cnt = cnt;
5018 else
5019 hdev->acl_cnt = cnt;
5020
5021 if (cnt != tmp)
5022 hci_prio_recalculate(hdev, LE_LINK);
5023 }
5024
5025 static void hci_tx_work(struct work_struct *work)
5026 {
5027 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5028 struct sk_buff *skb;
5029
5030 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5031 hdev->sco_cnt, hdev->le_cnt);
5032
5033 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5034 /* Schedule queues and send stuff to HCI driver */
5035 hci_sched_acl(hdev);
5036 hci_sched_sco(hdev);
5037 hci_sched_esco(hdev);
5038 hci_sched_le(hdev);
5039 }
5040
5041 /* Send next queued raw (unknown type) packet */
5042 while ((skb = skb_dequeue(&hdev->raw_q)))
5043 hci_send_frame(hdev, skb);
5044 }
5045
5046 /* ----- HCI RX task (incoming data processing) ----- */
5047
5048 /* ACL data packet */
5049 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5050 {
5051 struct hci_acl_hdr *hdr = (void *) skb->data;
5052 struct hci_conn *conn;
5053 __u16 handle, flags;
5054
5055 skb_pull(skb, HCI_ACL_HDR_SIZE);
5056
5057 handle = __le16_to_cpu(hdr->handle);
5058 flags = hci_flags(handle);
5059 handle = hci_handle(handle);
5060
5061 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5062 handle, flags);
5063
5064 hdev->stat.acl_rx++;
5065
5066 hci_dev_lock(hdev);
5067 conn = hci_conn_hash_lookup_handle(hdev, handle);
5068 hci_dev_unlock(hdev);
5069
5070 if (conn) {
5071 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5072
5073 /* Send to upper protocol */
5074 l2cap_recv_acldata(conn, skb, flags);
5075 return;
5076 } else {
5077 BT_ERR("%s ACL packet for unknown connection handle %d",
5078 hdev->name, handle);
5079 }
5080
5081 kfree_skb(skb);
5082 }
5083
5084 /* SCO data packet */
5085 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5086 {
5087 struct hci_sco_hdr *hdr = (void *) skb->data;
5088 struct hci_conn *conn;
5089 __u16 handle;
5090
5091 skb_pull(skb, HCI_SCO_HDR_SIZE);
5092
5093 handle = __le16_to_cpu(hdr->handle);
5094
5095 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5096
5097 hdev->stat.sco_rx++;
5098
5099 hci_dev_lock(hdev);
5100 conn = hci_conn_hash_lookup_handle(hdev, handle);
5101 hci_dev_unlock(hdev);
5102
5103 if (conn) {
5104 /* Send to upper protocol */
5105 sco_recv_scodata(conn, skb);
5106 return;
5107 } else {
5108 BT_ERR("%s SCO packet for unknown connection handle %d",
5109 hdev->name, handle);
5110 }
5111
5112 kfree_skb(skb);
5113 }
5114
5115 static bool hci_req_is_complete(struct hci_dev *hdev)
5116 {
5117 struct sk_buff *skb;
5118
5119 skb = skb_peek(&hdev->cmd_q);
5120 if (!skb)
5121 return true;
5122
5123 return bt_cb(skb)->req.start;
5124 }
5125
5126 static void hci_resend_last(struct hci_dev *hdev)
5127 {
5128 struct hci_command_hdr *sent;
5129 struct sk_buff *skb;
5130 u16 opcode;
5131
5132 if (!hdev->sent_cmd)
5133 return;
5134
5135 sent = (void *) hdev->sent_cmd->data;
5136 opcode = __le16_to_cpu(sent->opcode);
5137 if (opcode == HCI_OP_RESET)
5138 return;
5139
5140 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5141 if (!skb)
5142 return;
5143
5144 skb_queue_head(&hdev->cmd_q, skb);
5145 queue_work(hdev->workqueue, &hdev->cmd_work);
5146 }
5147
5148 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5149 {
5150 hci_req_complete_t req_complete = NULL;
5151 struct sk_buff *skb;
5152 unsigned long flags;
5153
5154 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5155
5156 /* If the completed command doesn't match the last one that was
5157 	 * sent, we need to do special handling of it.
5158 */
5159 if (!hci_sent_cmd_data(hdev, opcode)) {
5160 /* Some CSR based controllers generate a spontaneous
5161 * reset complete event during init and any pending
5162 * command will never be completed. In such a case we
5163 * need to resend whatever was the last sent
5164 * command.
5165 */
5166 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5167 hci_resend_last(hdev);
5168
5169 return;
5170 }
5171
5172 /* If the command succeeded and there are still more commands in
5173 	 * this request, the request is not yet complete.
5174 */
5175 if (!status && !hci_req_is_complete(hdev))
5176 return;
5177
5178 /* If this was the last command in a request, the complete
5179 * callback would be found in hdev->sent_cmd instead of the
5180 * command queue (hdev->cmd_q).
5181 */
5182 if (hdev->sent_cmd) {
5183 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5184
5185 if (req_complete) {
5186 /* We must set the complete callback to NULL to
5187 * avoid calling the callback more than once if
5188 * this function gets called again.
5189 */
5190 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5191
5192 goto call_complete;
5193 }
5194 }
5195
5196 /* Remove all pending commands belonging to this request */
5197 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5198 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5199 if (bt_cb(skb)->req.start) {
5200 __skb_queue_head(&hdev->cmd_q, skb);
5201 break;
5202 }
5203
5204 req_complete = bt_cb(skb)->req.complete;
5205 kfree_skb(skb);
5206 }
5207 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5208
5209 call_complete:
5210 if (req_complete)
5211 req_complete(hdev, status);
5212 }
5213
5214 static void hci_rx_work(struct work_struct *work)
5215 {
5216 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5217 struct sk_buff *skb;
5218
5219 BT_DBG("%s", hdev->name);
5220
5221 while ((skb = skb_dequeue(&hdev->rx_q))) {
5222 /* Send copy to monitor */
5223 hci_send_to_monitor(hdev, skb);
5224
5225 if (atomic_read(&hdev->promisc)) {
5226 /* Send copy to the sockets */
5227 hci_send_to_sock(hdev, skb);
5228 }
5229
5230 if (test_bit(HCI_RAW, &hdev->flags) ||
5231 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5232 kfree_skb(skb);
5233 continue;
5234 }
5235
5236 if (test_bit(HCI_INIT, &hdev->flags)) {
5237 /* Don't process data packets in this state. */
5238 switch (bt_cb(skb)->pkt_type) {
5239 case HCI_ACLDATA_PKT:
5240 case HCI_SCODATA_PKT:
5241 kfree_skb(skb);
5242 continue;
5243 }
5244 }
5245
5246 /* Process frame */
5247 switch (bt_cb(skb)->pkt_type) {
5248 case HCI_EVENT_PKT:
5249 BT_DBG("%s Event packet", hdev->name);
5250 hci_event_packet(hdev, skb);
5251 break;
5252
5253 case HCI_ACLDATA_PKT:
5254 BT_DBG("%s ACL data packet", hdev->name);
5255 hci_acldata_packet(hdev, skb);
5256 break;
5257
5258 case HCI_SCODATA_PKT:
5259 BT_DBG("%s SCO data packet", hdev->name);
5260 hci_scodata_packet(hdev, skb);
5261 break;
5262
5263 default:
5264 kfree_skb(skb);
5265 break;
5266 }
5267 }
5268 }
5269
5270 static void hci_cmd_work(struct work_struct *work)
5271 {
5272 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5273 struct sk_buff *skb;
5274
5275 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5276 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5277
5278 /* Send queued commands */
5279 if (atomic_read(&hdev->cmd_cnt)) {
5280 skb = skb_dequeue(&hdev->cmd_q);
5281 if (!skb)
5282 return;
5283
5284 kfree_skb(hdev->sent_cmd);
5285
5286 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5287 if (hdev->sent_cmd) {
5288 atomic_dec(&hdev->cmd_cnt);
5289 hci_send_frame(hdev, skb);
5290 if (test_bit(HCI_RESET, &hdev->flags))
5291 cancel_delayed_work(&hdev->cmd_timer);
5292 else
5293 schedule_delayed_work(&hdev->cmd_timer,
5294 HCI_CMD_TIMEOUT);
5295 } else {
5296 skb_queue_head(&hdev->cmd_q, skb);
5297 queue_work(hdev->workqueue, &hdev->cmd_work);
5298 }
5299 }
5300 }
5301
5302 void hci_req_add_le_scan_disable(struct hci_request *req)
5303 {
5304 struct hci_cp_le_set_scan_enable cp;
5305
5306 memset(&cp, 0, sizeof(cp));
5307 cp.enable = LE_SCAN_DISABLE;
5308 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5309 }
5310
5311 void hci_req_add_le_passive_scan(struct hci_request *req)
5312 {
5313 struct hci_cp_le_set_scan_param param_cp;
5314 struct hci_cp_le_set_scan_enable enable_cp;
5315 struct hci_dev *hdev = req->hdev;
5316 u8 own_addr_type;
5317
5318 /* Set require_privacy to true to avoid identification from
5319 * unknown peer devices. Since this is passive scanning, no
5320 * SCAN_REQ using the local identity should be sent. Mandating
5321 * privacy is just an extra precaution.
5322 */
5323 if (hci_update_random_address(req, true, &own_addr_type))
5324 return;
5325
5326 memset(&param_cp, 0, sizeof(param_cp));
5327 param_cp.type = LE_SCAN_PASSIVE;
5328 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5329 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5330 param_cp.own_address_type = own_addr_type;
5331 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5332 &param_cp);
5333
5334 memset(&enable_cp, 0, sizeof(enable_cp));
5335 enable_cp.enable = LE_SCAN_ENABLE;
5336 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5337 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5338 &enable_cp);
5339 }
5340
5341 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5342 {
5343 if (status)
5344 BT_DBG("HCI request failed to update background scanning: "
5345 "status 0x%2.2x", status);
5346 }
5347
5348 /* This function controls the background scanning based on hdev->pend_le_conns
5349 	 * list. If there are pending LE connections, we start the background scanning,
5350 * otherwise we stop it.
5351 *
5352 	 * This function requires that the caller holds hdev->lock.
5353 */
5354 void hci_update_background_scan(struct hci_dev *hdev)
5355 {
5356 struct hci_request req;
5357 struct hci_conn *conn;
5358 int err;
5359
5360 hci_req_init(&req, hdev);
5361
5362 if (list_empty(&hdev->pend_le_conns)) {
5363 /* If there are no pending LE connections, we should stop
5364 * the background scanning.
5365 */
5366
5367 /* If the controller is not scanning, we are done. */
5368 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5369 return;
5370
5371 hci_req_add_le_scan_disable(&req);
5372
5373 BT_DBG("%s stopping background scanning", hdev->name);
5374 } else {
5375 /* If there is at least one pending LE connection, we should
5376 * keep the background scan running.
5377 */
5378
5379 /* If the controller is connecting, we should not start scanning
5380 * since some controllers are not able to scan and connect at
5381 * the same time.
5382 */
5383 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5384 if (conn)
5385 return;
5386
5387 /* If the controller is currently scanning, we stop it to ensure we
5388 * don't miss any advertising (due to duplicates filter).
5389 */
5390 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5391 hci_req_add_le_scan_disable(&req);
5392
5393 hci_req_add_le_passive_scan(&req);
5394
5395 BT_DBG("%s starting background scanning", hdev->name);
5396 }
5397
5398 err = hci_req_run(&req, update_background_scan_complete);
5399 if (err)
5400 BT_ERR("Failed to run HCI request: err %d", err);
5401 }