Bluetooth: Add quirk for external configuration requirement
net/bluetooth/hci_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/export.h>
29 #include <linux/idr.h>
30 #include <linux/rfkill.h>
31 #include <linux/debugfs.h>
32 #include <linux/crypto.h>
33 #include <asm/unaligned.h>
34
35 #include <net/bluetooth/bluetooth.h>
36 #include <net/bluetooth/hci_core.h>
37 #include <net/bluetooth/l2cap.h>
38 #include <net/bluetooth/mgmt.h>
39
40 #include "smp.h"
41
42 static void hci_rx_work(struct work_struct *work);
43 static void hci_cmd_work(struct work_struct *work);
44 static void hci_tx_work(struct work_struct *work);
45
46 /* HCI device list */
47 LIST_HEAD(hci_dev_list);
48 DEFINE_RWLOCK(hci_dev_list_lock);
49
50 /* HCI callback list */
51 LIST_HEAD(hci_cb_list);
52 DEFINE_RWLOCK(hci_cb_list_lock);
53
54 /* HCI ID Numbering */
55 static DEFINE_IDA(hci_index_ida);
56
57 /* ---- HCI notifications ---- */
58
59 static void hci_notify(struct hci_dev *hdev, int event)
60 {
61 hci_sock_dev_event(hdev, event);
62 }
63
64 /* ---- HCI debugfs entries ---- */
65
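/* The dut_mode debugfs attribute exposes Device Under Test mode as a
 * 'Y'/'N' boolean. Writing it sends HCI_OP_ENABLE_DUT_MODE (or a reset
 * to leave the mode) and toggles the HCI_DUT_MODE debug flag accordingly.
 */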
66 static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
67 size_t count, loff_t *ppos)
68 {
69 struct hci_dev *hdev = file->private_data;
70 char buf[3];
71
72 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
73 buf[1] = '\n';
74 buf[2] = '\0';
75 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
76 }
77
78 static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
79 size_t count, loff_t *ppos)
80 {
81 struct hci_dev *hdev = file->private_data;
82 struct sk_buff *skb;
83 char buf[32];
84 size_t buf_size = min(count, (sizeof(buf)-1));
85 bool enable;
86 int err;
87
88 if (!test_bit(HCI_UP, &hdev->flags))
89 return -ENETDOWN;
90
91 if (copy_from_user(buf, user_buf, buf_size))
92 return -EFAULT;
93
94 buf[buf_size] = '\0';
95 if (strtobool(buf, &enable))
96 return -EINVAL;
97
98 if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
99 return -EALREADY;
100
101 hci_req_lock(hdev);
102 if (enable)
103 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
104 HCI_CMD_TIMEOUT);
105 else
106 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
107 HCI_CMD_TIMEOUT);
108 hci_req_unlock(hdev);
109
110 if (IS_ERR(skb))
111 return PTR_ERR(skb);
112
113 err = -bt_to_errno(skb->data[0]);
114 kfree_skb(skb);
115
116 if (err < 0)
117 return err;
118
119 change_bit(HCI_DUT_MODE, &hdev->dbg_flags);
120
121 return count;
122 }
123
124 static const struct file_operations dut_mode_fops = {
125 .open = simple_open,
126 .read = dut_mode_read,
127 .write = dut_mode_write,
128 .llseek = default_llseek,
129 };
130
131 static int features_show(struct seq_file *f, void *ptr)
132 {
133 struct hci_dev *hdev = f->private;
134 u8 p;
135
136 hci_dev_lock(hdev);
137 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
138 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
139 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
140 hdev->features[p][0], hdev->features[p][1],
141 hdev->features[p][2], hdev->features[p][3],
142 hdev->features[p][4], hdev->features[p][5],
143 hdev->features[p][6], hdev->features[p][7]);
144 }
145 if (lmp_le_capable(hdev))
146 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
147 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
148 hdev->le_features[0], hdev->le_features[1],
149 hdev->le_features[2], hdev->le_features[3],
150 hdev->le_features[4], hdev->le_features[5],
151 hdev->le_features[6], hdev->le_features[7]);
152 hci_dev_unlock(hdev);
153
154 return 0;
155 }
156
157 static int features_open(struct inode *inode, struct file *file)
158 {
159 return single_open(file, features_show, inode->i_private);
160 }
161
162 static const struct file_operations features_fops = {
163 .open = features_open,
164 .read = seq_read,
165 .llseek = seq_lseek,
166 .release = single_release,
167 };
168
169 static int blacklist_show(struct seq_file *f, void *p)
170 {
171 struct hci_dev *hdev = f->private;
172 struct bdaddr_list *b;
173
174 hci_dev_lock(hdev);
175 list_for_each_entry(b, &hdev->blacklist, list)
176 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
177 hci_dev_unlock(hdev);
178
179 return 0;
180 }
181
182 static int blacklist_open(struct inode *inode, struct file *file)
183 {
184 return single_open(file, blacklist_show, inode->i_private);
185 }
186
187 static const struct file_operations blacklist_fops = {
188 .open = blacklist_open,
189 .read = seq_read,
190 .llseek = seq_lseek,
191 .release = single_release,
192 };
193
194 static int uuids_show(struct seq_file *f, void *p)
195 {
196 struct hci_dev *hdev = f->private;
197 struct bt_uuid *uuid;
198
199 hci_dev_lock(hdev);
200 list_for_each_entry(uuid, &hdev->uuids, list) {
201 u8 i, val[16];
202
203 /* The Bluetooth UUID values are stored in big endian,
204 * but with reversed byte order. So convert them into
205 * the right order for the %pUb modifier.
206 */
207 for (i = 0; i < 16; i++)
208 val[i] = uuid->uuid[15 - i];
209
210 seq_printf(f, "%pUb\n", val);
211 }
212 hci_dev_unlock(hdev);
213
214 return 0;
215 }
216
217 static int uuids_open(struct inode *inode, struct file *file)
218 {
219 return single_open(file, uuids_show, inode->i_private);
220 }
221
222 static const struct file_operations uuids_fops = {
223 .open = uuids_open,
224 .read = seq_read,
225 .llseek = seq_lseek,
226 .release = single_release,
227 };
228
229 static int inquiry_cache_show(struct seq_file *f, void *p)
230 {
231 struct hci_dev *hdev = f->private;
232 struct discovery_state *cache = &hdev->discovery;
233 struct inquiry_entry *e;
234
235 hci_dev_lock(hdev);
236
237 list_for_each_entry(e, &cache->all, all) {
238 struct inquiry_data *data = &e->data;
239 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
240 &data->bdaddr,
241 data->pscan_rep_mode, data->pscan_period_mode,
242 data->pscan_mode, data->dev_class[2],
243 data->dev_class[1], data->dev_class[0],
244 __le16_to_cpu(data->clock_offset),
245 data->rssi, data->ssp_mode, e->timestamp);
246 }
247
248 hci_dev_unlock(hdev);
249
250 return 0;
251 }
252
253 static int inquiry_cache_open(struct inode *inode, struct file *file)
254 {
255 return single_open(file, inquiry_cache_show, inode->i_private);
256 }
257
258 static const struct file_operations inquiry_cache_fops = {
259 .open = inquiry_cache_open,
260 .read = seq_read,
261 .llseek = seq_lseek,
262 .release = single_release,
263 };
264
265 static int link_keys_show(struct seq_file *f, void *ptr)
266 {
267 struct hci_dev *hdev = f->private;
268 struct list_head *p, *n;
269
270 hci_dev_lock(hdev);
271 list_for_each_safe(p, n, &hdev->link_keys) {
272 struct link_key *key = list_entry(p, struct link_key, list);
273 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
274 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
275 }
276 hci_dev_unlock(hdev);
277
278 return 0;
279 }
280
281 static int link_keys_open(struct inode *inode, struct file *file)
282 {
283 return single_open(file, link_keys_show, inode->i_private);
284 }
285
286 static const struct file_operations link_keys_fops = {
287 .open = link_keys_open,
288 .read = seq_read,
289 .llseek = seq_lseek,
290 .release = single_release,
291 };
292
293 static int dev_class_show(struct seq_file *f, void *ptr)
294 {
295 struct hci_dev *hdev = f->private;
296
297 hci_dev_lock(hdev);
298 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
299 hdev->dev_class[1], hdev->dev_class[0]);
300 hci_dev_unlock(hdev);
301
302 return 0;
303 }
304
305 static int dev_class_open(struct inode *inode, struct file *file)
306 {
307 return single_open(file, dev_class_show, inode->i_private);
308 }
309
310 static const struct file_operations dev_class_fops = {
311 .open = dev_class_open,
312 .read = seq_read,
313 .llseek = seq_lseek,
314 .release = single_release,
315 };
316
317 static int voice_setting_get(void *data, u64 *val)
318 {
319 struct hci_dev *hdev = data;
320
321 hci_dev_lock(hdev);
322 *val = hdev->voice_setting;
323 hci_dev_unlock(hdev);
324
325 return 0;
326 }
327
328 DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
329 NULL, "0x%4.4llx\n");
330
331 static int auto_accept_delay_set(void *data, u64 val)
332 {
333 struct hci_dev *hdev = data;
334
335 hci_dev_lock(hdev);
336 hdev->auto_accept_delay = val;
337 hci_dev_unlock(hdev);
338
339 return 0;
340 }
341
342 static int auto_accept_delay_get(void *data, u64 *val)
343 {
344 struct hci_dev *hdev = data;
345
346 hci_dev_lock(hdev);
347 *val = hdev->auto_accept_delay;
348 hci_dev_unlock(hdev);
349
350 return 0;
351 }
352
353 DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
354 auto_accept_delay_set, "%llu\n");
355
356 static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
357 size_t count, loff_t *ppos)
358 {
359 struct hci_dev *hdev = file->private_data;
360 char buf[3];
361
362 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
363 buf[1] = '\n';
364 buf[2] = '\0';
365 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
366 }
367
368 static ssize_t force_sc_support_write(struct file *file,
369 const char __user *user_buf,
370 size_t count, loff_t *ppos)
371 {
372 struct hci_dev *hdev = file->private_data;
373 char buf[32];
374 size_t buf_size = min(count, (sizeof(buf)-1));
375 bool enable;
376
377 if (test_bit(HCI_UP, &hdev->flags))
378 return -EBUSY;
379
380 if (copy_from_user(buf, user_buf, buf_size))
381 return -EFAULT;
382
383 buf[buf_size] = '\0';
384 if (strtobool(buf, &enable))
385 return -EINVAL;
386
387 if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
388 return -EALREADY;
389
390 change_bit(HCI_FORCE_SC, &hdev->dbg_flags);
391
392 return count;
393 }
394
395 static const struct file_operations force_sc_support_fops = {
396 .open = simple_open,
397 .read = force_sc_support_read,
398 .write = force_sc_support_write,
399 .llseek = default_llseek,
400 };
401
402 static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
403 size_t count, loff_t *ppos)
404 {
405 struct hci_dev *hdev = file->private_data;
406 char buf[3];
407
408 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
409 buf[1] = '\n';
410 buf[2] = '\0';
411 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
412 }
413
414 static const struct file_operations sc_only_mode_fops = {
415 .open = simple_open,
416 .read = sc_only_mode_read,
417 .llseek = default_llseek,
418 };
419
420 static int idle_timeout_set(void *data, u64 val)
421 {
422 struct hci_dev *hdev = data;
423
424 if (val != 0 && (val < 500 || val > 3600000))
425 return -EINVAL;
426
427 hci_dev_lock(hdev);
428 hdev->idle_timeout = val;
429 hci_dev_unlock(hdev);
430
431 return 0;
432 }
433
434 static int idle_timeout_get(void *data, u64 *val)
435 {
436 struct hci_dev *hdev = data;
437
438 hci_dev_lock(hdev);
439 *val = hdev->idle_timeout;
440 hci_dev_unlock(hdev);
441
442 return 0;
443 }
444
445 DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
446 idle_timeout_set, "%llu\n");
447
448 static int rpa_timeout_set(void *data, u64 val)
449 {
450 struct hci_dev *hdev = data;
451
452 /* Require the RPA timeout to be at least 30 seconds and at most
453 * 24 hours.
454 */
455 if (val < 30 || val > (60 * 60 * 24))
456 return -EINVAL;
457
458 hci_dev_lock(hdev);
459 hdev->rpa_timeout = val;
460 hci_dev_unlock(hdev);
461
462 return 0;
463 }
464
465 static int rpa_timeout_get(void *data, u64 *val)
466 {
467 struct hci_dev *hdev = data;
468
469 hci_dev_lock(hdev);
470 *val = hdev->rpa_timeout;
471 hci_dev_unlock(hdev);
472
473 return 0;
474 }
475
476 DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
477 rpa_timeout_set, "%llu\n");
478
479 static int sniff_min_interval_set(void *data, u64 val)
480 {
481 struct hci_dev *hdev = data;
482
483 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
484 return -EINVAL;
485
486 hci_dev_lock(hdev);
487 hdev->sniff_min_interval = val;
488 hci_dev_unlock(hdev);
489
490 return 0;
491 }
492
493 static int sniff_min_interval_get(void *data, u64 *val)
494 {
495 struct hci_dev *hdev = data;
496
497 hci_dev_lock(hdev);
498 *val = hdev->sniff_min_interval;
499 hci_dev_unlock(hdev);
500
501 return 0;
502 }
503
504 DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
505 sniff_min_interval_set, "%llu\n");
506
507 static int sniff_max_interval_set(void *data, u64 val)
508 {
509 struct hci_dev *hdev = data;
510
511 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
512 return -EINVAL;
513
514 hci_dev_lock(hdev);
515 hdev->sniff_max_interval = val;
516 hci_dev_unlock(hdev);
517
518 return 0;
519 }
520
521 static int sniff_max_interval_get(void *data, u64 *val)
522 {
523 struct hci_dev *hdev = data;
524
525 hci_dev_lock(hdev);
526 *val = hdev->sniff_max_interval;
527 hci_dev_unlock(hdev);
528
529 return 0;
530 }
531
532 DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
533 sniff_max_interval_set, "%llu\n");
534
535 static int conn_info_min_age_set(void *data, u64 val)
536 {
537 struct hci_dev *hdev = data;
538
539 if (val == 0 || val > hdev->conn_info_max_age)
540 return -EINVAL;
541
542 hci_dev_lock(hdev);
543 hdev->conn_info_min_age = val;
544 hci_dev_unlock(hdev);
545
546 return 0;
547 }
548
549 static int conn_info_min_age_get(void *data, u64 *val)
550 {
551 struct hci_dev *hdev = data;
552
553 hci_dev_lock(hdev);
554 *val = hdev->conn_info_min_age;
555 hci_dev_unlock(hdev);
556
557 return 0;
558 }
559
560 DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
561 conn_info_min_age_set, "%llu\n");
562
563 static int conn_info_max_age_set(void *data, u64 val)
564 {
565 struct hci_dev *hdev = data;
566
567 if (val == 0 || val < hdev->conn_info_min_age)
568 return -EINVAL;
569
570 hci_dev_lock(hdev);
571 hdev->conn_info_max_age = val;
572 hci_dev_unlock(hdev);
573
574 return 0;
575 }
576
577 static int conn_info_max_age_get(void *data, u64 *val)
578 {
579 struct hci_dev *hdev = data;
580
581 hci_dev_lock(hdev);
582 *val = hdev->conn_info_max_age;
583 hci_dev_unlock(hdev);
584
585 return 0;
586 }
587
588 DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
589 conn_info_max_age_set, "%llu\n");
590
591 static int identity_show(struct seq_file *f, void *p)
592 {
593 struct hci_dev *hdev = f->private;
594 bdaddr_t addr;
595 u8 addr_type;
596
597 hci_dev_lock(hdev);
598
599 hci_copy_identity_address(hdev, &addr, &addr_type);
600
601 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
602 16, hdev->irk, &hdev->rpa);
603
604 hci_dev_unlock(hdev);
605
606 return 0;
607 }
608
609 static int identity_open(struct inode *inode, struct file *file)
610 {
611 return single_open(file, identity_show, inode->i_private);
612 }
613
614 static const struct file_operations identity_fops = {
615 .open = identity_open,
616 .read = seq_read,
617 .llseek = seq_lseek,
618 .release = single_release,
619 };
620
621 static int random_address_show(struct seq_file *f, void *p)
622 {
623 struct hci_dev *hdev = f->private;
624
625 hci_dev_lock(hdev);
626 seq_printf(f, "%pMR\n", &hdev->random_addr);
627 hci_dev_unlock(hdev);
628
629 return 0;
630 }
631
632 static int random_address_open(struct inode *inode, struct file *file)
633 {
634 return single_open(file, random_address_show, inode->i_private);
635 }
636
637 static const struct file_operations random_address_fops = {
638 .open = random_address_open,
639 .read = seq_read,
640 .llseek = seq_lseek,
641 .release = single_release,
642 };
643
644 static int static_address_show(struct seq_file *f, void *p)
645 {
646 struct hci_dev *hdev = f->private;
647
648 hci_dev_lock(hdev);
649 seq_printf(f, "%pMR\n", &hdev->static_addr);
650 hci_dev_unlock(hdev);
651
652 return 0;
653 }
654
655 static int static_address_open(struct inode *inode, struct file *file)
656 {
657 return single_open(file, static_address_show, inode->i_private);
658 }
659
660 static const struct file_operations static_address_fops = {
661 .open = static_address_open,
662 .read = seq_read,
663 .llseek = seq_lseek,
664 .release = single_release,
665 };
666
667 static ssize_t force_static_address_read(struct file *file,
668 char __user *user_buf,
669 size_t count, loff_t *ppos)
670 {
671 struct hci_dev *hdev = file->private_data;
672 char buf[3];
673
674 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
675 buf[1] = '\n';
676 buf[2] = '\0';
677 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
678 }
679
680 static ssize_t force_static_address_write(struct file *file,
681 const char __user *user_buf,
682 size_t count, loff_t *ppos)
683 {
684 struct hci_dev *hdev = file->private_data;
685 char buf[32];
686 size_t buf_size = min(count, (sizeof(buf)-1));
687 bool enable;
688
689 if (test_bit(HCI_UP, &hdev->flags))
690 return -EBUSY;
691
692 if (copy_from_user(buf, user_buf, buf_size))
693 return -EFAULT;
694
695 buf[buf_size] = '\0';
696 if (strtobool(buf, &enable))
697 return -EINVAL;
698
699 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
700 return -EALREADY;
701
702 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);
703
704 return count;
705 }
706
707 static const struct file_operations force_static_address_fops = {
708 .open = simple_open,
709 .read = force_static_address_read,
710 .write = force_static_address_write,
711 .llseek = default_llseek,
712 };
713
714 static int white_list_show(struct seq_file *f, void *ptr)
715 {
716 struct hci_dev *hdev = f->private;
717 struct bdaddr_list *b;
718
719 hci_dev_lock(hdev);
720 list_for_each_entry(b, &hdev->le_white_list, list)
721 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
722 hci_dev_unlock(hdev);
723
724 return 0;
725 }
726
727 static int white_list_open(struct inode *inode, struct file *file)
728 {
729 return single_open(file, white_list_show, inode->i_private);
730 }
731
732 static const struct file_operations white_list_fops = {
733 .open = white_list_open,
734 .read = seq_read,
735 .llseek = seq_lseek,
736 .release = single_release,
737 };
738
739 static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
740 {
741 struct hci_dev *hdev = f->private;
742 struct list_head *p, *n;
743
744 hci_dev_lock(hdev);
745 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
746 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
747 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
748 &irk->bdaddr, irk->addr_type,
749 16, irk->val, &irk->rpa);
750 }
751 hci_dev_unlock(hdev);
752
753 return 0;
754 }
755
756 static int identity_resolving_keys_open(struct inode *inode, struct file *file)
757 {
758 return single_open(file, identity_resolving_keys_show,
759 inode->i_private);
760 }
761
762 static const struct file_operations identity_resolving_keys_fops = {
763 .open = identity_resolving_keys_open,
764 .read = seq_read,
765 .llseek = seq_lseek,
766 .release = single_release,
767 };
768
769 static int long_term_keys_show(struct seq_file *f, void *ptr)
770 {
771 struct hci_dev *hdev = f->private;
772 struct list_head *p, *n;
773
774 hci_dev_lock(hdev);
775 list_for_each_safe(p, n, &hdev->long_term_keys) {
776 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
777 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
778 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
779 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
780 __le64_to_cpu(ltk->rand), 16, ltk->val);
781 }
782 hci_dev_unlock(hdev);
783
784 return 0;
785 }
786
787 static int long_term_keys_open(struct inode *inode, struct file *file)
788 {
789 return single_open(file, long_term_keys_show, inode->i_private);
790 }
791
792 static const struct file_operations long_term_keys_fops = {
793 .open = long_term_keys_open,
794 .read = seq_read,
795 .llseek = seq_lseek,
796 .release = single_release,
797 };
798
799 static int conn_min_interval_set(void *data, u64 val)
800 {
801 struct hci_dev *hdev = data;
802
803 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
804 return -EINVAL;
805
806 hci_dev_lock(hdev);
807 hdev->le_conn_min_interval = val;
808 hci_dev_unlock(hdev);
809
810 return 0;
811 }
812
813 static int conn_min_interval_get(void *data, u64 *val)
814 {
815 struct hci_dev *hdev = data;
816
817 hci_dev_lock(hdev);
818 *val = hdev->le_conn_min_interval;
819 hci_dev_unlock(hdev);
820
821 return 0;
822 }
823
824 DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
825 conn_min_interval_set, "%llu\n");
826
827 static int conn_max_interval_set(void *data, u64 val)
828 {
829 struct hci_dev *hdev = data;
830
831 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
832 return -EINVAL;
833
834 hci_dev_lock(hdev);
835 hdev->le_conn_max_interval = val;
836 hci_dev_unlock(hdev);
837
838 return 0;
839 }
840
841 static int conn_max_interval_get(void *data, u64 *val)
842 {
843 struct hci_dev *hdev = data;
844
845 hci_dev_lock(hdev);
846 *val = hdev->le_conn_max_interval;
847 hci_dev_unlock(hdev);
848
849 return 0;
850 }
851
852 DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
853 conn_max_interval_set, "%llu\n");
854
855 static int conn_latency_set(void *data, u64 val)
856 {
857 struct hci_dev *hdev = data;
858
859 if (val > 0x01f3)
860 return -EINVAL;
861
862 hci_dev_lock(hdev);
863 hdev->le_conn_latency = val;
864 hci_dev_unlock(hdev);
865
866 return 0;
867 }
868
869 static int conn_latency_get(void *data, u64 *val)
870 {
871 struct hci_dev *hdev = data;
872
873 hci_dev_lock(hdev);
874 *val = hdev->le_conn_latency;
875 hci_dev_unlock(hdev);
876
877 return 0;
878 }
879
880 DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
881 conn_latency_set, "%llu\n");
882
883 static int supervision_timeout_set(void *data, u64 val)
884 {
885 struct hci_dev *hdev = data;
886
887 if (val < 0x000a || val > 0x0c80)
888 return -EINVAL;
889
890 hci_dev_lock(hdev);
891 hdev->le_supv_timeout = val;
892 hci_dev_unlock(hdev);
893
894 return 0;
895 }
896
897 static int supervision_timeout_get(void *data, u64 *val)
898 {
899 struct hci_dev *hdev = data;
900
901 hci_dev_lock(hdev);
902 *val = hdev->le_supv_timeout;
903 hci_dev_unlock(hdev);
904
905 return 0;
906 }
907
908 DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
909 supervision_timeout_set, "%llu\n");
910
911 static int adv_channel_map_set(void *data, u64 val)
912 {
913 struct hci_dev *hdev = data;
914
915 if (val < 0x01 || val > 0x07)
916 return -EINVAL;
917
918 hci_dev_lock(hdev);
919 hdev->le_adv_channel_map = val;
920 hci_dev_unlock(hdev);
921
922 return 0;
923 }
924
925 static int adv_channel_map_get(void *data, u64 *val)
926 {
927 struct hci_dev *hdev = data;
928
929 hci_dev_lock(hdev);
930 *val = hdev->le_adv_channel_map;
931 hci_dev_unlock(hdev);
932
933 return 0;
934 }
935
936 DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
937 adv_channel_map_set, "%llu\n");
938
939 static int device_list_show(struct seq_file *f, void *ptr)
940 {
941 struct hci_dev *hdev = f->private;
942 struct hci_conn_params *p;
943
944 hci_dev_lock(hdev);
945 list_for_each_entry(p, &hdev->le_conn_params, list) {
946 seq_printf(f, "%pMR %u %u\n", &p->addr, p->addr_type,
947 p->auto_connect);
948 }
949 hci_dev_unlock(hdev);
950
951 return 0;
952 }
953
954 static int device_list_open(struct inode *inode, struct file *file)
955 {
956 return single_open(file, device_list_show, inode->i_private);
957 }
958
959 static const struct file_operations device_list_fops = {
960 .open = device_list_open,
961 .read = seq_read,
962 .llseek = seq_lseek,
963 .release = single_release,
964 };
965
966 /* ---- HCI requests ---- */
967
968 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
969 {
970 BT_DBG("%s result 0x%2.2x", hdev->name, result);
971
972 if (hdev->req_status == HCI_REQ_PEND) {
973 hdev->req_result = result;
974 hdev->req_status = HCI_REQ_DONE;
975 wake_up_interruptible(&hdev->req_wait_q);
976 }
977 }
978
979 static void hci_req_cancel(struct hci_dev *hdev, int err)
980 {
981 BT_DBG("%s err 0x%2.2x", hdev->name, err);
982
983 if (hdev->req_status == HCI_REQ_PEND) {
984 hdev->req_result = err;
985 hdev->req_status = HCI_REQ_CANCELED;
986 wake_up_interruptible(&hdev->req_wait_q);
987 }
988 }
989
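/* Grab the last received event skb (hdev->recv_evt) and check that it is
 * the requested event or a Command Complete for the given opcode. On any
 * mismatch the skb is freed and ERR_PTR(-ENODATA) is returned.
 */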
990 static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
991 u8 event)
992 {
993 struct hci_ev_cmd_complete *ev;
994 struct hci_event_hdr *hdr;
995 struct sk_buff *skb;
996
997 hci_dev_lock(hdev);
998
999 skb = hdev->recv_evt;
1000 hdev->recv_evt = NULL;
1001
1002 hci_dev_unlock(hdev);
1003
1004 if (!skb)
1005 return ERR_PTR(-ENODATA);
1006
1007 if (skb->len < sizeof(*hdr)) {
1008 BT_ERR("Too short HCI event");
1009 goto failed;
1010 }
1011
1012 hdr = (void *) skb->data;
1013 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1014
1015 if (event) {
1016 if (hdr->evt != event)
1017 goto failed;
1018 return skb;
1019 }
1020
1021 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1022 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1023 goto failed;
1024 }
1025
1026 if (skb->len < sizeof(*ev)) {
1027 BT_ERR("Too short cmd_complete event");
1028 goto failed;
1029 }
1030
1031 ev = (void *) skb->data;
1032 skb_pull(skb, sizeof(*ev));
1033
1034 if (opcode == __le16_to_cpu(ev->opcode))
1035 return skb;
1036
1037 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1038 __le16_to_cpu(ev->opcode));
1039
1040 failed:
1041 kfree_skb(skb);
1042 return ERR_PTR(-ENODATA);
1043 }
1044
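/* Send a single HCI command and wait for the matching Command Complete
 * (or the explicitly requested event). Returns the event skb, which the
 * caller must free with kfree_skb(), or an ERR_PTR() on failure/timeout.
 */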
1045 struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
1046 const void *param, u8 event, u32 timeout)
1047 {
1048 DECLARE_WAITQUEUE(wait, current);
1049 struct hci_request req;
1050 int err = 0;
1051
1052 BT_DBG("%s", hdev->name);
1053
1054 hci_req_init(&req, hdev);
1055
1056 hci_req_add_ev(&req, opcode, plen, param, event);
1057
1058 hdev->req_status = HCI_REQ_PEND;
1059
1060 err = hci_req_run(&req, hci_req_sync_complete);
1061 if (err < 0)
1062 return ERR_PTR(err);
1063
1064 add_wait_queue(&hdev->req_wait_q, &wait);
1065 set_current_state(TASK_INTERRUPTIBLE);
1066
1067 schedule_timeout(timeout);
1068
1069 remove_wait_queue(&hdev->req_wait_q, &wait);
1070
1071 if (signal_pending(current))
1072 return ERR_PTR(-EINTR);
1073
1074 switch (hdev->req_status) {
1075 case HCI_REQ_DONE:
1076 err = -bt_to_errno(hdev->req_result);
1077 break;
1078
1079 case HCI_REQ_CANCELED:
1080 err = -hdev->req_result;
1081 break;
1082
1083 default:
1084 err = -ETIMEDOUT;
1085 break;
1086 }
1087
1088 hdev->req_status = hdev->req_result = 0;
1089
1090 BT_DBG("%s end: err %d", hdev->name, err);
1091
1092 if (err < 0)
1093 return ERR_PTR(err);
1094
1095 return hci_get_cmd_complete(hdev, opcode, event);
1096 }
1097 EXPORT_SYMBOL(__hci_cmd_sync_ev);
1098
1099 struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
1100 const void *param, u32 timeout)
1101 {
1102 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
1103 }
1104 EXPORT_SYMBOL(__hci_cmd_sync);
1105
1106 /* Execute request and wait for completion. */
1107 static int __hci_req_sync(struct hci_dev *hdev,
1108 void (*func)(struct hci_request *req,
1109 unsigned long opt),
1110 unsigned long opt, __u32 timeout)
1111 {
1112 struct hci_request req;
1113 DECLARE_WAITQUEUE(wait, current);
1114 int err = 0;
1115
1116 BT_DBG("%s start", hdev->name);
1117
1118 hci_req_init(&req, hdev);
1119
1120 hdev->req_status = HCI_REQ_PEND;
1121
1122 func(&req, opt);
1123
1124 err = hci_req_run(&req, hci_req_sync_complete);
1125 if (err < 0) {
1126 hdev->req_status = 0;
1127
1128 /* ENODATA means the HCI request command queue is empty.
1129 * This can happen when a request with conditionals doesn't
1130 * trigger any commands to be sent. This is normal behavior
1131 * and should not trigger an error return.
1132 */
1133 if (err == -ENODATA)
1134 return 0;
1135
1136 return err;
1137 }
1138
1139 add_wait_queue(&hdev->req_wait_q, &wait);
1140 set_current_state(TASK_INTERRUPTIBLE);
1141
1142 schedule_timeout(timeout);
1143
1144 remove_wait_queue(&hdev->req_wait_q, &wait);
1145
1146 if (signal_pending(current))
1147 return -EINTR;
1148
1149 switch (hdev->req_status) {
1150 case HCI_REQ_DONE:
1151 err = -bt_to_errno(hdev->req_result);
1152 break;
1153
1154 case HCI_REQ_CANCELED:
1155 err = -hdev->req_result;
1156 break;
1157
1158 default:
1159 err = -ETIMEDOUT;
1160 break;
1161 }
1162
1163 hdev->req_status = hdev->req_result = 0;
1164
1165 BT_DBG("%s end: err %d", hdev->name, err);
1166
1167 return err;
1168 }
1169
1170 static int hci_req_sync(struct hci_dev *hdev,
1171 void (*req)(struct hci_request *req,
1172 unsigned long opt),
1173 unsigned long opt, __u32 timeout)
1174 {
1175 int ret;
1176
1177 if (!test_bit(HCI_UP, &hdev->flags))
1178 return -ENETDOWN;
1179
1180 /* Serialize all requests */
1181 hci_req_lock(hdev);
1182 ret = __hci_req_sync(hdev, req, opt, timeout);
1183 hci_req_unlock(hdev);
1184
1185 return ret;
1186 }
1187
1188 static void hci_reset_req(struct hci_request *req, unsigned long opt)
1189 {
1190 BT_DBG("%s %ld", req->hdev->name, opt);
1191
1192 /* Reset device */
1193 set_bit(HCI_RESET, &req->hdev->flags);
1194 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1195 }
1196
1197 static void bredr_init(struct hci_request *req)
1198 {
1199 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
1200
1201 /* Read Local Supported Features */
1202 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1203
1204 /* Read Local Version */
1205 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1206
1207 /* Read BD Address */
1208 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1209 }
1210
1211 static void amp_init(struct hci_request *req)
1212 {
1213 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
1214
1215 /* Read Local Version */
1216 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1217
1218 /* Read Local Supported Commands */
1219 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1220
1221 /* Read Local Supported Features */
1222 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1223
1224 /* Read Local AMP Info */
1225 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
1226
1227 /* Read Data Blk size */
1228 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
1229
1230 /* Read Flow Control Mode */
1231 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1232
1233 /* Read Location Data */
1234 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
1235 }
1236
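/* Stage one of the controller init sequence: optionally reset the
 * controller and read the basic information (features, version, address
 * or AMP info) needed by the later stages.
 */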
1237 static void hci_init1_req(struct hci_request *req, unsigned long opt)
1238 {
1239 struct hci_dev *hdev = req->hdev;
1240
1241 BT_DBG("%s %ld", hdev->name, opt);
1242
1243 /* Reset */
1244 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
1245 hci_reset_req(req, 0);
1246
1247 switch (hdev->dev_type) {
1248 case HCI_BREDR:
1249 bredr_init(req);
1250 break;
1251
1252 case HCI_AMP:
1253 amp_init(req);
1254 break;
1255
1256 default:
1257 BT_ERR("Unknown device type %d", hdev->dev_type);
1258 break;
1259 }
1260 }
1261
1262 static void bredr_setup(struct hci_request *req)
1263 {
1264 struct hci_dev *hdev = req->hdev;
1265
1266 __le16 param;
1267 __u8 flt_type;
1268
1269 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
1270 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1271
1272 /* Read Class of Device */
1273 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
1274
1275 /* Read Local Name */
1276 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1277
1278 /* Read Voice Setting */
1279 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1280
1281 /* Read Number of Supported IAC */
1282 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1283
1284 /* Read Current IAC LAP */
1285 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1286
1287 /* Clear Event Filters */
1288 flt_type = HCI_FLT_CLEAR_ALL;
1289 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1290
1291 /* Connection accept timeout ~20 secs */
1292 param = cpu_to_le16(0x7d00);
1293 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
1294
1295 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1296 * but it does not support page scan related HCI commands.
1297 */
1298 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
1299 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1300 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1301 }
1302 }
1303
1304 static void le_setup(struct hci_request *req)
1305 {
1306 struct hci_dev *hdev = req->hdev;
1307
1308 /* Read LE Buffer Size */
1309 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
1310
1311 /* Read LE Local Supported Features */
1312 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
1313
1314 /* Read LE Supported States */
1315 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1316
1317 /* Read LE Advertising Channel TX Power */
1318 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
1319
1320 /* Read LE White List Size */
1321 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
1322
1323 /* Clear LE White List */
1324 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
1325
1326 /* LE-only controllers have LE implicitly enabled */
1327 if (!lmp_bredr_capable(hdev))
1328 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1329 }
1330
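/* Select the Inquiry Result format: 0x02 for extended inquiry results,
 * 0x01 for results with RSSI and 0x00 for the standard format. A few
 * controllers, matched by manufacturer and revision, handle RSSI results
 * even though they do not advertise the corresponding feature bit.
 */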
1331 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1332 {
1333 if (lmp_ext_inq_capable(hdev))
1334 return 0x02;
1335
1336 if (lmp_inq_rssi_capable(hdev))
1337 return 0x01;
1338
1339 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1340 hdev->lmp_subver == 0x0757)
1341 return 0x01;
1342
1343 if (hdev->manufacturer == 15) {
1344 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1345 return 0x01;
1346 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1347 return 0x01;
1348 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1349 return 0x01;
1350 }
1351
1352 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1353 hdev->lmp_subver == 0x1805)
1354 return 0x01;
1355
1356 return 0x00;
1357 }
1358
1359 static void hci_setup_inquiry_mode(struct hci_request *req)
1360 {
1361 u8 mode;
1362
1363 mode = hci_get_inquiry_mode(req->hdev);
1364
1365 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
1366 }
1367
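/* Build the Set Event Mask payload from the controller's capabilities so
 * that only events the controller can actually generate are unmasked.
 */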
1368 static void hci_setup_event_mask(struct hci_request *req)
1369 {
1370 struct hci_dev *hdev = req->hdev;
1371
1372 /* The second byte is 0xff instead of 0x9f (two reserved bits
1373 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1374 * command otherwise.
1375 */
1376 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1377
1378 /* CSR 1.1 dongles do not accept any bitfield so don't try to set
1379 * any event mask for pre 1.2 devices.
1380 */
1381 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1382 return;
1383
1384 if (lmp_bredr_capable(hdev)) {
1385 events[4] |= 0x01; /* Flow Specification Complete */
1386 events[4] |= 0x02; /* Inquiry Result with RSSI */
1387 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1388 events[5] |= 0x08; /* Synchronous Connection Complete */
1389 events[5] |= 0x10; /* Synchronous Connection Changed */
1390 } else {
1391 /* Use a different default for LE-only devices */
1392 memset(events, 0, sizeof(events));
1393 events[0] |= 0x10; /* Disconnection Complete */
1394 events[0] |= 0x80; /* Encryption Change */
1395 events[1] |= 0x08; /* Read Remote Version Information Complete */
1396 events[1] |= 0x20; /* Command Complete */
1397 events[1] |= 0x40; /* Command Status */
1398 events[1] |= 0x80; /* Hardware Error */
1399 events[2] |= 0x04; /* Number of Completed Packets */
1400 events[3] |= 0x02; /* Data Buffer Overflow */
1401 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1402 }
1403
1404 if (lmp_inq_rssi_capable(hdev))
1405 events[4] |= 0x02; /* Inquiry Result with RSSI */
1406
1407 if (lmp_sniffsubr_capable(hdev))
1408 events[5] |= 0x20; /* Sniff Subrating */
1409
1410 if (lmp_pause_enc_capable(hdev))
1411 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1412
1413 if (lmp_ext_inq_capable(hdev))
1414 events[5] |= 0x40; /* Extended Inquiry Result */
1415
1416 if (lmp_no_flush_capable(hdev))
1417 events[7] |= 0x01; /* Enhanced Flush Complete */
1418
1419 if (lmp_lsto_capable(hdev))
1420 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1421
1422 if (lmp_ssp_capable(hdev)) {
1423 events[6] |= 0x01; /* IO Capability Request */
1424 events[6] |= 0x02; /* IO Capability Response */
1425 events[6] |= 0x04; /* User Confirmation Request */
1426 events[6] |= 0x08; /* User Passkey Request */
1427 events[6] |= 0x10; /* Remote OOB Data Request */
1428 events[6] |= 0x20; /* Simple Pairing Complete */
1429 events[7] |= 0x04; /* User Passkey Notification */
1430 events[7] |= 0x08; /* Keypress Notification */
1431 events[7] |= 0x10; /* Remote Host Supported
1432 * Features Notification
1433 */
1434 }
1435
1436 if (lmp_le_capable(hdev))
1437 events[7] |= 0x20; /* LE Meta-Event */
1438
1439 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
1440 }
1441
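/* Stage two: transport specific setup (BR/EDR and/or LE) plus the event
 * mask and feature dependent commands such as SSP mode, EIR and the
 * inquiry mode.
 */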
1442 static void hci_init2_req(struct hci_request *req, unsigned long opt)
1443 {
1444 struct hci_dev *hdev = req->hdev;
1445
1446 if (lmp_bredr_capable(hdev))
1447 bredr_setup(req);
1448 else
1449 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
1450
1451 if (lmp_le_capable(hdev))
1452 le_setup(req);
1453
1454 hci_setup_event_mask(req);
1455
1456 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1457 * local supported commands HCI command.
1458 */
1459 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
1460 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1461
1462 if (lmp_ssp_capable(hdev)) {
1463 /* When SSP is available, the host features page
1464 * should be available as well. However, some
1465 * controllers list max_page as 0 as long as SSP
1466 * has not been enabled. To achieve proper debugging
1467 * output, force max_page to a minimum of 1.
1468 */
1469 hdev->max_page = 0x01;
1470
1471 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1472 u8 mode = 0x01;
1473 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1474 sizeof(mode), &mode);
1475 } else {
1476 struct hci_cp_write_eir cp;
1477
1478 memset(hdev->eir, 0, sizeof(hdev->eir));
1479 memset(&cp, 0, sizeof(cp));
1480
1481 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
1482 }
1483 }
1484
1485 if (lmp_inq_rssi_capable(hdev))
1486 hci_setup_inquiry_mode(req);
1487
1488 if (lmp_inq_tx_pwr_capable(hdev))
1489 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
1490
1491 if (lmp_ext_feat_capable(hdev)) {
1492 struct hci_cp_read_local_ext_features cp;
1493
1494 cp.page = 0x01;
1495 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1496 sizeof(cp), &cp);
1497 }
1498
1499 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1500 u8 enable = 1;
1501 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1502 &enable);
1503 }
1504 }
1505
1506 static void hci_setup_link_policy(struct hci_request *req)
1507 {
1508 struct hci_dev *hdev = req->hdev;
1509 struct hci_cp_write_def_link_policy cp;
1510 u16 link_policy = 0;
1511
1512 if (lmp_rswitch_capable(hdev))
1513 link_policy |= HCI_LP_RSWITCH;
1514 if (lmp_hold_capable(hdev))
1515 link_policy |= HCI_LP_HOLD;
1516 if (lmp_sniff_capable(hdev))
1517 link_policy |= HCI_LP_SNIFF;
1518 if (lmp_park_capable(hdev))
1519 link_policy |= HCI_LP_PARK;
1520
1521 cp.policy = cpu_to_le16(link_policy);
1522 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
1523 }
1524
1525 static void hci_set_le_support(struct hci_request *req)
1526 {
1527 struct hci_dev *hdev = req->hdev;
1528 struct hci_cp_write_le_host_supported cp;
1529
1530 /* LE-only devices do not support explicit enablement */
1531 if (!lmp_bredr_capable(hdev))
1532 return;
1533
1534 memset(&cp, 0, sizeof(cp));
1535
1536 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1537 cp.le = 0x01;
1538 cp.simul = lmp_le_br_capable(hdev);
1539 }
1540
1541 if (cp.le != lmp_host_le_capable(hdev))
1542 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1543 &cp);
1544 }
1545
1546 static void hci_set_event_mask_page_2(struct hci_request *req)
1547 {
1548 struct hci_dev *hdev = req->hdev;
1549 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1550
1551 /* If Connectionless Slave Broadcast master role is supported
1552 * enable all necessary events for it.
1553 */
1554 if (lmp_csb_master_capable(hdev)) {
1555 events[1] |= 0x40; /* Triggered Clock Capture */
1556 events[1] |= 0x80; /* Synchronization Train Complete */
1557 events[2] |= 0x10; /* Slave Page Response Timeout */
1558 events[2] |= 0x20; /* CSB Channel Map Change */
1559 }
1560
1561 /* If Connectionless Slave Broadcast slave role is supported
1562 * enable all necessary events for it.
1563 */
1564 if (lmp_csb_slave_capable(hdev)) {
1565 events[2] |= 0x01; /* Synchronization Train Received */
1566 events[2] |= 0x02; /* CSB Receive */
1567 events[2] |= 0x04; /* CSB Timeout */
1568 events[2] |= 0x08; /* Truncated Page Complete */
1569 }
1570
1571 /* Enable Authenticated Payload Timeout Expired event if supported */
1572 if (lmp_ping_capable(hdev))
1573 events[2] |= 0x80;
1574
1575 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1576 }
1577
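/* Stage three: commands that depend on stage two results, such as deleting
 * stored link keys, the default link policy, the LE event mask and reading
 * the remaining extended feature pages.
 */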
1578 static void hci_init3_req(struct hci_request *req, unsigned long opt)
1579 {
1580 struct hci_dev *hdev = req->hdev;
1581 u8 p;
1582
1583 /* Some Broadcom based Bluetooth controllers do not support the
1584 * Delete Stored Link Key command. They are clearly indicating its
1585 * absence in the bit mask of supported commands.
1586 *
1587 * Check the supported commands and only if the command is marked
1588 * as supported send it. If not supported assume that the controller
1589 * does not have actual support for stored link keys which makes this
1590 * command redundant anyway.
1591 *
1592 * Some controllers indicate that they support handling deleting
1593 * stored link keys, but they don't. The quirk lets a driver
1594 * just disable this command.
1595 */
1596 if (hdev->commands[6] & 0x80 &&
1597 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
1598 struct hci_cp_delete_stored_link_key cp;
1599
1600 bacpy(&cp.bdaddr, BDADDR_ANY);
1601 cp.delete_all = 0x01;
1602 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1603 sizeof(cp), &cp);
1604 }
1605
1606 if (hdev->commands[5] & 0x10)
1607 hci_setup_link_policy(req);
1608
1609 if (lmp_le_capable(hdev)) {
1610 u8 events[8];
1611
1612 memset(events, 0, sizeof(events));
1613 events[0] = 0x1f;
1614
1615 /* If controller supports the Connection Parameters Request
1616 * Link Layer Procedure, enable the corresponding event.
1617 */
1618 if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
1619 events[0] |= 0x20; /* LE Remote Connection
1620 * Parameter Request
1621 */
1622
1623 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
1624 events);
1625
1626 hci_set_le_support(req);
1627 }
1628
1629 /* Read features beyond page 1 if available */
1630 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1631 struct hci_cp_read_local_ext_features cp;
1632
1633 cp.page = p;
1634 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1635 sizeof(cp), &cp);
1636 }
1637 }
1638
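/* Stage four: late optional setup such as event mask page 2, the
 * synchronization train parameters and Secure Connections support.
 */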
1639 static void hci_init4_req(struct hci_request *req, unsigned long opt)
1640 {
1641 struct hci_dev *hdev = req->hdev;
1642
1643 /* Set event mask page 2 if the HCI command for it is supported */
1644 if (hdev->commands[22] & 0x04)
1645 hci_set_event_mask_page_2(req);
1646
1647 /* Check for Synchronization Train support */
1648 if (lmp_sync_train_capable(hdev))
1649 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1650
1651 /* Enable Secure Connections if supported and configured */
1652 if ((lmp_sc_capable(hdev) ||
1653 test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
1654 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1655 u8 support = 0x01;
1656 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1657 sizeof(support), &support);
1658 }
1659 }
1660
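/* Run the init stages in sequence and, during the initial setup phase
 * only, create the per-controller debugfs entries.
 */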
1661 static int __hci_init(struct hci_dev *hdev)
1662 {
1663 int err;
1664
1665 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1666 if (err < 0)
1667 return err;
1668
1669 /* The Device Under Test (DUT) mode is special and available for
1670 * all controller types. So just create it early on.
1671 */
1672 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1673 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1674 &dut_mode_fops);
1675 }
1676
1677 /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
1678 * dual-mode BR/EDR/LE controllers. AMP controllers only need
1679 * the first stage init.
1680 */
1681 if (hdev->dev_type != HCI_BREDR)
1682 return 0;
1683
1684 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1685 if (err < 0)
1686 return err;
1687
1688 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1689 if (err < 0)
1690 return err;
1691
1692 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1693 if (err < 0)
1694 return err;
1695
1696 /* Only create debugfs entries during the initial setup
1697 * phase and not every time the controller gets powered on.
1698 */
1699 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1700 return 0;
1701
1702 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1703 &features_fops);
1704 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1705 &hdev->manufacturer);
1706 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1707 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
1708 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1709 &blacklist_fops);
1710 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1711
1712 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1713 &conn_info_min_age_fops);
1714 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1715 &conn_info_max_age_fops);
1716
1717 if (lmp_bredr_capable(hdev)) {
1718 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1719 hdev, &inquiry_cache_fops);
1720 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1721 hdev, &link_keys_fops);
1722 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1723 hdev, &dev_class_fops);
1724 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1725 hdev, &voice_setting_fops);
1726 }
1727
1728 if (lmp_ssp_capable(hdev)) {
1729 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1730 hdev, &auto_accept_delay_fops);
1731 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1732 hdev, &force_sc_support_fops);
1733 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1734 hdev, &sc_only_mode_fops);
1735 }
1736
1737 if (lmp_sniff_capable(hdev)) {
1738 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1739 hdev, &idle_timeout_fops);
1740 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1741 hdev, &sniff_min_interval_fops);
1742 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1743 hdev, &sniff_max_interval_fops);
1744 }
1745
1746 if (lmp_le_capable(hdev)) {
1747 debugfs_create_file("identity", 0400, hdev->debugfs,
1748 hdev, &identity_fops);
1749 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1750 hdev, &rpa_timeout_fops);
1751 debugfs_create_file("random_address", 0444, hdev->debugfs,
1752 hdev, &random_address_fops);
1753 debugfs_create_file("static_address", 0444, hdev->debugfs,
1754 hdev, &static_address_fops);
1755
1756 /* For controllers with a public address, provide a debug
1757 * option to force the usage of the configured static
1758 * address. By default the public address is used.
1759 */
1760 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1761 debugfs_create_file("force_static_address", 0644,
1762 hdev->debugfs, hdev,
1763 &force_static_address_fops);
1764
1765 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1766 &hdev->le_white_list_size);
1767 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1768 &white_list_fops);
1769 debugfs_create_file("identity_resolving_keys", 0400,
1770 hdev->debugfs, hdev,
1771 &identity_resolving_keys_fops);
1772 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1773 hdev, &long_term_keys_fops);
1774 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1775 hdev, &conn_min_interval_fops);
1776 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1777 hdev, &conn_max_interval_fops);
1778 debugfs_create_file("conn_latency", 0644, hdev->debugfs,
1779 hdev, &conn_latency_fops);
1780 debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
1781 hdev, &supervision_timeout_fops);
1782 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1783 hdev, &adv_channel_map_fops);
1784 debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
1785 &device_list_fops);
1786 debugfs_create_u16("discov_interleaved_timeout", 0644,
1787 hdev->debugfs,
1788 &hdev->discov_interleaved_timeout);
1789 }
1790
1791 return 0;
1792 }
1793
1794 static void hci_scan_req(struct hci_request *req, unsigned long opt)
1795 {
1796 __u8 scan = opt;
1797
1798 BT_DBG("%s %x", req->hdev->name, scan);
1799
1800 /* Inquiry and Page scans */
1801 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1802 }
1803
1804 static void hci_auth_req(struct hci_request *req, unsigned long opt)
1805 {
1806 __u8 auth = opt;
1807
1808 BT_DBG("%s %x", req->hdev->name, auth);
1809
1810 /* Authentication */
1811 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1812 }
1813
1814 static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1815 {
1816 __u8 encrypt = opt;
1817
1818 BT_DBG("%s %x", req->hdev->name, encrypt);
1819
1820 /* Encryption */
1821 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1822 }
1823
1824 static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
1825 {
1826 __le16 policy = cpu_to_le16(opt);
1827
1828 BT_DBG("%s %x", req->hdev->name, policy);
1829
1830 /* Default link policy */
1831 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
1832 }
1833
1834 /* Get HCI device by index.
1835 * Device is held on return. */
1836 struct hci_dev *hci_dev_get(int index)
1837 {
1838 struct hci_dev *hdev = NULL, *d;
1839
1840 BT_DBG("%d", index);
1841
1842 if (index < 0)
1843 return NULL;
1844
1845 read_lock(&hci_dev_list_lock);
1846 list_for_each_entry(d, &hci_dev_list, list) {
1847 if (d->id == index) {
1848 hdev = hci_dev_hold(d);
1849 break;
1850 }
1851 }
1852 read_unlock(&hci_dev_list_lock);
1853 return hdev;
1854 }
1855
1856 /* ---- Inquiry support ---- */
1857
1858 bool hci_discovery_active(struct hci_dev *hdev)
1859 {
1860 struct discovery_state *discov = &hdev->discovery;
1861
1862 switch (discov->state) {
1863 case DISCOVERY_FINDING:
1864 case DISCOVERY_RESOLVING:
1865 return true;
1866
1867 default:
1868 return false;
1869 }
1870 }
1871
1872 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1873 {
1874 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1875
1876 if (hdev->discovery.state == state)
1877 return;
1878
1879 switch (state) {
1880 case DISCOVERY_STOPPED:
1881 hci_update_background_scan(hdev);
1882
1883 if (hdev->discovery.state != DISCOVERY_STARTING)
1884 mgmt_discovering(hdev, 0);
1885 break;
1886 case DISCOVERY_STARTING:
1887 break;
1888 case DISCOVERY_FINDING:
1889 mgmt_discovering(hdev, 1);
1890 break;
1891 case DISCOVERY_RESOLVING:
1892 break;
1893 case DISCOVERY_STOPPING:
1894 break;
1895 }
1896
1897 hdev->discovery.state = state;
1898 }
1899
1900 void hci_inquiry_cache_flush(struct hci_dev *hdev)
1901 {
1902 struct discovery_state *cache = &hdev->discovery;
1903 struct inquiry_entry *p, *n;
1904
1905 list_for_each_entry_safe(p, n, &cache->all, all) {
1906 list_del(&p->all);
1907 kfree(p);
1908 }
1909
1910 INIT_LIST_HEAD(&cache->unknown);
1911 INIT_LIST_HEAD(&cache->resolve);
1912 }
1913
1914 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1915 bdaddr_t *bdaddr)
1916 {
1917 struct discovery_state *cache = &hdev->discovery;
1918 struct inquiry_entry *e;
1919
1920 BT_DBG("cache %p, %pMR", cache, bdaddr);
1921
1922 list_for_each_entry(e, &cache->all, all) {
1923 if (!bacmp(&e->data.bdaddr, bdaddr))
1924 return e;
1925 }
1926
1927 return NULL;
1928 }
1929
1930 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
1931 bdaddr_t *bdaddr)
1932 {
1933 struct discovery_state *cache = &hdev->discovery;
1934 struct inquiry_entry *e;
1935
1936 BT_DBG("cache %p, %pMR", cache, bdaddr);
1937
1938 list_for_each_entry(e, &cache->unknown, list) {
1939 if (!bacmp(&e->data.bdaddr, bdaddr))
1940 return e;
1941 }
1942
1943 return NULL;
1944 }
1945
1946 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
1947 bdaddr_t *bdaddr,
1948 int state)
1949 {
1950 struct discovery_state *cache = &hdev->discovery;
1951 struct inquiry_entry *e;
1952
1953 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
1954
1955 list_for_each_entry(e, &cache->resolve, list) {
1956 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1957 return e;
1958 if (!bacmp(&e->data.bdaddr, bdaddr))
1959 return e;
1960 }
1961
1962 return NULL;
1963 }
1964
1965 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
1966 struct inquiry_entry *ie)
1967 {
1968 struct discovery_state *cache = &hdev->discovery;
1969 struct list_head *pos = &cache->resolve;
1970 struct inquiry_entry *p;
1971
1972 list_del(&ie->list);
1973
1974 list_for_each_entry(p, &cache->resolve, list) {
1975 if (p->name_state != NAME_PENDING &&
1976 abs(p->data.rssi) >= abs(ie->data.rssi))
1977 break;
1978 pos = &p->list;
1979 }
1980
1981 list_add(&ie->list, pos);
1982 }
1983
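/* Add a new inquiry cache entry or refresh an existing one and return the
 * MGMT_DEV_FOUND_* flags describing how the result should be reported.
 */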
1984 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
1985 bool name_known)
1986 {
1987 struct discovery_state *cache = &hdev->discovery;
1988 struct inquiry_entry *ie;
1989 u32 flags = 0;
1990
1991 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1992
1993 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1994
1995 if (!data->ssp_mode)
1996 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
1997
1998 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
1999 if (ie) {
2000 if (!ie->data.ssp_mode)
2001 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2002
2003 if (ie->name_state == NAME_NEEDED &&
2004 data->rssi != ie->data.rssi) {
2005 ie->data.rssi = data->rssi;
2006 hci_inquiry_cache_update_resolve(hdev, ie);
2007 }
2008
2009 goto update;
2010 }
2011
2012 /* Entry not in the cache. Add new one. */
2013 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2014 if (!ie) {
2015 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2016 goto done;
2017 }
2018
2019 list_add(&ie->all, &cache->all);
2020
2021 if (name_known) {
2022 ie->name_state = NAME_KNOWN;
2023 } else {
2024 ie->name_state = NAME_NOT_KNOWN;
2025 list_add(&ie->list, &cache->unknown);
2026 }
2027
2028 update:
2029 if (name_known && ie->name_state != NAME_KNOWN &&
2030 ie->name_state != NAME_PENDING) {
2031 ie->name_state = NAME_KNOWN;
2032 list_del(&ie->list);
2033 }
2034
2035 memcpy(&ie->data, data, sizeof(*data));
2036 ie->timestamp = jiffies;
2037 cache->timestamp = jiffies;
2038
2039 if (ie->name_state == NAME_NOT_KNOWN)
2040 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2041
2042 done:
2043 return flags;
2044 }
2045
2046 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2047 {
2048 struct discovery_state *cache = &hdev->discovery;
2049 struct inquiry_info *info = (struct inquiry_info *) buf;
2050 struct inquiry_entry *e;
2051 int copied = 0;
2052
2053 list_for_each_entry(e, &cache->all, all) {
2054 struct inquiry_data *data = &e->data;
2055
2056 if (copied >= num)
2057 break;
2058
2059 bacpy(&info->bdaddr, &data->bdaddr);
2060 info->pscan_rep_mode = data->pscan_rep_mode;
2061 info->pscan_period_mode = data->pscan_period_mode;
2062 info->pscan_mode = data->pscan_mode;
2063 memcpy(info->dev_class, data->dev_class, 3);
2064 info->clock_offset = data->clock_offset;
2065
2066 info++;
2067 copied++;
2068 }
2069
2070 BT_DBG("cache %p, copied %d", cache, copied);
2071 return copied;
2072 }
2073
2074 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2075 {
2076 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2077 struct hci_dev *hdev = req->hdev;
2078 struct hci_cp_inquiry cp;
2079
2080 BT_DBG("%s", hdev->name);
2081
2082 if (test_bit(HCI_INQUIRY, &hdev->flags))
2083 return;
2084
2085 /* Start Inquiry */
2086 memcpy(&cp.lap, &ir->lap, 3);
2087 cp.length = ir->length;
2088 cp.num_rsp = ir->num_rsp;
2089 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2090 }
2091
2092 static int wait_inquiry(void *word)
2093 {
2094 schedule();
2095 return signal_pending(current);
2096 }
2097
2098 int hci_inquiry(void __user *arg)
2099 {
2100 __u8 __user *ptr = arg;
2101 struct hci_inquiry_req ir;
2102 struct hci_dev *hdev;
2103 int err = 0, do_inquiry = 0, max_rsp;
2104 long timeo;
2105 __u8 *buf;
2106
2107 if (copy_from_user(&ir, ptr, sizeof(ir)))
2108 return -EFAULT;
2109
2110 hdev = hci_dev_get(ir.dev_id);
2111 if (!hdev)
2112 return -ENODEV;
2113
2114 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2115 err = -EBUSY;
2116 goto done;
2117 }
2118
2119 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2120 err = -EOPNOTSUPP;
2121 goto done;
2122 }
2123
2124 if (hdev->dev_type != HCI_BREDR) {
2125 err = -EOPNOTSUPP;
2126 goto done;
2127 }
2128
2129 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2130 err = -EOPNOTSUPP;
2131 goto done;
2132 }
2133
2134 hci_dev_lock(hdev);
2135 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2136 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2137 hci_inquiry_cache_flush(hdev);
2138 do_inquiry = 1;
2139 }
2140 hci_dev_unlock(hdev);
2141
2142 timeo = ir.length * msecs_to_jiffies(2000);
2143
2144 if (do_inquiry) {
2145 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2146 timeo);
2147 if (err < 0)
2148 goto done;
2149
2150 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2151 * cleared). If it is interrupted by a signal, return -EINTR.
2152 */
2153 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2154 TASK_INTERRUPTIBLE))
2155 return -EINTR;
2156 }
2157
2158	/* For an unlimited number of responses, use a buffer with
2159	 * 255 entries.
2160	 */
2161 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2162
2163	/* inquiry_cache_dump() can't sleep, so allocate a temporary buffer
2164	 * and then copy it to user space.
2165	 */
2166 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2167 if (!buf) {
2168 err = -ENOMEM;
2169 goto done;
2170 }
2171
2172 hci_dev_lock(hdev);
2173 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2174 hci_dev_unlock(hdev);
2175
2176 BT_DBG("num_rsp %d", ir.num_rsp);
2177
2178 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2179 ptr += sizeof(ir);
2180 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2181 ir.num_rsp))
2182 err = -EFAULT;
2183 } else
2184 err = -EFAULT;
2185
2186 kfree(buf);
2187
2188 done:
2189 hci_dev_put(hdev);
2190 return err;
2191 }
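
/* Illustrative user space sketch, not part of the kernel source: issuing
 * the HCIINQUIRY ioctl serviced by hci_inquiry() above. The buffer starts
 * with struct hci_inquiry_req and is followed by room for the requested
 * inquiry_info entries. The helper name and the raw HCI socket passed in
 * are assumptions.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

static int example_inquiry(int hci_sock, int dev_id)
{
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info info[8];
	} buf;

	memset(&buf, 0, sizeof(buf));
	buf.ir.dev_id  = dev_id;
	buf.ir.flags   = IREQ_CACHE_FLUSH;
	buf.ir.lap[0]  = 0x33;	/* General Inquiry Access Code 0x9e8b33 */
	buf.ir.lap[1]  = 0x8b;
	buf.ir.lap[2]  = 0x9e;
	buf.ir.length  = 8;	/* inquiry length in 1.28 s units */
	buf.ir.num_rsp = 8;	/* responses that fit in the buffer above */

	return ioctl(hci_sock, HCIINQUIRY, &buf);
}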
2192
2193 static int hci_dev_do_open(struct hci_dev *hdev)
2194 {
2195 int ret = 0;
2196
2197 BT_DBG("%s %p", hdev->name, hdev);
2198
2199 hci_req_lock(hdev);
2200
2201 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2202 ret = -ENODEV;
2203 goto done;
2204 }
2205
2206 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
2207 /* Check for rfkill but allow the HCI setup stage to
2208 * proceed (which in itself doesn't cause any RF activity).
2209 */
2210 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2211 ret = -ERFKILL;
2212 goto done;
2213 }
2214
2215 /* Check for valid public address or a configured static
2216		 * random address, but let the HCI setup proceed to
2217 * be able to determine if there is a public address
2218 * or not.
2219 *
2220 * In case of user channel usage, it is not important
2221 * if a public address or static random address is
2222 * available.
2223 *
2224 * This check is only valid for BR/EDR controllers
2225 * since AMP controllers do not have an address.
2226 */
2227 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2228 hdev->dev_type == HCI_BREDR &&
2229 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2230 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2231 ret = -EADDRNOTAVAIL;
2232 goto done;
2233 }
2234 }
2235
2236 if (test_bit(HCI_UP, &hdev->flags)) {
2237 ret = -EALREADY;
2238 goto done;
2239 }
2240
2241 if (hdev->open(hdev)) {
2242 ret = -EIO;
2243 goto done;
2244 }
2245
2246 atomic_set(&hdev->cmd_cnt, 1);
2247 set_bit(HCI_INIT, &hdev->flags);
2248
2249 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags)) {
2250 ret = hdev->setup(hdev);
2251
2252 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2253 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2254 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2255 }
2256
2257 /* If public address change is configured, ensure that the
2258 * address gets programmed. If the driver does not support
2259 * changing the public address, fail the power on procedure.
2260 */
2261 if (!ret && bacmp(&hdev->public_addr, BDADDR_ANY)) {
2262 if (hdev->set_bdaddr)
2263 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2264 else
2265 ret = -EADDRNOTAVAIL;
2266 }
2267
2268 if (!ret) {
2269 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2270 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2271 ret = __hci_init(hdev);
2272 }
2273
2274 clear_bit(HCI_INIT, &hdev->flags);
2275
2276 if (!ret) {
2277 hci_dev_hold(hdev);
2278 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2279 set_bit(HCI_UP, &hdev->flags);
2280 hci_notify(hdev, HCI_DEV_UP);
2281 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2282 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2283 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2284 hdev->dev_type == HCI_BREDR) {
2285 hci_dev_lock(hdev);
2286 mgmt_powered(hdev, 1);
2287 hci_dev_unlock(hdev);
2288 }
2289 } else {
2290 /* Init failed, cleanup */
2291 flush_work(&hdev->tx_work);
2292 flush_work(&hdev->cmd_work);
2293 flush_work(&hdev->rx_work);
2294
2295 skb_queue_purge(&hdev->cmd_q);
2296 skb_queue_purge(&hdev->rx_q);
2297
2298 if (hdev->flush)
2299 hdev->flush(hdev);
2300
2301 if (hdev->sent_cmd) {
2302 kfree_skb(hdev->sent_cmd);
2303 hdev->sent_cmd = NULL;
2304 }
2305
2306 hdev->close(hdev);
2307 hdev->flags &= BIT(HCI_RAW);
2308 }
2309
2310 done:
2311 hci_req_unlock(hdev);
2312 return ret;
2313 }
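
/* Illustrative driver-side sketch, not part of the original source: a
 * transport driver whose controller needs external configuration can set
 * HCI_QUIRK_EXTERNAL_CONFIG from its setup callback, so that the check
 * after hdev->setup() in hci_dev_do_open() above marks the controller as
 * unconfigured. The function name is hypothetical.
 */
static int example_setup(struct hci_dev *hdev)
{
	/* Firmware download and other vendor specific setup would go here */

	set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks);

	return 0;
}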
2314
2315 /* ---- HCI ioctl helpers ---- */
2316
2317 int hci_dev_open(__u16 dev)
2318 {
2319 struct hci_dev *hdev;
2320 int err;
2321
2322 hdev = hci_dev_get(dev);
2323 if (!hdev)
2324 return -ENODEV;
2325
2326 /* Devices that are marked as unconfigured can only be powered
2327 * up as user channel. Trying to bring them up as normal devices
2328	 * will result in a failure. Only user channel operation is
2329 * possible.
2330 *
2331 * When this function is called for a user channel, the flag
2332 * HCI_USER_CHANNEL will be set first before attempting to
2333 * open the device.
2334 */
2335 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2336 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2337 err = -EOPNOTSUPP;
2338 goto done;
2339 }
2340
2341 /* We need to ensure that no other power on/off work is pending
2342 * before proceeding to call hci_dev_do_open. This is
2343 * particularly important if the setup procedure has not yet
2344 * completed.
2345 */
2346 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2347 cancel_delayed_work(&hdev->power_off);
2348
2349 /* After this call it is guaranteed that the setup procedure
2350 * has finished. This means that error conditions like RFKILL
2351 * or no valid public or static random address apply.
2352 */
2353 flush_workqueue(hdev->req_workqueue);
2354
2355 err = hci_dev_do_open(hdev);
2356
2357 done:
2358 hci_dev_put(hdev);
2359 return err;
2360 }
2361
2362 /* This function requires the caller holds hdev->lock */
2363 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2364 {
2365 struct hci_conn_params *p;
2366
2367 list_for_each_entry(p, &hdev->le_conn_params, list)
2368 list_del_init(&p->action);
2369
2370 BT_DBG("All LE pending actions cleared");
2371 }
2372
2373 static int hci_dev_do_close(struct hci_dev *hdev)
2374 {
2375 BT_DBG("%s %p", hdev->name, hdev);
2376
2377 cancel_delayed_work(&hdev->power_off);
2378
2379 hci_req_cancel(hdev, ENODEV);
2380 hci_req_lock(hdev);
2381
2382 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2383 cancel_delayed_work_sync(&hdev->cmd_timer);
2384 hci_req_unlock(hdev);
2385 return 0;
2386 }
2387
2388 /* Flush RX and TX works */
2389 flush_work(&hdev->tx_work);
2390 flush_work(&hdev->rx_work);
2391
2392 if (hdev->discov_timeout > 0) {
2393 cancel_delayed_work(&hdev->discov_off);
2394 hdev->discov_timeout = 0;
2395 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2396 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2397 }
2398
2399 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2400 cancel_delayed_work(&hdev->service_cache);
2401
2402 cancel_delayed_work_sync(&hdev->le_scan_disable);
2403
2404 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2405 cancel_delayed_work_sync(&hdev->rpa_expired);
2406
2407 hci_dev_lock(hdev);
2408 hci_inquiry_cache_flush(hdev);
2409 hci_conn_hash_flush(hdev);
2410 hci_pend_le_actions_clear(hdev);
2411 hci_dev_unlock(hdev);
2412
2413 hci_notify(hdev, HCI_DEV_DOWN);
2414
2415 if (hdev->flush)
2416 hdev->flush(hdev);
2417
2418 /* Reset device */
2419 skb_queue_purge(&hdev->cmd_q);
2420 atomic_set(&hdev->cmd_cnt, 1);
2421 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2422 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2423 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2424 set_bit(HCI_INIT, &hdev->flags);
2425 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2426 clear_bit(HCI_INIT, &hdev->flags);
2427 }
2428
2429 /* flush cmd work */
2430 flush_work(&hdev->cmd_work);
2431
2432 /* Drop queues */
2433 skb_queue_purge(&hdev->rx_q);
2434 skb_queue_purge(&hdev->cmd_q);
2435 skb_queue_purge(&hdev->raw_q);
2436
2437 /* Drop last sent command */
2438 if (hdev->sent_cmd) {
2439 cancel_delayed_work_sync(&hdev->cmd_timer);
2440 kfree_skb(hdev->sent_cmd);
2441 hdev->sent_cmd = NULL;
2442 }
2443
2444 kfree_skb(hdev->recv_evt);
2445 hdev->recv_evt = NULL;
2446
2447 /* After this point our queues are empty
2448 * and no tasks are scheduled. */
2449 hdev->close(hdev);
2450
2451 /* Clear flags */
2452 hdev->flags &= BIT(HCI_RAW);
2453 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2454
2455 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2456 if (hdev->dev_type == HCI_BREDR) {
2457 hci_dev_lock(hdev);
2458 mgmt_powered(hdev, 0);
2459 hci_dev_unlock(hdev);
2460 }
2461 }
2462
2463 /* Controller radio is available but is currently powered down */
2464 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2465
2466 memset(hdev->eir, 0, sizeof(hdev->eir));
2467 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2468 bacpy(&hdev->random_addr, BDADDR_ANY);
2469
2470 hci_req_unlock(hdev);
2471
2472 hci_dev_put(hdev);
2473 return 0;
2474 }
2475
2476 int hci_dev_close(__u16 dev)
2477 {
2478 struct hci_dev *hdev;
2479 int err;
2480
2481 hdev = hci_dev_get(dev);
2482 if (!hdev)
2483 return -ENODEV;
2484
2485 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2486 err = -EBUSY;
2487 goto done;
2488 }
2489
2490 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2491 cancel_delayed_work(&hdev->power_off);
2492
2493 err = hci_dev_do_close(hdev);
2494
2495 done:
2496 hci_dev_put(hdev);
2497 return err;
2498 }
2499
2500 int hci_dev_reset(__u16 dev)
2501 {
2502 struct hci_dev *hdev;
2503 int ret = 0;
2504
2505 hdev = hci_dev_get(dev);
2506 if (!hdev)
2507 return -ENODEV;
2508
2509 hci_req_lock(hdev);
2510
2511 if (!test_bit(HCI_UP, &hdev->flags)) {
2512 ret = -ENETDOWN;
2513 goto done;
2514 }
2515
2516 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2517 ret = -EBUSY;
2518 goto done;
2519 }
2520
2521 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2522 ret = -EOPNOTSUPP;
2523 goto done;
2524 }
2525
2526 /* Drop queues */
2527 skb_queue_purge(&hdev->rx_q);
2528 skb_queue_purge(&hdev->cmd_q);
2529
2530 hci_dev_lock(hdev);
2531 hci_inquiry_cache_flush(hdev);
2532 hci_conn_hash_flush(hdev);
2533 hci_dev_unlock(hdev);
2534
2535 if (hdev->flush)
2536 hdev->flush(hdev);
2537
2538 atomic_set(&hdev->cmd_cnt, 1);
2539 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
2540
2541 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2542
2543 done:
2544 hci_req_unlock(hdev);
2545 hci_dev_put(hdev);
2546 return ret;
2547 }
2548
2549 int hci_dev_reset_stat(__u16 dev)
2550 {
2551 struct hci_dev *hdev;
2552 int ret = 0;
2553
2554 hdev = hci_dev_get(dev);
2555 if (!hdev)
2556 return -ENODEV;
2557
2558 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2559 ret = -EBUSY;
2560 goto done;
2561 }
2562
2563 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2564 ret = -EOPNOTSUPP;
2565 goto done;
2566 }
2567
2568 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2569
2570 done:
2571 hci_dev_put(hdev);
2572 return ret;
2573 }
2574
2575 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2576 {
2577 struct hci_dev *hdev;
2578 struct hci_dev_req dr;
2579 int err = 0;
2580
2581 if (copy_from_user(&dr, arg, sizeof(dr)))
2582 return -EFAULT;
2583
2584 hdev = hci_dev_get(dr.dev_id);
2585 if (!hdev)
2586 return -ENODEV;
2587
2588 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2589 err = -EBUSY;
2590 goto done;
2591 }
2592
2593 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2594 err = -EOPNOTSUPP;
2595 goto done;
2596 }
2597
2598 if (hdev->dev_type != HCI_BREDR) {
2599 err = -EOPNOTSUPP;
2600 goto done;
2601 }
2602
2603 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2604 err = -EOPNOTSUPP;
2605 goto done;
2606 }
2607
2608 switch (cmd) {
2609 case HCISETAUTH:
2610 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2611 HCI_INIT_TIMEOUT);
2612 break;
2613
2614 case HCISETENCRYPT:
2615 if (!lmp_encrypt_capable(hdev)) {
2616 err = -EOPNOTSUPP;
2617 break;
2618 }
2619
2620 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2621 /* Auth must be enabled first */
2622 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2623 HCI_INIT_TIMEOUT);
2624 if (err)
2625 break;
2626 }
2627
2628 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2629 HCI_INIT_TIMEOUT);
2630 break;
2631
2632 case HCISETSCAN:
2633 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2634 HCI_INIT_TIMEOUT);
2635 break;
2636
2637 case HCISETLINKPOL:
2638 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2639 HCI_INIT_TIMEOUT);
2640 break;
2641
2642 case HCISETLINKMODE:
2643 hdev->link_mode = ((__u16) dr.dev_opt) &
2644 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2645 break;
2646
2647 case HCISETPTYPE:
2648 hdev->pkt_type = (__u16) dr.dev_opt;
2649 break;
2650
2651 case HCISETACLMTU:
2652 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2653 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2654 break;
2655
2656 case HCISETSCOMTU:
2657 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2658 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2659 break;
2660
2661 default:
2662 err = -EINVAL;
2663 break;
2664 }
2665
2666 done:
2667 hci_dev_put(hdev);
2668 return err;
2669 }
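
/* Illustrative sketch, not part of the original source: how dev_opt packs
 * the ACL MTU and packet count for HCISETACLMTU, mirroring the pointer
 * arithmetic above on a little-endian host (low 16 bits: packet count,
 * high 16 bits: MTU). The helper name and the little-endian assumption
 * are not from the original code.
 */
static __u32 example_pack_acl_mtu(__u16 mtu, __u16 pkts)
{
	return ((__u32) mtu << 16) | pkts;
}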
2670
2671 int hci_get_dev_list(void __user *arg)
2672 {
2673 struct hci_dev *hdev;
2674 struct hci_dev_list_req *dl;
2675 struct hci_dev_req *dr;
2676 int n = 0, size, err;
2677 __u16 dev_num;
2678
2679 if (get_user(dev_num, (__u16 __user *) arg))
2680 return -EFAULT;
2681
2682 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2683 return -EINVAL;
2684
2685 size = sizeof(*dl) + dev_num * sizeof(*dr);
2686
2687 dl = kzalloc(size, GFP_KERNEL);
2688 if (!dl)
2689 return -ENOMEM;
2690
2691 dr = dl->dev_req;
2692
2693 read_lock(&hci_dev_list_lock);
2694 list_for_each_entry(hdev, &hci_dev_list, list) {
2695 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2696 cancel_delayed_work(&hdev->power_off);
2697
2698 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2699 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2700
2701 (dr + n)->dev_id = hdev->id;
2702 (dr + n)->dev_opt = hdev->flags;
2703
2704 if (++n >= dev_num)
2705 break;
2706 }
2707 read_unlock(&hci_dev_list_lock);
2708
2709 dl->dev_num = n;
2710 size = sizeof(*dl) + n * sizeof(*dr);
2711
2712 err = copy_to_user(arg, dl, size);
2713 kfree(dl);
2714
2715 return err ? -EFAULT : 0;
2716 }
2717
2718 int hci_get_dev_info(void __user *arg)
2719 {
2720 struct hci_dev *hdev;
2721 struct hci_dev_info di;
2722 int err = 0;
2723
2724 if (copy_from_user(&di, arg, sizeof(di)))
2725 return -EFAULT;
2726
2727 hdev = hci_dev_get(di.dev_id);
2728 if (!hdev)
2729 return -ENODEV;
2730
2731 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2732 cancel_delayed_work_sync(&hdev->power_off);
2733
2734 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2735 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
2736
2737 strcpy(di.name, hdev->name);
2738 di.bdaddr = hdev->bdaddr;
2739 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2740 di.flags = hdev->flags;
2741 di.pkt_type = hdev->pkt_type;
2742 if (lmp_bredr_capable(hdev)) {
2743 di.acl_mtu = hdev->acl_mtu;
2744 di.acl_pkts = hdev->acl_pkts;
2745 di.sco_mtu = hdev->sco_mtu;
2746 di.sco_pkts = hdev->sco_pkts;
2747 } else {
2748 di.acl_mtu = hdev->le_mtu;
2749 di.acl_pkts = hdev->le_pkts;
2750 di.sco_mtu = 0;
2751 di.sco_pkts = 0;
2752 }
2753 di.link_policy = hdev->link_policy;
2754 di.link_mode = hdev->link_mode;
2755
2756 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2757 memcpy(&di.features, &hdev->features, sizeof(di.features));
2758
2759 if (copy_to_user(arg, &di, sizeof(di)))
2760 err = -EFAULT;
2761
2762 hci_dev_put(hdev);
2763
2764 return err;
2765 }
2766
2767 /* ---- Interface to HCI drivers ---- */
2768
2769 static int hci_rfkill_set_block(void *data, bool blocked)
2770 {
2771 struct hci_dev *hdev = data;
2772
2773 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2774
2775 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2776 return -EBUSY;
2777
2778 if (blocked) {
2779 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2780 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2781 hci_dev_do_close(hdev);
2782 } else {
2783 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2784 }
2785
2786 return 0;
2787 }
2788
2789 static const struct rfkill_ops hci_rfkill_ops = {
2790 .set_block = hci_rfkill_set_block,
2791 };
2792
2793 static void hci_power_on(struct work_struct *work)
2794 {
2795 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
2796 int err;
2797
2798 BT_DBG("%s", hdev->name);
2799
2800 err = hci_dev_do_open(hdev);
2801 if (err < 0) {
2802 mgmt_set_powered_failed(hdev, err);
2803 return;
2804 }
2805
2806 /* During the HCI setup phase, a few error conditions are
2807 * ignored and they need to be checked now. If they are still
2808 * valid, it is important to turn the device back off.
2809 */
2810 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
2811 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
2812 (hdev->dev_type == HCI_BREDR &&
2813 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2814 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
2815 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2816 hci_dev_do_close(hdev);
2817 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2818 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
2819 HCI_AUTO_OFF_TIMEOUT);
2820 }
2821
2822 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
2823 /* For unconfigured devices, set the HCI_RAW flag
2824 * so that userspace can easily identify them.
2825 */
2826 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2827 set_bit(HCI_RAW, &hdev->flags);
2828
2829 /* For fully configured devices, this will send
2830 * the Index Added event. For unconfigured devices,
2831		 * it will send the Unconfigured Index Added event.
2832 *
2833 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
2834		 * and no event will be sent.
2835 */
2836 mgmt_index_added(hdev);
2837 }
2838 }
2839
2840 static void hci_power_off(struct work_struct *work)
2841 {
2842 struct hci_dev *hdev = container_of(work, struct hci_dev,
2843 power_off.work);
2844
2845 BT_DBG("%s", hdev->name);
2846
2847 hci_dev_do_close(hdev);
2848 }
2849
2850 static void hci_discov_off(struct work_struct *work)
2851 {
2852 struct hci_dev *hdev;
2853
2854 hdev = container_of(work, struct hci_dev, discov_off.work);
2855
2856 BT_DBG("%s", hdev->name);
2857
2858 mgmt_discoverable_timeout(hdev);
2859 }
2860
2861 void hci_uuids_clear(struct hci_dev *hdev)
2862 {
2863 struct bt_uuid *uuid, *tmp;
2864
2865 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2866 list_del(&uuid->list);
2867 kfree(uuid);
2868 }
2869 }
2870
2871 void hci_link_keys_clear(struct hci_dev *hdev)
2872 {
2873 struct list_head *p, *n;
2874
2875 list_for_each_safe(p, n, &hdev->link_keys) {
2876 struct link_key *key;
2877
2878 key = list_entry(p, struct link_key, list);
2879
2880 list_del(p);
2881 kfree(key);
2882 }
2883 }
2884
2885 void hci_smp_ltks_clear(struct hci_dev *hdev)
2886 {
2887 struct smp_ltk *k, *tmp;
2888
2889 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2890 list_del(&k->list);
2891 kfree(k);
2892 }
2893 }
2894
2895 void hci_smp_irks_clear(struct hci_dev *hdev)
2896 {
2897 struct smp_irk *k, *tmp;
2898
2899 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2900 list_del(&k->list);
2901 kfree(k);
2902 }
2903 }
2904
2905 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2906 {
2907 struct link_key *k;
2908
2909 list_for_each_entry(k, &hdev->link_keys, list)
2910 if (bacmp(bdaddr, &k->bdaddr) == 0)
2911 return k;
2912
2913 return NULL;
2914 }
2915
2916 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
2917 u8 key_type, u8 old_key_type)
2918 {
2919 /* Legacy key */
2920 if (key_type < 0x03)
2921 return true;
2922
2923 /* Debug keys are insecure so don't store them persistently */
2924 if (key_type == HCI_LK_DEBUG_COMBINATION)
2925 return false;
2926
2927 /* Changed combination key and there's no previous one */
2928 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
2929 return false;
2930
2931 /* Security mode 3 case */
2932 if (!conn)
2933 return true;
2934
2935	/* Neither the local nor the remote side requested no-bonding */
2936 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
2937 return true;
2938
2939 /* Local side had dedicated bonding as requirement */
2940 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
2941 return true;
2942
2943 /* Remote side had dedicated bonding as requirement */
2944 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
2945 return true;
2946
2947 /* If none of the above criteria match, then don't store the key
2948 * persistently */
2949 return false;
2950 }
2951
2952 static bool ltk_type_master(u8 type)
2953 {
2954 return (type == SMP_LTK);
2955 }
2956
2957 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
2958 bool master)
2959 {
2960 struct smp_ltk *k;
2961
2962 list_for_each_entry(k, &hdev->long_term_keys, list) {
2963 if (k->ediv != ediv || k->rand != rand)
2964 continue;
2965
2966 if (ltk_type_master(k->type) != master)
2967 continue;
2968
2969 return k;
2970 }
2971
2972 return NULL;
2973 }
2974
2975 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2976 u8 addr_type, bool master)
2977 {
2978 struct smp_ltk *k;
2979
2980 list_for_each_entry(k, &hdev->long_term_keys, list)
2981 if (addr_type == k->bdaddr_type &&
2982 bacmp(bdaddr, &k->bdaddr) == 0 &&
2983 ltk_type_master(k->type) == master)
2984 return k;
2985
2986 return NULL;
2987 }
2988
2989 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2990 {
2991 struct smp_irk *irk;
2992
2993 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2994 if (!bacmp(&irk->rpa, rpa))
2995 return irk;
2996 }
2997
2998 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2999 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
3000 bacpy(&irk->rpa, rpa);
3001 return irk;
3002 }
3003 }
3004
3005 return NULL;
3006 }
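
/* Illustrative sketch, not part of the original source: mapping an
 * advertiser's resolvable private address back to the stored identity
 * address when a matching IRK exists. The helper name is hypothetical;
 * the caller is assumed to hold hdev->lock.
 */
static bool example_resolve_rpa(struct hci_dev *hdev, bdaddr_t *rpa,
				bdaddr_t *identity)
{
	struct smp_irk *irk = hci_find_irk_by_rpa(hdev, rpa);

	if (!irk)
		return false;

	bacpy(identity, &irk->bdaddr);
	return true;
}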
3007
3008 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3009 u8 addr_type)
3010 {
3011 struct smp_irk *irk;
3012
3013 /* Identity Address must be public or static random */
3014 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3015 return NULL;
3016
3017 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3018 if (addr_type == irk->addr_type &&
3019 bacmp(bdaddr, &irk->bdaddr) == 0)
3020 return irk;
3021 }
3022
3023 return NULL;
3024 }
3025
3026 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3027 bdaddr_t *bdaddr, u8 *val, u8 type,
3028 u8 pin_len, bool *persistent)
3029 {
3030 struct link_key *key, *old_key;
3031 u8 old_key_type;
3032
3033 old_key = hci_find_link_key(hdev, bdaddr);
3034 if (old_key) {
3035 old_key_type = old_key->type;
3036 key = old_key;
3037 } else {
3038 old_key_type = conn ? conn->key_type : 0xff;
3039 key = kzalloc(sizeof(*key), GFP_KERNEL);
3040 if (!key)
3041 return NULL;
3042 list_add(&key->list, &hdev->link_keys);
3043 }
3044
3045 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3046
3047 /* Some buggy controller combinations generate a changed
3048 * combination key for legacy pairing even when there's no
3049 * previous key */
3050 if (type == HCI_LK_CHANGED_COMBINATION &&
3051 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3052 type = HCI_LK_COMBINATION;
3053 if (conn)
3054 conn->key_type = type;
3055 }
3056
3057 bacpy(&key->bdaddr, bdaddr);
3058 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3059 key->pin_len = pin_len;
3060
3061 if (type == HCI_LK_CHANGED_COMBINATION)
3062 key->type = old_key_type;
3063 else
3064 key->type = type;
3065
3066 if (persistent)
3067 *persistent = hci_persistent_key(hdev, conn, type,
3068 old_key_type);
3069
3070 return key;
3071 }
3072
3073 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3074 u8 addr_type, u8 type, u8 authenticated,
3075 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3076 {
3077 struct smp_ltk *key, *old_key;
3078 bool master = ltk_type_master(type);
3079
3080 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
3081 if (old_key)
3082 key = old_key;
3083 else {
3084 key = kzalloc(sizeof(*key), GFP_KERNEL);
3085 if (!key)
3086 return NULL;
3087 list_add(&key->list, &hdev->long_term_keys);
3088 }
3089
3090 bacpy(&key->bdaddr, bdaddr);
3091 key->bdaddr_type = addr_type;
3092 memcpy(key->val, tk, sizeof(key->val));
3093 key->authenticated = authenticated;
3094 key->ediv = ediv;
3095 key->rand = rand;
3096 key->enc_size = enc_size;
3097 key->type = type;
3098
3099 return key;
3100 }
3101
3102 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3103 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3104 {
3105 struct smp_irk *irk;
3106
3107 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3108 if (!irk) {
3109 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3110 if (!irk)
3111 return NULL;
3112
3113 bacpy(&irk->bdaddr, bdaddr);
3114 irk->addr_type = addr_type;
3115
3116 list_add(&irk->list, &hdev->identity_resolving_keys);
3117 }
3118
3119 memcpy(irk->val, val, 16);
3120 bacpy(&irk->rpa, rpa);
3121
3122 return irk;
3123 }
3124
3125 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3126 {
3127 struct link_key *key;
3128
3129 key = hci_find_link_key(hdev, bdaddr);
3130 if (!key)
3131 return -ENOENT;
3132
3133 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3134
3135 list_del(&key->list);
3136 kfree(key);
3137
3138 return 0;
3139 }
3140
3141 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3142 {
3143 struct smp_ltk *k, *tmp;
3144 int removed = 0;
3145
3146 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
3147 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3148 continue;
3149
3150 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3151
3152 list_del(&k->list);
3153 kfree(k);
3154 removed++;
3155 }
3156
3157 return removed ? 0 : -ENOENT;
3158 }
3159
3160 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3161 {
3162 struct smp_irk *k, *tmp;
3163
3164 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
3165 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3166 continue;
3167
3168 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3169
3170 list_del(&k->list);
3171 kfree(k);
3172 }
3173 }
3174
3175 /* HCI command timer function */
3176 static void hci_cmd_timeout(struct work_struct *work)
3177 {
3178 struct hci_dev *hdev = container_of(work, struct hci_dev,
3179 cmd_timer.work);
3180
3181 if (hdev->sent_cmd) {
3182 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3183 u16 opcode = __le16_to_cpu(sent->opcode);
3184
3185 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3186 } else {
3187 BT_ERR("%s command tx timeout", hdev->name);
3188 }
3189
3190 atomic_set(&hdev->cmd_cnt, 1);
3191 queue_work(hdev->workqueue, &hdev->cmd_work);
3192 }
3193
3194 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3195 bdaddr_t *bdaddr)
3196 {
3197 struct oob_data *data;
3198
3199 list_for_each_entry(data, &hdev->remote_oob_data, list)
3200 if (bacmp(bdaddr, &data->bdaddr) == 0)
3201 return data;
3202
3203 return NULL;
3204 }
3205
3206 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3207 {
3208 struct oob_data *data;
3209
3210 data = hci_find_remote_oob_data(hdev, bdaddr);
3211 if (!data)
3212 return -ENOENT;
3213
3214 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3215
3216 list_del(&data->list);
3217 kfree(data);
3218
3219 return 0;
3220 }
3221
3222 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3223 {
3224 struct oob_data *data, *n;
3225
3226 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3227 list_del(&data->list);
3228 kfree(data);
3229 }
3230 }
3231
3232 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3233 u8 *hash, u8 *randomizer)
3234 {
3235 struct oob_data *data;
3236
3237 data = hci_find_remote_oob_data(hdev, bdaddr);
3238 if (!data) {
3239 data = kmalloc(sizeof(*data), GFP_KERNEL);
3240 if (!data)
3241 return -ENOMEM;
3242
3243 bacpy(&data->bdaddr, bdaddr);
3244 list_add(&data->list, &hdev->remote_oob_data);
3245 }
3246
3247 memcpy(data->hash192, hash, sizeof(data->hash192));
3248 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
3249
3250 memset(data->hash256, 0, sizeof(data->hash256));
3251 memset(data->randomizer256, 0, sizeof(data->randomizer256));
3252
3253 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3254
3255 return 0;
3256 }
3257
3258 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3259 u8 *hash192, u8 *randomizer192,
3260 u8 *hash256, u8 *randomizer256)
3261 {
3262 struct oob_data *data;
3263
3264 data = hci_find_remote_oob_data(hdev, bdaddr);
3265 if (!data) {
3266 data = kmalloc(sizeof(*data), GFP_KERNEL);
3267 if (!data)
3268 return -ENOMEM;
3269
3270 bacpy(&data->bdaddr, bdaddr);
3271 list_add(&data->list, &hdev->remote_oob_data);
3272 }
3273
3274 memcpy(data->hash192, hash192, sizeof(data->hash192));
3275 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
3276
3277 memcpy(data->hash256, hash256, sizeof(data->hash256));
3278 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
3279
3280 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3281
3282 return 0;
3283 }
3284
3285 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3286 bdaddr_t *bdaddr, u8 type)
3287 {
3288 struct bdaddr_list *b;
3289
3290 list_for_each_entry(b, &hdev->blacklist, list) {
3291 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3292 return b;
3293 }
3294
3295 return NULL;
3296 }
3297
3298 static void hci_blacklist_clear(struct hci_dev *hdev)
3299 {
3300 struct list_head *p, *n;
3301
3302 list_for_each_safe(p, n, &hdev->blacklist) {
3303 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3304
3305 list_del(p);
3306 kfree(b);
3307 }
3308 }
3309
3310 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3311 {
3312 struct bdaddr_list *entry;
3313
3314 if (!bacmp(bdaddr, BDADDR_ANY))
3315 return -EBADF;
3316
3317 if (hci_blacklist_lookup(hdev, bdaddr, type))
3318 return -EEXIST;
3319
3320 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3321 if (!entry)
3322 return -ENOMEM;
3323
3324 bacpy(&entry->bdaddr, bdaddr);
3325 entry->bdaddr_type = type;
3326
3327 list_add(&entry->list, &hdev->blacklist);
3328
3329 return 0;
3330 }
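
/* Illustrative sketch, not part of the original source: checking whether
 * a peer is on the blacklist before accepting a BR/EDR connection. The
 * helper name is hypothetical; BDADDR_BREDR is the address type used for
 * BR/EDR entries.
 */
static bool example_is_blacklisted(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	return hci_blacklist_lookup(hdev, bdaddr, BDADDR_BREDR) != NULL;
}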
3331
3332 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3333 {
3334 struct bdaddr_list *entry;
3335
3336 if (!bacmp(bdaddr, BDADDR_ANY)) {
3337 hci_blacklist_clear(hdev);
3338 return 0;
3339 }
3340
3341 entry = hci_blacklist_lookup(hdev, bdaddr, type);
3342 if (!entry)
3343 return -ENOENT;
3344
3345 list_del(&entry->list);
3346 kfree(entry);
3347
3348 return 0;
3349 }
3350
3351 struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3352 bdaddr_t *bdaddr, u8 type)
3353 {
3354 struct bdaddr_list *b;
3355
3356 list_for_each_entry(b, &hdev->le_white_list, list) {
3357 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3358 return b;
3359 }
3360
3361 return NULL;
3362 }
3363
3364 void hci_white_list_clear(struct hci_dev *hdev)
3365 {
3366 struct list_head *p, *n;
3367
3368 list_for_each_safe(p, n, &hdev->le_white_list) {
3369 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3370
3371 list_del(p);
3372 kfree(b);
3373 }
3374 }
3375
3376 int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3377 {
3378 struct bdaddr_list *entry;
3379
3380 if (!bacmp(bdaddr, BDADDR_ANY))
3381 return -EBADF;
3382
3383 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
3384 if (!entry)
3385 return -ENOMEM;
3386
3387 bacpy(&entry->bdaddr, bdaddr);
3388 entry->bdaddr_type = type;
3389
3390 list_add(&entry->list, &hdev->le_white_list);
3391
3392 return 0;
3393 }
3394
3395 int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3396 {
3397 struct bdaddr_list *entry;
3398
3399 if (!bacmp(bdaddr, BDADDR_ANY))
3400 return -EBADF;
3401
3402 entry = hci_white_list_lookup(hdev, bdaddr, type);
3403 if (!entry)
3404 return -ENOENT;
3405
3406 list_del(&entry->list);
3407 kfree(entry);
3408
3409 return 0;
3410 }
3411
3412 /* This function requires the caller holds hdev->lock */
3413 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3414 bdaddr_t *addr, u8 addr_type)
3415 {
3416 struct hci_conn_params *params;
3417
3418 /* The conn params list only contains identity addresses */
3419 if (!hci_is_identity_address(addr, addr_type))
3420 return NULL;
3421
3422 list_for_each_entry(params, &hdev->le_conn_params, list) {
3423 if (bacmp(&params->addr, addr) == 0 &&
3424 params->addr_type == addr_type) {
3425 return params;
3426 }
3427 }
3428
3429 return NULL;
3430 }
3431
3432 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3433 {
3434 struct hci_conn *conn;
3435
3436 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3437 if (!conn)
3438 return false;
3439
3440 if (conn->dst_type != type)
3441 return false;
3442
3443 if (conn->state != BT_CONNECTED)
3444 return false;
3445
3446 return true;
3447 }
3448
3449 /* This function requires the caller holds hdev->lock */
3450 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3451 bdaddr_t *addr, u8 addr_type)
3452 {
3453 struct hci_conn_params *param;
3454
3455 /* The list only contains identity addresses */
3456 if (!hci_is_identity_address(addr, addr_type))
3457 return NULL;
3458
3459 list_for_each_entry(param, list, action) {
3460 if (bacmp(&param->addr, addr) == 0 &&
3461 param->addr_type == addr_type)
3462 return param;
3463 }
3464
3465 return NULL;
3466 }
3467
3468 /* This function requires the caller holds hdev->lock */
3469 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3470 bdaddr_t *addr, u8 addr_type)
3471 {
3472 struct hci_conn_params *params;
3473
3474 if (!hci_is_identity_address(addr, addr_type))
3475 return NULL;
3476
3477 params = hci_conn_params_lookup(hdev, addr, addr_type);
3478 if (params)
3479 return params;
3480
3481 params = kzalloc(sizeof(*params), GFP_KERNEL);
3482 if (!params) {
3483 BT_ERR("Out of memory");
3484 return NULL;
3485 }
3486
3487 bacpy(&params->addr, addr);
3488 params->addr_type = addr_type;
3489
3490 list_add(&params->list, &hdev->le_conn_params);
3491 INIT_LIST_HEAD(&params->action);
3492
3493 params->conn_min_interval = hdev->le_conn_min_interval;
3494 params->conn_max_interval = hdev->le_conn_max_interval;
3495 params->conn_latency = hdev->le_conn_latency;
3496 params->supervision_timeout = hdev->le_supv_timeout;
3497 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3498
3499 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3500
3501 return params;
3502 }
3503
3504 /* This function requires the caller holds hdev->lock */
3505 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3506 u8 auto_connect)
3507 {
3508 struct hci_conn_params *params;
3509
3510 params = hci_conn_params_add(hdev, addr, addr_type);
3511 if (!params)
3512 return -EIO;
3513
3514 if (params->auto_connect == auto_connect)
3515 return 0;
3516
3517 list_del_init(&params->action);
3518
3519 switch (auto_connect) {
3520 case HCI_AUTO_CONN_DISABLED:
3521 case HCI_AUTO_CONN_LINK_LOSS:
3522 hci_update_background_scan(hdev);
3523 break;
3524 case HCI_AUTO_CONN_REPORT:
3525 list_add(&params->action, &hdev->pend_le_reports);
3526 hci_update_background_scan(hdev);
3527 break;
3528 case HCI_AUTO_CONN_ALWAYS:
3529 if (!is_connected(hdev, addr, addr_type)) {
3530 list_add(&params->action, &hdev->pend_le_conns);
3531 hci_update_background_scan(hdev);
3532 }
3533 break;
3534 }
3535
3536 params->auto_connect = auto_connect;
3537
3538 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3539 auto_connect);
3540
3541 return 0;
3542 }
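
/* Illustrative sketch, not part of the original source: marking a remote
 * LE device for automatic reconnection, which places it on the pending
 * connections list and refreshes the background scan. The helper name is
 * hypothetical; hci_conn_params_set() requires hdev->lock to be held.
 */
static int example_enable_auto_connect(struct hci_dev *hdev, bdaddr_t *addr,
				       u8 addr_type)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_conn_params_set(hdev, addr, addr_type,
				  HCI_AUTO_CONN_ALWAYS);
	hci_dev_unlock(hdev);

	return err;
}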
3543
3544 /* This function requires the caller holds hdev->lock */
3545 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3546 {
3547 struct hci_conn_params *params;
3548
3549 params = hci_conn_params_lookup(hdev, addr, addr_type);
3550 if (!params)
3551 return;
3552
3553 list_del(&params->action);
3554 list_del(&params->list);
3555 kfree(params);
3556
3557 hci_update_background_scan(hdev);
3558
3559 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3560 }
3561
3562 /* This function requires the caller holds hdev->lock */
3563 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3564 {
3565 struct hci_conn_params *params, *tmp;
3566
3567 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3568 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3569 continue;
3570 list_del(&params->list);
3571 kfree(params);
3572 }
3573
3574 BT_DBG("All LE disabled connection parameters were removed");
3575 }
3576
3577 /* This function requires the caller holds hdev->lock */
3578 void hci_conn_params_clear_enabled(struct hci_dev *hdev)
3579 {
3580 struct hci_conn_params *params, *tmp;
3581
3582 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3583 if (params->auto_connect == HCI_AUTO_CONN_DISABLED)
3584 continue;
3585 list_del(&params->action);
3586 list_del(&params->list);
3587 kfree(params);
3588 }
3589
3590 hci_update_background_scan(hdev);
3591
3592 BT_DBG("All enabled LE connection parameters were removed");
3593 }
3594
3595 /* This function requires the caller holds hdev->lock */
3596 void hci_conn_params_clear_all(struct hci_dev *hdev)
3597 {
3598 struct hci_conn_params *params, *tmp;
3599
3600 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3601 list_del(&params->action);
3602 list_del(&params->list);
3603 kfree(params);
3604 }
3605
3606 hci_update_background_scan(hdev);
3607
3608 BT_DBG("All LE connection parameters were removed");
3609 }
3610
3611 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3612 {
3613 if (status) {
3614 BT_ERR("Failed to start inquiry: status %d", status);
3615
3616 hci_dev_lock(hdev);
3617 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3618 hci_dev_unlock(hdev);
3619 return;
3620 }
3621 }
3622
3623 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3624 {
3625 /* General inquiry access code (GIAC) */
3626 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3627 struct hci_request req;
3628 struct hci_cp_inquiry cp;
3629 int err;
3630
3631 if (status) {
3632 BT_ERR("Failed to disable LE scanning: status %d", status);
3633 return;
3634 }
3635
3636 switch (hdev->discovery.type) {
3637 case DISCOV_TYPE_LE:
3638 hci_dev_lock(hdev);
3639 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3640 hci_dev_unlock(hdev);
3641 break;
3642
3643 case DISCOV_TYPE_INTERLEAVED:
3644 hci_req_init(&req, hdev);
3645
3646 memset(&cp, 0, sizeof(cp));
3647 memcpy(&cp.lap, lap, sizeof(cp.lap));
3648 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3649 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3650
3651 hci_dev_lock(hdev);
3652
3653 hci_inquiry_cache_flush(hdev);
3654
3655 err = hci_req_run(&req, inquiry_complete);
3656 if (err) {
3657 BT_ERR("Inquiry request failed: err %d", err);
3658 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3659 }
3660
3661 hci_dev_unlock(hdev);
3662 break;
3663 }
3664 }
3665
3666 static void le_scan_disable_work(struct work_struct *work)
3667 {
3668 struct hci_dev *hdev = container_of(work, struct hci_dev,
3669 le_scan_disable.work);
3670 struct hci_request req;
3671 int err;
3672
3673 BT_DBG("%s", hdev->name);
3674
3675 hci_req_init(&req, hdev);
3676
3677 hci_req_add_le_scan_disable(&req);
3678
3679 err = hci_req_run(&req, le_scan_disable_work_complete);
3680 if (err)
3681 BT_ERR("Disable LE scanning request failed: err %d", err);
3682 }
3683
3684 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3685 {
3686 struct hci_dev *hdev = req->hdev;
3687
3688 /* If we're advertising or initiating an LE connection we can't
3689 * go ahead and change the random address at this time. This is
3690 * because the eventual initiator address used for the
3691 * subsequently created connection will be undefined (some
3692 * controllers use the new address and others the one we had
3693 * when the operation started).
3694 *
3695 * In this kind of scenario skip the update and let the random
3696 * address be updated at the next cycle.
3697 */
3698 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
3699 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3700 BT_DBG("Deferring random address update");
3701 return;
3702 }
3703
3704 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3705 }
3706
3707 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3708 u8 *own_addr_type)
3709 {
3710 struct hci_dev *hdev = req->hdev;
3711 int err;
3712
3713	/* If privacy is enabled, use a resolvable private address. If the
3714	 * current RPA has expired or something other than the current RPA
3715	 * is in use, generate a new one.
3716 */
3717 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3718 int to;
3719
3720 *own_addr_type = ADDR_LE_DEV_RANDOM;
3721
3722 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3723 !bacmp(&hdev->random_addr, &hdev->rpa))
3724 return 0;
3725
3726 err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
3727 if (err < 0) {
3728 BT_ERR("%s failed to generate new RPA", hdev->name);
3729 return err;
3730 }
3731
3732 set_random_addr(req, &hdev->rpa);
3733
3734 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3735 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3736
3737 return 0;
3738 }
3739
3740	/* If privacy is required but resolvable private addresses are not
3741	 * in use, fall back to an unresolvable private address. This is
3742	 * useful for active scanning and non-connectable advertising.
3743 */
3744 if (require_privacy) {
3745 bdaddr_t urpa;
3746
3747 get_random_bytes(&urpa, 6);
3748 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3749
3750 *own_addr_type = ADDR_LE_DEV_RANDOM;
3751 set_random_addr(req, &urpa);
3752 return 0;
3753 }
3754
3755	/* If forcing the static address is in use or there is no public
3756	 * address, use the static address as the random address (but skip
3757	 * the HCI command if the current random address is already the
3758	 * static one).
3759	 */
3760 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3761 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3762 *own_addr_type = ADDR_LE_DEV_RANDOM;
3763 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3764 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3765 &hdev->static_addr);
3766 return 0;
3767 }
3768
3769 /* Neither privacy nor static address is being used so use a
3770 * public address.
3771 */
3772 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3773
3774 return 0;
3775 }
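
/* Illustrative sketch, not part of the original source: a request builder
 * asking for an own address type before queuing an LE command that needs
 * one (for example advertising or scan parameters). The function name is
 * hypothetical.
 */
static int example_pick_own_address(struct hci_request *req,
				    u8 *own_addr_type)
{
	/* No privacy requirement here, so the public address may be used
	 * if neither privacy nor a static address is configured.
	 */
	return hci_update_random_address(req, false, own_addr_type);
}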
3776
3777 /* Copy the Identity Address of the controller.
3778 *
3779 * If the controller has a public BD_ADDR, then by default use that one.
3780 * If this is a LE only controller without a public address, default to
3781 * the static random address.
3782 *
3783 * For debugging purposes it is possible to force controllers with a
3784 * public address to use the static random address instead.
3785 */
3786 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3787 u8 *bdaddr_type)
3788 {
3789 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3790 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3791 bacpy(bdaddr, &hdev->static_addr);
3792 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3793 } else {
3794 bacpy(bdaddr, &hdev->bdaddr);
3795 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3796 }
3797 }
3798
3799 /* Alloc HCI device */
3800 struct hci_dev *hci_alloc_dev(void)
3801 {
3802 struct hci_dev *hdev;
3803
3804 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3805 if (!hdev)
3806 return NULL;
3807
3808 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3809 hdev->esco_type = (ESCO_HV1);
3810 hdev->link_mode = (HCI_LM_ACCEPT);
3811 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3812 hdev->io_capability = 0x03; /* No Input No Output */
3813 hdev->manufacturer = 0xffff; /* Default to internal use */
3814 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3815 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3816
3817 hdev->sniff_max_interval = 800;
3818 hdev->sniff_min_interval = 80;
3819
3820 hdev->le_adv_channel_map = 0x07;
3821 hdev->le_scan_interval = 0x0060;
3822 hdev->le_scan_window = 0x0030;
3823 hdev->le_conn_min_interval = 0x0028;
3824 hdev->le_conn_max_interval = 0x0038;
3825 hdev->le_conn_latency = 0x0000;
3826 hdev->le_supv_timeout = 0x002a;
3827
3828 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3829 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
3830 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
3831 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
3832
3833 mutex_init(&hdev->lock);
3834 mutex_init(&hdev->req_lock);
3835
3836 INIT_LIST_HEAD(&hdev->mgmt_pending);
3837 INIT_LIST_HEAD(&hdev->blacklist);
3838 INIT_LIST_HEAD(&hdev->uuids);
3839 INIT_LIST_HEAD(&hdev->link_keys);
3840 INIT_LIST_HEAD(&hdev->long_term_keys);
3841 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
3842 INIT_LIST_HEAD(&hdev->remote_oob_data);
3843 INIT_LIST_HEAD(&hdev->le_white_list);
3844 INIT_LIST_HEAD(&hdev->le_conn_params);
3845 INIT_LIST_HEAD(&hdev->pend_le_conns);
3846 INIT_LIST_HEAD(&hdev->pend_le_reports);
3847 INIT_LIST_HEAD(&hdev->conn_hash.list);
3848
3849 INIT_WORK(&hdev->rx_work, hci_rx_work);
3850 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3851 INIT_WORK(&hdev->tx_work, hci_tx_work);
3852 INIT_WORK(&hdev->power_on, hci_power_on);
3853
3854 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3855 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3856 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3857
3858 skb_queue_head_init(&hdev->rx_q);
3859 skb_queue_head_init(&hdev->cmd_q);
3860 skb_queue_head_init(&hdev->raw_q);
3861
3862 init_waitqueue_head(&hdev->req_wait_q);
3863
3864 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
3865
3866 hci_init_sysfs(hdev);
3867 discovery_init(hdev);
3868
3869 return hdev;
3870 }
3871 EXPORT_SYMBOL(hci_alloc_dev);
3872
3873 /* Free HCI device */
3874 void hci_free_dev(struct hci_dev *hdev)
3875 {
3876 /* will free via device release */
3877 put_device(&hdev->dev);
3878 }
3879 EXPORT_SYMBOL(hci_free_dev);
3880
3881 /* Register HCI device */
3882 int hci_register_dev(struct hci_dev *hdev)
3883 {
3884 int id, error;
3885
3886 if (!hdev->open || !hdev->close)
3887 return -EINVAL;
3888
3889 /* Do not allow HCI_AMP devices to register at index 0,
3890 * so the index can be used as the AMP controller ID.
3891 */
3892 switch (hdev->dev_type) {
3893 case HCI_BREDR:
3894 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3895 break;
3896 case HCI_AMP:
3897 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3898 break;
3899 default:
3900 return -EINVAL;
3901 }
3902
3903 if (id < 0)
3904 return id;
3905
3906 sprintf(hdev->name, "hci%d", id);
3907 hdev->id = id;
3908
3909 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3910
3911 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3912 WQ_MEM_RECLAIM, 1, hdev->name);
3913 if (!hdev->workqueue) {
3914 error = -ENOMEM;
3915 goto err;
3916 }
3917
3918 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3919 WQ_MEM_RECLAIM, 1, hdev->name);
3920 if (!hdev->req_workqueue) {
3921 destroy_workqueue(hdev->workqueue);
3922 error = -ENOMEM;
3923 goto err;
3924 }
3925
3926 if (!IS_ERR_OR_NULL(bt_debugfs))
3927 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3928
3929 dev_set_name(&hdev->dev, "%s", hdev->name);
3930
3931 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3932 CRYPTO_ALG_ASYNC);
3933 if (IS_ERR(hdev->tfm_aes)) {
3934 BT_ERR("Unable to create crypto context");
3935 error = PTR_ERR(hdev->tfm_aes);
3936 hdev->tfm_aes = NULL;
3937 goto err_wqueue;
3938 }
3939
3940 error = device_add(&hdev->dev);
3941 if (error < 0)
3942 goto err_tfm;
3943
3944 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
3945 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3946 hdev);
3947 if (hdev->rfkill) {
3948 if (rfkill_register(hdev->rfkill) < 0) {
3949 rfkill_destroy(hdev->rfkill);
3950 hdev->rfkill = NULL;
3951 }
3952 }
3953
3954 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3955 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3956
3957 set_bit(HCI_SETUP, &hdev->dev_flags);
3958 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3959
3960 if (hdev->dev_type == HCI_BREDR) {
3961 /* Assume BR/EDR support until proven otherwise (such as
3962		 * through reading supported features during init).
3963 */
3964 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3965 }
3966
3967 write_lock(&hci_dev_list_lock);
3968 list_add(&hdev->list, &hci_dev_list);
3969 write_unlock(&hci_dev_list_lock);
3970
3971 /* Devices that are marked for raw-only usage are unconfigured
3972 * and should not be included in normal operation.
3973 */
3974 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
3975 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
3976
3977 hci_notify(hdev, HCI_DEV_REG);
3978 hci_dev_hold(hdev);
3979
3980 queue_work(hdev->req_workqueue, &hdev->power_on);
3981
3982 return id;
3983
3984 err_tfm:
3985 crypto_free_blkcipher(hdev->tfm_aes);
3986 err_wqueue:
3987 destroy_workqueue(hdev->workqueue);
3988 destroy_workqueue(hdev->req_workqueue);
3989 err:
3990 ida_simple_remove(&hci_index_ida, hdev->id);
3991
3992 return error;
3993 }
3994 EXPORT_SYMBOL(hci_register_dev);
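
/* Illustrative driver-side sketch, not part of the original source: the
 * minimal allocate/register sequence a transport driver would use. The
 * example_* callbacks are hypothetical stubs standing in for real
 * transport operations.
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;
}

static int example_close(struct hci_dev *hdev)
{
	return 0;
}

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static int example_register(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_VIRTUAL;
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}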
3995
3996 /* Unregister HCI device */
3997 void hci_unregister_dev(struct hci_dev *hdev)
3998 {
3999 int i, id;
4000
4001 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4002
4003 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4004
4005 id = hdev->id;
4006
4007 write_lock(&hci_dev_list_lock);
4008 list_del(&hdev->list);
4009 write_unlock(&hci_dev_list_lock);
4010
4011 hci_dev_do_close(hdev);
4012
4013 for (i = 0; i < NUM_REASSEMBLY; i++)
4014 kfree_skb(hdev->reassembly[i]);
4015
4016 cancel_work_sync(&hdev->power_on);
4017
4018 if (!test_bit(HCI_INIT, &hdev->flags) &&
4019 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
4020 hci_dev_lock(hdev);
4021 mgmt_index_removed(hdev);
4022 hci_dev_unlock(hdev);
4023 }
4024
4025 /* mgmt_index_removed should take care of emptying the
4026 * pending list */
4027 BUG_ON(!list_empty(&hdev->mgmt_pending));
4028
4029 hci_notify(hdev, HCI_DEV_UNREG);
4030
4031 if (hdev->rfkill) {
4032 rfkill_unregister(hdev->rfkill);
4033 rfkill_destroy(hdev->rfkill);
4034 }
4035
4036 if (hdev->tfm_aes)
4037 crypto_free_blkcipher(hdev->tfm_aes);
4038
4039 device_del(&hdev->dev);
4040
4041 debugfs_remove_recursive(hdev->debugfs);
4042
4043 destroy_workqueue(hdev->workqueue);
4044 destroy_workqueue(hdev->req_workqueue);
4045
4046 hci_dev_lock(hdev);
4047 hci_blacklist_clear(hdev);
4048 hci_uuids_clear(hdev);
4049 hci_link_keys_clear(hdev);
4050 hci_smp_ltks_clear(hdev);
4051 hci_smp_irks_clear(hdev);
4052 hci_remote_oob_data_clear(hdev);
4053 hci_white_list_clear(hdev);
4054 hci_conn_params_clear_all(hdev);
4055 hci_dev_unlock(hdev);
4056
4057 hci_dev_put(hdev);
4058
4059 ida_simple_remove(&hci_index_ida, id);
4060 }
4061 EXPORT_SYMBOL(hci_unregister_dev);
4062
4063 /* Suspend HCI device */
4064 int hci_suspend_dev(struct hci_dev *hdev)
4065 {
4066 hci_notify(hdev, HCI_DEV_SUSPEND);
4067 return 0;
4068 }
4069 EXPORT_SYMBOL(hci_suspend_dev);
4070
4071 /* Resume HCI device */
4072 int hci_resume_dev(struct hci_dev *hdev)
4073 {
4074 hci_notify(hdev, HCI_DEV_RESUME);
4075 return 0;
4076 }
4077 EXPORT_SYMBOL(hci_resume_dev);
4078
4079 /* Receive frame from HCI drivers */
4080 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4081 {
4082 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
4083 && !test_bit(HCI_INIT, &hdev->flags))) {
4084 kfree_skb(skb);
4085 return -ENXIO;
4086 }
4087
4088 /* Incoming skb */
4089 bt_cb(skb)->incoming = 1;
4090
4091 /* Time stamp */
4092 __net_timestamp(skb);
4093
4094 skb_queue_tail(&hdev->rx_q, skb);
4095 queue_work(hdev->workqueue, &hdev->rx_work);
4096
4097 return 0;
4098 }
4099 EXPORT_SYMBOL(hci_recv_frame);
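
/* Illustrative driver-side sketch, not part of the original source:
 * handing a completely reassembled HCI event from the transport to the
 * core. The helper name is hypothetical; the packet type is tagged via
 * bt_cb() as the core expects.
 */
static int example_deliver_event(struct hci_dev *hdev, struct sk_buff *skb)
{
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}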
4100
4101 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4102 int count, __u8 index)
4103 {
4104 int len = 0;
4105 int hlen = 0;
4106 int remain = count;
4107 struct sk_buff *skb;
4108 struct bt_skb_cb *scb;
4109
4110 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4111 index >= NUM_REASSEMBLY)
4112 return -EILSEQ;
4113
4114 skb = hdev->reassembly[index];
4115
4116 if (!skb) {
4117 switch (type) {
4118 case HCI_ACLDATA_PKT:
4119 len = HCI_MAX_FRAME_SIZE;
4120 hlen = HCI_ACL_HDR_SIZE;
4121 break;
4122 case HCI_EVENT_PKT:
4123 len = HCI_MAX_EVENT_SIZE;
4124 hlen = HCI_EVENT_HDR_SIZE;
4125 break;
4126 case HCI_SCODATA_PKT:
4127 len = HCI_MAX_SCO_SIZE;
4128 hlen = HCI_SCO_HDR_SIZE;
4129 break;
4130 }
4131
4132 skb = bt_skb_alloc(len, GFP_ATOMIC);
4133 if (!skb)
4134 return -ENOMEM;
4135
4136 scb = (void *) skb->cb;
4137 scb->expect = hlen;
4138 scb->pkt_type = type;
4139
4140 hdev->reassembly[index] = skb;
4141 }
4142
4143 while (count) {
4144 scb = (void *) skb->cb;
4145 len = min_t(uint, scb->expect, count);
4146
4147 memcpy(skb_put(skb, len), data, len);
4148
4149 count -= len;
4150 data += len;
4151 scb->expect -= len;
4152 remain = count;
4153
4154 switch (type) {
4155 case HCI_EVENT_PKT:
4156 if (skb->len == HCI_EVENT_HDR_SIZE) {
4157 struct hci_event_hdr *h = hci_event_hdr(skb);
4158 scb->expect = h->plen;
4159
4160 if (skb_tailroom(skb) < scb->expect) {
4161 kfree_skb(skb);
4162 hdev->reassembly[index] = NULL;
4163 return -ENOMEM;
4164 }
4165 }
4166 break;
4167
4168 case HCI_ACLDATA_PKT:
4169 if (skb->len == HCI_ACL_HDR_SIZE) {
4170 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4171 scb->expect = __le16_to_cpu(h->dlen);
4172
4173 if (skb_tailroom(skb) < scb->expect) {
4174 kfree_skb(skb);
4175 hdev->reassembly[index] = NULL;
4176 return -ENOMEM;
4177 }
4178 }
4179 break;
4180
4181 case HCI_SCODATA_PKT:
4182 if (skb->len == HCI_SCO_HDR_SIZE) {
4183 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4184 scb->expect = h->dlen;
4185
4186 if (skb_tailroom(skb) < scb->expect) {
4187 kfree_skb(skb);
4188 hdev->reassembly[index] = NULL;
4189 return -ENOMEM;
4190 }
4191 }
4192 break;
4193 }
4194
4195 if (scb->expect == 0) {
4196 /* Complete frame */
4197
4198 bt_cb(skb)->pkt_type = type;
4199 hci_recv_frame(hdev, skb);
4200
4201 hdev->reassembly[index] = NULL;
4202 return remain;
4203 }
4204 }
4205
4206 return remain;
4207 }
4208
4209 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4210 {
4211 int rem = 0;
4212
4213 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4214 return -EILSEQ;
4215
4216 while (count) {
4217 rem = hci_reassembly(hdev, type, data, count, type - 1);
4218 if (rem < 0)
4219 return rem;
4220
4221 data += (count - rem);
4222 count = rem;
4223 }
4224
4225 return rem;
4226 }
4227 EXPORT_SYMBOL(hci_recv_fragment);
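
/* Illustrative sketch, not part of the original file: drivers that receive
 * HCI packets in arbitrary chunks can feed each chunk to hci_recv_fragment()
 * together with its packet type; complete frames are reassembled and passed
 * to hci_recv_frame() internally (buf and len are assumed driver variables):
 *
 *	err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
 *	if (err < 0)
 *		BT_ERR("Frame reassembly failed (%d)", err);
 */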
4228
4229 #define STREAM_REASSEMBLY 0
4230
4231 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4232 {
4233 int type;
4234 int rem = 0;
4235
4236 while (count) {
4237 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4238
4239 if (!skb) {
4240 struct { char type; } *pkt;
4241
4242 /* Start of the frame */
4243 pkt = data;
4244 type = pkt->type;
4245
4246 data++;
4247 count--;
4248 } else
4249 type = bt_cb(skb)->pkt_type;
4250
4251 rem = hci_reassembly(hdev, type, data, count,
4252 STREAM_REASSEMBLY);
4253 if (rem < 0)
4254 return rem;
4255
4256 data += (count - rem);
4257 count = rem;
4258 }
4259
4260 return rem;
4261 }
4262 EXPORT_SYMBOL(hci_recv_stream_fragment);
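
/* Illustrative sketch, not part of the original file: UART-style drivers
 * that only see a raw byte stream, where each frame starts with a one byte
 * packet type indicator, can push the stream as-is and let the core find
 * the packet boundaries (buf and len are assumed driver variables):
 *
 *	err = hci_recv_stream_fragment(hdev, buf, len);
 *	if (err < 0)
 *		BT_ERR("Stream reassembly failed (%d)", err);
 */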
4263
4264 /* ---- Interface to upper protocols ---- */
4265
4266 int hci_register_cb(struct hci_cb *cb)
4267 {
4268 BT_DBG("%p name %s", cb, cb->name);
4269
4270 write_lock(&hci_cb_list_lock);
4271 list_add(&cb->list, &hci_cb_list);
4272 write_unlock(&hci_cb_list_lock);
4273
4274 return 0;
4275 }
4276 EXPORT_SYMBOL(hci_register_cb);
4277
4278 int hci_unregister_cb(struct hci_cb *cb)
4279 {
4280 BT_DBG("%p name %s", cb, cb->name);
4281
4282 write_lock(&hci_cb_list_lock);
4283 list_del(&cb->list);
4284 write_unlock(&hci_cb_list_lock);
4285
4286 return 0;
4287 }
4288 EXPORT_SYMBOL(hci_unregister_cb);
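
/* Illustrative sketch, not part of the original file: an upper protocol
 * registers a struct hci_cb to receive HCI notifications; only the name
 * member is shown here, the callback members depend on the protocol and
 * example_cb is an assumed name:
 *
 *	static struct hci_cb example_cb = {
 *		.name = "example",
 *	};
 *
 *	hci_register_cb(&example_cb);
 *	...
 *	hci_unregister_cb(&example_cb);
 */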
4289
4290 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4291 {
4292 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4293
4294 /* Time stamp */
4295 __net_timestamp(skb);
4296
4297 /* Send copy to monitor */
4298 hci_send_to_monitor(hdev, skb);
4299
4300 if (atomic_read(&hdev->promisc)) {
4301 /* Send copy to the sockets */
4302 hci_send_to_sock(hdev, skb);
4303 }
4304
4305 /* Get rid of skb owner, prior to sending to the driver. */
4306 skb_orphan(skb);
4307
4308 if (hdev->send(hdev, skb) < 0)
4309 BT_ERR("%s sending frame failed", hdev->name);
4310 }
4311
4312 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4313 {
4314 skb_queue_head_init(&req->cmd_q);
4315 req->hdev = hdev;
4316 req->err = 0;
4317 }
4318
4319 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4320 {
4321 struct hci_dev *hdev = req->hdev;
4322 struct sk_buff *skb;
4323 unsigned long flags;
4324
4325 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4326
4327 /* If an error occurred during request building, remove all HCI
4328 * commands queued on the HCI request queue.
4329 */
4330 if (req->err) {
4331 skb_queue_purge(&req->cmd_q);
4332 return req->err;
4333 }
4334
4335 /* Do not allow empty requests */
4336 if (skb_queue_empty(&req->cmd_q))
4337 return -ENODATA;
4338
4339 skb = skb_peek_tail(&req->cmd_q);
4340 bt_cb(skb)->req.complete = complete;
4341
4342 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4343 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4344 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4345
4346 queue_work(hdev->workqueue, &hdev->cmd_work);
4347
4348 return 0;
4349 }
4350
4351 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4352 u32 plen, const void *param)
4353 {
4354 int len = HCI_COMMAND_HDR_SIZE + plen;
4355 struct hci_command_hdr *hdr;
4356 struct sk_buff *skb;
4357
4358 skb = bt_skb_alloc(len, GFP_ATOMIC);
4359 if (!skb)
4360 return NULL;
4361
4362 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4363 hdr->opcode = cpu_to_le16(opcode);
4364 hdr->plen = plen;
4365
4366 if (plen)
4367 memcpy(skb_put(skb, plen), param, plen);
4368
4369 BT_DBG("skb len %d", skb->len);
4370
4371 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4372
4373 return skb;
4374 }
4375
4376 /* Send HCI command */
4377 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4378 const void *param)
4379 {
4380 struct sk_buff *skb;
4381
4382 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4383
4384 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4385 if (!skb) {
4386 BT_ERR("%s no memory for command", hdev->name);
4387 return -ENOMEM;
4388 }
4389
4390 /* Stand-alone HCI commands must be flagged as
4391 * single-command requests.
4392 */
4393 bt_cb(skb)->req.start = true;
4394
4395 skb_queue_tail(&hdev->cmd_q, skb);
4396 queue_work(hdev->workqueue, &hdev->cmd_work);
4397
4398 return 0;
4399 }
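
/* Illustrative sketch, not part of the original file: a stand-alone command
 * is queued by passing the opcode and an optional parameter block; the
 * parameterless HCI_OP_RESET is used here purely as an example:
 *
 *	err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 */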
4400
4401 /* Queue a command to an asynchronous HCI request */
4402 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4403 const void *param, u8 event)
4404 {
4405 struct hci_dev *hdev = req->hdev;
4406 struct sk_buff *skb;
4407
4408 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4409
4410 /* If an error occurred during request building, there is no point in
4411 * queueing the HCI command. We can simply return.
4412 */
4413 if (req->err)
4414 return;
4415
4416 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4417 if (!skb) {
4418 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4419 hdev->name, opcode);
4420 req->err = -ENOMEM;
4421 return;
4422 }
4423
4424 if (skb_queue_empty(&req->cmd_q))
4425 bt_cb(skb)->req.start = true;
4426
4427 bt_cb(skb)->req.event = event;
4428
4429 skb_queue_tail(&req->cmd_q, skb);
4430 }
4431
4432 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4433 const void *param)
4434 {
4435 hci_req_add_ev(req, opcode, plen, param, 0);
4436 }
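
/* Illustrative sketch, not part of the original file: callers batch several
 * commands into one asynchronous request and run it with a single completion
 * callback (example_complete is an assumed name); hci_update_background_scan()
 * below is an in-tree user of this pattern:
 *
 *	struct hci_cp_le_set_scan_enable cp = { .enable = LE_SCAN_DISABLE };
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, example_complete);
 */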
4437
4438 /* Get data from the previously sent command */
4439 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4440 {
4441 struct hci_command_hdr *hdr;
4442
4443 if (!hdev->sent_cmd)
4444 return NULL;
4445
4446 hdr = (void *) hdev->sent_cmd->data;
4447
4448 if (hdr->opcode != cpu_to_le16(opcode))
4449 return NULL;
4450
4451 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4452
4453 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4454 }
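
/* Illustrative sketch, not part of the original file: command complete
 * handlers use hci_sent_cmd_data() to recover the parameters of the command
 * that produced the event, for example for an LE scan enable command:
 *
 *	struct hci_cp_le_set_scan_enable *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
 *	if (!cp)
 *		return;
 */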
4455
4456 /* Send ACL data */
4457 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4458 {
4459 struct hci_acl_hdr *hdr;
4460 int len = skb->len;
4461
4462 skb_push(skb, HCI_ACL_HDR_SIZE);
4463 skb_reset_transport_header(skb);
4464 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4465 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4466 hdr->dlen = cpu_to_le16(len);
4467 }
4468
4469 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4470 struct sk_buff *skb, __u16 flags)
4471 {
4472 struct hci_conn *conn = chan->conn;
4473 struct hci_dev *hdev = conn->hdev;
4474 struct sk_buff *list;
4475
4476 skb->len = skb_headlen(skb);
4477 skb->data_len = 0;
4478
4479 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4480
4481 switch (hdev->dev_type) {
4482 case HCI_BREDR:
4483 hci_add_acl_hdr(skb, conn->handle, flags);
4484 break;
4485 case HCI_AMP:
4486 hci_add_acl_hdr(skb, chan->handle, flags);
4487 break;
4488 default:
4489 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4490 return;
4491 }
4492
4493 list = skb_shinfo(skb)->frag_list;
4494 if (!list) {
4495 /* Non fragmented */
4496 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4497
4498 skb_queue_tail(queue, skb);
4499 } else {
4500 /* Fragmented */
4501 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4502
4503 skb_shinfo(skb)->frag_list = NULL;
4504
4505 /* Queue all fragments atomically */
4506 spin_lock(&queue->lock);
4507
4508 __skb_queue_tail(queue, skb);
4509
4510 flags &= ~ACL_START;
4511 flags |= ACL_CONT;
4512 do {
4513 skb = list; list = list->next;
4514
4515 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4516 hci_add_acl_hdr(skb, conn->handle, flags);
4517
4518 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4519
4520 __skb_queue_tail(queue, skb);
4521 } while (list);
4522
4523 spin_unlock(&queue->lock);
4524 }
4525 }
4526
4527 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4528 {
4529 struct hci_dev *hdev = chan->conn->hdev;
4530
4531 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4532
4533 hci_queue_acl(chan, &chan->data_q, skb, flags);
4534
4535 queue_work(hdev->workqueue, &hdev->tx_work);
4536 }
4537
4538 /* Send SCO data */
4539 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4540 {
4541 struct hci_dev *hdev = conn->hdev;
4542 struct hci_sco_hdr hdr;
4543
4544 BT_DBG("%s len %d", hdev->name, skb->len);
4545
4546 hdr.handle = cpu_to_le16(conn->handle);
4547 hdr.dlen = skb->len;
4548
4549 skb_push(skb, HCI_SCO_HDR_SIZE);
4550 skb_reset_transport_header(skb);
4551 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4552
4553 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4554
4555 skb_queue_tail(&conn->data_q, skb);
4556 queue_work(hdev->workqueue, &hdev->tx_work);
4557 }
4558
4559 /* ---- HCI TX task (outgoing data) ---- */
4560
4561 /* HCI Connection scheduler */
4562 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4563 int *quote)
4564 {
4565 struct hci_conn_hash *h = &hdev->conn_hash;
4566 struct hci_conn *conn = NULL, *c;
4567 unsigned int num = 0, min = ~0;
4568
4569 /* We don't have to lock the device here. Connections are always
4570 * added and removed with the TX task disabled. */
4571
4572 rcu_read_lock();
4573
4574 list_for_each_entry_rcu(c, &h->list, list) {
4575 if (c->type != type || skb_queue_empty(&c->data_q))
4576 continue;
4577
4578 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4579 continue;
4580
4581 num++;
4582
4583 if (c->sent < min) {
4584 min = c->sent;
4585 conn = c;
4586 }
4587
4588 if (hci_conn_num(hdev, type) == num)
4589 break;
4590 }
4591
4592 rcu_read_unlock();
4593
4594 if (conn) {
4595 int cnt, q;
4596
4597 switch (conn->type) {
4598 case ACL_LINK:
4599 cnt = hdev->acl_cnt;
4600 break;
4601 case SCO_LINK:
4602 case ESCO_LINK:
4603 cnt = hdev->sco_cnt;
4604 break;
4605 case LE_LINK:
4606 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4607 break;
4608 default:
4609 cnt = 0;
4610 BT_ERR("Unknown link type");
4611 }
4612
4613 q = cnt / num;
4614 *quote = q ? q : 1;
4615 } else
4616 *quote = 0;
4617
4618 BT_DBG("conn %p quote %d", conn, *quote);
4619 return conn;
4620 }
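
/* Worked example (illustrative): with hdev->sco_cnt == 8 free buffers and
 * num == 3 SCO connections holding queued data, each connection gets a
 * quota of 8 / 3 = 2 packets per scheduling round; a zero quotient is
 * rounded up to 1 so a connection with pending data is never starved.
 */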
4621
4622 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4623 {
4624 struct hci_conn_hash *h = &hdev->conn_hash;
4625 struct hci_conn *c;
4626
4627 BT_ERR("%s link tx timeout", hdev->name);
4628
4629 rcu_read_lock();
4630
4631 /* Kill stalled connections */
4632 list_for_each_entry_rcu(c, &h->list, list) {
4633 if (c->type == type && c->sent) {
4634 BT_ERR("%s killing stalled connection %pMR",
4635 hdev->name, &c->dst);
4636 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4637 }
4638 }
4639
4640 rcu_read_unlock();
4641 }
4642
4643 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4644 int *quote)
4645 {
4646 struct hci_conn_hash *h = &hdev->conn_hash;
4647 struct hci_chan *chan = NULL;
4648 unsigned int num = 0, min = ~0, cur_prio = 0;
4649 struct hci_conn *conn;
4650 int cnt, q, conn_num = 0;
4651
4652 BT_DBG("%s", hdev->name);
4653
4654 rcu_read_lock();
4655
4656 list_for_each_entry_rcu(conn, &h->list, list) {
4657 struct hci_chan *tmp;
4658
4659 if (conn->type != type)
4660 continue;
4661
4662 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4663 continue;
4664
4665 conn_num++;
4666
4667 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4668 struct sk_buff *skb;
4669
4670 if (skb_queue_empty(&tmp->data_q))
4671 continue;
4672
4673 skb = skb_peek(&tmp->data_q);
4674 if (skb->priority < cur_prio)
4675 continue;
4676
4677 if (skb->priority > cur_prio) {
4678 num = 0;
4679 min = ~0;
4680 cur_prio = skb->priority;
4681 }
4682
4683 num++;
4684
4685 if (conn->sent < min) {
4686 min = conn->sent;
4687 chan = tmp;
4688 }
4689 }
4690
4691 if (hci_conn_num(hdev, type) == conn_num)
4692 break;
4693 }
4694
4695 rcu_read_unlock();
4696
4697 if (!chan)
4698 return NULL;
4699
4700 switch (chan->conn->type) {
4701 case ACL_LINK:
4702 cnt = hdev->acl_cnt;
4703 break;
4704 case AMP_LINK:
4705 cnt = hdev->block_cnt;
4706 break;
4707 case SCO_LINK:
4708 case ESCO_LINK:
4709 cnt = hdev->sco_cnt;
4710 break;
4711 case LE_LINK:
4712 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4713 break;
4714 default:
4715 cnt = 0;
4716 BT_ERR("Unknown link type");
4717 }
4718
4719 q = cnt / num;
4720 *quote = q ? q : 1;
4721 BT_DBG("chan %p quote %d", chan, *quote);
4722 return chan;
4723 }
4724
4725 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4726 {
4727 struct hci_conn_hash *h = &hdev->conn_hash;
4728 struct hci_conn *conn;
4729 int num = 0;
4730
4731 BT_DBG("%s", hdev->name);
4732
4733 rcu_read_lock();
4734
4735 list_for_each_entry_rcu(conn, &h->list, list) {
4736 struct hci_chan *chan;
4737
4738 if (conn->type != type)
4739 continue;
4740
4741 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4742 continue;
4743
4744 num++;
4745
4746 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4747 struct sk_buff *skb;
4748
4749 if (chan->sent) {
4750 chan->sent = 0;
4751 continue;
4752 }
4753
4754 if (skb_queue_empty(&chan->data_q))
4755 continue;
4756
4757 skb = skb_peek(&chan->data_q);
4758 if (skb->priority >= HCI_PRIO_MAX - 1)
4759 continue;
4760
4761 skb->priority = HCI_PRIO_MAX - 1;
4762
4763 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4764 skb->priority);
4765 }
4766
4767 if (hci_conn_num(hdev, type) == num)
4768 break;
4769 }
4770
4771 rcu_read_unlock();
4772
4773 }
4774
4775 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4776 {
4777 /* Calculate count of blocks used by this packet */
4778 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4779 }
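
/* Worked example (illustrative, block size assumed): with a block length of
 * 672 bytes, a 900 byte ACL payload (skb->len - HCI_ACL_HDR_SIZE == 900)
 * occupies DIV_ROUND_UP(900, 672) == 2 controller buffer blocks.
 */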
4780
4781 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4782 {
4783 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4784 /* ACL tx timeout must be longer than maximum
4785 * link supervision timeout (40.9 seconds) */
4786 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4787 HCI_ACL_TX_TIMEOUT))
4788 hci_link_tx_to(hdev, ACL_LINK);
4789 }
4790 }
4791
4792 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4793 {
4794 unsigned int cnt = hdev->acl_cnt;
4795 struct hci_chan *chan;
4796 struct sk_buff *skb;
4797 int quote;
4798
4799 __check_timeout(hdev, cnt);
4800
4801 while (hdev->acl_cnt &&
4802 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4803 u32 priority = (skb_peek(&chan->data_q))->priority;
4804 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4805 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4806 skb->len, skb->priority);
4807
4808 /* Stop if priority has changed */
4809 if (skb->priority < priority)
4810 break;
4811
4812 skb = skb_dequeue(&chan->data_q);
4813
4814 hci_conn_enter_active_mode(chan->conn,
4815 bt_cb(skb)->force_active);
4816
4817 hci_send_frame(hdev, skb);
4818 hdev->acl_last_tx = jiffies;
4819
4820 hdev->acl_cnt--;
4821 chan->sent++;
4822 chan->conn->sent++;
4823 }
4824 }
4825
4826 if (cnt != hdev->acl_cnt)
4827 hci_prio_recalculate(hdev, ACL_LINK);
4828 }
4829
4830 static void hci_sched_acl_blk(struct hci_dev *hdev)
4831 {
4832 unsigned int cnt = hdev->block_cnt;
4833 struct hci_chan *chan;
4834 struct sk_buff *skb;
4835 int quote;
4836 u8 type;
4837
4838 __check_timeout(hdev, cnt);
4839
4840 BT_DBG("%s", hdev->name);
4841
4842 if (hdev->dev_type == HCI_AMP)
4843 type = AMP_LINK;
4844 else
4845 type = ACL_LINK;
4846
4847 while (hdev->block_cnt > 0 &&
4848 (chan = hci_chan_sent(hdev, type, &quote))) {
4849 u32 priority = (skb_peek(&chan->data_q))->priority;
4850 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4851 int blocks;
4852
4853 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4854 skb->len, skb->priority);
4855
4856 /* Stop if priority has changed */
4857 if (skb->priority < priority)
4858 break;
4859
4860 skb = skb_dequeue(&chan->data_q);
4861
4862 blocks = __get_blocks(hdev, skb);
4863 if (blocks > hdev->block_cnt)
4864 return;
4865
4866 hci_conn_enter_active_mode(chan->conn,
4867 bt_cb(skb)->force_active);
4868
4869 hci_send_frame(hdev, skb);
4870 hdev->acl_last_tx = jiffies;
4871
4872 hdev->block_cnt -= blocks;
4873 quote -= blocks;
4874
4875 chan->sent += blocks;
4876 chan->conn->sent += blocks;
4877 }
4878 }
4879
4880 if (cnt != hdev->block_cnt)
4881 hci_prio_recalculate(hdev, type);
4882 }
4883
4884 static void hci_sched_acl(struct hci_dev *hdev)
4885 {
4886 BT_DBG("%s", hdev->name);
4887
4888 /* No ACL link over BR/EDR controller */
4889 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4890 return;
4891
4892 /* No AMP link over AMP controller */
4893 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
4894 return;
4895
4896 switch (hdev->flow_ctl_mode) {
4897 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4898 hci_sched_acl_pkt(hdev);
4899 break;
4900
4901 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4902 hci_sched_acl_blk(hdev);
4903 break;
4904 }
4905 }
4906
4907 /* Schedule SCO */
4908 static void hci_sched_sco(struct hci_dev *hdev)
4909 {
4910 struct hci_conn *conn;
4911 struct sk_buff *skb;
4912 int quote;
4913
4914 BT_DBG("%s", hdev->name);
4915
4916 if (!hci_conn_num(hdev, SCO_LINK))
4917 return;
4918
4919 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4920 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4921 BT_DBG("skb %p len %d", skb, skb->len);
4922 hci_send_frame(hdev, skb);
4923
4924 conn->sent++;
4925 if (conn->sent == ~0)
4926 conn->sent = 0;
4927 }
4928 }
4929 }
4930
4931 static void hci_sched_esco(struct hci_dev *hdev)
4932 {
4933 struct hci_conn *conn;
4934 struct sk_buff *skb;
4935 int quote;
4936
4937 BT_DBG("%s", hdev->name);
4938
4939 if (!hci_conn_num(hdev, ESCO_LINK))
4940 return;
4941
4942 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4943 &quote))) {
4944 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4945 BT_DBG("skb %p len %d", skb, skb->len);
4946 hci_send_frame(hdev, skb);
4947
4948 conn->sent++;
4949 if (conn->sent == ~0)
4950 conn->sent = 0;
4951 }
4952 }
4953 }
4954
4955 static void hci_sched_le(struct hci_dev *hdev)
4956 {
4957 struct hci_chan *chan;
4958 struct sk_buff *skb;
4959 int quote, cnt, tmp;
4960
4961 BT_DBG("%s", hdev->name);
4962
4963 if (!hci_conn_num(hdev, LE_LINK))
4964 return;
4965
4966 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4967 /* LE tx timeout must be longer than maximum
4968 * link supervision timeout (40.9 seconds) */
4969 if (!hdev->le_cnt && hdev->le_pkts &&
4970 time_after(jiffies, hdev->le_last_tx + HZ * 45))
4971 hci_link_tx_to(hdev, LE_LINK);
4972 }
4973
4974 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
4975 tmp = cnt;
4976 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
4977 u32 priority = (skb_peek(&chan->data_q))->priority;
4978 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4979 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4980 skb->len, skb->priority);
4981
4982 /* Stop if priority has changed */
4983 if (skb->priority < priority)
4984 break;
4985
4986 skb = skb_dequeue(&chan->data_q);
4987
4988 hci_send_frame(hdev, skb);
4989 hdev->le_last_tx = jiffies;
4990
4991 cnt--;
4992 chan->sent++;
4993 chan->conn->sent++;
4994 }
4995 }
4996
4997 if (hdev->le_pkts)
4998 hdev->le_cnt = cnt;
4999 else
5000 hdev->acl_cnt = cnt;
5001
5002 if (cnt != tmp)
5003 hci_prio_recalculate(hdev, LE_LINK);
5004 }
5005
5006 static void hci_tx_work(struct work_struct *work)
5007 {
5008 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5009 struct sk_buff *skb;
5010
5011 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5012 hdev->sco_cnt, hdev->le_cnt);
5013
5014 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5015 /* Schedule queues and send stuff to HCI driver */
5016 hci_sched_acl(hdev);
5017 hci_sched_sco(hdev);
5018 hci_sched_esco(hdev);
5019 hci_sched_le(hdev);
5020 }
5021
5022 /* Send next queued raw (unknown type) packet */
5023 while ((skb = skb_dequeue(&hdev->raw_q)))
5024 hci_send_frame(hdev, skb);
5025 }
5026
5027 /* ----- HCI RX task (incoming data processing) ----- */
5028
5029 /* ACL data packet */
5030 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5031 {
5032 struct hci_acl_hdr *hdr = (void *) skb->data;
5033 struct hci_conn *conn;
5034 __u16 handle, flags;
5035
5036 skb_pull(skb, HCI_ACL_HDR_SIZE);
5037
5038 handle = __le16_to_cpu(hdr->handle);
5039 flags = hci_flags(handle);
5040 handle = hci_handle(handle);
5041
5042 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5043 handle, flags);
5044
5045 hdev->stat.acl_rx++;
5046
5047 hci_dev_lock(hdev);
5048 conn = hci_conn_hash_lookup_handle(hdev, handle);
5049 hci_dev_unlock(hdev);
5050
5051 if (conn) {
5052 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5053
5054 /* Send to upper protocol */
5055 l2cap_recv_acldata(conn, skb, flags);
5056 return;
5057 } else {
5058 BT_ERR("%s ACL packet for unknown connection handle %d",
5059 hdev->name, handle);
5060 }
5061
5062 kfree_skb(skb);
5063 }
5064
5065 /* SCO data packet */
5066 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5067 {
5068 struct hci_sco_hdr *hdr = (void *) skb->data;
5069 struct hci_conn *conn;
5070 __u16 handle;
5071
5072 skb_pull(skb, HCI_SCO_HDR_SIZE);
5073
5074 handle = __le16_to_cpu(hdr->handle);
5075
5076 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5077
5078 hdev->stat.sco_rx++;
5079
5080 hci_dev_lock(hdev);
5081 conn = hci_conn_hash_lookup_handle(hdev, handle);
5082 hci_dev_unlock(hdev);
5083
5084 if (conn) {
5085 /* Send to upper protocol */
5086 sco_recv_scodata(conn, skb);
5087 return;
5088 } else {
5089 BT_ERR("%s SCO packet for unknown connection handle %d",
5090 hdev->name, handle);
5091 }
5092
5093 kfree_skb(skb);
5094 }
5095
5096 static bool hci_req_is_complete(struct hci_dev *hdev)
5097 {
5098 struct sk_buff *skb;
5099
5100 skb = skb_peek(&hdev->cmd_q);
5101 if (!skb)
5102 return true;
5103
5104 return bt_cb(skb)->req.start;
5105 }
5106
5107 static void hci_resend_last(struct hci_dev *hdev)
5108 {
5109 struct hci_command_hdr *sent;
5110 struct sk_buff *skb;
5111 u16 opcode;
5112
5113 if (!hdev->sent_cmd)
5114 return;
5115
5116 sent = (void *) hdev->sent_cmd->data;
5117 opcode = __le16_to_cpu(sent->opcode);
5118 if (opcode == HCI_OP_RESET)
5119 return;
5120
5121 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5122 if (!skb)
5123 return;
5124
5125 skb_queue_head(&hdev->cmd_q, skb);
5126 queue_work(hdev->workqueue, &hdev->cmd_work);
5127 }
5128
5129 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5130 {
5131 hci_req_complete_t req_complete = NULL;
5132 struct sk_buff *skb;
5133 unsigned long flags;
5134
5135 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5136
5137 /* If the completed command doesn't match the last one that was
5138 * sent, we need to do special handling of it.
5139 */
5140 if (!hci_sent_cmd_data(hdev, opcode)) {
5141 /* Some CSR-based controllers generate a spontaneous
5142 * reset complete event during init and any pending
5143 * command will never be completed. In such a case we
5144 * need to resend whatever was the last sent
5145 * command.
5146 */
5147 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5148 hci_resend_last(hdev);
5149
5150 return;
5151 }
5152
5153 /* If the command succeeded and there are still more commands in
5154 * this request, the request is not yet complete.
5155 */
5156 if (!status && !hci_req_is_complete(hdev))
5157 return;
5158
5159 /* If this was the last command in a request, the complete
5160 * callback would be found in hdev->sent_cmd instead of the
5161 * command queue (hdev->cmd_q).
5162 */
5163 if (hdev->sent_cmd) {
5164 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5165
5166 if (req_complete) {
5167 /* We must set the complete callback to NULL to
5168 * avoid calling the callback more than once if
5169 * this function gets called again.
5170 */
5171 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5172
5173 goto call_complete;
5174 }
5175 }
5176
5177 /* Remove all pending commands belonging to this request */
5178 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5179 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5180 if (bt_cb(skb)->req.start) {
5181 __skb_queue_head(&hdev->cmd_q, skb);
5182 break;
5183 }
5184
5185 req_complete = bt_cb(skb)->req.complete;
5186 kfree_skb(skb);
5187 }
5188 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5189
5190 call_complete:
5191 if (req_complete)
5192 req_complete(hdev, status);
5193 }
5194
5195 static void hci_rx_work(struct work_struct *work)
5196 {
5197 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5198 struct sk_buff *skb;
5199
5200 BT_DBG("%s", hdev->name);
5201
5202 while ((skb = skb_dequeue(&hdev->rx_q))) {
5203 /* Send copy to monitor */
5204 hci_send_to_monitor(hdev, skb);
5205
5206 if (atomic_read(&hdev->promisc)) {
5207 /* Send copy to the sockets */
5208 hci_send_to_sock(hdev, skb);
5209 }
5210
5211 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5212 kfree_skb(skb);
5213 continue;
5214 }
5215
5216 if (test_bit(HCI_INIT, &hdev->flags)) {
5217 /* Don't process data packets in this state. */
5218 switch (bt_cb(skb)->pkt_type) {
5219 case HCI_ACLDATA_PKT:
5220 case HCI_SCODATA_PKT:
5221 kfree_skb(skb);
5222 continue;
5223 }
5224 }
5225
5226 /* Process frame */
5227 switch (bt_cb(skb)->pkt_type) {
5228 case HCI_EVENT_PKT:
5229 BT_DBG("%s Event packet", hdev->name);
5230 hci_event_packet(hdev, skb);
5231 break;
5232
5233 case HCI_ACLDATA_PKT:
5234 BT_DBG("%s ACL data packet", hdev->name);
5235 hci_acldata_packet(hdev, skb);
5236 break;
5237
5238 case HCI_SCODATA_PKT:
5239 BT_DBG("%s SCO data packet", hdev->name);
5240 hci_scodata_packet(hdev, skb);
5241 break;
5242
5243 default:
5244 kfree_skb(skb);
5245 break;
5246 }
5247 }
5248 }
5249
5250 static void hci_cmd_work(struct work_struct *work)
5251 {
5252 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5253 struct sk_buff *skb;
5254
5255 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5256 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5257
5258 /* Send queued commands */
5259 if (atomic_read(&hdev->cmd_cnt)) {
5260 skb = skb_dequeue(&hdev->cmd_q);
5261 if (!skb)
5262 return;
5263
5264 kfree_skb(hdev->sent_cmd);
5265
5266 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5267 if (hdev->sent_cmd) {
5268 atomic_dec(&hdev->cmd_cnt);
5269 hci_send_frame(hdev, skb);
5270 if (test_bit(HCI_RESET, &hdev->flags))
5271 cancel_delayed_work(&hdev->cmd_timer);
5272 else
5273 schedule_delayed_work(&hdev->cmd_timer,
5274 HCI_CMD_TIMEOUT);
5275 } else {
5276 skb_queue_head(&hdev->cmd_q, skb);
5277 queue_work(hdev->workqueue, &hdev->cmd_work);
5278 }
5279 }
5280 }
5281
5282 void hci_req_add_le_scan_disable(struct hci_request *req)
5283 {
5284 struct hci_cp_le_set_scan_enable cp;
5285
5286 memset(&cp, 0, sizeof(cp));
5287 cp.enable = LE_SCAN_DISABLE;
5288 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5289 }
5290
5291 void hci_req_add_le_passive_scan(struct hci_request *req)
5292 {
5293 struct hci_cp_le_set_scan_param param_cp;
5294 struct hci_cp_le_set_scan_enable enable_cp;
5295 struct hci_dev *hdev = req->hdev;
5296 u8 own_addr_type;
5297
5298 /* Set require_privacy to false since no SCAN_REQ is sent
5299 * during passive scanning. Not using an unresolvable address
5300 * here is important so that peer devices using direct
5301 * advertising with our address will be correctly reported
5302 * by the controller.
5303 */
5304 if (hci_update_random_address(req, false, &own_addr_type))
5305 return;
5306
5307 memset(&param_cp, 0, sizeof(param_cp));
5308 param_cp.type = LE_SCAN_PASSIVE;
5309 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5310 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5311 param_cp.own_address_type = own_addr_type;
5312 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5313 &param_cp);
5314
5315 memset(&enable_cp, 0, sizeof(enable_cp));
5316 enable_cp.enable = LE_SCAN_ENABLE;
5317 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5318 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5319 &enable_cp);
5320 }
5321
5322 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5323 {
5324 if (status)
5325 BT_DBG("HCI request failed to update background scanning: "
5326 "status 0x%2.2x", status);
5327 }
5328
5329 /* This function controls the background scanning based on hdev->pend_le_conns
5330 * list. If there are pending LE connections, we start the background scanning,
5331 * otherwise we stop it.
5332 *
5333 * This function requires the caller to hold hdev->lock.
5334 */
5335 void hci_update_background_scan(struct hci_dev *hdev)
5336 {
5337 struct hci_request req;
5338 struct hci_conn *conn;
5339 int err;
5340
5341 if (!test_bit(HCI_UP, &hdev->flags) ||
5342 test_bit(HCI_INIT, &hdev->flags) ||
5343 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5344 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5345 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5346 return;
5347
5348 hci_req_init(&req, hdev);
5349
5350 if (list_empty(&hdev->pend_le_conns) &&
5351 list_empty(&hdev->pend_le_reports)) {
5352 /* If there are no pending LE connections or devices
5353 * to be scanned for, we should stop the background
5354 * scanning.
5355 */
5356
5357 /* If controller is not scanning we are done. */
5358 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5359 return;
5360
5361 hci_req_add_le_scan_disable(&req);
5362
5363 BT_DBG("%s stopping background scanning", hdev->name);
5364 } else {
5365 /* If there is at least one pending LE connection, we should
5366 * keep the background scan running.
5367 */
5368
5369 /* If controller is connecting, we should not start scanning
5370 * since some controllers are not able to scan and connect at
5371 * the same time.
5372 */
5373 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5374 if (conn)
5375 return;
5376
5377 /* If controller is currently scanning, we stop it to ensure we
5378 * don't miss any advertising (due to duplicates filter).
5379 */
5380 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5381 hci_req_add_le_scan_disable(&req);
5382
5383 hci_req_add_le_passive_scan(&req);
5384
5385 BT_DBG("%s starting background scanning", hdev->name);
5386 }
5387
5388 err = hci_req_run(&req, update_background_scan_complete);
5389 if (err)
5390 BT_ERR("Failed to run HCI request: err %d", err);
5391 }