/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE		0
#define HCI_REQ_PEND		1
#define HCI_REQ_CANCELED	2

#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
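
/* For illustration only: with debugfs mounted in the usual location, DUT
 * mode can be toggled from userspace through this entry, e.g. (assuming
 * the controller is hci0):
 *
 *	echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * Writing 'Y' sends HCI_OP_ENABLE_DUT_MODE; writing 'N' resets the
 * controller, since Device Under Test mode can only be left via a reset.
 */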

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored with their bytes in
		 * reverse order compared to the textual (big endian)
		 * representation. So flip them into the right order for
		 * the %pUb modifier, i.e. print uuid[15] first and
		 * uuid[0] last.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
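
/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations boilerplate for a
 * single-value debugfs attribute: reads call the getter and format the u64
 * with the given printf format, writes parse a number and call the setter.
 * Passing NULL as the setter (as above) makes the attribute read-only.
 */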

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
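
/* Sniff intervals are expressed in baseband slots of 0.625 ms each, and
 * the sniff interval is required to be an even number of slots, which is
 * why the setters above reject odd values and keep min <= max.
 */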

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	rcu_read_unlock();

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_ltk *ltk;

	rcu_read_lock();
	list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	rcu_read_unlock();

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
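
/* The LE connection interval is in units of 1.25 ms, so the allowed range
 * of 0x0006-0x0c80 above corresponds to 7.5 ms up to 4 seconds, matching
 * the limits defined by the Bluetooth Core Specification.
 */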

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");
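
/* The LE supervision timeout is in units of 10 ms, so 0x000a-0x0c80 spans
 * 100 ms to 32 seconds. The specification additionally requires the timeout
 * to be larger than (1 + latency) * interval * 2; that relation is not
 * enforced here.
 */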

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");
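
/* The advertising channel map is a 3-bit mask: bit 0 enables channel 37,
 * bit 1 channel 38 and bit 2 channel 39. A value of 0x07 (the maximum
 * accepted above) therefore advertises on all three channels.
 */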

static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");
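
/* Advertising intervals are in units of 0.625 ms, so the accepted range of
 * 0x0020-0x4000 corresponds to 20 ms up to 10.24 seconds.
 */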

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
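
/* A minimal usage sketch for drivers: send one command and wait for its
 * Command Complete parameters. The caller owns the returned skb and must
 * serialize with hci_req_lock(), as dut_mode_write() above does. Error
 * handling is condensed for illustration:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	// skb->data now holds the command's return parameters
 *	kfree_skb(skb);
 */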

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
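
/* The callback passed to hci_req_sync() only queues commands on the
 * request; nothing goes on the wire until hci_req_run() fires. As an
 * illustrative sketch (my_req is hypothetical, not part of this file):
 *
 *	static void my_req(struct hci_request *req, unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *	}
 *
 *	err = hci_req_sync(hdev, my_req, 0, HCI_INIT_TIMEOUT);
 */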

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout: 20 seconds (0x7d00 slots * 0.625 ms) */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
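
/* The event mask is a little-endian 64-bit bitfield: event mask bit n lives
 * in events[n / 8] as (1 << (n % 8)). For example, Inquiry Result with RSSI
 * is mask bit 33, which is why the code above sets events[4] |= 0x02.
 */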

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, the host features page should be
		 * available as well. However some controllers list the
		 * max_page as 0 as long as SSP has not been enabled. To
		 * achieve proper debugging output, force the minimum
		 * max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers single-mode BR/EDR, single-mode LE and dual-mode
	 * BR/EDR/LE controllers. AMP controllers only need the first stage
	 * init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
			    &device_list_fops);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
				    hdev, &adv_min_interval_fops);
		debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
				    hdev, &adv_max_interval_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);

		smp_register(hdev);
	}

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
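
/* Every successful hci_dev_get() must be balanced by hci_dev_put() once the
 * caller is done with the device, e.g.:
 *
 *	hdev = hci_dev_get(0);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */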
1978
1979 /* ---- Inquiry support ---- */
1980
1981 bool hci_discovery_active(struct hci_dev *hdev)
1982 {
1983 struct discovery_state *discov = &hdev->discovery;
1984
1985 switch (discov->state) {
1986 case DISCOVERY_FINDING:
1987 case DISCOVERY_RESOLVING:
1988 return true;
1989
1990 default:
1991 return false;
1992 }
1993 }
1994
1995 void hci_discovery_set_state(struct hci_dev *hdev, int state)
1996 {
1997 int old_state = hdev->discovery.state;
1998
1999 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
2000
2001 if (old_state == state)
2002 return;
2003
2004 hdev->discovery.state = state;
2005
2006 switch (state) {
2007 case DISCOVERY_STOPPED:
2008 hci_update_background_scan(hdev);
2009
2010 if (old_state != DISCOVERY_STARTING)
2011 mgmt_discovering(hdev, 0);
2012 break;
2013 case DISCOVERY_STARTING:
2014 break;
2015 case DISCOVERY_FINDING:
2016 mgmt_discovering(hdev, 1);
2017 break;
2018 case DISCOVERY_RESOLVING:
2019 break;
2020 case DISCOVERY_STOPPING:
2021 break;
2022 }
2023 }
2024
2025 void hci_inquiry_cache_flush(struct hci_dev *hdev)
2026 {
2027 struct discovery_state *cache = &hdev->discovery;
2028 struct inquiry_entry *p, *n;
2029
2030 list_for_each_entry_safe(p, n, &cache->all, all) {
2031 list_del(&p->all);
2032 kfree(p);
2033 }
2034
2035 INIT_LIST_HEAD(&cache->unknown);
2036 INIT_LIST_HEAD(&cache->resolve);
2037 }
2038
2039 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2040 bdaddr_t *bdaddr)
2041 {
2042 struct discovery_state *cache = &hdev->discovery;
2043 struct inquiry_entry *e;
2044
2045 BT_DBG("cache %p, %pMR", cache, bdaddr);
2046
2047 list_for_each_entry(e, &cache->all, all) {
2048 if (!bacmp(&e->data.bdaddr, bdaddr))
2049 return e;
2050 }
2051
2052 return NULL;
2053 }
2054
2055 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
2056 bdaddr_t *bdaddr)
2057 {
2058 struct discovery_state *cache = &hdev->discovery;
2059 struct inquiry_entry *e;
2060
2061 BT_DBG("cache %p, %pMR", cache, bdaddr);
2062
2063 list_for_each_entry(e, &cache->unknown, list) {
2064 if (!bacmp(&e->data.bdaddr, bdaddr))
2065 return e;
2066 }
2067
2068 return NULL;
2069 }
2070
2071 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
2072 bdaddr_t *bdaddr,
2073 int state)
2074 {
2075 struct discovery_state *cache = &hdev->discovery;
2076 struct inquiry_entry *e;
2077
2078 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
2079
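	/* BDADDR_ANY acts as a wildcard here: return the first entry
	 * that is in the requested name_state instead of matching a
	 * specific address.
	 */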
2080 list_for_each_entry(e, &cache->resolve, list) {
2081 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2082 return e;
2083 if (!bacmp(&e->data.bdaddr, bdaddr))
2084 return e;
2085 }
2086
2087 return NULL;
2088 }
2089
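/* Re-insert the entry into the resolve list so that the list stays
 * ordered by ascending RSSI magnitude, i.e. devices with the
 * strongest signal get their names resolved first. Entries whose
 * name resolution is already in progress are not displaced.
 */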
2090 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
2091 struct inquiry_entry *ie)
2092 {
2093 struct discovery_state *cache = &hdev->discovery;
2094 struct list_head *pos = &cache->resolve;
2095 struct inquiry_entry *p;
2096
2097 list_del(&ie->list);
2098
2099 list_for_each_entry(p, &cache->resolve, list) {
2100 if (p->name_state != NAME_PENDING &&
2101 abs(p->data.rssi) >= abs(ie->data.rssi))
2102 break;
2103 pos = &p->list;
2104 }
2105
2106 list_add(&ie->list, pos);
2107 }
2108
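/* Add a freshly discovered device to the inquiry cache, or update the
 * entry that already exists for it. Returns the MGMT_DEV_FOUND_*
 * flags that should accompany the corresponding Device Found event.
 */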
2109 u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
2110 bool name_known)
2111 {
2112 struct discovery_state *cache = &hdev->discovery;
2113 struct inquiry_entry *ie;
2114 u32 flags = 0;
2115
2116 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
2117
2118 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2119
2120 if (!data->ssp_mode)
2121 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2122
2123 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
2124 if (ie) {
2125 if (!ie->data.ssp_mode)
2126 flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;
2127
2128 if (ie->name_state == NAME_NEEDED &&
2129 data->rssi != ie->data.rssi) {
2130 ie->data.rssi = data->rssi;
2131 hci_inquiry_cache_update_resolve(hdev, ie);
2132 }
2133
2134 goto update;
2135 }
2136
2137 /* Entry not in the cache. Add new one. */
2138 ie = kzalloc(sizeof(*ie), GFP_KERNEL);
2139 if (!ie) {
2140 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2141 goto done;
2142 }
2143
2144 list_add(&ie->all, &cache->all);
2145
2146 if (name_known) {
2147 ie->name_state = NAME_KNOWN;
2148 } else {
2149 ie->name_state = NAME_NOT_KNOWN;
2150 list_add(&ie->list, &cache->unknown);
2151 }
2152
2153 update:
2154 if (name_known && ie->name_state != NAME_KNOWN &&
2155 ie->name_state != NAME_PENDING) {
2156 ie->name_state = NAME_KNOWN;
2157 list_del(&ie->list);
2158 }
2159
2160 memcpy(&ie->data, data, sizeof(*data));
2161 ie->timestamp = jiffies;
2162 cache->timestamp = jiffies;
2163
2164 if (ie->name_state == NAME_NOT_KNOWN)
2165 flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
2166
2167 done:
2168 return flags;
2169 }
2170
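/* Copy up to num entries from the inquiry cache into buf as an array
 * of struct inquiry_info. Called under hdev->lock and must not sleep.
 */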
2171 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2172 {
2173 struct discovery_state *cache = &hdev->discovery;
2174 struct inquiry_info *info = (struct inquiry_info *) buf;
2175 struct inquiry_entry *e;
2176 int copied = 0;
2177
2178 list_for_each_entry(e, &cache->all, all) {
2179 struct inquiry_data *data = &e->data;
2180
2181 if (copied >= num)
2182 break;
2183
2184 bacpy(&info->bdaddr, &data->bdaddr);
2185 info->pscan_rep_mode = data->pscan_rep_mode;
2186 info->pscan_period_mode = data->pscan_period_mode;
2187 info->pscan_mode = data->pscan_mode;
2188 memcpy(info->dev_class, data->dev_class, 3);
2189 info->clock_offset = data->clock_offset;
2190
2191 info++;
2192 copied++;
2193 }
2194
2195 BT_DBG("cache %p, copied %d", cache, copied);
2196 return copied;
2197 }
2198
2199 static void hci_inq_req(struct hci_request *req, unsigned long opt)
2200 {
2201 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
2202 struct hci_dev *hdev = req->hdev;
2203 struct hci_cp_inquiry cp;
2204
2205 BT_DBG("%s", hdev->name);
2206
2207 if (test_bit(HCI_INQUIRY, &hdev->flags))
2208 return;
2209
2210 /* Start Inquiry */
2211 memcpy(&cp.lap, &ir->lap, 3);
2212 cp.length = ir->length;
2213 cp.num_rsp = ir->num_rsp;
2214 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2215 }
2216
2217 int hci_inquiry(void __user *arg)
2218 {
2219 __u8 __user *ptr = arg;
2220 struct hci_inquiry_req ir;
2221 struct hci_dev *hdev;
2222 int err = 0, do_inquiry = 0, max_rsp;
2223 long timeo;
2224 __u8 *buf;
2225
2226 if (copy_from_user(&ir, ptr, sizeof(ir)))
2227 return -EFAULT;
2228
2229 hdev = hci_dev_get(ir.dev_id);
2230 if (!hdev)
2231 return -ENODEV;
2232
2233 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2234 err = -EBUSY;
2235 goto done;
2236 }
2237
2238 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2239 err = -EOPNOTSUPP;
2240 goto done;
2241 }
2242
2243 if (hdev->dev_type != HCI_BREDR) {
2244 err = -EOPNOTSUPP;
2245 goto done;
2246 }
2247
2248 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2249 err = -EOPNOTSUPP;
2250 goto done;
2251 }
2252
2253 hci_dev_lock(hdev);
2254 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
2255 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
2256 hci_inquiry_cache_flush(hdev);
2257 do_inquiry = 1;
2258 }
2259 hci_dev_unlock(hdev);
2260
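	/* The inquiry length is specified in units of 1.28 seconds;
	 * allow roughly two seconds of wall time per unit for the
	 * whole procedure to complete.
	 */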
2261 timeo = ir.length * msecs_to_jiffies(2000);
2262
2263 if (do_inquiry) {
2264 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2265 timeo);
2266 if (err < 0)
2267 goto done;
2268
2269 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2270 * cleared). If it is interrupted by a signal, return -EINTR.
2271 */
2272 if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
2273 TASK_INTERRUPTIBLE))
2274 return -EINTR;
2275 }
2276
2277 	/* For an unlimited number of responses, use a buffer with
2278 	 * 255 entries.
2279 	 */
2280 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2281
2282 	/* cache_dump can't sleep. Therefore we allocate a temporary
2283 	 * buffer and then copy it to user space.
2284 	 */
2285 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
2286 if (!buf) {
2287 err = -ENOMEM;
2288 goto done;
2289 }
2290
2291 hci_dev_lock(hdev);
2292 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
2293 hci_dev_unlock(hdev);
2294
2295 BT_DBG("num_rsp %d", ir.num_rsp);
2296
2297 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2298 ptr += sizeof(ir);
2299 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
2300 ir.num_rsp))
2301 err = -EFAULT;
2302 	} else {
2303 		err = -EFAULT;
	}
2304
2305 kfree(buf);
2306
2307 done:
2308 hci_dev_put(hdev);
2309 return err;
2310 }
2311
2312 static int hci_dev_do_open(struct hci_dev *hdev)
2313 {
2314 int ret = 0;
2315
2316 BT_DBG("%s %p", hdev->name, hdev);
2317
2318 hci_req_lock(hdev);
2319
2320 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
2321 ret = -ENODEV;
2322 goto done;
2323 }
2324
2325 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2326 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2327 /* Check for rfkill but allow the HCI setup stage to
2328 * proceed (which in itself doesn't cause any RF activity).
2329 */
2330 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
2331 ret = -ERFKILL;
2332 goto done;
2333 }
2334
2335 /* Check for valid public address or a configured static
2336 		 * random address, but let the HCI setup proceed to
2337 * be able to determine if there is a public address
2338 * or not.
2339 *
2340 * In case of user channel usage, it is not important
2341 * if a public address or static random address is
2342 * available.
2343 *
2344 * This check is only valid for BR/EDR controllers
2345 * since AMP controllers do not have an address.
2346 */
2347 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2348 hdev->dev_type == HCI_BREDR &&
2349 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
2350 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
2351 ret = -EADDRNOTAVAIL;
2352 goto done;
2353 }
2354 }
2355
2356 if (test_bit(HCI_UP, &hdev->flags)) {
2357 ret = -EALREADY;
2358 goto done;
2359 }
2360
2361 if (hdev->open(hdev)) {
2362 ret = -EIO;
2363 goto done;
2364 }
2365
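	/* Start with a single command credit so that the first
	 * initialization command can be sent before any Command
	 * Complete event has been received.
	 */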
2366 atomic_set(&hdev->cmd_cnt, 1);
2367 set_bit(HCI_INIT, &hdev->flags);
2368
2369 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
2370 if (hdev->setup)
2371 ret = hdev->setup(hdev);
2372
2373 /* The transport driver can set these quirks before
2374 * creating the HCI device or in its setup callback.
2375 *
2376 * In case any of them is set, the controller has to
2377 * start up as unconfigured.
2378 */
2379 if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
2380 test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
2381 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
2382
2383 /* For an unconfigured controller it is required to
2384 * read at least the version information provided by
2385 * the Read Local Version Information command.
2386 *
2387 * If the set_bdaddr driver callback is provided, then
2388 * also the original Bluetooth public device address
2389 * will be read using the Read BD Address command.
2390 */
2391 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
2392 ret = __hci_unconf_init(hdev);
2393 }
2394
2395 if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
2396 /* If public address change is configured, ensure that
2397 * the address gets programmed. If the driver does not
2398 * support changing the public address, fail the power
2399 * on procedure.
2400 */
2401 if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
2402 hdev->set_bdaddr)
2403 ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
2404 else
2405 ret = -EADDRNOTAVAIL;
2406 }
2407
2408 if (!ret) {
2409 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2410 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2411 ret = __hci_init(hdev);
2412 }
2413
2414 clear_bit(HCI_INIT, &hdev->flags);
2415
2416 if (!ret) {
2417 hci_dev_hold(hdev);
2418 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
2419 set_bit(HCI_UP, &hdev->flags);
2420 hci_notify(hdev, HCI_DEV_UP);
2421 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2422 !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
2423 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2424 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2425 hdev->dev_type == HCI_BREDR) {
2426 hci_dev_lock(hdev);
2427 mgmt_powered(hdev, 1);
2428 hci_dev_unlock(hdev);
2429 }
2430 } else {
2431 /* Init failed, cleanup */
2432 flush_work(&hdev->tx_work);
2433 flush_work(&hdev->cmd_work);
2434 flush_work(&hdev->rx_work);
2435
2436 skb_queue_purge(&hdev->cmd_q);
2437 skb_queue_purge(&hdev->rx_q);
2438
2439 if (hdev->flush)
2440 hdev->flush(hdev);
2441
2442 if (hdev->sent_cmd) {
2443 kfree_skb(hdev->sent_cmd);
2444 hdev->sent_cmd = NULL;
2445 }
2446
2447 hdev->close(hdev);
2448 hdev->flags &= BIT(HCI_RAW);
2449 }
2450
2451 done:
2452 hci_req_unlock(hdev);
2453 return ret;
2454 }
2455
2456 /* ---- HCI ioctl helpers ---- */
2457
2458 int hci_dev_open(__u16 dev)
2459 {
2460 struct hci_dev *hdev;
2461 int err;
2462
2463 hdev = hci_dev_get(dev);
2464 if (!hdev)
2465 return -ENODEV;
2466
2467 /* Devices that are marked as unconfigured can only be powered
2468 * up as user channel. Trying to bring them up as normal devices
2469 	 * will result in a failure. Only user channel operation is
2470 * possible.
2471 *
2472 * When this function is called for a user channel, the flag
2473 * HCI_USER_CHANNEL will be set first before attempting to
2474 * open the device.
2475 */
2476 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2477 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2478 err = -EOPNOTSUPP;
2479 goto done;
2480 }
2481
2482 /* We need to ensure that no other power on/off work is pending
2483 * before proceeding to call hci_dev_do_open. This is
2484 * particularly important if the setup procedure has not yet
2485 * completed.
2486 */
2487 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2488 cancel_delayed_work(&hdev->power_off);
2489
2490 /* After this call it is guaranteed that the setup procedure
2491 * has finished. This means that error conditions like RFKILL
2492 * or no valid public or static random address apply.
2493 */
2494 flush_workqueue(hdev->req_workqueue);
2495
2496 /* For controllers not using the management interface and that
2497 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
2498 * so that pairing works for them. Once the management interface
2499 * is in use this bit will be cleared again and userspace has
2500 * to explicitly enable it.
2501 */
2502 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
2503 !test_bit(HCI_MGMT, &hdev->dev_flags))
2504 set_bit(HCI_BONDABLE, &hdev->dev_flags);
2505
2506 err = hci_dev_do_open(hdev);
2507
2508 done:
2509 hci_dev_put(hdev);
2510 return err;
2511 }
2512
2513 /* This function requires the caller holds hdev->lock */
2514 static void hci_pend_le_actions_clear(struct hci_dev *hdev)
2515 {
2516 struct hci_conn_params *p;
2517
2518 list_for_each_entry(p, &hdev->le_conn_params, list) {
2519 if (p->conn) {
2520 hci_conn_drop(p->conn);
2521 hci_conn_put(p->conn);
2522 p->conn = NULL;
2523 }
2524 list_del_init(&p->action);
2525 }
2526
2527 BT_DBG("All LE pending actions cleared");
2528 }
2529
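/* Power down the controller: cancel and flush all pending work, purge
 * the queues, optionally issue an HCI Reset (if the driver quirk
 * requests it) and finally let the driver close the transport.
 */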
2530 static int hci_dev_do_close(struct hci_dev *hdev)
2531 {
2532 BT_DBG("%s %p", hdev->name, hdev);
2533
2534 cancel_delayed_work(&hdev->power_off);
2535
2536 hci_req_cancel(hdev, ENODEV);
2537 hci_req_lock(hdev);
2538
2539 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
2540 cancel_delayed_work_sync(&hdev->cmd_timer);
2541 hci_req_unlock(hdev);
2542 return 0;
2543 }
2544
2545 /* Flush RX and TX works */
2546 flush_work(&hdev->tx_work);
2547 flush_work(&hdev->rx_work);
2548
2549 if (hdev->discov_timeout > 0) {
2550 cancel_delayed_work(&hdev->discov_off);
2551 hdev->discov_timeout = 0;
2552 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
2553 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2554 }
2555
2556 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
2557 cancel_delayed_work(&hdev->service_cache);
2558
2559 cancel_delayed_work_sync(&hdev->le_scan_disable);
2560
2561 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2562 cancel_delayed_work_sync(&hdev->rpa_expired);
2563
2564 hci_dev_lock(hdev);
2565 hci_inquiry_cache_flush(hdev);
2566 hci_pend_le_actions_clear(hdev);
2567 hci_conn_hash_flush(hdev);
2568 hci_dev_unlock(hdev);
2569
2570 hci_notify(hdev, HCI_DEV_DOWN);
2571
2572 if (hdev->flush)
2573 hdev->flush(hdev);
2574
2575 /* Reset device */
2576 skb_queue_purge(&hdev->cmd_q);
2577 atomic_set(&hdev->cmd_cnt, 1);
2578 if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
2579 !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
2580 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
2581 set_bit(HCI_INIT, &hdev->flags);
2582 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
2583 clear_bit(HCI_INIT, &hdev->flags);
2584 }
2585
2586 /* flush cmd work */
2587 flush_work(&hdev->cmd_work);
2588
2589 /* Drop queues */
2590 skb_queue_purge(&hdev->rx_q);
2591 skb_queue_purge(&hdev->cmd_q);
2592 skb_queue_purge(&hdev->raw_q);
2593
2594 /* Drop last sent command */
2595 if (hdev->sent_cmd) {
2596 cancel_delayed_work_sync(&hdev->cmd_timer);
2597 kfree_skb(hdev->sent_cmd);
2598 hdev->sent_cmd = NULL;
2599 }
2600
2601 kfree_skb(hdev->recv_evt);
2602 hdev->recv_evt = NULL;
2603
2604 /* After this point our queues are empty
2605 * and no tasks are scheduled. */
2606 hdev->close(hdev);
2607
2608 /* Clear flags */
2609 hdev->flags &= BIT(HCI_RAW);
2610 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2611
2612 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2613 if (hdev->dev_type == HCI_BREDR) {
2614 hci_dev_lock(hdev);
2615 mgmt_powered(hdev, 0);
2616 hci_dev_unlock(hdev);
2617 }
2618 }
2619
2620 /* Controller radio is available but is currently powered down */
2621 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
2622
2623 memset(hdev->eir, 0, sizeof(hdev->eir));
2624 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
2625 bacpy(&hdev->random_addr, BDADDR_ANY);
2626
2627 hci_req_unlock(hdev);
2628
2629 hci_dev_put(hdev);
2630 return 0;
2631 }
2632
2633 int hci_dev_close(__u16 dev)
2634 {
2635 struct hci_dev *hdev;
2636 int err;
2637
2638 hdev = hci_dev_get(dev);
2639 if (!hdev)
2640 return -ENODEV;
2641
2642 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2643 err = -EBUSY;
2644 goto done;
2645 }
2646
2647 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2648 cancel_delayed_work(&hdev->power_off);
2649
2650 err = hci_dev_do_close(hdev);
2651
2652 done:
2653 hci_dev_put(hdev);
2654 return err;
2655 }
2656
2657 int hci_dev_reset(__u16 dev)
2658 {
2659 struct hci_dev *hdev;
2660 int ret = 0;
2661
2662 hdev = hci_dev_get(dev);
2663 if (!hdev)
2664 return -ENODEV;
2665
2666 hci_req_lock(hdev);
2667
2668 if (!test_bit(HCI_UP, &hdev->flags)) {
2669 ret = -ENETDOWN;
2670 goto done;
2671 }
2672
2673 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2674 ret = -EBUSY;
2675 goto done;
2676 }
2677
2678 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2679 ret = -EOPNOTSUPP;
2680 goto done;
2681 }
2682
2683 /* Drop queues */
2684 skb_queue_purge(&hdev->rx_q);
2685 skb_queue_purge(&hdev->cmd_q);
2686
2687 hci_dev_lock(hdev);
2688 hci_inquiry_cache_flush(hdev);
2689 hci_conn_hash_flush(hdev);
2690 hci_dev_unlock(hdev);
2691
2692 if (hdev->flush)
2693 hdev->flush(hdev);
2694
2695 atomic_set(&hdev->cmd_cnt, 1);
2696 	hdev->acl_cnt = 0;
	hdev->sco_cnt = 0;
	hdev->le_cnt = 0;
2697
2698 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
2699
2700 done:
2701 hci_req_unlock(hdev);
2702 hci_dev_put(hdev);
2703 return ret;
2704 }
2705
2706 int hci_dev_reset_stat(__u16 dev)
2707 {
2708 struct hci_dev *hdev;
2709 int ret = 0;
2710
2711 hdev = hci_dev_get(dev);
2712 if (!hdev)
2713 return -ENODEV;
2714
2715 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2716 ret = -EBUSY;
2717 goto done;
2718 }
2719
2720 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2721 ret = -EOPNOTSUPP;
2722 goto done;
2723 }
2724
2725 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2726
2727 done:
2728 hci_dev_put(hdev);
2729 return ret;
2730 }
2731
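/* Mirror a scan-enable change made through the legacy HCISETSCAN
 * ioctl into the HCI_CONNECTABLE and HCI_DISCOVERABLE flags and, when
 * the management interface is in use, notify userspace about the new
 * settings.
 */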
2732 static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
2733 {
2734 bool conn_changed, discov_changed;
2735
2736 BT_DBG("%s scan 0x%02x", hdev->name, scan);
2737
2738 if ((scan & SCAN_PAGE))
2739 conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
2740 &hdev->dev_flags);
2741 else
2742 conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
2743 &hdev->dev_flags);
2744
2745 if ((scan & SCAN_INQUIRY)) {
2746 discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
2747 &hdev->dev_flags);
2748 } else {
2749 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
2750 discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
2751 &hdev->dev_flags);
2752 }
2753
2754 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2755 return;
2756
2757 if (conn_changed || discov_changed) {
2758 /* In case this was disabled through mgmt */
2759 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2760
2761 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
2762 mgmt_update_adv_data(hdev);
2763
2764 mgmt_new_settings(hdev);
2765 }
2766 }
2767
2768 int hci_dev_cmd(unsigned int cmd, void __user *arg)
2769 {
2770 struct hci_dev *hdev;
2771 struct hci_dev_req dr;
2772 int err = 0;
2773
2774 if (copy_from_user(&dr, arg, sizeof(dr)))
2775 return -EFAULT;
2776
2777 hdev = hci_dev_get(dr.dev_id);
2778 if (!hdev)
2779 return -ENODEV;
2780
2781 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2782 err = -EBUSY;
2783 goto done;
2784 }
2785
2786 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
2787 err = -EOPNOTSUPP;
2788 goto done;
2789 }
2790
2791 if (hdev->dev_type != HCI_BREDR) {
2792 err = -EOPNOTSUPP;
2793 goto done;
2794 }
2795
2796 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2797 err = -EOPNOTSUPP;
2798 goto done;
2799 }
2800
2801 switch (cmd) {
2802 case HCISETAUTH:
2803 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2804 HCI_INIT_TIMEOUT);
2805 break;
2806
2807 case HCISETENCRYPT:
2808 if (!lmp_encrypt_capable(hdev)) {
2809 err = -EOPNOTSUPP;
2810 break;
2811 }
2812
2813 if (!test_bit(HCI_AUTH, &hdev->flags)) {
2814 /* Auth must be enabled first */
2815 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
2816 HCI_INIT_TIMEOUT);
2817 if (err)
2818 break;
2819 }
2820
2821 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
2822 HCI_INIT_TIMEOUT);
2823 break;
2824
2825 case HCISETSCAN:
2826 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
2827 HCI_INIT_TIMEOUT);
2828
2829 /* Ensure that the connectable and discoverable states
2830 * get correctly modified as this was a non-mgmt change.
2831 */
2832 if (!err)
2833 hci_update_scan_state(hdev, dr.dev_opt);
2834 break;
2835
2836 case HCISETLINKPOL:
2837 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
2838 HCI_INIT_TIMEOUT);
2839 break;
2840
2841 case HCISETLINKMODE:
2842 hdev->link_mode = ((__u16) dr.dev_opt) &
2843 (HCI_LM_MASTER | HCI_LM_ACCEPT);
2844 break;
2845
2846 case HCISETPTYPE:
2847 hdev->pkt_type = (__u16) dr.dev_opt;
2848 break;
2849
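	/* For the MTU ioctls, dev_opt packs two 16-bit values in host
	 * byte order: the packet count in the first half-word and the
	 * MTU in the second.
	 */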
2850 case HCISETACLMTU:
2851 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
2852 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
2853 break;
2854
2855 case HCISETSCOMTU:
2856 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
2857 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
2858 break;
2859
2860 default:
2861 err = -EINVAL;
2862 break;
2863 }
2864
2865 done:
2866 hci_dev_put(hdev);
2867 return err;
2868 }
2869
2870 int hci_get_dev_list(void __user *arg)
2871 {
2872 struct hci_dev *hdev;
2873 struct hci_dev_list_req *dl;
2874 struct hci_dev_req *dr;
2875 int n = 0, size, err;
2876 __u16 dev_num;
2877
2878 if (get_user(dev_num, (__u16 __user *) arg))
2879 return -EFAULT;
2880
2881 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
2882 return -EINVAL;
2883
2884 size = sizeof(*dl) + dev_num * sizeof(*dr);
2885
2886 dl = kzalloc(size, GFP_KERNEL);
2887 if (!dl)
2888 return -ENOMEM;
2889
2890 dr = dl->dev_req;
2891
2892 read_lock(&hci_dev_list_lock);
2893 list_for_each_entry(hdev, &hci_dev_list, list) {
2894 unsigned long flags = hdev->flags;
2895
2896 /* When the auto-off is configured it means the transport
2897 * is running, but in that case still indicate that the
2898 * device is actually down.
2899 */
2900 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2901 flags &= ~BIT(HCI_UP);
2902
2903 (dr + n)->dev_id = hdev->id;
2904 (dr + n)->dev_opt = flags;
2905
2906 if (++n >= dev_num)
2907 break;
2908 }
2909 read_unlock(&hci_dev_list_lock);
2910
2911 dl->dev_num = n;
2912 size = sizeof(*dl) + n * sizeof(*dr);
2913
2914 err = copy_to_user(arg, dl, size);
2915 kfree(dl);
2916
2917 return err ? -EFAULT : 0;
2918 }
2919
2920 int hci_get_dev_info(void __user *arg)
2921 {
2922 struct hci_dev *hdev;
2923 struct hci_dev_info di;
2924 unsigned long flags;
2925 int err = 0;
2926
2927 if (copy_from_user(&di, arg, sizeof(di)))
2928 return -EFAULT;
2929
2930 hdev = hci_dev_get(di.dev_id);
2931 if (!hdev)
2932 return -ENODEV;
2933
2934 /* When the auto-off is configured it means the transport
2935 * is running, but in that case still indicate that the
2936 * device is actually down.
2937 */
2938 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2939 flags = hdev->flags & ~BIT(HCI_UP);
2940 else
2941 flags = hdev->flags;
2942
2943 strcpy(di.name, hdev->name);
2944 di.bdaddr = hdev->bdaddr;
2945 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
2946 di.flags = flags;
2947 di.pkt_type = hdev->pkt_type;
2948 if (lmp_bredr_capable(hdev)) {
2949 di.acl_mtu = hdev->acl_mtu;
2950 di.acl_pkts = hdev->acl_pkts;
2951 di.sco_mtu = hdev->sco_mtu;
2952 di.sco_pkts = hdev->sco_pkts;
2953 } else {
2954 di.acl_mtu = hdev->le_mtu;
2955 di.acl_pkts = hdev->le_pkts;
2956 di.sco_mtu = 0;
2957 di.sco_pkts = 0;
2958 }
2959 di.link_policy = hdev->link_policy;
2960 di.link_mode = hdev->link_mode;
2961
2962 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
2963 memcpy(&di.features, &hdev->features, sizeof(di.features));
2964
2965 if (copy_to_user(arg, &di, sizeof(di)))
2966 err = -EFAULT;
2967
2968 hci_dev_put(hdev);
2969
2970 return err;
2971 }
2972
2973 /* ---- Interface to HCI drivers ---- */
2974
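/* rfkill callback: while the device is still in the setup or config
 * stage only the HCI_RFKILLED flag is set; hci_power_on() will notice
 * it afterwards and power the device back down.
 */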
2975 static int hci_rfkill_set_block(void *data, bool blocked)
2976 {
2977 struct hci_dev *hdev = data;
2978
2979 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2980
2981 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2982 return -EBUSY;
2983
2984 if (blocked) {
2985 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2986 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
2987 !test_bit(HCI_CONFIG, &hdev->dev_flags))
2988 hci_dev_do_close(hdev);
2989 } else {
2990 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
2991 }
2992
2993 return 0;
2994 }
2995
2996 static const struct rfkill_ops hci_rfkill_ops = {
2997 .set_block = hci_rfkill_set_block,
2998 };
2999
3000 static void hci_power_on(struct work_struct *work)
3001 {
3002 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
3003 int err;
3004
3005 BT_DBG("%s", hdev->name);
3006
3007 err = hci_dev_do_open(hdev);
3008 if (err < 0) {
3009 mgmt_set_powered_failed(hdev, err);
3010 return;
3011 }
3012
3013 /* During the HCI setup phase, a few error conditions are
3014 * ignored and they need to be checked now. If they are still
3015 * valid, it is important to turn the device back off.
3016 */
3017 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
3018 test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
3019 (hdev->dev_type == HCI_BREDR &&
3020 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
3021 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
3022 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
3023 hci_dev_do_close(hdev);
3024 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
3025 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
3026 HCI_AUTO_OFF_TIMEOUT);
3027 }
3028
3029 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
3030 /* For unconfigured devices, set the HCI_RAW flag
3031 * so that userspace can easily identify them.
3032 */
3033 if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3034 set_bit(HCI_RAW, &hdev->flags);
3035
3036 /* For fully configured devices, this will send
3037 * the Index Added event. For unconfigured devices,
3038 		 * it will send the Unconfigured Index Added event.
3039 		 *
3040 		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
3041 		 * and no event will be sent.
3042 */
3043 mgmt_index_added(hdev);
3044 } else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
3045 /* When the controller is now configured, then it
3046 * is important to clear the HCI_RAW flag.
3047 */
3048 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
3049 clear_bit(HCI_RAW, &hdev->flags);
3050
3051 /* Powering on the controller with HCI_CONFIG set only
3052 * happens with the transition from unconfigured to
3053 * configured. This will send the Index Added event.
3054 */
3055 mgmt_index_added(hdev);
3056 }
3057 }
3058
3059 static void hci_power_off(struct work_struct *work)
3060 {
3061 struct hci_dev *hdev = container_of(work, struct hci_dev,
3062 power_off.work);
3063
3064 BT_DBG("%s", hdev->name);
3065
3066 hci_dev_do_close(hdev);
3067 }
3068
3069 static void hci_discov_off(struct work_struct *work)
3070 {
3071 struct hci_dev *hdev;
3072
3073 hdev = container_of(work, struct hci_dev, discov_off.work);
3074
3075 BT_DBG("%s", hdev->name);
3076
3077 mgmt_discoverable_timeout(hdev);
3078 }
3079
3080 void hci_uuids_clear(struct hci_dev *hdev)
3081 {
3082 struct bt_uuid *uuid, *tmp;
3083
3084 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
3085 list_del(&uuid->list);
3086 kfree(uuid);
3087 }
3088 }
3089
3090 void hci_link_keys_clear(struct hci_dev *hdev)
3091 {
3092 struct list_head *p, *n;
3093
3094 list_for_each_safe(p, n, &hdev->link_keys) {
3095 struct link_key *key;
3096
3097 key = list_entry(p, struct link_key, list);
3098
3099 list_del(p);
3100 kfree(key);
3101 }
3102 }
3103
3104 void hci_smp_ltks_clear(struct hci_dev *hdev)
3105 {
3106 struct smp_ltk *k;
3107
3108 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3109 list_del_rcu(&k->list);
3110 kfree_rcu(k, rcu);
3111 }
3112 }
3113
3114 void hci_smp_irks_clear(struct hci_dev *hdev)
3115 {
3116 struct smp_irk *k;
3117
3118 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3119 list_del_rcu(&k->list);
3120 kfree_rcu(k, rcu);
3121 }
3122 }
3123
3124 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3125 {
3126 struct link_key *k;
3127
3128 list_for_each_entry(k, &hdev->link_keys, list)
3129 if (bacmp(bdaddr, &k->bdaddr) == 0)
3130 return k;
3131
3132 return NULL;
3133 }
3134
3135 static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
3136 u8 key_type, u8 old_key_type)
3137 {
3138 /* Legacy key */
3139 if (key_type < 0x03)
3140 return true;
3141
3142 /* Debug keys are insecure so don't store them persistently */
3143 if (key_type == HCI_LK_DEBUG_COMBINATION)
3144 return false;
3145
3146 /* Changed combination key and there's no previous one */
3147 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
3148 return false;
3149
3150 /* Security mode 3 case */
3151 if (!conn)
3152 return true;
3153
3154 	/* Neither the local nor the remote side set no-bonding as a requirement */
3155 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
3156 return true;
3157
3158 /* Local side had dedicated bonding as requirement */
3159 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
3160 return true;
3161
3162 /* Remote side had dedicated bonding as requirement */
3163 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
3164 return true;
3165
3166 /* If none of the above criteria match, then don't store the key
3167 * persistently */
3168 return false;
3169 }
3170
3171 static u8 ltk_role(u8 type)
3172 {
3173 if (type == SMP_LTK)
3174 return HCI_ROLE_MASTER;
3175
3176 return HCI_ROLE_SLAVE;
3177 }
3178
3179 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
3180 u8 role)
3181 {
3182 struct smp_ltk *k;
3183
3184 rcu_read_lock();
3185 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3186 if (k->ediv != ediv || k->rand != rand)
3187 continue;
3188
3189 if (ltk_role(k->type) != role)
3190 continue;
3191
3192 rcu_read_unlock();
3193 return k;
3194 }
3195 rcu_read_unlock();
3196
3197 return NULL;
3198 }
3199
3200 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3201 u8 addr_type, u8 role)
3202 {
3203 struct smp_ltk *k;
3204
3205 rcu_read_lock();
3206 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3207 if (addr_type == k->bdaddr_type &&
3208 bacmp(bdaddr, &k->bdaddr) == 0 &&
3209 ltk_role(k->type) == role) {
3210 rcu_read_unlock();
3211 return k;
3212 }
3213 }
3214 rcu_read_unlock();
3215
3216 return NULL;
3217 }
3218
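/* Look up the IRK that resolves the given resolvable private address.
 * A match against the cached RPA is tried first; failing that, every
 * IRK is tested cryptographically and the RPA is cached on the
 * matching entry to speed up future lookups.
 */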
3219 struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
3220 {
3221 struct smp_irk *irk;
3222
3223 rcu_read_lock();
3224 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3225 if (!bacmp(&irk->rpa, rpa)) {
3226 rcu_read_unlock();
3227 return irk;
3228 }
3229 }
3230
3231 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3232 if (smp_irk_matches(hdev, irk->val, rpa)) {
3233 bacpy(&irk->rpa, rpa);
3234 rcu_read_unlock();
3235 return irk;
3236 }
3237 }
3238 rcu_read_unlock();
3239
3240 return NULL;
3241 }
3242
3243 struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3244 u8 addr_type)
3245 {
3246 struct smp_irk *irk;
3247
3248 /* Identity Address must be public or static random */
3249 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3250 return NULL;
3251
3252 rcu_read_lock();
3253 list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
3254 if (addr_type == irk->addr_type &&
3255 bacmp(bdaddr, &irk->bdaddr) == 0) {
3256 rcu_read_unlock();
3257 return irk;
3258 }
3259 }
3260 rcu_read_unlock();
3261
3262 return NULL;
3263 }
3264
3265 struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
3266 bdaddr_t *bdaddr, u8 *val, u8 type,
3267 u8 pin_len, bool *persistent)
3268 {
3269 struct link_key *key, *old_key;
3270 u8 old_key_type;
3271
3272 old_key = hci_find_link_key(hdev, bdaddr);
3273 if (old_key) {
3274 old_key_type = old_key->type;
3275 key = old_key;
3276 } else {
3277 old_key_type = conn ? conn->key_type : 0xff;
3278 key = kzalloc(sizeof(*key), GFP_KERNEL);
3279 if (!key)
3280 return NULL;
3281 list_add(&key->list, &hdev->link_keys);
3282 }
3283
3284 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
3285
3286 /* Some buggy controller combinations generate a changed
3287 * combination key for legacy pairing even when there's no
3288 * previous key */
3289 if (type == HCI_LK_CHANGED_COMBINATION &&
3290 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
3291 type = HCI_LK_COMBINATION;
3292 if (conn)
3293 conn->key_type = type;
3294 }
3295
3296 bacpy(&key->bdaddr, bdaddr);
3297 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
3298 key->pin_len = pin_len;
3299
3300 if (type == HCI_LK_CHANGED_COMBINATION)
3301 key->type = old_key_type;
3302 else
3303 key->type = type;
3304
3305 if (persistent)
3306 *persistent = hci_persistent_key(hdev, conn, type,
3307 old_key_type);
3308
3309 return key;
3310 }
3311
3312 struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3313 u8 addr_type, u8 type, u8 authenticated,
3314 u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
3315 {
3316 struct smp_ltk *key, *old_key;
3317 u8 role = ltk_role(type);
3318
3319 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
3320 	if (old_key) {
3321 		key = old_key;
3322 	} else {
3323 key = kzalloc(sizeof(*key), GFP_KERNEL);
3324 if (!key)
3325 return NULL;
3326 list_add_rcu(&key->list, &hdev->long_term_keys);
3327 }
3328
3329 bacpy(&key->bdaddr, bdaddr);
3330 key->bdaddr_type = addr_type;
3331 memcpy(key->val, tk, sizeof(key->val));
3332 key->authenticated = authenticated;
3333 key->ediv = ediv;
3334 key->rand = rand;
3335 key->enc_size = enc_size;
3336 key->type = type;
3337
3338 return key;
3339 }
3340
3341 struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3342 u8 addr_type, u8 val[16], bdaddr_t *rpa)
3343 {
3344 struct smp_irk *irk;
3345
3346 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3347 if (!irk) {
3348 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3349 if (!irk)
3350 return NULL;
3351
3352 bacpy(&irk->bdaddr, bdaddr);
3353 irk->addr_type = addr_type;
3354
3355 list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
3356 }
3357
3358 memcpy(irk->val, val, 16);
3359 bacpy(&irk->rpa, rpa);
3360
3361 return irk;
3362 }
3363
3364 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3365 {
3366 struct link_key *key;
3367
3368 key = hci_find_link_key(hdev, bdaddr);
3369 if (!key)
3370 return -ENOENT;
3371
3372 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3373
3374 list_del(&key->list);
3375 kfree(key);
3376
3377 return 0;
3378 }
3379
3380 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
3381 {
3382 struct smp_ltk *k;
3383 int removed = 0;
3384
3385 list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
3386 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
3387 continue;
3388
3389 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3390
3391 list_del_rcu(&k->list);
3392 kfree_rcu(k, rcu);
3393 removed++;
3394 }
3395
3396 return removed ? 0 : -ENOENT;
3397 }
3398
3399 void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3400 {
3401 struct smp_irk *k;
3402
3403 list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
3404 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3405 continue;
3406
3407 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3408
3409 list_del_rcu(&k->list);
3410 kfree_rcu(k, rcu);
3411 }
3412 }
3413
3414 /* HCI command timer function
 *
 * Called when the controller fails to respond to the last sent
 * command in time. Restore the command credit so that queued
 * commands can be transmitted again.
 */
3415 static void hci_cmd_timeout(struct work_struct *work)
3416 {
3417 struct hci_dev *hdev = container_of(work, struct hci_dev,
3418 cmd_timer.work);
3419
3420 if (hdev->sent_cmd) {
3421 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
3422 u16 opcode = __le16_to_cpu(sent->opcode);
3423
3424 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
3425 } else {
3426 BT_ERR("%s command tx timeout", hdev->name);
3427 }
3428
3429 atomic_set(&hdev->cmd_cnt, 1);
3430 queue_work(hdev->workqueue, &hdev->cmd_work);
3431 }
3432
3433 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
3434 bdaddr_t *bdaddr)
3435 {
3436 struct oob_data *data;
3437
3438 list_for_each_entry(data, &hdev->remote_oob_data, list)
3439 if (bacmp(bdaddr, &data->bdaddr) == 0)
3440 return data;
3441
3442 return NULL;
3443 }
3444
3445 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3446 {
3447 struct oob_data *data;
3448
3449 data = hci_find_remote_oob_data(hdev, bdaddr);
3450 if (!data)
3451 return -ENOENT;
3452
3453 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3454
3455 list_del(&data->list);
3456 kfree(data);
3457
3458 return 0;
3459 }
3460
3461 void hci_remote_oob_data_clear(struct hci_dev *hdev)
3462 {
3463 struct oob_data *data, *n;
3464
3465 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3466 list_del(&data->list);
3467 kfree(data);
3468 }
3469 }
3470
3471 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3472 u8 *hash, u8 *rand)
3473 {
3474 struct oob_data *data;
3475
3476 data = hci_find_remote_oob_data(hdev, bdaddr);
3477 if (!data) {
3478 data = kmalloc(sizeof(*data), GFP_KERNEL);
3479 if (!data)
3480 return -ENOMEM;
3481
3482 bacpy(&data->bdaddr, bdaddr);
3483 list_add(&data->list, &hdev->remote_oob_data);
3484 }
3485
3486 memcpy(data->hash192, hash, sizeof(data->hash192));
3487 memcpy(data->rand192, rand, sizeof(data->rand192));
3488
3489 memset(data->hash256, 0, sizeof(data->hash256));
3490 memset(data->rand256, 0, sizeof(data->rand256));
3491
3492 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3493
3494 return 0;
3495 }
3496
3497 int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
3498 u8 *hash192, u8 *rand192,
3499 u8 *hash256, u8 *rand256)
3500 {
3501 struct oob_data *data;
3502
3503 data = hci_find_remote_oob_data(hdev, bdaddr);
3504 if (!data) {
3505 data = kmalloc(sizeof(*data), GFP_KERNEL);
3506 if (!data)
3507 return -ENOMEM;
3508
3509 bacpy(&data->bdaddr, bdaddr);
3510 list_add(&data->list, &hdev->remote_oob_data);
3511 }
3512
3513 memcpy(data->hash192, hash192, sizeof(data->hash192));
3514 memcpy(data->rand192, rand192, sizeof(data->rand192));
3515
3516 memcpy(data->hash256, hash256, sizeof(data->hash256));
3517 memcpy(data->rand256, rand256, sizeof(data->rand256));
3518
3519 BT_DBG("%s for %pMR", hdev->name, bdaddr);
3520
3521 return 0;
3522 }
3523
3524 struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
3525 bdaddr_t *bdaddr, u8 type)
3526 {
3527 struct bdaddr_list *b;
3528
3529 list_for_each_entry(b, bdaddr_list, list) {
3530 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3531 return b;
3532 }
3533
3534 return NULL;
3535 }
3536
3537 void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
3538 {
3539 struct list_head *p, *n;
3540
3541 list_for_each_safe(p, n, bdaddr_list) {
3542 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3543
3544 list_del(p);
3545 kfree(b);
3546 }
3547 }
3548
3549 int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3550 {
3551 struct bdaddr_list *entry;
3552
3553 if (!bacmp(bdaddr, BDADDR_ANY))
3554 return -EBADF;
3555
3556 if (hci_bdaddr_list_lookup(list, bdaddr, type))
3557 return -EEXIST;
3558
3559 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
3560 if (!entry)
3561 return -ENOMEM;
3562
3563 bacpy(&entry->bdaddr, bdaddr);
3564 entry->bdaddr_type = type;
3565
3566 list_add(&entry->list, list);
3567
3568 return 0;
3569 }
3570
3571 int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
3572 {
3573 struct bdaddr_list *entry;
3574
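	/* BDADDR_ANY is treated as a request to clear the whole list */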
3575 if (!bacmp(bdaddr, BDADDR_ANY)) {
3576 hci_bdaddr_list_clear(list);
3577 return 0;
3578 }
3579
3580 entry = hci_bdaddr_list_lookup(list, bdaddr, type);
3581 if (!entry)
3582 return -ENOENT;
3583
3584 list_del(&entry->list);
3585 kfree(entry);
3586
3587 return 0;
3588 }
3589
3590 /* This function requires the caller holds hdev->lock */
3591 struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3592 bdaddr_t *addr, u8 addr_type)
3593 {
3594 struct hci_conn_params *params;
3595
3596 /* The conn params list only contains identity addresses */
3597 if (!hci_is_identity_address(addr, addr_type))
3598 return NULL;
3599
3600 list_for_each_entry(params, &hdev->le_conn_params, list) {
3601 if (bacmp(&params->addr, addr) == 0 &&
3602 params->addr_type == addr_type) {
3603 return params;
3604 }
3605 }
3606
3607 return NULL;
3608 }
3609
3610 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3611 {
3612 struct hci_conn *conn;
3613
3614 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3615 if (!conn)
3616 return false;
3617
3618 if (conn->dst_type != type)
3619 return false;
3620
3621 if (conn->state != BT_CONNECTED)
3622 return false;
3623
3624 return true;
3625 }
3626
3627 /* This function requires the caller holds hdev->lock */
3628 struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
3629 bdaddr_t *addr, u8 addr_type)
3630 {
3631 struct hci_conn_params *param;
3632
3633 /* The list only contains identity addresses */
3634 if (!hci_is_identity_address(addr, addr_type))
3635 return NULL;
3636
3637 list_for_each_entry(param, list, action) {
3638 if (bacmp(&param->addr, addr) == 0 &&
3639 param->addr_type == addr_type)
3640 return param;
3641 }
3642
3643 return NULL;
3644 }
3645
3646 /* This function requires the caller holds hdev->lock */
3647 struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3648 bdaddr_t *addr, u8 addr_type)
3649 {
3650 struct hci_conn_params *params;
3651
3652 if (!hci_is_identity_address(addr, addr_type))
3653 return NULL;
3654
3655 params = hci_conn_params_lookup(hdev, addr, addr_type);
3656 if (params)
3657 return params;
3658
3659 params = kzalloc(sizeof(*params), GFP_KERNEL);
3660 if (!params) {
3661 BT_ERR("Out of memory");
3662 return NULL;
3663 }
3664
3665 bacpy(&params->addr, addr);
3666 params->addr_type = addr_type;
3667
3668 list_add(&params->list, &hdev->le_conn_params);
3669 INIT_LIST_HEAD(&params->action);
3670
3671 params->conn_min_interval = hdev->le_conn_min_interval;
3672 params->conn_max_interval = hdev->le_conn_max_interval;
3673 params->conn_latency = hdev->le_conn_latency;
3674 params->supervision_timeout = hdev->le_supv_timeout;
3675 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3676
3677 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3678
3679 return params;
3680 }
3681
3682 /* This function requires the caller holds hdev->lock */
3683 int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
3684 u8 auto_connect)
3685 {
3686 struct hci_conn_params *params;
3687
3688 params = hci_conn_params_add(hdev, addr, addr_type);
3689 if (!params)
3690 return -EIO;
3691
3692 if (params->auto_connect == auto_connect)
3693 return 0;
3694
3695 list_del_init(&params->action);
3696
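	/* Membership of params->action in pend_le_conns or
	 * pend_le_reports determines whether passive scanning will
	 * auto-connect to the device or merely report it.
	 */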
3697 switch (auto_connect) {
3698 case HCI_AUTO_CONN_DISABLED:
3699 case HCI_AUTO_CONN_LINK_LOSS:
3700 hci_update_background_scan(hdev);
3701 break;
3702 case HCI_AUTO_CONN_REPORT:
3703 list_add(&params->action, &hdev->pend_le_reports);
3704 hci_update_background_scan(hdev);
3705 break;
3706 case HCI_AUTO_CONN_DIRECT:
3707 case HCI_AUTO_CONN_ALWAYS:
3708 if (!is_connected(hdev, addr, addr_type)) {
3709 list_add(&params->action, &hdev->pend_le_conns);
3710 hci_update_background_scan(hdev);
3711 }
3712 break;
3713 }
3714
3715 params->auto_connect = auto_connect;
3716
3717 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3718 auto_connect);
3719
3720 return 0;
3721 }
3722
3723 static void hci_conn_params_free(struct hci_conn_params *params)
3724 {
3725 if (params->conn) {
3726 hci_conn_drop(params->conn);
3727 hci_conn_put(params->conn);
3728 }
3729
3730 list_del(&params->action);
3731 list_del(&params->list);
3732 kfree(params);
3733 }
3734
3735 /* This function requires the caller holds hdev->lock */
3736 void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3737 {
3738 struct hci_conn_params *params;
3739
3740 params = hci_conn_params_lookup(hdev, addr, addr_type);
3741 if (!params)
3742 return;
3743
3744 hci_conn_params_free(params);
3745
3746 hci_update_background_scan(hdev);
3747
3748 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3749 }
3750
3751 /* This function requires the caller holds hdev->lock */
3752 void hci_conn_params_clear_disabled(struct hci_dev *hdev)
3753 {
3754 struct hci_conn_params *params, *tmp;
3755
3756 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3757 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3758 continue;
3759 list_del(&params->list);
3760 kfree(params);
3761 }
3762
3763 BT_DBG("All LE disabled connection parameters were removed");
3764 }
3765
3766 /* This function requires the caller holds hdev->lock */
3767 void hci_conn_params_clear_all(struct hci_dev *hdev)
3768 {
3769 struct hci_conn_params *params, *tmp;
3770
3771 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3772 hci_conn_params_free(params);
3773
3774 hci_update_background_scan(hdev);
3775
3776 BT_DBG("All LE connection parameters were removed");
3777 }
3778
3779 static void inquiry_complete(struct hci_dev *hdev, u8 status)
3780 {
3781 if (status) {
3782 BT_ERR("Failed to start inquiry: status %d", status);
3783
3784 hci_dev_lock(hdev);
3785 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3786 hci_dev_unlock(hdev);
3787 return;
3788 }
3789 }
3790
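/* LE scanning has been disabled. For LE-only discovery this ends the
 * procedure; for interleaved discovery it is time to start the BR/EDR
 * inquiry phase.
 */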
3791 static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
3792 {
3793 /* General inquiry access code (GIAC) */
3794 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3795 struct hci_request req;
3796 struct hci_cp_inquiry cp;
3797 int err;
3798
3799 if (status) {
3800 BT_ERR("Failed to disable LE scanning: status %d", status);
3801 return;
3802 }
3803
3804 switch (hdev->discovery.type) {
3805 case DISCOV_TYPE_LE:
3806 hci_dev_lock(hdev);
3807 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3808 hci_dev_unlock(hdev);
3809 break;
3810
3811 case DISCOV_TYPE_INTERLEAVED:
3812 hci_req_init(&req, hdev);
3813
3814 memset(&cp, 0, sizeof(cp));
3815 memcpy(&cp.lap, lap, sizeof(cp.lap));
3816 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3817 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
3818
3819 hci_dev_lock(hdev);
3820
3821 hci_inquiry_cache_flush(hdev);
3822
3823 err = hci_req_run(&req, inquiry_complete);
3824 if (err) {
3825 BT_ERR("Inquiry request failed: err %d", err);
3826 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3827 }
3828
3829 hci_dev_unlock(hdev);
3830 break;
3831 }
3832 }
3833
3834 static void le_scan_disable_work(struct work_struct *work)
3835 {
3836 struct hci_dev *hdev = container_of(work, struct hci_dev,
3837 le_scan_disable.work);
3838 struct hci_request req;
3839 int err;
3840
3841 BT_DBG("%s", hdev->name);
3842
3843 hci_req_init(&req, hdev);
3844
3845 hci_req_add_le_scan_disable(&req);
3846
3847 err = hci_req_run(&req, le_scan_disable_work_complete);
3848 if (err)
3849 BT_ERR("Disable LE scanning request failed: err %d", err);
3850 }
3851
3852 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3853 {
3854 struct hci_dev *hdev = req->hdev;
3855
3856 /* If we're advertising or initiating an LE connection we can't
3857 * go ahead and change the random address at this time. This is
3858 * because the eventual initiator address used for the
3859 * subsequently created connection will be undefined (some
3860 * controllers use the new address and others the one we had
3861 * when the operation started).
3862 *
3863 * In this kind of scenario skip the update and let the random
3864 * address be updated at the next cycle.
3865 */
3866 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
3867 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3868 BT_DBG("Deferring random address update");
3869 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
3870 return;
3871 }
3872
3873 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3874 }
3875
3876 int hci_update_random_address(struct hci_request *req, bool require_privacy,
3877 u8 *own_addr_type)
3878 {
3879 struct hci_dev *hdev = req->hdev;
3880 int err;
3881
3882 	/* If privacy is enabled use a resolvable private address. If
3883 	 * the current RPA has expired or something other than the
3884 	 * current RPA is in use, then generate a new one.
3885 */
3886 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
3887 int to;
3888
3889 *own_addr_type = ADDR_LE_DEV_RANDOM;
3890
3891 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
3892 !bacmp(&hdev->random_addr, &hdev->rpa))
3893 return 0;
3894
3895 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
3896 if (err < 0) {
3897 BT_ERR("%s failed to generate new RPA", hdev->name);
3898 return err;
3899 }
3900
3901 set_random_addr(req, &hdev->rpa);
3902
3903 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3904 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3905
3906 return 0;
3907 }
3908
3909 /* In case of required privacy without resolvable private address,
3910 * use an unresolvable private address. This is useful for active
3911 * scanning and non-connectable advertising.
3912 */
3913 if (require_privacy) {
3914 bdaddr_t urpa;
3915
3916 get_random_bytes(&urpa, 6);
3917 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3918
3919 *own_addr_type = ADDR_LE_DEV_RANDOM;
3920 set_random_addr(req, &urpa);
3921 return 0;
3922 }
3923
3924 	/* If forcing static address is in use or there is no public
3925 	 * address, use the static address as the random address (but
3926 	 * skip the HCI command if the current random address is
3927 	 * already the static one).
3928 */
3929 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3930 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3931 *own_addr_type = ADDR_LE_DEV_RANDOM;
3932 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3933 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3934 &hdev->static_addr);
3935 return 0;
3936 }
3937
3938 /* Neither privacy nor static address is being used so use a
3939 * public address.
3940 */
3941 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3942
3943 return 0;
3944 }
3945
3946 /* Copy the Identity Address of the controller.
3947 *
3948 * If the controller has a public BD_ADDR, then by default use that one.
3949 * If this is a LE only controller without a public address, default to
3950 * the static random address.
3951 *
3952 * For debugging purposes it is possible to force controllers with a
3953 * public address to use the static random address instead.
3954 */
3955 void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3956 u8 *bdaddr_type)
3957 {
3958 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
3959 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3960 bacpy(bdaddr, &hdev->static_addr);
3961 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3962 } else {
3963 bacpy(bdaddr, &hdev->bdaddr);
3964 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3965 }
3966 }
3967
3968 /* Alloc HCI device */
3969 struct hci_dev *hci_alloc_dev(void)
3970 {
3971 struct hci_dev *hdev;
3972
3973 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
3974 if (!hdev)
3975 return NULL;
3976
3977 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3978 hdev->esco_type = (ESCO_HV1);
3979 hdev->link_mode = (HCI_LM_ACCEPT);
3980 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3981 hdev->io_capability = 0x03; /* No Input No Output */
3982 hdev->manufacturer = 0xffff; /* Default to internal use */
3983 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3984 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
3985
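	/* The timing defaults below are in controller units: 0.625 ms
	 * baseband slots for sniff, advertising and scan intervals,
	 * 1.25 ms units for connection intervals and 10 ms units for
	 * the supervision timeout.
	 */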
3986 hdev->sniff_max_interval = 800;
3987 hdev->sniff_min_interval = 80;
3988
3989 hdev->le_adv_channel_map = 0x07;
3990 hdev->le_adv_min_interval = 0x0800;
3991 hdev->le_adv_max_interval = 0x0800;
3992 hdev->le_scan_interval = 0x0060;
3993 hdev->le_scan_window = 0x0030;
3994 hdev->le_conn_min_interval = 0x0028;
3995 hdev->le_conn_max_interval = 0x0038;
3996 hdev->le_conn_latency = 0x0000;
3997 hdev->le_supv_timeout = 0x002a;
3998
3999 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
4000 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
4001 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4002 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
4003
4004 mutex_init(&hdev->lock);
4005 mutex_init(&hdev->req_lock);
4006
4007 INIT_LIST_HEAD(&hdev->mgmt_pending);
4008 INIT_LIST_HEAD(&hdev->blacklist);
4009 INIT_LIST_HEAD(&hdev->whitelist);
4010 INIT_LIST_HEAD(&hdev->uuids);
4011 INIT_LIST_HEAD(&hdev->link_keys);
4012 INIT_LIST_HEAD(&hdev->long_term_keys);
4013 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
4014 INIT_LIST_HEAD(&hdev->remote_oob_data);
4015 INIT_LIST_HEAD(&hdev->le_white_list);
4016 INIT_LIST_HEAD(&hdev->le_conn_params);
4017 INIT_LIST_HEAD(&hdev->pend_le_conns);
4018 INIT_LIST_HEAD(&hdev->pend_le_reports);
4019 INIT_LIST_HEAD(&hdev->conn_hash.list);
4020
4021 INIT_WORK(&hdev->rx_work, hci_rx_work);
4022 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4023 INIT_WORK(&hdev->tx_work, hci_tx_work);
4024 INIT_WORK(&hdev->power_on, hci_power_on);
4025
4026 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4027 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4028 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4029
4030 skb_queue_head_init(&hdev->rx_q);
4031 skb_queue_head_init(&hdev->cmd_q);
4032 skb_queue_head_init(&hdev->raw_q);
4033
4034 init_waitqueue_head(&hdev->req_wait_q);
4035
4036 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
4037
4038 hci_init_sysfs(hdev);
4039 discovery_init(hdev);
4040
4041 return hdev;
4042 }
4043 EXPORT_SYMBOL(hci_alloc_dev);
4044
4045 /* Free HCI device */
4046 void hci_free_dev(struct hci_dev *hdev)
4047 {
4048 /* will free via device release */
4049 put_device(&hdev->dev);
4050 }
4051 EXPORT_SYMBOL(hci_free_dev);
4052
4053 /* Register HCI device */
4054 int hci_register_dev(struct hci_dev *hdev)
4055 {
4056 int id, error;
4057
4058 if (!hdev->open || !hdev->close || !hdev->send)
4059 return -EINVAL;
4060
4061 /* Do not allow HCI_AMP devices to register at index 0,
4062 * so the index can be used as the AMP controller ID.
4063 */
4064 switch (hdev->dev_type) {
4065 case HCI_BREDR:
4066 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4067 break;
4068 case HCI_AMP:
4069 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4070 break;
4071 default:
4072 return -EINVAL;
4073 }
4074
4075 if (id < 0)
4076 return id;
4077
4078 sprintf(hdev->name, "hci%d", id);
4079 hdev->id = id;
4080
4081 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4082
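	/* hdev->workqueue drives RX, TX and command processing;
	 * WQ_MEM_RECLAIM guarantees forward progress under memory
	 * pressure and max_active of one limits it to a single work
	 * item at a time.
	 */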
4083 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4084 WQ_MEM_RECLAIM, 1, hdev->name);
4085 if (!hdev->workqueue) {
4086 error = -ENOMEM;
4087 goto err;
4088 }
4089
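	/* req_workqueue serializes power on/off and other synchronous
	 * HCI request work, allowing hci_dev_open() to flush it in
	 * order to wait for the setup procedure to finish.
	 */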
4090 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4091 WQ_MEM_RECLAIM, 1, hdev->name);
4092 if (!hdev->req_workqueue) {
4093 destroy_workqueue(hdev->workqueue);
4094 error = -ENOMEM;
4095 goto err;
4096 }
4097
4098 if (!IS_ERR_OR_NULL(bt_debugfs))
4099 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4100
4101 dev_set_name(&hdev->dev, "%s", hdev->name);
4102
4103 error = device_add(&hdev->dev);
4104 if (error < 0)
4105 goto err_wqueue;
4106
4107 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
4108 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4109 hdev);
4110 if (hdev->rfkill) {
4111 if (rfkill_register(hdev->rfkill) < 0) {
4112 rfkill_destroy(hdev->rfkill);
4113 hdev->rfkill = NULL;
4114 }
4115 }
4116
4117 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4118 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4119
4120 set_bit(HCI_SETUP, &hdev->dev_flags);
4121 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
4122
4123 if (hdev->dev_type == HCI_BREDR) {
4124 /* Assume BR/EDR support until proven otherwise (such as
4125 		 * through reading supported features during init).
4126 */
4127 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4128 }
4129
4130 write_lock(&hci_dev_list_lock);
4131 list_add(&hdev->list, &hci_dev_list);
4132 write_unlock(&hci_dev_list_lock);
4133
4134 /* Devices that are marked for raw-only usage are unconfigured
4135 * and should not be included in normal operation.
4136 */
4137 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4138 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
4139
4140 hci_notify(hdev, HCI_DEV_REG);
4141 hci_dev_hold(hdev);
4142
4143 queue_work(hdev->req_workqueue, &hdev->power_on);
4144
4145 return id;
4146
4147 err_wqueue:
4148 destroy_workqueue(hdev->workqueue);
4149 destroy_workqueue(hdev->req_workqueue);
4150 err:
4151 ida_simple_remove(&hci_index_ida, hdev->id);
4152
4153 return error;
4154 }
4155 EXPORT_SYMBOL(hci_register_dev);
4156
4157 /* Unregister HCI device */
4158 void hci_unregister_dev(struct hci_dev *hdev)
4159 {
4160 int i, id;
4161
4162 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4163
4164 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4165
4166 id = hdev->id;
4167
4168 write_lock(&hci_dev_list_lock);
4169 list_del(&hdev->list);
4170 write_unlock(&hci_dev_list_lock);
4171
4172 hci_dev_do_close(hdev);
4173
4174 for (i = 0; i < NUM_REASSEMBLY; i++)
4175 kfree_skb(hdev->reassembly[i]);
4176
4177 cancel_work_sync(&hdev->power_on);
4178
4179 if (!test_bit(HCI_INIT, &hdev->flags) &&
4180 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4181 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
4182 hci_dev_lock(hdev);
4183 mgmt_index_removed(hdev);
4184 hci_dev_unlock(hdev);
4185 }
4186
4187 /* mgmt_index_removed should take care of emptying the
4188 * pending list */
4189 BUG_ON(!list_empty(&hdev->mgmt_pending));
4190
4191 hci_notify(hdev, HCI_DEV_UNREG);
4192
4193 if (hdev->rfkill) {
4194 rfkill_unregister(hdev->rfkill);
4195 rfkill_destroy(hdev->rfkill);
4196 }
4197
4198 smp_unregister(hdev);
4199
4200 device_del(&hdev->dev);
4201
4202 debugfs_remove_recursive(hdev->debugfs);
4203
4204 destroy_workqueue(hdev->workqueue);
4205 destroy_workqueue(hdev->req_workqueue);
4206
4207 hci_dev_lock(hdev);
4208 hci_bdaddr_list_clear(&hdev->blacklist);
4209 hci_bdaddr_list_clear(&hdev->whitelist);
4210 hci_uuids_clear(hdev);
4211 hci_link_keys_clear(hdev);
4212 hci_smp_ltks_clear(hdev);
4213 hci_smp_irks_clear(hdev);
4214 hci_remote_oob_data_clear(hdev);
4215 hci_bdaddr_list_clear(&hdev->le_white_list);
4216 hci_conn_params_clear_all(hdev);
4217 hci_dev_unlock(hdev);
4218
4219 hci_dev_put(hdev);
4220
4221 ida_simple_remove(&hci_index_ida, id);
4222 }
4223 EXPORT_SYMBOL(hci_unregister_dev);
4224
4225 /* Suspend HCI device */
4226 int hci_suspend_dev(struct hci_dev *hdev)
4227 {
4228 hci_notify(hdev, HCI_DEV_SUSPEND);
4229 return 0;
4230 }
4231 EXPORT_SYMBOL(hci_suspend_dev);
4232
4233 /* Resume HCI device */
4234 int hci_resume_dev(struct hci_dev *hdev)
4235 {
4236 hci_notify(hdev, HCI_DEV_RESUME);
4237 return 0;
4238 }
4239 EXPORT_SYMBOL(hci_resume_dev);
4240
4241 /* Reset HCI device */
4242 int hci_reset_dev(struct hci_dev *hdev)
4243 {
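	/* Synthesized Hardware Error event: event code, parameter length
	 * (1 byte) and a hardware code of 0x00. Injecting it into the RX
	 * path below lets the core tear down the controller as if the
	 * hardware had reported the failure itself.
	 */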
4244 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4245 struct sk_buff *skb;
4246
4247 skb = bt_skb_alloc(3, GFP_ATOMIC);
4248 if (!skb)
4249 return -ENOMEM;
4250
4251 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4252 memcpy(skb_put(skb, 3), hw_err, 3);
4253
4254 /* Send Hardware Error to upper stack */
4255 return hci_recv_frame(hdev, skb);
4256 }
4257 EXPORT_SYMBOL(hci_reset_dev);
4258
4259 /* Receive frame from HCI drivers */
4260 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
4261 {
4262	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
4263		      !test_bit(HCI_INIT, &hdev->flags))) {
4264 kfree_skb(skb);
4265 return -ENXIO;
4266 }
4267
4268 /* Incoming skb */
4269 bt_cb(skb)->incoming = 1;
4270
4271 /* Time stamp */
4272 __net_timestamp(skb);
4273
4274 skb_queue_tail(&hdev->rx_q, skb);
4275 queue_work(hdev->workqueue, &hdev->rx_work);
4276
4277 return 0;
4278 }
4279 EXPORT_SYMBOL(hci_recv_frame);
4280
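/*
 * Illustrative sketch (editorial addition): a driver hands a completed
 * packet to the core by tagging its type and calling hci_recv_frame();
 * the skb is consumed on both the success and error paths.
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	err = hci_recv_frame(hdev, skb);
 */
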
4281 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
4282 int count, __u8 index)
4283 {
4284 int len = 0;
4285 int hlen = 0;
4286 int remain = count;
4287 struct sk_buff *skb;
4288 struct bt_skb_cb *scb;
4289
4290 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
4291 index >= NUM_REASSEMBLY)
4292 return -EILSEQ;
4293
4294 skb = hdev->reassembly[index];
4295
4296 if (!skb) {
4297 switch (type) {
4298 case HCI_ACLDATA_PKT:
4299 len = HCI_MAX_FRAME_SIZE;
4300 hlen = HCI_ACL_HDR_SIZE;
4301 break;
4302 case HCI_EVENT_PKT:
4303 len = HCI_MAX_EVENT_SIZE;
4304 hlen = HCI_EVENT_HDR_SIZE;
4305 break;
4306 case HCI_SCODATA_PKT:
4307 len = HCI_MAX_SCO_SIZE;
4308 hlen = HCI_SCO_HDR_SIZE;
4309 break;
4310 }
4311
4312 skb = bt_skb_alloc(len, GFP_ATOMIC);
4313 if (!skb)
4314 return -ENOMEM;
4315
4316 scb = (void *) skb->cb;
4317 scb->expect = hlen;
4318 scb->pkt_type = type;
4319
4320 hdev->reassembly[index] = skb;
4321 }
4322
4323 while (count) {
4324 scb = (void *) skb->cb;
4325 len = min_t(uint, scb->expect, count);
4326
4327 memcpy(skb_put(skb, len), data, len);
4328
4329 count -= len;
4330 data += len;
4331 scb->expect -= len;
4332 remain = count;
4333
4334 switch (type) {
4335 case HCI_EVENT_PKT:
4336 if (skb->len == HCI_EVENT_HDR_SIZE) {
4337 struct hci_event_hdr *h = hci_event_hdr(skb);
4338 scb->expect = h->plen;
4339
4340 if (skb_tailroom(skb) < scb->expect) {
4341 kfree_skb(skb);
4342 hdev->reassembly[index] = NULL;
4343 return -ENOMEM;
4344 }
4345 }
4346 break;
4347
4348 case HCI_ACLDATA_PKT:
4349 if (skb->len == HCI_ACL_HDR_SIZE) {
4350 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4351 scb->expect = __le16_to_cpu(h->dlen);
4352
4353 if (skb_tailroom(skb) < scb->expect) {
4354 kfree_skb(skb);
4355 hdev->reassembly[index] = NULL;
4356 return -ENOMEM;
4357 }
4358 }
4359 break;
4360
4361 case HCI_SCODATA_PKT:
4362 if (skb->len == HCI_SCO_HDR_SIZE) {
4363 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4364 scb->expect = h->dlen;
4365
4366 if (skb_tailroom(skb) < scb->expect) {
4367 kfree_skb(skb);
4368 hdev->reassembly[index] = NULL;
4369 return -ENOMEM;
4370 }
4371 }
4372 break;
4373 }
4374
4375 if (scb->expect == 0) {
4376 /* Complete frame */
4377
4378 bt_cb(skb)->pkt_type = type;
4379 hci_recv_frame(hdev, skb);
4380
4381 hdev->reassembly[index] = NULL;
4382 return remain;
4383 }
4384 }
4385
4386 return remain;
4387 }
4388
4389 #define STREAM_REASSEMBLY 0
4390
4391 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4392 {
4393 int type;
4394 int rem = 0;
4395
4396 while (count) {
4397 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4398
4399 if (!skb) {
4400 struct { char type; } *pkt;
4401
4402 /* Start of the frame */
4403 pkt = data;
4404 type = pkt->type;
4405
4406 data++;
4407 count--;
4408 } else
4409 type = bt_cb(skb)->pkt_type;
4410
4411 rem = hci_reassembly(hdev, type, data, count,
4412 STREAM_REASSEMBLY);
4413 if (rem < 0)
4414 return rem;
4415
4416 data += (count - rem);
4417 count = rem;
4418 }
4419
4420 return rem;
4421 }
4422 EXPORT_SYMBOL(hci_recv_stream_fragment);
4423
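/*
 * Illustrative sketch (editorial addition): UART-style drivers feed the
 * raw byte stream here; the first byte of each frame is the packet type
 * indicator and hci_reassembly() accumulates the rest. my_uart_rx is a
 * hypothetical receive hook.
 *
 *	static void my_uart_rx(struct hci_dev *hdev, void *buf, int len)
 *	{
 *		if (hci_recv_stream_fragment(hdev, buf, len) < 0)
 *			BT_ERR("Frame reassembly failed");
 *	}
 */
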
4424 /* ---- Interface to upper protocols ---- */
4425
4426 int hci_register_cb(struct hci_cb *cb)
4427 {
4428 BT_DBG("%p name %s", cb, cb->name);
4429
4430 write_lock(&hci_cb_list_lock);
4431 list_add(&cb->list, &hci_cb_list);
4432 write_unlock(&hci_cb_list_lock);
4433
4434 return 0;
4435 }
4436 EXPORT_SYMBOL(hci_register_cb);
4437
4438 int hci_unregister_cb(struct hci_cb *cb)
4439 {
4440 BT_DBG("%p name %s", cb, cb->name);
4441
4442 write_lock(&hci_cb_list_lock);
4443 list_del(&cb->list);
4444 write_unlock(&hci_cb_list_lock);
4445
4446 return 0;
4447 }
4448 EXPORT_SYMBOL(hci_unregister_cb);
4449
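/*
 * Illustrative sketch (editorial addition): an upper protocol hooks in
 * with a statically defined callback block. The exact callback members
 * of struct hci_cb depend on the kernel version, so treat this as an
 * assumption-laden outline; my_security_cfm is hypothetical.
 *
 *	static struct hci_cb my_cb = {
 *		.name	      = "my_proto",
 *		.security_cfm = my_security_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 */
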
4450 static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
4451 {
4452 int err;
4453
4454 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
4455
4456 /* Time stamp */
4457 __net_timestamp(skb);
4458
4459 /* Send copy to monitor */
4460 hci_send_to_monitor(hdev, skb);
4461
4462 if (atomic_read(&hdev->promisc)) {
4463 /* Send copy to the sockets */
4464 hci_send_to_sock(hdev, skb);
4465 }
4466
4467 /* Get rid of skb owner, prior to sending to the driver. */
4468 skb_orphan(skb);
4469
4470 err = hdev->send(hdev, skb);
4471 if (err < 0) {
4472 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4473 kfree_skb(skb);
4474 }
4475 }
4476
4477 void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4478 {
4479 skb_queue_head_init(&req->cmd_q);
4480 req->hdev = hdev;
4481 req->err = 0;
4482 }
4483
4484 int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4485 {
4486 struct hci_dev *hdev = req->hdev;
4487 struct sk_buff *skb;
4488 unsigned long flags;
4489
4490 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4491
4492 /* If an error occurred during request building, remove all HCI
4493 * commands queued on the HCI request queue.
4494 */
4495 if (req->err) {
4496 skb_queue_purge(&req->cmd_q);
4497 return req->err;
4498 }
4499
4500 /* Do not allow empty requests */
4501 if (skb_queue_empty(&req->cmd_q))
4502 return -ENODATA;
4503
4504 skb = skb_peek_tail(&req->cmd_q);
4505 bt_cb(skb)->req.complete = complete;
4506
4507 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4508 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4509 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4510
4511 queue_work(hdev->workqueue, &hdev->cmd_work);
4512
4513 return 0;
4514 }
4515
4516 bool hci_req_pending(struct hci_dev *hdev)
4517 {
4518 return (hdev->req_status == HCI_REQ_PEND);
4519 }
4520
4521 static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
4522 u32 plen, const void *param)
4523 {
4524 int len = HCI_COMMAND_HDR_SIZE + plen;
4525 struct hci_command_hdr *hdr;
4526 struct sk_buff *skb;
4527
4528 skb = bt_skb_alloc(len, GFP_ATOMIC);
4529 if (!skb)
4530 return NULL;
4531
4532 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
4533 hdr->opcode = cpu_to_le16(opcode);
4534 hdr->plen = plen;
4535
4536 if (plen)
4537 memcpy(skb_put(skb, plen), param, plen);
4538
4539 BT_DBG("skb len %d", skb->len);
4540
4541 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
4542 bt_cb(skb)->opcode = opcode;
4543
4544 return skb;
4545 }
4546
4547 /* Send HCI command */
4548 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4549 const void *param)
4550 {
4551 struct sk_buff *skb;
4552
4553 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4554
4555 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4556 if (!skb) {
4557 BT_ERR("%s no memory for command", hdev->name);
4558 return -ENOMEM;
4559 }
4560
4561 /* Stand-alone HCI commands must be flagged as
4562 * single-command requests.
4563 */
4564 bt_cb(skb)->req.start = true;
4565
4566 skb_queue_tail(&hdev->cmd_q, skb);
4567 queue_work(hdev->workqueue, &hdev->cmd_work);
4568
4569 return 0;
4570 }
4571
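/*
 * Illustrative sketch (editorial addition): a stand-alone command with
 * a single parameter byte; HCI_OP_WRITE_SCAN_ENABLE is used in exactly
 * this way near the end of this file.
 *
 *	u8 scan = SCAN_PAGE;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */
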
4572 /* Queue a command to an asynchronous HCI request */
4573 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4574 const void *param, u8 event)
4575 {
4576 struct hci_dev *hdev = req->hdev;
4577 struct sk_buff *skb;
4578
4579 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4580
4581 /* If an error occurred during request building, there is no point in
4582 * queueing the HCI command. We can simply return.
4583 */
4584 if (req->err)
4585 return;
4586
4587 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4588 if (!skb) {
4589 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4590 hdev->name, opcode);
4591 req->err = -ENOMEM;
4592 return;
4593 }
4594
4595 if (skb_queue_empty(&req->cmd_q))
4596 bt_cb(skb)->req.start = true;
4597
4598 bt_cb(skb)->req.event = event;
4599
4600 skb_queue_tail(&req->cmd_q, skb);
4601 }
4602
4603 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4604 const void *param)
4605 {
4606 hci_req_add_ev(req, opcode, plen, param, 0);
4607 }
4608
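/*
 * Illustrative sketch (editorial addition): the request API batches
 * several commands behind one completion callback, the pattern used by
 * hci_update_background_scan() below; my_complete is a hypothetical
 * hci_req_complete_t handler.
 *
 *	struct hci_cp_le_set_scan_enable cp;
 *	struct hci_request req;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	cp.enable = LE_SCAN_DISABLE;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete);
 */
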
4609 /* Get data from the previously sent command */
4610 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
4611 {
4612 struct hci_command_hdr *hdr;
4613
4614 if (!hdev->sent_cmd)
4615 return NULL;
4616
4617 hdr = (void *) hdev->sent_cmd->data;
4618
4619 if (hdr->opcode != cpu_to_le16(opcode))
4620 return NULL;
4621
4622 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
4623
4624 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4625 }
4626
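/*
 * Illustrative sketch (editorial addition): event handlers use this to
 * recover the parameters of the command that a Command Complete event
 * refers to, e.g. for a one-byte payload:
 *
 *	void *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *
 *	if (!sent)
 *		return;
 *	param = *((u8 *) sent);
 */
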
4627 /* Send ACL data */
4628 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4629 {
4630 struct hci_acl_hdr *hdr;
4631 int len = skb->len;
4632
4633 skb_push(skb, HCI_ACL_HDR_SIZE);
4634 skb_reset_transport_header(skb);
4635 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
4636 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4637 hdr->dlen = cpu_to_le16(len);
4638 }
4639
4640 static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
4641 struct sk_buff *skb, __u16 flags)
4642 {
4643 struct hci_conn *conn = chan->conn;
4644 struct hci_dev *hdev = conn->hdev;
4645 struct sk_buff *list;
4646
4647 skb->len = skb_headlen(skb);
4648 skb->data_len = 0;
4649
4650 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4651
4652 switch (hdev->dev_type) {
4653 case HCI_BREDR:
4654 hci_add_acl_hdr(skb, conn->handle, flags);
4655 break;
4656 case HCI_AMP:
4657 hci_add_acl_hdr(skb, chan->handle, flags);
4658 break;
4659 default:
4660 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4661 return;
4662 }
4663
4664 list = skb_shinfo(skb)->frag_list;
4665 if (!list) {
4666		/* Non-fragmented */
4667 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4668
4669 skb_queue_tail(queue, skb);
4670 } else {
4671 /* Fragmented */
4672 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4673
4674 skb_shinfo(skb)->frag_list = NULL;
4675
4676 /* Queue all fragments atomically. We need to use spin_lock_bh
4677 * here because of 6LoWPAN links, as there this function is
4678 * called from softirq and using normal spin lock could cause
4679 * deadlocks.
4680 */
4681 spin_lock_bh(&queue->lock);
4682
4683 __skb_queue_tail(queue, skb);
4684
4685 flags &= ~ACL_START;
4686 flags |= ACL_CONT;
4687 do {
4688 skb = list; list = list->next;
4689
4690 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
4691 hci_add_acl_hdr(skb, conn->handle, flags);
4692
4693 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4694
4695 __skb_queue_tail(queue, skb);
4696 } while (list);
4697
4698 spin_unlock_bh(&queue->lock);
4699 }
4700 }
4701
4702 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4703 {
4704 struct hci_dev *hdev = chan->conn->hdev;
4705
4706 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
4707
4708 hci_queue_acl(chan, &chan->data_q, skb, flags);
4709
4710 queue_work(hdev->workqueue, &hdev->tx_work);
4711 }
4712
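/*
 * Illustrative sketch (editorial addition): L2CAP submits outgoing ACL
 * data per channel; the first fragment carries ACL_START (or
 * ACL_START_NO_FLUSH) and the TX scheduler below drains chan->data_q.
 *
 *	hci_send_acl(chan, skb, ACL_START_NO_FLUSH);
 */
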
4713 /* Send SCO data */
4714 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
4715 {
4716 struct hci_dev *hdev = conn->hdev;
4717 struct hci_sco_hdr hdr;
4718
4719 BT_DBG("%s len %d", hdev->name, skb->len);
4720
4721 hdr.handle = cpu_to_le16(conn->handle);
4722 hdr.dlen = skb->len;
4723
4724 skb_push(skb, HCI_SCO_HDR_SIZE);
4725 skb_reset_transport_header(skb);
4726 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
4727
4728 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
4729
4730 skb_queue_tail(&conn->data_q, skb);
4731 queue_work(hdev->workqueue, &hdev->tx_work);
4732 }
4733
4734 /* ---- HCI TX task (outgoing data) ---- */
4735
4736 /* HCI Connection scheduler */
4737 static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4738 int *quote)
4739 {
4740 struct hci_conn_hash *h = &hdev->conn_hash;
4741 struct hci_conn *conn = NULL, *c;
4742 unsigned int num = 0, min = ~0;
4743
4744	/* We don't have to lock the device here. Connections are always
4745	 * added and removed with the TX task disabled. */
4746
4747 rcu_read_lock();
4748
4749 list_for_each_entry_rcu(c, &h->list, list) {
4750 if (c->type != type || skb_queue_empty(&c->data_q))
4751 continue;
4752
4753 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4754 continue;
4755
4756 num++;
4757
4758 if (c->sent < min) {
4759 min = c->sent;
4760 conn = c;
4761 }
4762
4763 if (hci_conn_num(hdev, type) == num)
4764 break;
4765 }
4766
4767 rcu_read_unlock();
4768
4769 if (conn) {
4770 int cnt, q;
4771
4772 switch (conn->type) {
4773 case ACL_LINK:
4774 cnt = hdev->acl_cnt;
4775 break;
4776 case SCO_LINK:
4777 case ESCO_LINK:
4778 cnt = hdev->sco_cnt;
4779 break;
4780 case LE_LINK:
4781 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4782 break;
4783 default:
4784 cnt = 0;
4785 BT_ERR("Unknown link type");
4786 }
4787
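		/* Fair-share quota (editorial note): e.g. 8 free ACL
		 * slots across 3 busy connections yields 8 / 3 = 2
		 * packets each, with a minimum of 1 so every connection
		 * keeps making progress.
		 */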
4788 q = cnt / num;
4789 *quote = q ? q : 1;
4790 } else
4791 *quote = 0;
4792
4793 BT_DBG("conn %p quote %d", conn, *quote);
4794 return conn;
4795 }
4796
4797 static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
4798 {
4799 struct hci_conn_hash *h = &hdev->conn_hash;
4800 struct hci_conn *c;
4801
4802 BT_ERR("%s link tx timeout", hdev->name);
4803
4804 rcu_read_lock();
4805
4806 /* Kill stalled connections */
4807 list_for_each_entry_rcu(c, &h->list, list) {
4808 if (c->type == type && c->sent) {
4809 BT_ERR("%s killing stalled connection %pMR",
4810 hdev->name, &c->dst);
4811 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
4812 }
4813 }
4814
4815 rcu_read_unlock();
4816 }
4817
4818 static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4819 int *quote)
4820 {
4821 struct hci_conn_hash *h = &hdev->conn_hash;
4822 struct hci_chan *chan = NULL;
4823 unsigned int num = 0, min = ~0, cur_prio = 0;
4824 struct hci_conn *conn;
4825 int cnt, q, conn_num = 0;
4826
4827 BT_DBG("%s", hdev->name);
4828
4829 rcu_read_lock();
4830
4831 list_for_each_entry_rcu(conn, &h->list, list) {
4832 struct hci_chan *tmp;
4833
4834 if (conn->type != type)
4835 continue;
4836
4837 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4838 continue;
4839
4840 conn_num++;
4841
4842 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
4843 struct sk_buff *skb;
4844
4845 if (skb_queue_empty(&tmp->data_q))
4846 continue;
4847
4848 skb = skb_peek(&tmp->data_q);
4849 if (skb->priority < cur_prio)
4850 continue;
4851
4852 if (skb->priority > cur_prio) {
4853 num = 0;
4854 min = ~0;
4855 cur_prio = skb->priority;
4856 }
4857
4858 num++;
4859
4860 if (conn->sent < min) {
4861 min = conn->sent;
4862 chan = tmp;
4863 }
4864 }
4865
4866 if (hci_conn_num(hdev, type) == conn_num)
4867 break;
4868 }
4869
4870 rcu_read_unlock();
4871
4872 if (!chan)
4873 return NULL;
4874
4875 switch (chan->conn->type) {
4876 case ACL_LINK:
4877 cnt = hdev->acl_cnt;
4878 break;
4879 case AMP_LINK:
4880 cnt = hdev->block_cnt;
4881 break;
4882 case SCO_LINK:
4883 case ESCO_LINK:
4884 cnt = hdev->sco_cnt;
4885 break;
4886 case LE_LINK:
4887 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4888 break;
4889 default:
4890 cnt = 0;
4891 BT_ERR("Unknown link type");
4892 }
4893
4894 q = cnt / num;
4895 *quote = q ? q : 1;
4896 BT_DBG("chan %p quote %d", chan, *quote);
4897 return chan;
4898 }
4899
4900 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4901 {
4902 struct hci_conn_hash *h = &hdev->conn_hash;
4903 struct hci_conn *conn;
4904 int num = 0;
4905
4906 BT_DBG("%s", hdev->name);
4907
4908 rcu_read_lock();
4909
4910 list_for_each_entry_rcu(conn, &h->list, list) {
4911 struct hci_chan *chan;
4912
4913 if (conn->type != type)
4914 continue;
4915
4916 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4917 continue;
4918
4919 num++;
4920
4921 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
4922 struct sk_buff *skb;
4923
4924 if (chan->sent) {
4925 chan->sent = 0;
4926 continue;
4927 }
4928
4929 if (skb_queue_empty(&chan->data_q))
4930 continue;
4931
4932 skb = skb_peek(&chan->data_q);
4933 if (skb->priority >= HCI_PRIO_MAX - 1)
4934 continue;
4935
4936 skb->priority = HCI_PRIO_MAX - 1;
4937
4938 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
4939 skb->priority);
4940 }
4941
4942 if (hci_conn_num(hdev, type) == num)
4943 break;
4944 }
4945
4946 rcu_read_unlock();
4948 }
4949
4950 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4951 {
4952 /* Calculate count of blocks used by this packet */
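	/* Editorial example: a 339-byte ACL frame (4-byte header plus
	 * 335 bytes of payload) with block_len 64 occupies
	 * DIV_ROUND_UP(335, 64) = 6 blocks.
	 */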
4953 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4954 }
4955
4956 static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
4957 {
4958 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
4959		/* ACL tx timeout must be longer than the maximum link
4960		 * supervision timeout (40.9 seconds) */
4961 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
4962 HCI_ACL_TX_TIMEOUT))
4963 hci_link_tx_to(hdev, ACL_LINK);
4964 }
4965 }
4966
4967 static void hci_sched_acl_pkt(struct hci_dev *hdev)
4968 {
4969 unsigned int cnt = hdev->acl_cnt;
4970 struct hci_chan *chan;
4971 struct sk_buff *skb;
4972 int quote;
4973
4974 __check_timeout(hdev, cnt);
4975
4976 while (hdev->acl_cnt &&
4977 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
4978 u32 priority = (skb_peek(&chan->data_q))->priority;
4979 while (quote-- && (skb = skb_peek(&chan->data_q))) {
4980 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
4981 skb->len, skb->priority);
4982
4983 /* Stop if priority has changed */
4984 if (skb->priority < priority)
4985 break;
4986
4987 skb = skb_dequeue(&chan->data_q);
4988
4989 hci_conn_enter_active_mode(chan->conn,
4990 bt_cb(skb)->force_active);
4991
4992 hci_send_frame(hdev, skb);
4993 hdev->acl_last_tx = jiffies;
4994
4995 hdev->acl_cnt--;
4996 chan->sent++;
4997 chan->conn->sent++;
4998 }
4999 }
5000
5001 if (cnt != hdev->acl_cnt)
5002 hci_prio_recalculate(hdev, ACL_LINK);
5003 }
5004
5005 static void hci_sched_acl_blk(struct hci_dev *hdev)
5006 {
5007 unsigned int cnt = hdev->block_cnt;
5008 struct hci_chan *chan;
5009 struct sk_buff *skb;
5010 int quote;
5011 u8 type;
5012
5013 __check_timeout(hdev, cnt);
5014
5015 BT_DBG("%s", hdev->name);
5016
5017 if (hdev->dev_type == HCI_AMP)
5018 type = AMP_LINK;
5019 else
5020 type = ACL_LINK;
5021
5022 while (hdev->block_cnt > 0 &&
5023 (chan = hci_chan_sent(hdev, type, &quote))) {
5024 u32 priority = (skb_peek(&chan->data_q))->priority;
5025 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5026 int blocks;
5027
5028 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5029 skb->len, skb->priority);
5030
5031 /* Stop if priority has changed */
5032 if (skb->priority < priority)
5033 break;
5034
5035 skb = skb_dequeue(&chan->data_q);
5036
5037 blocks = __get_blocks(hdev, skb);
5038 if (blocks > hdev->block_cnt)
5039 return;
5040
5041 hci_conn_enter_active_mode(chan->conn,
5042 bt_cb(skb)->force_active);
5043
5044 hci_send_frame(hdev, skb);
5045 hdev->acl_last_tx = jiffies;
5046
5047 hdev->block_cnt -= blocks;
5048 quote -= blocks;
5049
5050 chan->sent += blocks;
5051 chan->conn->sent += blocks;
5052 }
5053 }
5054
5055 if (cnt != hdev->block_cnt)
5056 hci_prio_recalculate(hdev, type);
5057 }
5058
5059 static void hci_sched_acl(struct hci_dev *hdev)
5060 {
5061 BT_DBG("%s", hdev->name);
5062
5063 /* No ACL link over BR/EDR controller */
5064 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5065 return;
5066
5067 /* No AMP link over AMP controller */
5068 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
5069 return;
5070
5071 switch (hdev->flow_ctl_mode) {
5072 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5073 hci_sched_acl_pkt(hdev);
5074 break;
5075
5076 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5077 hci_sched_acl_blk(hdev);
5078 break;
5079 }
5080 }
5081
5082 /* Schedule SCO */
5083 static void hci_sched_sco(struct hci_dev *hdev)
5084 {
5085 struct hci_conn *conn;
5086 struct sk_buff *skb;
5087 int quote;
5088
5089 BT_DBG("%s", hdev->name);
5090
5091 if (!hci_conn_num(hdev, SCO_LINK))
5092 return;
5093
5094 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5095 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5096 BT_DBG("skb %p len %d", skb, skb->len);
5097 hci_send_frame(hdev, skb);
5098
5099 conn->sent++;
5100 if (conn->sent == ~0)
5101 conn->sent = 0;
5102 }
5103 }
5104 }
5105
5106 static void hci_sched_esco(struct hci_dev *hdev)
5107 {
5108 struct hci_conn *conn;
5109 struct sk_buff *skb;
5110 int quote;
5111
5112 BT_DBG("%s", hdev->name);
5113
5114 if (!hci_conn_num(hdev, ESCO_LINK))
5115 return;
5116
5117 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5118 &quote))) {
5119 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5120 BT_DBG("skb %p len %d", skb, skb->len);
5121 hci_send_frame(hdev, skb);
5122
5123 conn->sent++;
5124 if (conn->sent == ~0)
5125 conn->sent = 0;
5126 }
5127 }
5128 }
5129
5130 static void hci_sched_le(struct hci_dev *hdev)
5131 {
5132 struct hci_chan *chan;
5133 struct sk_buff *skb;
5134 int quote, cnt, tmp;
5135
5136 BT_DBG("%s", hdev->name);
5137
5138 if (!hci_conn_num(hdev, LE_LINK))
5139 return;
5140
5141 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
5142		/* LE tx timeout must be longer than the maximum link
5143		 * supervision timeout (40.9 seconds) */
5144 if (!hdev->le_cnt && hdev->le_pkts &&
5145 time_after(jiffies, hdev->le_last_tx + HZ * 45))
5146 hci_link_tx_to(hdev, LE_LINK);
5147 }
5148
5149 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
5150 tmp = cnt;
5151 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
5152 u32 priority = (skb_peek(&chan->data_q))->priority;
5153 while (quote-- && (skb = skb_peek(&chan->data_q))) {
5154 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
5155 skb->len, skb->priority);
5156
5157 /* Stop if priority has changed */
5158 if (skb->priority < priority)
5159 break;
5160
5161 skb = skb_dequeue(&chan->data_q);
5162
5163 hci_send_frame(hdev, skb);
5164 hdev->le_last_tx = jiffies;
5165
5166 cnt--;
5167 chan->sent++;
5168 chan->conn->sent++;
5169 }
5170 }
5171
5172 if (hdev->le_pkts)
5173 hdev->le_cnt = cnt;
5174 else
5175 hdev->acl_cnt = cnt;
5176
5177 if (cnt != tmp)
5178 hci_prio_recalculate(hdev, LE_LINK);
5179 }
5180
5181 static void hci_tx_work(struct work_struct *work)
5182 {
5183 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
5184 struct sk_buff *skb;
5185
5186 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
5187 hdev->sco_cnt, hdev->le_cnt);
5188
5189 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5190 /* Schedule queues and send stuff to HCI driver */
5191 hci_sched_acl(hdev);
5192 hci_sched_sco(hdev);
5193 hci_sched_esco(hdev);
5194 hci_sched_le(hdev);
5195 }
5196
5197 /* Send next queued raw (unknown type) packet */
5198 while ((skb = skb_dequeue(&hdev->raw_q)))
5199 hci_send_frame(hdev, skb);
5200 }
5201
5202 /* ----- HCI RX task (incoming data processing) ----- */
5203
5204 /* ACL data packet */
5205 static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5206 {
5207 struct hci_acl_hdr *hdr = (void *) skb->data;
5208 struct hci_conn *conn;
5209 __u16 handle, flags;
5210
5211 skb_pull(skb, HCI_ACL_HDR_SIZE);
5212
5213 handle = __le16_to_cpu(hdr->handle);
5214 flags = hci_flags(handle);
5215 handle = hci_handle(handle);
5216
5217 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
5218 handle, flags);
5219
5220 hdev->stat.acl_rx++;
5221
5222 hci_dev_lock(hdev);
5223 conn = hci_conn_hash_lookup_handle(hdev, handle);
5224 hci_dev_unlock(hdev);
5225
5226 if (conn) {
5227 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
5228
5229 /* Send to upper protocol */
5230 l2cap_recv_acldata(conn, skb, flags);
5231 return;
5232 } else {
5233 BT_ERR("%s ACL packet for unknown connection handle %d",
5234 hdev->name, handle);
5235 }
5236
5237 kfree_skb(skb);
5238 }
5239
5240 /* SCO data packet */
5241 static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
5242 {
5243 struct hci_sco_hdr *hdr = (void *) skb->data;
5244 struct hci_conn *conn;
5245 __u16 handle;
5246
5247 skb_pull(skb, HCI_SCO_HDR_SIZE);
5248
5249 handle = __le16_to_cpu(hdr->handle);
5250
5251 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
5252
5253 hdev->stat.sco_rx++;
5254
5255 hci_dev_lock(hdev);
5256 conn = hci_conn_hash_lookup_handle(hdev, handle);
5257 hci_dev_unlock(hdev);
5258
5259 if (conn) {
5260 /* Send to upper protocol */
5261 sco_recv_scodata(conn, skb);
5262 return;
5263 } else {
5264 BT_ERR("%s SCO packet for unknown connection handle %d",
5265 hdev->name, handle);
5266 }
5267
5268 kfree_skb(skb);
5269 }
5270
5271 static bool hci_req_is_complete(struct hci_dev *hdev)
5272 {
5273 struct sk_buff *skb;
5274
5275 skb = skb_peek(&hdev->cmd_q);
5276 if (!skb)
5277 return true;
5278
5279 return bt_cb(skb)->req.start;
5280 }
5281
5282 static void hci_resend_last(struct hci_dev *hdev)
5283 {
5284 struct hci_command_hdr *sent;
5285 struct sk_buff *skb;
5286 u16 opcode;
5287
5288 if (!hdev->sent_cmd)
5289 return;
5290
5291 sent = (void *) hdev->sent_cmd->data;
5292 opcode = __le16_to_cpu(sent->opcode);
5293 if (opcode == HCI_OP_RESET)
5294 return;
5295
5296 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5297 if (!skb)
5298 return;
5299
5300 skb_queue_head(&hdev->cmd_q, skb);
5301 queue_work(hdev->workqueue, &hdev->cmd_work);
5302 }
5303
5304 void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5305 {
5306 hci_req_complete_t req_complete = NULL;
5307 struct sk_buff *skb;
5308 unsigned long flags;
5309
5310 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5311
5312 /* If the completed command doesn't match the last one that was
5313	 * sent, we need to do special handling of it.
5314 */
5315 if (!hci_sent_cmd_data(hdev, opcode)) {
5316 /* Some CSR based controllers generate a spontaneous
5317 * reset complete event during init and any pending
5318 * command will never be completed. In such a case we
5319 * need to resend whatever was the last sent
5320 * command.
5321 */
5322 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5323 hci_resend_last(hdev);
5324
5325 return;
5326 }
5327
5328	/* If the command succeeded and there are still more commands in
5329	 * this request, the request is not yet complete.
5330 */
5331 if (!status && !hci_req_is_complete(hdev))
5332 return;
5333
5334	/* If this was the last command in a request, the complete
5335 * callback would be found in hdev->sent_cmd instead of the
5336 * command queue (hdev->cmd_q).
5337 */
5338 if (hdev->sent_cmd) {
5339 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
5340
5341 if (req_complete) {
5342 /* We must set the complete callback to NULL to
5343 * avoid calling the callback more than once if
5344 * this function gets called again.
5345 */
5346 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5347
5348 goto call_complete;
5349 }
5350 }
5351
5352 /* Remove all pending commands belonging to this request */
5353 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5354 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5355 if (bt_cb(skb)->req.start) {
5356 __skb_queue_head(&hdev->cmd_q, skb);
5357 break;
5358 }
5359
5360 req_complete = bt_cb(skb)->req.complete;
5361 kfree_skb(skb);
5362 }
5363 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5364
5365 call_complete:
5366 if (req_complete)
5367 req_complete(hdev, status);
5368 }
5369
5370 static void hci_rx_work(struct work_struct *work)
5371 {
5372 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
5373 struct sk_buff *skb;
5374
5375 BT_DBG("%s", hdev->name);
5376
5377 while ((skb = skb_dequeue(&hdev->rx_q))) {
5378 /* Send copy to monitor */
5379 hci_send_to_monitor(hdev, skb);
5380
5381 if (atomic_read(&hdev->promisc)) {
5382 /* Send copy to the sockets */
5383 hci_send_to_sock(hdev, skb);
5384 }
5385
5386 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5387 kfree_skb(skb);
5388 continue;
5389 }
5390
5391 if (test_bit(HCI_INIT, &hdev->flags)) {
5392			/* Don't process data packets in these states. */
5393 switch (bt_cb(skb)->pkt_type) {
5394 case HCI_ACLDATA_PKT:
5395 case HCI_SCODATA_PKT:
5396 kfree_skb(skb);
5397 continue;
5398 }
5399 }
5400
5401 /* Process frame */
5402 switch (bt_cb(skb)->pkt_type) {
5403 case HCI_EVENT_PKT:
5404 BT_DBG("%s Event packet", hdev->name);
5405 hci_event_packet(hdev, skb);
5406 break;
5407
5408 case HCI_ACLDATA_PKT:
5409 BT_DBG("%s ACL data packet", hdev->name);
5410 hci_acldata_packet(hdev, skb);
5411 break;
5412
5413 case HCI_SCODATA_PKT:
5414 BT_DBG("%s SCO data packet", hdev->name);
5415 hci_scodata_packet(hdev, skb);
5416 break;
5417
5418 default:
5419 kfree_skb(skb);
5420 break;
5421 }
5422 }
5423 }
5424
5425 static void hci_cmd_work(struct work_struct *work)
5426 {
5427 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
5428 struct sk_buff *skb;
5429
5430 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5431 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
5432
5433 /* Send queued commands */
5434 if (atomic_read(&hdev->cmd_cnt)) {
5435 skb = skb_dequeue(&hdev->cmd_q);
5436 if (!skb)
5437 return;
5438
5439 kfree_skb(hdev->sent_cmd);
5440
5441 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
5442 if (hdev->sent_cmd) {
5443 atomic_dec(&hdev->cmd_cnt);
5444 hci_send_frame(hdev, skb);
5445 if (test_bit(HCI_RESET, &hdev->flags))
5446 cancel_delayed_work(&hdev->cmd_timer);
5447 else
5448 schedule_delayed_work(&hdev->cmd_timer,
5449 HCI_CMD_TIMEOUT);
5450 } else {
5451 skb_queue_head(&hdev->cmd_q, skb);
5452 queue_work(hdev->workqueue, &hdev->cmd_work);
5453 }
5454 }
5455 }
5456
5457 void hci_req_add_le_scan_disable(struct hci_request *req)
5458 {
5459 struct hci_cp_le_set_scan_enable cp;
5460
5461 memset(&cp, 0, sizeof(cp));
5462 cp.enable = LE_SCAN_DISABLE;
5463 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5464 }
5465
5466 static void add_to_white_list(struct hci_request *req,
5467 struct hci_conn_params *params)
5468 {
5469 struct hci_cp_le_add_to_white_list cp;
5470
5471 cp.bdaddr_type = params->addr_type;
5472 bacpy(&cp.bdaddr, &params->addr);
5473
5474 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5475 }
5476
5477 static u8 update_white_list(struct hci_request *req)
5478 {
5479 struct hci_dev *hdev = req->hdev;
5480 struct hci_conn_params *params;
5481 struct bdaddr_list *b;
5482 uint8_t white_list_entries = 0;
5483
5484 /* Go through the current white list programmed into the
5485 * controller one by one and check if that address is still
5486 * in the list of pending connections or list of devices to
5487 * report. If not present in either list, then queue the
5488 * command to remove it from the controller.
5489 */
5490 list_for_each_entry(b, &hdev->le_white_list, list) {
5491 struct hci_cp_le_del_from_white_list cp;
5492
5493 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5494 &b->bdaddr, b->bdaddr_type) ||
5495 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5496 &b->bdaddr, b->bdaddr_type)) {
5497 white_list_entries++;
5498 continue;
5499 }
5500
5501 cp.bdaddr_type = b->bdaddr_type;
5502 bacpy(&cp.bdaddr, &b->bdaddr);
5503
5504 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5505 sizeof(cp), &cp);
5506 }
5507
5508	/* Since all white list entries that are no longer valid have
5509	 * been removed, walk through the list of pending connections
5510	 * and ensure that any new device gets programmed into
5511	 * the controller.
5512	 *
5513	 * If the list of devices is larger than the number of
5514	 * available white list entries in the controller, then
5515	 * just abort and return a filter policy value that does not
5516	 * use the white list.
5517 */
5518 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5519 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5520 &params->addr, params->addr_type))
5521 continue;
5522
5523 if (white_list_entries >= hdev->le_white_list_size) {
5524 /* Select filter policy to accept all advertising */
5525 return 0x00;
5526 }
5527
5528 if (hci_find_irk_by_addr(hdev, &params->addr,
5529 params->addr_type)) {
5530			/* White list cannot be used with RPAs */
5531 return 0x00;
5532 }
5533
5534 white_list_entries++;
5535 add_to_white_list(req, params);
5536 }
5537
5538 /* After adding all new pending connections, walk through
5539 * the list of pending reports and also add these to the
5540 * white list if there is still space.
5541 */
5542 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5543 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5544 &params->addr, params->addr_type))
5545 continue;
5546
5547 if (white_list_entries >= hdev->le_white_list_size) {
5548 /* Select filter policy to accept all advertising */
5549 return 0x00;
5550 }
5551
5552 if (hci_find_irk_by_addr(hdev, &params->addr,
5553 params->addr_type)) {
5554			/* White list cannot be used with RPAs */
5555 return 0x00;
5556 }
5557
5558 white_list_entries++;
5559 add_to_white_list(req, params);
5560 }
5561
5562 /* Select filter policy to use white list */
5563 return 0x01;
5564 }
5565
5566 void hci_req_add_le_passive_scan(struct hci_request *req)
5567 {
5568 struct hci_cp_le_set_scan_param param_cp;
5569 struct hci_cp_le_set_scan_enable enable_cp;
5570 struct hci_dev *hdev = req->hdev;
5571 u8 own_addr_type;
5572 u8 filter_policy;
5573
5574	/* Set require_privacy to false since no SCAN_REQ is sent
5575 * during passive scanning. Not using an unresolvable address
5576 * here is important so that peer devices using direct
5577 * advertising with our address will be correctly reported
5578 * by the controller.
5579 */
5580 if (hci_update_random_address(req, false, &own_addr_type))
5581 return;
5582
5583 /* Adding or removing entries from the white list must
5584 * happen before enabling scanning. The controller does
5585 * not allow white list modification while scanning.
5586 */
5587 filter_policy = update_white_list(req);
5588
5589 memset(&param_cp, 0, sizeof(param_cp));
5590 param_cp.type = LE_SCAN_PASSIVE;
5591 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5592 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5593 param_cp.own_address_type = own_addr_type;
5594 param_cp.filter_policy = filter_policy;
5595 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5596 &param_cp);
5597
5598 memset(&enable_cp, 0, sizeof(enable_cp));
5599 enable_cp.enable = LE_SCAN_ENABLE;
5600 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
5601 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5602 &enable_cp);
5603 }
5604
5605 static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5606 {
5607 if (status)
5608 BT_DBG("HCI request failed to update background scanning: "
5609 "status 0x%2.2x", status);
5610 }
5611
5612 /* This function controls the background scanning based on hdev->pend_le_conns
5613 * list. If there are pending LE connections, we start the background scanning,
5614 * otherwise we stop it.
5615 *
5616 * This function requires the caller holds hdev->lock.
5617 */
5618 void hci_update_background_scan(struct hci_dev *hdev)
5619 {
5620 struct hci_request req;
5621 struct hci_conn *conn;
5622 int err;
5623
5624 if (!test_bit(HCI_UP, &hdev->flags) ||
5625 test_bit(HCI_INIT, &hdev->flags) ||
5626 test_bit(HCI_SETUP, &hdev->dev_flags) ||
5627 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
5628 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
5629 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
5630 return;
5631
5632 /* No point in doing scanning if LE support hasn't been enabled */
5633 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5634 return;
5635
5636	/* If discovery is active, don't interfere with it */
5637 if (hdev->discovery.state != DISCOVERY_STOPPED)
5638 return;
5639
5640 hci_req_init(&req, hdev);
5641
5642 if (list_empty(&hdev->pend_le_conns) &&
5643 list_empty(&hdev->pend_le_reports)) {
5644		/* If there are no pending LE connections or devices
5645 * to be scanned for, we should stop the background
5646 * scanning.
5647 */
5648
5649 /* If controller is not scanning we are done. */
5650 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5651 return;
5652
5653 hci_req_add_le_scan_disable(&req);
5654
5655 BT_DBG("%s stopping background scanning", hdev->name);
5656 } else {
5657 /* If there is at least one pending LE connection, we should
5658 * keep the background scan running.
5659 */
5660
5661 /* If controller is connecting, we should not start scanning
5662 * since some controllers are not able to scan and connect at
5663 * the same time.
5664 */
5665 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5666 if (conn)
5667 return;
5668
5669 /* If controller is currently scanning, we stop it to ensure we
5670 * don't miss any advertising (due to duplicates filter).
5671 */
5672 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5673 hci_req_add_le_scan_disable(&req);
5674
5675 hci_req_add_le_passive_scan(&req);
5676
5677 BT_DBG("%s starting background scanning", hdev->name);
5678 }
5679
5680 err = hci_req_run(&req, update_background_scan_complete);
5681 if (err)
5682 BT_ERR("Failed to run HCI request: err %d", err);
5683 }
5684
5685 static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5686 {
5687 struct bdaddr_list *b;
5688
5689 list_for_each_entry(b, &hdev->whitelist, list) {
5690 struct hci_conn *conn;
5691
5692 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5693 if (!conn)
5694 return true;
5695
5696 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5697 return true;
5698 }
5699
5700 return false;
5701 }
5702
5703 void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5704 {
5705 u8 scan;
5706
5707 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5708 return;
5709
5710 if (!hdev_is_powered(hdev))
5711 return;
5712
5713 if (mgmt_powering_down(hdev))
5714 return;
5715
5716 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
5717 disconnected_whitelist_entries(hdev))
5718 scan = SCAN_PAGE;
5719 else
5720 scan = SCAN_DISABLED;
5721
5722 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5723 return;
5724
5725 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5726 scan |= SCAN_INQUIRY;
5727
5728 if (req)
5729 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5730 else
5731 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5732 }