Merge tag 'urgent-slab-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/device...
[deliverable/linux.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
8c520a59 30#include <linux/rfkill.h>
baf27f6e 31#include <linux/debugfs.h>
99780a7b 32#include <linux/crypto.h>
47219839 33#include <asm/unaligned.h>
1da177e4
LT
34
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
4bc58f51 37#include <net/bluetooth/l2cap.h>
1da177e4 38
970c4e46
JH
39#include "smp.h"
40
/* Deferred work handlers, implemented further down in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
55
1da177e4
LT
56/* ---- HCI notifications ---- */
57
/* Forward a device state change event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
62
baf27f6e
MH
63/* ---- HCI debugfs entries ---- */
64
4b4148e9
MH
65static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
66 size_t count, loff_t *ppos)
67{
68 struct hci_dev *hdev = file->private_data;
69 char buf[3];
70
71 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
72 buf[1] = '\n';
73 buf[2] = '\0';
74 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
75}
76
77static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
78 size_t count, loff_t *ppos)
79{
80 struct hci_dev *hdev = file->private_data;
81 struct sk_buff *skb;
82 char buf[32];
83 size_t buf_size = min(count, (sizeof(buf)-1));
84 bool enable;
85 int err;
86
87 if (!test_bit(HCI_UP, &hdev->flags))
88 return -ENETDOWN;
89
90 if (copy_from_user(buf, user_buf, buf_size))
91 return -EFAULT;
92
93 buf[buf_size] = '\0';
94 if (strtobool(buf, &enable))
95 return -EINVAL;
96
97 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
98 return -EALREADY;
99
100 hci_req_lock(hdev);
101 if (enable)
102 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
103 HCI_CMD_TIMEOUT);
104 else
105 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
106 HCI_CMD_TIMEOUT);
107 hci_req_unlock(hdev);
108
109 if (IS_ERR(skb))
110 return PTR_ERR(skb);
111
112 err = -bt_to_errno(skb->data[0]);
113 kfree_skb(skb);
114
115 if (err < 0)
116 return err;
117
118 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
119
120 return count;
121}
122
123static const struct file_operations dut_mode_fops = {
124 .open = simple_open,
125 .read = dut_mode_read,
126 .write = dut_mode_write,
127 .llseek = default_llseek,
128};
129
dfb826a8
MH
130static int features_show(struct seq_file *f, void *ptr)
131{
132 struct hci_dev *hdev = f->private;
133 u8 p;
134
135 hci_dev_lock(hdev);
136 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
cfbb2b5b 137 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
dfb826a8
MH
138 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
139 hdev->features[p][0], hdev->features[p][1],
140 hdev->features[p][2], hdev->features[p][3],
141 hdev->features[p][4], hdev->features[p][5],
142 hdev->features[p][6], hdev->features[p][7]);
143 }
cfbb2b5b
MH
144 if (lmp_le_capable(hdev))
145 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
146 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
147 hdev->le_features[0], hdev->le_features[1],
148 hdev->le_features[2], hdev->le_features[3],
149 hdev->le_features[4], hdev->le_features[5],
150 hdev->le_features[6], hdev->le_features[7]);
dfb826a8
MH
151 hci_dev_unlock(hdev);
152
153 return 0;
154}
155
156static int features_open(struct inode *inode, struct file *file)
157{
158 return single_open(file, features_show, inode->i_private);
159}
160
161static const struct file_operations features_fops = {
162 .open = features_open,
163 .read = seq_read,
164 .llseek = seq_lseek,
165 .release = single_release,
166};
167
70afe0b8
MH
168static int blacklist_show(struct seq_file *f, void *p)
169{
170 struct hci_dev *hdev = f->private;
171 struct bdaddr_list *b;
172
173 hci_dev_lock(hdev);
174 list_for_each_entry(b, &hdev->blacklist, list)
b25f0785 175 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
70afe0b8
MH
176 hci_dev_unlock(hdev);
177
178 return 0;
179}
180
181static int blacklist_open(struct inode *inode, struct file *file)
182{
183 return single_open(file, blacklist_show, inode->i_private);
184}
185
186static const struct file_operations blacklist_fops = {
187 .open = blacklist_open,
188 .read = seq_read,
189 .llseek = seq_lseek,
190 .release = single_release,
191};
192
47219839
MH
193static int uuids_show(struct seq_file *f, void *p)
194{
195 struct hci_dev *hdev = f->private;
196 struct bt_uuid *uuid;
197
198 hci_dev_lock(hdev);
199 list_for_each_entry(uuid, &hdev->uuids, list) {
58f01aa9
MH
200 u8 i, val[16];
201
202 /* The Bluetooth UUID values are stored in big endian,
203 * but with reversed byte order. So convert them into
204 * the right order for the %pUb modifier.
205 */
206 for (i = 0; i < 16; i++)
207 val[i] = uuid->uuid[15 - i];
208
209 seq_printf(f, "%pUb\n", val);
47219839
MH
210 }
211 hci_dev_unlock(hdev);
212
213 return 0;
214}
215
216static int uuids_open(struct inode *inode, struct file *file)
217{
218 return single_open(file, uuids_show, inode->i_private);
219}
220
221static const struct file_operations uuids_fops = {
222 .open = uuids_open,
223 .read = seq_read,
224 .llseek = seq_lseek,
225 .release = single_release,
226};
227
baf27f6e
MH
228static int inquiry_cache_show(struct seq_file *f, void *p)
229{
230 struct hci_dev *hdev = f->private;
231 struct discovery_state *cache = &hdev->discovery;
232 struct inquiry_entry *e;
233
234 hci_dev_lock(hdev);
235
236 list_for_each_entry(e, &cache->all, all) {
237 struct inquiry_data *data = &e->data;
238 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
239 &data->bdaddr,
240 data->pscan_rep_mode, data->pscan_period_mode,
241 data->pscan_mode, data->dev_class[2],
242 data->dev_class[1], data->dev_class[0],
243 __le16_to_cpu(data->clock_offset),
244 data->rssi, data->ssp_mode, e->timestamp);
245 }
246
247 hci_dev_unlock(hdev);
248
249 return 0;
250}
251
252static int inquiry_cache_open(struct inode *inode, struct file *file)
253{
254 return single_open(file, inquiry_cache_show, inode->i_private);
255}
256
257static const struct file_operations inquiry_cache_fops = {
258 .open = inquiry_cache_open,
259 .read = seq_read,
260 .llseek = seq_lseek,
261 .release = single_release,
262};
263
02d08d15
MH
264static int link_keys_show(struct seq_file *f, void *ptr)
265{
266 struct hci_dev *hdev = f->private;
267 struct list_head *p, *n;
268
269 hci_dev_lock(hdev);
270 list_for_each_safe(p, n, &hdev->link_keys) {
271 struct link_key *key = list_entry(p, struct link_key, list);
272 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
273 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
274 }
275 hci_dev_unlock(hdev);
276
277 return 0;
278}
279
280static int link_keys_open(struct inode *inode, struct file *file)
281{
282 return single_open(file, link_keys_show, inode->i_private);
283}
284
285static const struct file_operations link_keys_fops = {
286 .open = link_keys_open,
287 .read = seq_read,
288 .llseek = seq_lseek,
289 .release = single_release,
290};
291
babdbb3c
MH
292static int dev_class_show(struct seq_file *f, void *ptr)
293{
294 struct hci_dev *hdev = f->private;
295
296 hci_dev_lock(hdev);
297 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
298 hdev->dev_class[1], hdev->dev_class[0]);
299 hci_dev_unlock(hdev);
300
301 return 0;
302}
303
304static int dev_class_open(struct inode *inode, struct file *file)
305{
306 return single_open(file, dev_class_show, inode->i_private);
307}
308
309static const struct file_operations dev_class_fops = {
310 .open = dev_class_open,
311 .read = seq_read,
312 .llseek = seq_lseek,
313 .release = single_release,
314};
315
041000b9
MH
316static int voice_setting_get(void *data, u64 *val)
317{
318 struct hci_dev *hdev = data;
319
320 hci_dev_lock(hdev);
321 *val = hdev->voice_setting;
322 hci_dev_unlock(hdev);
323
324 return 0;
325}
326
327DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
328 NULL, "0x%4.4llx\n");
329
ebd1e33b
MH
330static int auto_accept_delay_set(void *data, u64 val)
331{
332 struct hci_dev *hdev = data;
333
334 hci_dev_lock(hdev);
335 hdev->auto_accept_delay = val;
336 hci_dev_unlock(hdev);
337
338 return 0;
339}
340
341static int auto_accept_delay_get(void *data, u64 *val)
342{
343 struct hci_dev *hdev = data;
344
345 hci_dev_lock(hdev);
346 *val = hdev->auto_accept_delay;
347 hci_dev_unlock(hdev);
348
349 return 0;
350}
351
352DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
353 auto_accept_delay_set, "%llu\n");
354
06f5b778
MH
355static int ssp_debug_mode_set(void *data, u64 val)
356{
357 struct hci_dev *hdev = data;
358 struct sk_buff *skb;
359 __u8 mode;
360 int err;
361
362 if (val != 0 && val != 1)
363 return -EINVAL;
364
365 if (!test_bit(HCI_UP, &hdev->flags))
366 return -ENETDOWN;
367
368 hci_req_lock(hdev);
369 mode = val;
370 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
371 &mode, HCI_CMD_TIMEOUT);
372 hci_req_unlock(hdev);
373
374 if (IS_ERR(skb))
375 return PTR_ERR(skb);
376
377 err = -bt_to_errno(skb->data[0]);
378 kfree_skb(skb);
379
380 if (err < 0)
381 return err;
382
383 hci_dev_lock(hdev);
384 hdev->ssp_debug_mode = val;
385 hci_dev_unlock(hdev);
386
387 return 0;
388}
389
390static int ssp_debug_mode_get(void *data, u64 *val)
391{
392 struct hci_dev *hdev = data;
393
394 hci_dev_lock(hdev);
395 *val = hdev->ssp_debug_mode;
396 hci_dev_unlock(hdev);
397
398 return 0;
399}
400
401DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
402 ssp_debug_mode_set, "%llu\n");
403
5afeac14
MH
404static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
405 size_t count, loff_t *ppos)
406{
407 struct hci_dev *hdev = file->private_data;
408 char buf[3];
409
410 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
411 buf[1] = '\n';
412 buf[2] = '\0';
413 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
414}
415
416static ssize_t force_sc_support_write(struct file *file,
417 const char __user *user_buf,
418 size_t count, loff_t *ppos)
419{
420 struct hci_dev *hdev = file->private_data;
421 char buf[32];
422 size_t buf_size = min(count, (sizeof(buf)-1));
423 bool enable;
424
425 if (test_bit(HCI_UP, &hdev->flags))
426 return -EBUSY;
427
428 if (copy_from_user(buf, user_buf, buf_size))
429 return -EFAULT;
430
431 buf[buf_size] = '\0';
432 if (strtobool(buf, &enable))
433 return -EINVAL;
434
435 if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
436 return -EALREADY;
437
438 change_bit(HCI_FORCE_SC, &hdev->dev_flags);
439
440 return count;
441}
442
443static const struct file_operations force_sc_support_fops = {
444 .open = simple_open,
445 .read = force_sc_support_read,
446 .write = force_sc_support_write,
447 .llseek = default_llseek,
448};
449
134c2a89
MH
450static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
451 size_t count, loff_t *ppos)
452{
453 struct hci_dev *hdev = file->private_data;
454 char buf[3];
455
456 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
457 buf[1] = '\n';
458 buf[2] = '\0';
459 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
460}
461
462static const struct file_operations sc_only_mode_fops = {
463 .open = simple_open,
464 .read = sc_only_mode_read,
465 .llseek = default_llseek,
466};
467
2bfa3531
MH
468static int idle_timeout_set(void *data, u64 val)
469{
470 struct hci_dev *hdev = data;
471
472 if (val != 0 && (val < 500 || val > 3600000))
473 return -EINVAL;
474
475 hci_dev_lock(hdev);
2be48b65 476 hdev->idle_timeout = val;
2bfa3531
MH
477 hci_dev_unlock(hdev);
478
479 return 0;
480}
481
482static int idle_timeout_get(void *data, u64 *val)
483{
484 struct hci_dev *hdev = data;
485
486 hci_dev_lock(hdev);
487 *val = hdev->idle_timeout;
488 hci_dev_unlock(hdev);
489
490 return 0;
491}
492
493DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
494 idle_timeout_set, "%llu\n");
495
c982b2ea
JH
496static int rpa_timeout_set(void *data, u64 val)
497{
498 struct hci_dev *hdev = data;
499
500 /* Require the RPA timeout to be at least 30 seconds and at most
501 * 24 hours.
502 */
503 if (val < 30 || val > (60 * 60 * 24))
504 return -EINVAL;
505
506 hci_dev_lock(hdev);
507 hdev->rpa_timeout = val;
508 hci_dev_unlock(hdev);
509
510 return 0;
511}
512
513static int rpa_timeout_get(void *data, u64 *val)
514{
515 struct hci_dev *hdev = data;
516
517 hci_dev_lock(hdev);
518 *val = hdev->rpa_timeout;
519 hci_dev_unlock(hdev);
520
521 return 0;
522}
523
524DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
525 rpa_timeout_set, "%llu\n");
526
2bfa3531
MH
527static int sniff_min_interval_set(void *data, u64 val)
528{
529 struct hci_dev *hdev = data;
530
531 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
532 return -EINVAL;
533
534 hci_dev_lock(hdev);
2be48b65 535 hdev->sniff_min_interval = val;
2bfa3531
MH
536 hci_dev_unlock(hdev);
537
538 return 0;
539}
540
541static int sniff_min_interval_get(void *data, u64 *val)
542{
543 struct hci_dev *hdev = data;
544
545 hci_dev_lock(hdev);
546 *val = hdev->sniff_min_interval;
547 hci_dev_unlock(hdev);
548
549 return 0;
550}
551
552DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
553 sniff_min_interval_set, "%llu\n");
554
555static int sniff_max_interval_set(void *data, u64 val)
556{
557 struct hci_dev *hdev = data;
558
559 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
560 return -EINVAL;
561
562 hci_dev_lock(hdev);
2be48b65 563 hdev->sniff_max_interval = val;
2bfa3531
MH
564 hci_dev_unlock(hdev);
565
566 return 0;
567}
568
569static int sniff_max_interval_get(void *data, u64 *val)
570{
571 struct hci_dev *hdev = data;
572
573 hci_dev_lock(hdev);
574 *val = hdev->sniff_max_interval;
575 hci_dev_unlock(hdev);
576
577 return 0;
578}
579
580DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
581 sniff_max_interval_set, "%llu\n");
582
31ad1691
AK
583static int conn_info_min_age_set(void *data, u64 val)
584{
585 struct hci_dev *hdev = data;
586
587 if (val == 0 || val > hdev->conn_info_max_age)
588 return -EINVAL;
589
590 hci_dev_lock(hdev);
591 hdev->conn_info_min_age = val;
592 hci_dev_unlock(hdev);
593
594 return 0;
595}
596
597static int conn_info_min_age_get(void *data, u64 *val)
598{
599 struct hci_dev *hdev = data;
600
601 hci_dev_lock(hdev);
602 *val = hdev->conn_info_min_age;
603 hci_dev_unlock(hdev);
604
605 return 0;
606}
607
608DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
609 conn_info_min_age_set, "%llu\n");
610
611static int conn_info_max_age_set(void *data, u64 val)
612{
613 struct hci_dev *hdev = data;
614
615 if (val == 0 || val < hdev->conn_info_min_age)
616 return -EINVAL;
617
618 hci_dev_lock(hdev);
619 hdev->conn_info_max_age = val;
620 hci_dev_unlock(hdev);
621
622 return 0;
623}
624
625static int conn_info_max_age_get(void *data, u64 *val)
626{
627 struct hci_dev *hdev = data;
628
629 hci_dev_lock(hdev);
630 *val = hdev->conn_info_max_age;
631 hci_dev_unlock(hdev);
632
633 return 0;
634}
635
636DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
637 conn_info_max_age_set, "%llu\n");
638
ac345813
MH
639static int identity_show(struct seq_file *f, void *p)
640{
641 struct hci_dev *hdev = f->private;
a1f4c318 642 bdaddr_t addr;
ac345813
MH
643 u8 addr_type;
644
645 hci_dev_lock(hdev);
646
a1f4c318 647 hci_copy_identity_address(hdev, &addr, &addr_type);
ac345813 648
a1f4c318 649 seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
473deef2 650 16, hdev->irk, &hdev->rpa);
ac345813
MH
651
652 hci_dev_unlock(hdev);
653
654 return 0;
655}
656
657static int identity_open(struct inode *inode, struct file *file)
658{
659 return single_open(file, identity_show, inode->i_private);
660}
661
662static const struct file_operations identity_fops = {
663 .open = identity_open,
664 .read = seq_read,
665 .llseek = seq_lseek,
666 .release = single_release,
667};
668
7a4cd51d
MH
669static int random_address_show(struct seq_file *f, void *p)
670{
671 struct hci_dev *hdev = f->private;
672
673 hci_dev_lock(hdev);
674 seq_printf(f, "%pMR\n", &hdev->random_addr);
675 hci_dev_unlock(hdev);
676
677 return 0;
678}
679
680static int random_address_open(struct inode *inode, struct file *file)
681{
682 return single_open(file, random_address_show, inode->i_private);
683}
684
685static const struct file_operations random_address_fops = {
686 .open = random_address_open,
687 .read = seq_read,
688 .llseek = seq_lseek,
689 .release = single_release,
690};
691
e7b8fc92
MH
692static int static_address_show(struct seq_file *f, void *p)
693{
694 struct hci_dev *hdev = f->private;
695
696 hci_dev_lock(hdev);
697 seq_printf(f, "%pMR\n", &hdev->static_addr);
698 hci_dev_unlock(hdev);
699
700 return 0;
701}
702
703static int static_address_open(struct inode *inode, struct file *file)
704{
705 return single_open(file, static_address_show, inode->i_private);
706}
707
708static const struct file_operations static_address_fops = {
709 .open = static_address_open,
710 .read = seq_read,
711 .llseek = seq_lseek,
712 .release = single_release,
713};
714
b32bba6c
MH
715static ssize_t force_static_address_read(struct file *file,
716 char __user *user_buf,
717 size_t count, loff_t *ppos)
92202185 718{
b32bba6c
MH
719 struct hci_dev *hdev = file->private_data;
720 char buf[3];
92202185 721
b32bba6c
MH
722 buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
723 buf[1] = '\n';
724 buf[2] = '\0';
725 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
92202185
MH
726}
727
b32bba6c
MH
728static ssize_t force_static_address_write(struct file *file,
729 const char __user *user_buf,
730 size_t count, loff_t *ppos)
92202185 731{
b32bba6c
MH
732 struct hci_dev *hdev = file->private_data;
733 char buf[32];
734 size_t buf_size = min(count, (sizeof(buf)-1));
735 bool enable;
92202185 736
b32bba6c
MH
737 if (test_bit(HCI_UP, &hdev->flags))
738 return -EBUSY;
92202185 739
b32bba6c
MH
740 if (copy_from_user(buf, user_buf, buf_size))
741 return -EFAULT;
742
743 buf[buf_size] = '\0';
744 if (strtobool(buf, &enable))
745 return -EINVAL;
746
747 if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
748 return -EALREADY;
749
750 change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);
751
752 return count;
92202185
MH
753}
754
b32bba6c
MH
755static const struct file_operations force_static_address_fops = {
756 .open = simple_open,
757 .read = force_static_address_read,
758 .write = force_static_address_write,
759 .llseek = default_llseek,
760};
92202185 761
d2ab0ac1
MH
762static int white_list_show(struct seq_file *f, void *ptr)
763{
764 struct hci_dev *hdev = f->private;
765 struct bdaddr_list *b;
766
767 hci_dev_lock(hdev);
768 list_for_each_entry(b, &hdev->le_white_list, list)
769 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
770 hci_dev_unlock(hdev);
771
772 return 0;
773}
774
775static int white_list_open(struct inode *inode, struct file *file)
776{
777 return single_open(file, white_list_show, inode->i_private);
778}
779
780static const struct file_operations white_list_fops = {
781 .open = white_list_open,
782 .read = seq_read,
783 .llseek = seq_lseek,
784 .release = single_release,
785};
786
3698d704
MH
787static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
788{
789 struct hci_dev *hdev = f->private;
790 struct list_head *p, *n;
791
792 hci_dev_lock(hdev);
793 list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
794 struct smp_irk *irk = list_entry(p, struct smp_irk, list);
795 seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
796 &irk->bdaddr, irk->addr_type,
797 16, irk->val, &irk->rpa);
798 }
799 hci_dev_unlock(hdev);
800
801 return 0;
802}
803
804static int identity_resolving_keys_open(struct inode *inode, struct file *file)
805{
806 return single_open(file, identity_resolving_keys_show,
807 inode->i_private);
808}
809
810static const struct file_operations identity_resolving_keys_fops = {
811 .open = identity_resolving_keys_open,
812 .read = seq_read,
813 .llseek = seq_lseek,
814 .release = single_release,
815};
816
8f8625cd
MH
817static int long_term_keys_show(struct seq_file *f, void *ptr)
818{
819 struct hci_dev *hdev = f->private;
820 struct list_head *p, *n;
821
822 hci_dev_lock(hdev);
f813f1be 823 list_for_each_safe(p, n, &hdev->long_term_keys) {
8f8625cd 824 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
fe39c7b2 825 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
8f8625cd
MH
826 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
827 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
fe39c7b2 828 __le64_to_cpu(ltk->rand), 16, ltk->val);
8f8625cd
MH
829 }
830 hci_dev_unlock(hdev);
831
832 return 0;
833}
834
835static int long_term_keys_open(struct inode *inode, struct file *file)
836{
837 return single_open(file, long_term_keys_show, inode->i_private);
838}
839
840static const struct file_operations long_term_keys_fops = {
841 .open = long_term_keys_open,
842 .read = seq_read,
843 .llseek = seq_lseek,
844 .release = single_release,
845};
846
4e70c7e7
MH
847static int conn_min_interval_set(void *data, u64 val)
848{
849 struct hci_dev *hdev = data;
850
851 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
852 return -EINVAL;
853
854 hci_dev_lock(hdev);
2be48b65 855 hdev->le_conn_min_interval = val;
4e70c7e7
MH
856 hci_dev_unlock(hdev);
857
858 return 0;
859}
860
861static int conn_min_interval_get(void *data, u64 *val)
862{
863 struct hci_dev *hdev = data;
864
865 hci_dev_lock(hdev);
866 *val = hdev->le_conn_min_interval;
867 hci_dev_unlock(hdev);
868
869 return 0;
870}
871
872DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
873 conn_min_interval_set, "%llu\n");
874
875static int conn_max_interval_set(void *data, u64 val)
876{
877 struct hci_dev *hdev = data;
878
879 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
880 return -EINVAL;
881
882 hci_dev_lock(hdev);
2be48b65 883 hdev->le_conn_max_interval = val;
4e70c7e7
MH
884 hci_dev_unlock(hdev);
885
886 return 0;
887}
888
889static int conn_max_interval_get(void *data, u64 *val)
890{
891 struct hci_dev *hdev = data;
892
893 hci_dev_lock(hdev);
894 *val = hdev->le_conn_max_interval;
895 hci_dev_unlock(hdev);
896
897 return 0;
898}
899
900DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
901 conn_max_interval_set, "%llu\n");
902
3f959d46
MH
903static int adv_channel_map_set(void *data, u64 val)
904{
905 struct hci_dev *hdev = data;
906
907 if (val < 0x01 || val > 0x07)
908 return -EINVAL;
909
910 hci_dev_lock(hdev);
911 hdev->le_adv_channel_map = val;
912 hci_dev_unlock(hdev);
913
914 return 0;
915}
916
917static int adv_channel_map_get(void *data, u64 *val)
918{
919 struct hci_dev *hdev = data;
920
921 hci_dev_lock(hdev);
922 *val = hdev->le_adv_channel_map;
923 hci_dev_unlock(hdev);
924
925 return 0;
926}
927
928DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
929 adv_channel_map_set, "%llu\n");
930
89863109
JR
931static ssize_t lowpan_read(struct file *file, char __user *user_buf,
932 size_t count, loff_t *ppos)
933{
934 struct hci_dev *hdev = file->private_data;
935 char buf[3];
936
937 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
938 buf[1] = '\n';
939 buf[2] = '\0';
940 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
941}
942
943static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
944 size_t count, loff_t *position)
945{
946 struct hci_dev *hdev = fp->private_data;
947 bool enable;
948 char buf[32];
949 size_t buf_size = min(count, (sizeof(buf)-1));
950
951 if (copy_from_user(buf, user_buffer, buf_size))
952 return -EFAULT;
953
954 buf[buf_size] = '\0';
955
956 if (strtobool(buf, &enable) < 0)
957 return -EINVAL;
958
959 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
960 return -EALREADY;
961
962 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
963
964 return count;
965}
966
967static const struct file_operations lowpan_debugfs_fops = {
968 .open = simple_open,
969 .read = lowpan_read,
970 .write = lowpan_write,
971 .llseek = default_llseek,
972};
973
7d474e06
AG
974static int le_auto_conn_show(struct seq_file *sf, void *ptr)
975{
976 struct hci_dev *hdev = sf->private;
977 struct hci_conn_params *p;
978
979 hci_dev_lock(hdev);
980
981 list_for_each_entry(p, &hdev->le_conn_params, list) {
982 seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
983 p->auto_connect);
984 }
985
986 hci_dev_unlock(hdev);
987
988 return 0;
989}
990
991static int le_auto_conn_open(struct inode *inode, struct file *file)
992{
993 return single_open(file, le_auto_conn_show, inode->i_private);
994}
995
996static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
997 size_t count, loff_t *offset)
998{
999 struct seq_file *sf = file->private_data;
1000 struct hci_dev *hdev = sf->private;
1001 u8 auto_connect = 0;
1002 bdaddr_t addr;
1003 u8 addr_type;
1004 char *buf;
1005 int err = 0;
1006 int n;
1007
1008 /* Don't allow partial write */
1009 if (*offset != 0)
1010 return -EINVAL;
1011
1012 if (count < 3)
1013 return -EINVAL;
1014
4408dd15
AG
1015 buf = memdup_user(data, count);
1016 if (IS_ERR(buf))
1017 return PTR_ERR(buf);
7d474e06
AG
1018
1019 if (memcmp(buf, "add", 3) == 0) {
1020 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
1021 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
1022 &addr.b[1], &addr.b[0], &addr_type,
1023 &auto_connect);
1024
1025 if (n < 7) {
1026 err = -EINVAL;
1027 goto done;
1028 }
1029
1030 hci_dev_lock(hdev);
1031 err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
1032 hdev->le_conn_min_interval,
1033 hdev->le_conn_max_interval);
1034 hci_dev_unlock(hdev);
1035
1036 if (err)
1037 goto done;
1038 } else if (memcmp(buf, "del", 3) == 0) {
1039 n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
1040 &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
1041 &addr.b[1], &addr.b[0], &addr_type);
1042
1043 if (n < 7) {
1044 err = -EINVAL;
1045 goto done;
1046 }
1047
1048 hci_dev_lock(hdev);
1049 hci_conn_params_del(hdev, &addr, addr_type);
1050 hci_dev_unlock(hdev);
1051 } else if (memcmp(buf, "clr", 3) == 0) {
1052 hci_dev_lock(hdev);
1053 hci_conn_params_clear(hdev);
1054 hci_pend_le_conns_clear(hdev);
1055 hci_update_background_scan(hdev);
1056 hci_dev_unlock(hdev);
1057 } else {
1058 err = -EINVAL;
1059 }
1060
1061done:
1062 kfree(buf);
1063
1064 if (err)
1065 return err;
1066 else
1067 return count;
1068}
1069
1070static const struct file_operations le_auto_conn_fops = {
1071 .open = le_auto_conn_open,
1072 .read = seq_read,
1073 .write = le_auto_conn_write,
1074 .llseek = seq_lseek,
1075 .release = single_release,
1076};
1077
1da177e4
LT
1078/* ---- HCI requests ---- */
1079
42c6b129 1080static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 1081{
42c6b129 1082 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
1083
1084 if (hdev->req_status == HCI_REQ_PEND) {
1085 hdev->req_result = result;
1086 hdev->req_status = HCI_REQ_DONE;
1087 wake_up_interruptible(&hdev->req_wait_q);
1088 }
1089}
1090
1091static void hci_req_cancel(struct hci_dev *hdev, int err)
1092{
1093 BT_DBG("%s err 0x%2.2x", hdev->name, err);
1094
1095 if (hdev->req_status == HCI_REQ_PEND) {
1096 hdev->req_result = err;
1097 hdev->req_status = HCI_REQ_CANCELED;
1098 wake_up_interruptible(&hdev->req_wait_q);
1099 }
1100}
1101
77a63e0a
FW
1102static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
1103 u8 event)
75e84b7c
JH
1104{
1105 struct hci_ev_cmd_complete *ev;
1106 struct hci_event_hdr *hdr;
1107 struct sk_buff *skb;
1108
1109 hci_dev_lock(hdev);
1110
1111 skb = hdev->recv_evt;
1112 hdev->recv_evt = NULL;
1113
1114 hci_dev_unlock(hdev);
1115
1116 if (!skb)
1117 return ERR_PTR(-ENODATA);
1118
1119 if (skb->len < sizeof(*hdr)) {
1120 BT_ERR("Too short HCI event");
1121 goto failed;
1122 }
1123
1124 hdr = (void *) skb->data;
1125 skb_pull(skb, HCI_EVENT_HDR_SIZE);
1126
7b1abbbe
JH
1127 if (event) {
1128 if (hdr->evt != event)
1129 goto failed;
1130 return skb;
1131 }
1132
75e84b7c
JH
1133 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
1134 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
1135 goto failed;
1136 }
1137
1138 if (skb->len < sizeof(*ev)) {
1139 BT_ERR("Too short cmd_complete event");
1140 goto failed;
1141 }
1142
1143 ev = (void *) skb->data;
1144 skb_pull(skb, sizeof(*ev));
1145
1146 if (opcode == __le16_to_cpu(ev->opcode))
1147 return skb;
1148
1149 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
1150 __le16_to_cpu(ev->opcode));
1151
1152failed:
1153 kfree_skb(skb);
1154 return ERR_PTR(-ENODATA);
1155}
1156
7b1abbbe 1157struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 1158 const void *param, u8 event, u32 timeout)
75e84b7c
JH
1159{
1160 DECLARE_WAITQUEUE(wait, current);
1161 struct hci_request req;
1162 int err = 0;
1163
1164 BT_DBG("%s", hdev->name);
1165
1166 hci_req_init(&req, hdev);
1167
7b1abbbe 1168 hci_req_add_ev(&req, opcode, plen, param, event);
75e84b7c
JH
1169
1170 hdev->req_status = HCI_REQ_PEND;
1171
1172 err = hci_req_run(&req, hci_req_sync_complete);
1173 if (err < 0)
1174 return ERR_PTR(err);
1175
1176 add_wait_queue(&hdev->req_wait_q, &wait);
1177 set_current_state(TASK_INTERRUPTIBLE);
1178
1179 schedule_timeout(timeout);
1180
1181 remove_wait_queue(&hdev->req_wait_q, &wait);
1182
1183 if (signal_pending(current))
1184 return ERR_PTR(-EINTR);
1185
1186 switch (hdev->req_status) {
1187 case HCI_REQ_DONE:
1188 err = -bt_to_errno(hdev->req_result);
1189 break;
1190
1191 case HCI_REQ_CANCELED:
1192 err = -hdev->req_result;
1193 break;
1194
1195 default:
1196 err = -ETIMEDOUT;
1197 break;
1198 }
1199
1200 hdev->req_status = hdev->req_result = 0;
1201
1202 BT_DBG("%s end: err %d", hdev->name, err);
1203
1204 if (err < 0)
1205 return ERR_PTR(err);
1206
7b1abbbe
JH
1207 return hci_get_cmd_complete(hdev, opcode, event);
1208}
1209EXPORT_SYMBOL(__hci_cmd_sync_ev);
1210
1211struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 1212 const void *param, u32 timeout)
7b1abbbe
JH
1213{
1214 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
75e84b7c
JH
1215}
1216EXPORT_SYMBOL(__hci_cmd_sync);
1217
1da177e4 1218/* Execute request and wait for completion. */
01178cd4 1219static int __hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
1220 void (*func)(struct hci_request *req,
1221 unsigned long opt),
01178cd4 1222 unsigned long opt, __u32 timeout)
1da177e4 1223{
42c6b129 1224 struct hci_request req;
1da177e4
LT
1225 DECLARE_WAITQUEUE(wait, current);
1226 int err = 0;
1227
1228 BT_DBG("%s start", hdev->name);
1229
42c6b129
JH
1230 hci_req_init(&req, hdev);
1231
1da177e4
LT
1232 hdev->req_status = HCI_REQ_PEND;
1233
42c6b129 1234 func(&req, opt);
53cce22d 1235
42c6b129
JH
1236 err = hci_req_run(&req, hci_req_sync_complete);
1237 if (err < 0) {
53cce22d 1238 hdev->req_status = 0;
920c8300
AG
1239
1240 /* ENODATA means the HCI request command queue is empty.
1241 * This can happen when a request with conditionals doesn't
1242 * trigger any commands to be sent. This is normal behavior
1243 * and should not trigger an error return.
42c6b129 1244 */
920c8300
AG
1245 if (err == -ENODATA)
1246 return 0;
1247
1248 return err;
53cce22d
JH
1249 }
1250
bc4445c7
AG
1251 add_wait_queue(&hdev->req_wait_q, &wait);
1252 set_current_state(TASK_INTERRUPTIBLE);
1253
1da177e4
LT
1254 schedule_timeout(timeout);
1255
1256 remove_wait_queue(&hdev->req_wait_q, &wait);
1257
1258 if (signal_pending(current))
1259 return -EINTR;
1260
1261 switch (hdev->req_status) {
1262 case HCI_REQ_DONE:
e175072f 1263 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
1264 break;
1265
1266 case HCI_REQ_CANCELED:
1267 err = -hdev->req_result;
1268 break;
1269
1270 default:
1271 err = -ETIMEDOUT;
1272 break;
3ff50b79 1273 }
1da177e4 1274
a5040efa 1275 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
1276
1277 BT_DBG("%s end: err %d", hdev->name, err);
1278
1279 return err;
1280}
1281
01178cd4 1282static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
1283 void (*req)(struct hci_request *req,
1284 unsigned long opt),
01178cd4 1285 unsigned long opt, __u32 timeout)
1da177e4
LT
1286{
1287 int ret;
1288
7c6a329e
MH
1289 if (!test_bit(HCI_UP, &hdev->flags))
1290 return -ENETDOWN;
1291
1da177e4
LT
1292 /* Serialize all requests */
1293 hci_req_lock(hdev);
01178cd4 1294 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
1295 hci_req_unlock(hdev);
1296
1297 return ret;
1298}
1299
42c6b129 1300static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 1301{
42c6b129 1302 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
1303
1304 /* Reset device */
42c6b129
JH
1305 set_bit(HCI_RESET, &req->hdev->flags);
1306 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
1307}
1308
42c6b129 1309static void bredr_init(struct hci_request *req)
1da177e4 1310{
42c6b129 1311 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 1312
1da177e4 1313 /* Read Local Supported Features */
42c6b129 1314 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 1315
1143e5a6 1316 /* Read Local Version */
42c6b129 1317 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
1318
1319 /* Read BD Address */
42c6b129 1320 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
1321}
1322
42c6b129 1323static void amp_init(struct hci_request *req)
e61ef499 1324{
42c6b129 1325 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
2455a3ea 1326
e61ef499 1327 /* Read Local Version */
42c6b129 1328 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489 1329
f6996cfe
MH
1330 /* Read Local Supported Commands */
1331 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
1332
1333 /* Read Local Supported Features */
1334 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1335
6bcbc489 1336 /* Read Local AMP Info */
42c6b129 1337 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
1338
1339 /* Read Data Blk size */
42c6b129 1340 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
7528ca1c 1341
f38ba941
MH
1342 /* Read Flow Control Mode */
1343 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
1344
7528ca1c
MH
1345 /* Read Location Data */
1346 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
e61ef499
AE
1347}
1348
42c6b129 1349static void hci_init1_req(struct hci_request *req, unsigned long opt)
e61ef499 1350{
42c6b129 1351 struct hci_dev *hdev = req->hdev;
e61ef499
AE
1352
1353 BT_DBG("%s %ld", hdev->name, opt);
1354
11778716
AE
1355 /* Reset */
1356 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
42c6b129 1357 hci_reset_req(req, 0);
11778716 1358
e61ef499
AE
1359 switch (hdev->dev_type) {
1360 case HCI_BREDR:
42c6b129 1361 bredr_init(req);
e61ef499
AE
1362 break;
1363
1364 case HCI_AMP:
42c6b129 1365 amp_init(req);
e61ef499
AE
1366 break;
1367
1368 default:
1369 BT_ERR("Unknown device type %d", hdev->dev_type);
1370 break;
1371 }
e61ef499
AE
1372}
1373
42c6b129 1374static void bredr_setup(struct hci_request *req)
2177bab5 1375{
4ca048e3
MH
1376 struct hci_dev *hdev = req->hdev;
1377
2177bab5
JH
1378 __le16 param;
1379 __u8 flt_type;
1380
1381 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
42c6b129 1382 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
1383
1384 /* Read Class of Device */
42c6b129 1385 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
2177bab5
JH
1386
1387 /* Read Local Name */
42c6b129 1388 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
2177bab5
JH
1389
1390 /* Read Voice Setting */
42c6b129 1391 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
2177bab5 1392
b4cb9fb2
MH
1393 /* Read Number of Supported IAC */
1394 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1395
4b836f39
MH
1396 /* Read Current IAC LAP */
1397 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1398
2177bab5
JH
1399 /* Clear Event Filters */
1400 flt_type = HCI_FLT_CLEAR_ALL;
42c6b129 1401 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
2177bab5
JH
1402
1403 /* Connection accept timeout ~20 secs */
dcf4adbf 1404 param = cpu_to_le16(0x7d00);
42c6b129 1405 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
2177bab5 1406
4ca048e3
MH
1407 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1408 * but it does not support page scan related HCI commands.
1409 */
1410 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
f332ec66
JH
1411 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1412 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1413 }
2177bab5
JH
1414}
1415
42c6b129 1416static void le_setup(struct hci_request *req)
2177bab5 1417{
c73eee91
JH
1418 struct hci_dev *hdev = req->hdev;
1419
2177bab5 1420 /* Read LE Buffer Size */
42c6b129 1421 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
1422
1423 /* Read LE Local Supported Features */
42c6b129 1424 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
2177bab5 1425
747d3f03
MH
1426 /* Read LE Supported States */
1427 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
1428
2177bab5 1429 /* Read LE Advertising Channel TX Power */
42c6b129 1430 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
2177bab5
JH
1431
1432 /* Read LE White List Size */
42c6b129 1433 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
2177bab5 1434
747d3f03
MH
1435 /* Clear LE White List */
1436 hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);
c73eee91
JH
1437
1438 /* LE-only controllers have LE implicitly enabled */
1439 if (!lmp_bredr_capable(hdev))
1440 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
2177bab5
JH
1441}
1442
1443static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1444{
1445 if (lmp_ext_inq_capable(hdev))
1446 return 0x02;
1447
1448 if (lmp_inq_rssi_capable(hdev))
1449 return 0x01;
1450
1451 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1452 hdev->lmp_subver == 0x0757)
1453 return 0x01;
1454
1455 if (hdev->manufacturer == 15) {
1456 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1457 return 0x01;
1458 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1459 return 0x01;
1460 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1461 return 0x01;
1462 }
1463
1464 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1465 hdev->lmp_subver == 0x1805)
1466 return 0x01;
1467
1468 return 0x00;
1469}
1470
42c6b129 1471static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
1472{
1473 u8 mode;
1474
42c6b129 1475 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 1476
42c6b129 1477 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
1478}
1479
42c6b129 1480static void hci_setup_event_mask(struct hci_request *req)
2177bab5 1481{
42c6b129
JH
1482 struct hci_dev *hdev = req->hdev;
1483
2177bab5
JH
1484 /* The second byte is 0xff instead of 0x9f (two reserved bits
1485 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
1486 * command otherwise.
1487 */
1488 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
1489
1490 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
1491 * any event mask for pre 1.2 devices.
1492 */
1493 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
1494 return;
1495
1496 if (lmp_bredr_capable(hdev)) {
1497 events[4] |= 0x01; /* Flow Specification Complete */
1498 events[4] |= 0x02; /* Inquiry Result with RSSI */
1499 events[4] |= 0x04; /* Read Remote Extended Features Complete */
1500 events[5] |= 0x08; /* Synchronous Connection Complete */
1501 events[5] |= 0x10; /* Synchronous Connection Changed */
c7882cbd
MH
1502 } else {
1503 /* Use a different default for LE-only devices */
1504 memset(events, 0, sizeof(events));
1505 events[0] |= 0x10; /* Disconnection Complete */
1506 events[0] |= 0x80; /* Encryption Change */
1507 events[1] |= 0x08; /* Read Remote Version Information Complete */
1508 events[1] |= 0x20; /* Command Complete */
1509 events[1] |= 0x40; /* Command Status */
1510 events[1] |= 0x80; /* Hardware Error */
1511 events[2] |= 0x04; /* Number of Completed Packets */
1512 events[3] |= 0x02; /* Data Buffer Overflow */
1513 events[5] |= 0x80; /* Encryption Key Refresh Complete */
2177bab5
JH
1514 }
1515
1516 if (lmp_inq_rssi_capable(hdev))
1517 events[4] |= 0x02; /* Inquiry Result with RSSI */
1518
1519 if (lmp_sniffsubr_capable(hdev))
1520 events[5] |= 0x20; /* Sniff Subrating */
1521
1522 if (lmp_pause_enc_capable(hdev))
1523 events[5] |= 0x80; /* Encryption Key Refresh Complete */
1524
1525 if (lmp_ext_inq_capable(hdev))
1526 events[5] |= 0x40; /* Extended Inquiry Result */
1527
1528 if (lmp_no_flush_capable(hdev))
1529 events[7] |= 0x01; /* Enhanced Flush Complete */
1530
1531 if (lmp_lsto_capable(hdev))
1532 events[6] |= 0x80; /* Link Supervision Timeout Changed */
1533
1534 if (lmp_ssp_capable(hdev)) {
1535 events[6] |= 0x01; /* IO Capability Request */
1536 events[6] |= 0x02; /* IO Capability Response */
1537 events[6] |= 0x04; /* User Confirmation Request */
1538 events[6] |= 0x08; /* User Passkey Request */
1539 events[6] |= 0x10; /* Remote OOB Data Request */
1540 events[6] |= 0x20; /* Simple Pairing Complete */
1541 events[7] |= 0x04; /* User Passkey Notification */
1542 events[7] |= 0x08; /* Keypress Notification */
1543 events[7] |= 0x10; /* Remote Host Supported
1544 * Features Notification
1545 */
1546 }
1547
1548 if (lmp_le_capable(hdev))
1549 events[7] |= 0x20; /* LE Meta-Event */
1550
42c6b129 1551 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
2177bab5
JH
1552
1553 if (lmp_le_capable(hdev)) {
1554 memset(events, 0, sizeof(events));
1555 events[0] = 0x1f;
42c6b129
JH
1556 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
1557 sizeof(events), events);
2177bab5
JH
1558 }
1559}
1560
42c6b129 1561static void hci_init2_req(struct hci_request *req, unsigned long opt)
2177bab5 1562{
42c6b129
JH
1563 struct hci_dev *hdev = req->hdev;
1564
2177bab5 1565 if (lmp_bredr_capable(hdev))
42c6b129 1566 bredr_setup(req);
56f87901
JH
1567 else
1568 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2177bab5
JH
1569
1570 if (lmp_le_capable(hdev))
42c6b129 1571 le_setup(req);
2177bab5 1572
42c6b129 1573 hci_setup_event_mask(req);
2177bab5 1574
3f8e2d75
JH
1575 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1576 * local supported commands HCI command.
1577 */
1578 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
42c6b129 1579 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
2177bab5
JH
1580
1581 if (lmp_ssp_capable(hdev)) {
57af75a8
MH
1582 /* When SSP is available, then the host features page
1583 * should also be available as well. However some
1584 * controllers list the max_page as 0 as long as SSP
1585 * has not been enabled. To achieve proper debugging
1586 * output, force the minimum max_page to 1 at least.
1587 */
1588 hdev->max_page = 0x01;
1589
2177bab5
JH
1590 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1591 u8 mode = 0x01;
42c6b129
JH
1592 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1593 sizeof(mode), &mode);
2177bab5
JH
1594 } else {
1595 struct hci_cp_write_eir cp;
1596
1597 memset(hdev->eir, 0, sizeof(hdev->eir));
1598 memset(&cp, 0, sizeof(cp));
1599
42c6b129 1600 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
2177bab5
JH
1601 }
1602 }
1603
1604 if (lmp_inq_rssi_capable(hdev))
42c6b129 1605 hci_setup_inquiry_mode(req);
2177bab5
JH
1606
1607 if (lmp_inq_tx_pwr_capable(hdev))
42c6b129 1608 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
2177bab5
JH
1609
1610 if (lmp_ext_feat_capable(hdev)) {
1611 struct hci_cp_read_local_ext_features cp;
1612
1613 cp.page = 0x01;
42c6b129
JH
1614 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1615 sizeof(cp), &cp);
2177bab5
JH
1616 }
1617
1618 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1619 u8 enable = 1;
42c6b129
JH
1620 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1621 &enable);
2177bab5
JH
1622 }
1623}
1624
42c6b129 1625static void hci_setup_link_policy(struct hci_request *req)
2177bab5 1626{
42c6b129 1627 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1628 struct hci_cp_write_def_link_policy cp;
1629 u16 link_policy = 0;
1630
1631 if (lmp_rswitch_capable(hdev))
1632 link_policy |= HCI_LP_RSWITCH;
1633 if (lmp_hold_capable(hdev))
1634 link_policy |= HCI_LP_HOLD;
1635 if (lmp_sniff_capable(hdev))
1636 link_policy |= HCI_LP_SNIFF;
1637 if (lmp_park_capable(hdev))
1638 link_policy |= HCI_LP_PARK;
1639
1640 cp.policy = cpu_to_le16(link_policy);
42c6b129 1641 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
1642}
1643
42c6b129 1644static void hci_set_le_support(struct hci_request *req)
2177bab5 1645{
42c6b129 1646 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1647 struct hci_cp_write_le_host_supported cp;
1648
c73eee91
JH
1649 /* LE-only devices do not support explicit enablement */
1650 if (!lmp_bredr_capable(hdev))
1651 return;
1652
2177bab5
JH
1653 memset(&cp, 0, sizeof(cp));
1654
1655 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1656 cp.le = 0x01;
1657 cp.simul = lmp_le_br_capable(hdev);
1658 }
1659
1660 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
1661 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1662 &cp);
2177bab5
JH
1663}
1664
d62e6d67
JH
1665static void hci_set_event_mask_page_2(struct hci_request *req)
1666{
1667 struct hci_dev *hdev = req->hdev;
1668 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1669
1670 /* If Connectionless Slave Broadcast master role is supported
1671 * enable all necessary events for it.
1672 */
53b834d2 1673 if (lmp_csb_master_capable(hdev)) {
d62e6d67
JH
1674 events[1] |= 0x40; /* Triggered Clock Capture */
1675 events[1] |= 0x80; /* Synchronization Train Complete */
1676 events[2] |= 0x10; /* Slave Page Response Timeout */
1677 events[2] |= 0x20; /* CSB Channel Map Change */
1678 }
1679
1680 /* If Connectionless Slave Broadcast slave role is supported
1681 * enable all necessary events for it.
1682 */
53b834d2 1683 if (lmp_csb_slave_capable(hdev)) {
d62e6d67
JH
1684 events[2] |= 0x01; /* Synchronization Train Received */
1685 events[2] |= 0x02; /* CSB Receive */
1686 events[2] |= 0x04; /* CSB Timeout */
1687 events[2] |= 0x08; /* Truncated Page Complete */
1688 }
1689
40c59fcb
MH
1690 /* Enable Authenticated Payload Timeout Expired event if supported */
1691 if (lmp_ping_capable(hdev))
1692 events[2] |= 0x80;
1693
d62e6d67
JH
1694 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1695}
1696
42c6b129 1697static void hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 1698{
42c6b129 1699 struct hci_dev *hdev = req->hdev;
d2c5d77f 1700 u8 p;
42c6b129 1701
b8f4e068
GP
1702 /* Some Broadcom based Bluetooth controllers do not support the
1703 * Delete Stored Link Key command. They are clearly indicating its
1704 * absence in the bit mask of supported commands.
1705 *
1706 * Check the supported commands and only if the the command is marked
1707 * as supported send it. If not supported assume that the controller
1708 * does not have actual support for stored link keys which makes this
1709 * command redundant anyway.
f9f462fa
MH
1710 *
1711 * Some controllers indicate that they support handling deleting
1712 * stored link keys, but they don't. The quirk lets a driver
1713 * just disable this command.
637b4cae 1714 */
f9f462fa
MH
1715 if (hdev->commands[6] & 0x80 &&
1716 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
59f45d57
JH
1717 struct hci_cp_delete_stored_link_key cp;
1718
1719 bacpy(&cp.bdaddr, BDADDR_ANY);
1720 cp.delete_all = 0x01;
1721 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1722 sizeof(cp), &cp);
1723 }
1724
2177bab5 1725 if (hdev->commands[5] & 0x10)
42c6b129 1726 hci_setup_link_policy(req);
2177bab5 1727
7bf32048 1728 if (lmp_le_capable(hdev))
42c6b129 1729 hci_set_le_support(req);
d2c5d77f
JH
1730
1731 /* Read features beyond page 1 if available */
1732 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1733 struct hci_cp_read_local_ext_features cp;
1734
1735 cp.page = p;
1736 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1737 sizeof(cp), &cp);
1738 }
2177bab5
JH
1739}
1740
5d4e7e8d
JH
1741static void hci_init4_req(struct hci_request *req, unsigned long opt)
1742{
1743 struct hci_dev *hdev = req->hdev;
1744
d62e6d67
JH
1745 /* Set event mask page 2 if the HCI command for it is supported */
1746 if (hdev->commands[22] & 0x04)
1747 hci_set_event_mask_page_2(req);
1748
5d4e7e8d 1749 /* Check for Synchronization Train support */
53b834d2 1750 if (lmp_sync_train_capable(hdev))
5d4e7e8d 1751 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
a6d0d690
MH
1752
1753 /* Enable Secure Connections if supported and configured */
5afeac14
MH
1754 if ((lmp_sc_capable(hdev) ||
1755 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
a6d0d690
MH
1756 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1757 u8 support = 0x01;
1758 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1759 sizeof(support), &support);
1760 }
5d4e7e8d
JH
1761}
1762
2177bab5
JH
1763static int __hci_init(struct hci_dev *hdev)
1764{
1765 int err;
1766
1767 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1768 if (err < 0)
1769 return err;
1770
4b4148e9
MH
1771 /* The Device Under Test (DUT) mode is special and available for
1772 * all controller types. So just create it early on.
1773 */
1774 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1775 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1776 &dut_mode_fops);
1777 }
1778
2177bab5
JH
1779 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1780 * BR/EDR/LE type controllers. AMP controllers only need the
1781 * first stage init.
1782 */
1783 if (hdev->dev_type != HCI_BREDR)
1784 return 0;
1785
1786 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1787 if (err < 0)
1788 return err;
1789
5d4e7e8d
JH
1790 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1791 if (err < 0)
1792 return err;
1793
baf27f6e
MH
1794 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1795 if (err < 0)
1796 return err;
1797
1798 /* Only create debugfs entries during the initial setup
1799 * phase and not every time the controller gets powered on.
1800 */
1801 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1802 return 0;
1803
dfb826a8
MH
1804 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1805 &features_fops);
ceeb3bc0
MH
1806 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1807 &hdev->manufacturer);
1808 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1809 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
70afe0b8
MH
1810 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1811 &blacklist_fops);
47219839
MH
1812 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1813
31ad1691
AK
1814 debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
1815 &conn_info_min_age_fops);
1816 debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
1817 &conn_info_max_age_fops);
1818
baf27f6e
MH
1819 if (lmp_bredr_capable(hdev)) {
1820 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1821 hdev, &inquiry_cache_fops);
02d08d15
MH
1822 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1823 hdev, &link_keys_fops);
babdbb3c
MH
1824 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1825 hdev, &dev_class_fops);
041000b9
MH
1826 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1827 hdev, &voice_setting_fops);
baf27f6e
MH
1828 }
1829
06f5b778 1830 if (lmp_ssp_capable(hdev)) {
ebd1e33b
MH
1831 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1832 hdev, &auto_accept_delay_fops);
06f5b778
MH
1833 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1834 hdev, &ssp_debug_mode_fops);
5afeac14
MH
1835 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1836 hdev, &force_sc_support_fops);
134c2a89
MH
1837 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1838 hdev, &sc_only_mode_fops);
06f5b778 1839 }
ebd1e33b 1840
2bfa3531
MH
1841 if (lmp_sniff_capable(hdev)) {
1842 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1843 hdev, &idle_timeout_fops);
1844 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1845 hdev, &sniff_min_interval_fops);
1846 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1847 hdev, &sniff_max_interval_fops);
1848 }
1849
d0f729b8 1850 if (lmp_le_capable(hdev)) {
ac345813
MH
1851 debugfs_create_file("identity", 0400, hdev->debugfs,
1852 hdev, &identity_fops);
1853 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1854 hdev, &rpa_timeout_fops);
7a4cd51d
MH
1855 debugfs_create_file("random_address", 0444, hdev->debugfs,
1856 hdev, &random_address_fops);
b32bba6c
MH
1857 debugfs_create_file("static_address", 0444, hdev->debugfs,
1858 hdev, &static_address_fops);
1859
1860 /* For controllers with a public address, provide a debug
1861 * option to force the usage of the configured static
1862 * address. By default the public address is used.
1863 */
1864 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1865 debugfs_create_file("force_static_address", 0644,
1866 hdev->debugfs, hdev,
1867 &force_static_address_fops);
1868
d0f729b8
MH
1869 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1870 &hdev->le_white_list_size);
d2ab0ac1
MH
1871 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1872 &white_list_fops);
3698d704
MH
1873 debugfs_create_file("identity_resolving_keys", 0400,
1874 hdev->debugfs, hdev,
1875 &identity_resolving_keys_fops);
8f8625cd
MH
1876 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1877 hdev, &long_term_keys_fops);
4e70c7e7
MH
1878 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1879 hdev, &conn_min_interval_fops);
1880 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1881 hdev, &conn_max_interval_fops);
3f959d46
MH
1882 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1883 hdev, &adv_channel_map_fops);
89863109
JR
1884 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1885 &lowpan_debugfs_fops);
7d474e06
AG
1886 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1887 &le_auto_conn_fops);
b9a7a61e
LR
1888 debugfs_create_u16("discov_interleaved_timeout", 0644,
1889 hdev->debugfs,
1890 &hdev->discov_interleaved_timeout);
d0f729b8 1891 }
e7b8fc92 1892
baf27f6e 1893 return 0;
2177bab5
JH
1894}
1895
42c6b129 1896static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1897{
1898 __u8 scan = opt;
1899
42c6b129 1900 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1901
1902 /* Inquiry and Page scans */
42c6b129 1903 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1904}
1905
42c6b129 1906static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1907{
1908 __u8 auth = opt;
1909
42c6b129 1910 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1911
1912 /* Authentication */
42c6b129 1913 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1914}
1915
42c6b129 1916static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1917{
1918 __u8 encrypt = opt;
1919
42c6b129 1920 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1921
e4e8e37c 1922 /* Encryption */
42c6b129 1923 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1924}
1925
42c6b129 1926static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1927{
1928 __le16 policy = cpu_to_le16(opt);
1929
42c6b129 1930 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1931
1932 /* Default link policy */
42c6b129 1933 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1934}
1935
8e87d142 1936/* Get HCI device by index.
1da177e4
LT
1937 * Device is held on return. */
1938struct hci_dev *hci_dev_get(int index)
1939{
8035ded4 1940 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1941
1942 BT_DBG("%d", index);
1943
1944 if (index < 0)
1945 return NULL;
1946
1947 read_lock(&hci_dev_list_lock);
8035ded4 1948 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1949 if (d->id == index) {
1950 hdev = hci_dev_hold(d);
1951 break;
1952 }
1953 }
1954 read_unlock(&hci_dev_list_lock);
1955 return hdev;
1956}
1da177e4
LT
1957
1958/* ---- Inquiry support ---- */
ff9ef578 1959
30dc78e1
JH
1960bool hci_discovery_active(struct hci_dev *hdev)
1961{
1962 struct discovery_state *discov = &hdev->discovery;
1963
6fbe195d 1964 switch (discov->state) {
343f935b 1965 case DISCOVERY_FINDING:
6fbe195d 1966 case DISCOVERY_RESOLVING:
30dc78e1
JH
1967 return true;
1968
6fbe195d
AG
1969 default:
1970 return false;
1971 }
30dc78e1
JH
1972}
1973
ff9ef578
JH
1974void hci_discovery_set_state(struct hci_dev *hdev, int state)
1975{
1976 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1977
1978 if (hdev->discovery.state == state)
1979 return;
1980
1981 switch (state) {
1982 case DISCOVERY_STOPPED:
c54c3860
AG
1983 hci_update_background_scan(hdev);
1984
7b99b659
AG
1985 if (hdev->discovery.state != DISCOVERY_STARTING)
1986 mgmt_discovering(hdev, 0);
ff9ef578
JH
1987 break;
1988 case DISCOVERY_STARTING:
1989 break;
343f935b 1990 case DISCOVERY_FINDING:
ff9ef578
JH
1991 mgmt_discovering(hdev, 1);
1992 break;
30dc78e1
JH
1993 case DISCOVERY_RESOLVING:
1994 break;
ff9ef578
JH
1995 case DISCOVERY_STOPPING:
1996 break;
1997 }
1998
1999 hdev->discovery.state = state;
2000}
2001
1f9b9a5d 2002void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 2003{
30883512 2004 struct discovery_state *cache = &hdev->discovery;
b57c1a56 2005 struct inquiry_entry *p, *n;
1da177e4 2006
561aafbc
JH
2007 list_for_each_entry_safe(p, n, &cache->all, all) {
2008 list_del(&p->all);
b57c1a56 2009 kfree(p);
1da177e4 2010 }
561aafbc
JH
2011
2012 INIT_LIST_HEAD(&cache->unknown);
2013 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
2014}
2015
a8c5fb1a
GP
2016struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
2017 bdaddr_t *bdaddr)
1da177e4 2018{
30883512 2019 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2020 struct inquiry_entry *e;
2021
6ed93dc6 2022 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 2023
561aafbc
JH
2024 list_for_each_entry(e, &cache->all, all) {
2025 if (!bacmp(&e->data.bdaddr, bdaddr))
2026 return e;
2027 }
2028
2029 return NULL;
2030}
2031
2032struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 2033 bdaddr_t *bdaddr)
561aafbc 2034{
30883512 2035 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
2036 struct inquiry_entry *e;
2037
6ed93dc6 2038 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
2039
2040 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 2041 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
2042 return e;
2043 }
2044
2045 return NULL;
1da177e4
LT
2046}
2047
30dc78e1 2048struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
2049 bdaddr_t *bdaddr,
2050 int state)
30dc78e1
JH
2051{
2052 struct discovery_state *cache = &hdev->discovery;
2053 struct inquiry_entry *e;
2054
6ed93dc6 2055 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
2056
2057 list_for_each_entry(e, &cache->resolve, list) {
2058 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
2059 return e;
2060 if (!bacmp(&e->data.bdaddr, bdaddr))
2061 return e;
2062 }
2063
2064 return NULL;
2065}
2066
a3d4e20a 2067void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 2068 struct inquiry_entry *ie)
a3d4e20a
JH
2069{
2070 struct discovery_state *cache = &hdev->discovery;
2071 struct list_head *pos = &cache->resolve;
2072 struct inquiry_entry *p;
2073
2074 list_del(&ie->list);
2075
2076 list_for_each_entry(p, &cache->resolve, list) {
2077 if (p->name_state != NAME_PENDING &&
a8c5fb1a 2078 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
2079 break;
2080 pos = &p->list;
2081 }
2082
2083 list_add(&ie->list, pos);
2084}
2085
3175405b 2086bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 2087 bool name_known, bool *ssp)
1da177e4 2088{
30883512 2089 struct discovery_state *cache = &hdev->discovery;
70f23020 2090 struct inquiry_entry *ie;
1da177e4 2091
6ed93dc6 2092 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 2093
2b2fec4d
SJ
2094 hci_remove_remote_oob_data(hdev, &data->bdaddr);
2095
01735bbd 2096 *ssp = data->ssp_mode;
388fc8fa 2097
70f23020 2098 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 2099 if (ie) {
8002d77c 2100 if (ie->data.ssp_mode)
388fc8fa
JH
2101 *ssp = true;
2102
a3d4e20a 2103 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 2104 data->rssi != ie->data.rssi) {
a3d4e20a
JH
2105 ie->data.rssi = data->rssi;
2106 hci_inquiry_cache_update_resolve(hdev, ie);
2107 }
2108
561aafbc 2109 goto update;
a3d4e20a 2110 }
561aafbc
JH
2111
2112 /* Entry not in the cache. Add new one. */
2113 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
2114 if (!ie)
3175405b 2115 return false;
561aafbc
JH
2116
2117 list_add(&ie->all, &cache->all);
2118
2119 if (name_known) {
2120 ie->name_state = NAME_KNOWN;
2121 } else {
2122 ie->name_state = NAME_NOT_KNOWN;
2123 list_add(&ie->list, &cache->unknown);
2124 }
70f23020 2125
561aafbc
JH
2126update:
2127 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 2128 ie->name_state != NAME_PENDING) {
561aafbc
JH
2129 ie->name_state = NAME_KNOWN;
2130 list_del(&ie->list);
1da177e4
LT
2131 }
2132
70f23020
AE
2133 memcpy(&ie->data, data, sizeof(*data));
2134 ie->timestamp = jiffies;
1da177e4 2135 cache->timestamp = jiffies;
3175405b
JH
2136
2137 if (ie->name_state == NAME_NOT_KNOWN)
2138 return false;
2139
2140 return true;
1da177e4
LT
2141}
2142
2143static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
2144{
30883512 2145 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
2146 struct inquiry_info *info = (struct inquiry_info *) buf;
2147 struct inquiry_entry *e;
2148 int copied = 0;
2149
561aafbc 2150 list_for_each_entry(e, &cache->all, all) {
1da177e4 2151 struct inquiry_data *data = &e->data;
b57c1a56
JH
2152
2153 if (copied >= num)
2154 break;
2155
1da177e4
LT
2156 bacpy(&info->bdaddr, &data->bdaddr);
2157 info->pscan_rep_mode = data->pscan_rep_mode;
2158 info->pscan_period_mode = data->pscan_period_mode;
2159 info->pscan_mode = data->pscan_mode;
2160 memcpy(info->dev_class, data->dev_class, 3);
2161 info->clock_offset = data->clock_offset;
b57c1a56 2162
1da177e4 2163 info++;
b57c1a56 2164 copied++;
1da177e4
LT
2165 }
2166
2167 BT_DBG("cache %p, copied %d", cache, copied);
2168 return copied;
2169}
2170
42c6b129 2171static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
2172{
2173 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 2174 struct hci_dev *hdev = req->hdev;
1da177e4
LT
2175 struct hci_cp_inquiry cp;
2176
2177 BT_DBG("%s", hdev->name);
2178
2179 if (test_bit(HCI_INQUIRY, &hdev->flags))
2180 return;
2181
2182 /* Start Inquiry */
2183 memcpy(&cp.lap, &ir->lap, 3);
2184 cp.length = ir->length;
2185 cp.num_rsp = ir->num_rsp;
42c6b129 2186 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
2187}
2188
3e13fa1e
AG
/* wait_on_bit() action function used while waiting for the HCI_INQUIRY
 * flag to clear: yield the CPU and report whether a signal interrupted
 * the wait (non-zero aborts the wait in the caller).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
2194
1da177e4
LT
2195int hci_inquiry(void __user *arg)
2196{
2197 __u8 __user *ptr = arg;
2198 struct hci_inquiry_req ir;
2199 struct hci_dev *hdev;
2200 int err = 0, do_inquiry = 0, max_rsp;
2201 long timeo;
2202 __u8 *buf;
2203
2204 if (copy_from_user(&ir, ptr, sizeof(ir)))
2205 return -EFAULT;
2206
5a08ecce
AE
2207 hdev = hci_dev_get(ir.dev_id);
2208 if (!hdev)
1da177e4
LT
2209 return -ENODEV;
2210
0736cfa8
MH
2211 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2212 err = -EBUSY;
2213 goto done;
2214 }
2215
5b69bef5
MH
2216 if (hdev->dev_type != HCI_BREDR) {
2217 err = -EOPNOTSUPP;
2218 goto done;
2219 }
2220
56f87901
JH
2221 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
2222 err = -EOPNOTSUPP;
2223 goto done;
2224 }
2225
09fd0de5 2226 hci_dev_lock(hdev);
8e87d142 2227 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 2228 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 2229 hci_inquiry_cache_flush(hdev);
1da177e4
LT
2230 do_inquiry = 1;
2231 }
09fd0de5 2232 hci_dev_unlock(hdev);
1da177e4 2233
04837f64 2234 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
2235
2236 if (do_inquiry) {
01178cd4
JH
2237 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
2238 timeo);
70f23020
AE
2239 if (err < 0)
2240 goto done;
3e13fa1e
AG
2241
2242 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
2243 * cleared). If it is interrupted by a signal, return -EINTR.
2244 */
2245 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
2246 TASK_INTERRUPTIBLE))
2247 return -EINTR;
70f23020 2248 }
1da177e4 2249
8fc9ced3
GP
2250 /* for unlimited number of responses we will use buffer with
2251 * 255 entries
2252 */
1da177e4
LT
2253 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
2254
2255 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
2256 * copy it to the user space.
2257 */
01df8c31 2258 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 2259 if (!buf) {
1da177e4
LT
2260 err = -ENOMEM;
2261 goto done;
2262 }
2263
09fd0de5 2264 hci_dev_lock(hdev);
1da177e4 2265 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 2266 hci_dev_unlock(hdev);
1da177e4
LT
2267
2268 BT_DBG("num_rsp %d", ir.num_rsp);
2269
2270 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
2271 ptr += sizeof(ir);
2272 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 2273 ir.num_rsp))
1da177e4 2274 err = -EFAULT;
8e87d142 2275 } else
1da177e4
LT
2276 err = -EFAULT;
2277
2278 kfree(buf);
2279
2280done:
2281 hci_dev_put(hdev);
2282 return err;
2283}
2284
/* Bring the controller up: open the transport, run the vendor setup and
 * HCI init sequence, and announce the device.  Returns 0 on success or a
 * negative errno.  On init failure everything is torn down again so the
 * device is left closed with flags cleared.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to open a device that is being unregistered */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport (driver callback) */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Vendor-specific setup runs only during the initial setup phase */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw devices and user-channel devices skip the standard
		 * HCI initialization sequence.
		 */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Notify mgmt only for regular BR/EDR devices that are
		 * fully set up and not claimed for user-channel use.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2392
cbed0ca1
JH
2393/* ---- HCI ioctl helpers ---- */
2394
2395int hci_dev_open(__u16 dev)
2396{
2397 struct hci_dev *hdev;
2398 int err;
2399
2400 hdev = hci_dev_get(dev);
2401 if (!hdev)
2402 return -ENODEV;
2403
e1d08f40
JH
2404 /* We need to ensure that no other power on/off work is pending
2405 * before proceeding to call hci_dev_do_open. This is
2406 * particularly important if the setup procedure has not yet
2407 * completed.
2408 */
2409 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2410 cancel_delayed_work(&hdev->power_off);
2411
a5c8f270
MH
2412 /* After this call it is guaranteed that the setup procedure
2413 * has finished. This means that error conditions like RFKILL
2414 * or no valid public or static random address apply.
2415 */
e1d08f40
JH
2416 flush_workqueue(hdev->req_workqueue);
2417
cbed0ca1
JH
2418 err = hci_dev_do_open(hdev);
2419
2420 hci_dev_put(hdev);
2421
2422 return err;
2423}
2424
1da177e4
LT
/* Take the controller down: cancel pending work, flush queues and
 * connections, optionally reset the controller, and close the transport.
 * The ordering of the teardown steps below is significant.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Fast path: device was not up at all */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Stop any running discoverable timeout and clear the flags */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Only notify mgmt of the power-down if it was not an automatic
	 * power-off, and only for BR/EDR devices.
	 */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	/* Drop the reference taken in hci_dev_do_open() */
	hci_dev_put(hdev);
	return 0;
}
2527
2528int hci_dev_close(__u16 dev)
2529{
2530 struct hci_dev *hdev;
2531 int err;
2532
70f23020
AE
2533 hdev = hci_dev_get(dev);
2534 if (!hdev)
1da177e4 2535 return -ENODEV;
8ee56540 2536
0736cfa8
MH
2537 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2538 err = -EBUSY;
2539 goto done;
2540 }
2541
8ee56540
MH
2542 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2543 cancel_delayed_work(&hdev->power_off);
2544
1da177e4 2545 err = hci_dev_do_close(hdev);
8ee56540 2546
0736cfa8 2547done:
1da177e4
LT
2548 hci_dev_put(hdev);
2549 return err;
2550}
2551
/* HCIDEVRESET ioctl handler: flush queues, caches and connections, then
 * issue an HCI reset to the controller (unless it is a raw device).
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset the command credit and all flow-control counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2596
2597int hci_dev_reset_stat(__u16 dev)
2598{
2599 struct hci_dev *hdev;
2600 int ret = 0;
2601
70f23020
AE
2602 hdev = hci_dev_get(dev);
2603 if (!hdev)
1da177e4
LT
2604 return -ENODEV;
2605
0736cfa8
MH
2606 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2607 ret = -EBUSY;
2608 goto done;
2609 }
2610
1da177e4
LT
2611 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2612
0736cfa8 2613done:
1da177e4 2614 hci_dev_put(hdev);
1da177e4
LT
2615 return ret;
2616}
2617
/* Handler for the HCISET* family of ioctls: copy the request from user
 * space and apply the requested setting, synchronously issuing HCI
 * commands where needed.  Only supported on powered BR/EDR controllers
 * not claimed for user-channel use.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Pure bookkeeping; no HCI command needed */
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the upper and packet count in the
		 * lower 16 bits.
		 */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2708
/* HCIGETDEVLIST ioctl handler: report up to dev_num registered devices
 * (id + flags each) back to user space.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation to two pages worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Touching the device via this ioctl cancels any pending
		 * auto power-off.
		 */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable devices */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2755
/* HCIGETDEVINFO ioctl handler: fill a struct hci_dev_info for one device
 * and copy it to user space.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device cancels any pending auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type in the low nibble and device type in bits 4-5 */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE buffer settings in the ACL
	 * fields since hci_dev_info has no dedicated LE members.
	 */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2804
2805/* ---- Interface to HCI drivers ---- */
2806
611b30f7
MH
2807static int hci_rfkill_set_block(void *data, bool blocked)
2808{
2809 struct hci_dev *hdev = data;
2810
2811 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2812
0736cfa8
MH
2813 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2814 return -EBUSY;
2815
5e130367
JH
2816 if (blocked) {
2817 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
2818 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2819 hci_dev_do_close(hdev);
5e130367
JH
2820 } else {
2821 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2822 }
611b30f7
MH
2823
2824 return 0;
2825}
2826
/* rfkill operations for HCI devices; only block/unblock is implemented */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2830
ab81cbf9
JH
/* Deferred power-on work: open the device and re-check error conditions
 * (rfkill, missing address) that were deliberately ignored during setup.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Auto-powered devices are turned off again after a
		 * grace period unless userspace takes over.
		 */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2862
/* Deferred power-off work: simply close the device. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2872
16ab91ab
JH
2873static void hci_discov_off(struct work_struct *work)
2874{
2875 struct hci_dev *hdev;
16ab91ab
JH
2876
2877 hdev = container_of(work, struct hci_dev, discov_off.work);
2878
2879 BT_DBG("%s", hdev->name);
2880
d1967ff8 2881 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2882}
2883
35f7498a 2884void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2885{
4821002c 2886 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2887
4821002c
JH
2888 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2889 list_del(&uuid->list);
2aeb9a1a
JH
2890 kfree(uuid);
2891 }
2aeb9a1a
JH
2892}
2893
35f7498a 2894void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
2895{
2896 struct list_head *p, *n;
2897
2898 list_for_each_safe(p, n, &hdev->link_keys) {
2899 struct link_key *key;
2900
2901 key = list_entry(p, struct link_key, list);
2902
2903 list_del(p);
2904 kfree(key);
2905 }
55ed8ca1
JH
2906}
2907
35f7498a 2908void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf
VCG
2909{
2910 struct smp_ltk *k, *tmp;
2911
2912 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2913 list_del(&k->list);
2914 kfree(k);
2915 }
b899efaf
VCG
2916}
2917
970c4e46
JH
2918void hci_smp_irks_clear(struct hci_dev *hdev)
2919{
2920 struct smp_irk *k, *tmp;
2921
2922 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2923 list_del(&k->list);
2924 kfree(k);
2925 }
2926}
2927
55ed8ca1
JH
2928struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2929{
8035ded4 2930 struct link_key *k;
55ed8ca1 2931
8035ded4 2932 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2933 if (bacmp(bdaddr, &k->bdaddr) == 0)
2934 return k;
55ed8ca1
JH
2935
2936 return NULL;
2937}
2938
/* Decide whether a newly created link key should be stored persistently,
 * based on the key type, the previous key type and the bonding
 * requirements of both sides of the connection (if any).
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
2974
98a0b845
JH
2975static bool ltk_type_master(u8 type)
2976{
2977 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2978 return true;
2979
2980 return false;
2981}
2982
fe39c7b2 2983struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
98a0b845 2984 bool master)
75d262c2 2985{
c9839a11 2986 struct smp_ltk *k;
75d262c2 2987
c9839a11 2988 list_for_each_entry(k, &hdev->long_term_keys, list) {
fe39c7b2 2989 if (k->ediv != ediv || k->rand != rand)
75d262c2
VCG
2990 continue;
2991
98a0b845
JH
2992 if (ltk_type_master(k->type) != master)
2993 continue;
2994
c9839a11 2995 return k;
75d262c2
VCG
2996 }
2997
2998 return NULL;
2999}
75d262c2 3000
c9839a11 3001struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 3002 u8 addr_type, bool master)
75d262c2 3003{
c9839a11 3004 struct smp_ltk *k;
75d262c2 3005
c9839a11
VCG
3006 list_for_each_entry(k, &hdev->long_term_keys, list)
3007 if (addr_type == k->bdaddr_type &&
98a0b845
JH
3008 bacmp(bdaddr, &k->bdaddr) == 0 &&
3009 ltk_type_master(k->type) == master)
75d262c2
VCG
3010 return k;
3011
3012 return NULL;
3013}
75d262c2 3014
970c4e46
JH
/* Resolve a Resolvable Private Address to its IRK entry.  First try a
 * direct match against the last-seen RPA of each entry; if that fails,
 * run the (more expensive) cryptographic match and cache the RPA in the
 * matching entry for next time.  Returns NULL if no IRK resolves @rpa.
 */
struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			/* Cache the resolved RPA for fast lookup next time */
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}
3033
3034struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
3035 u8 addr_type)
3036{
3037 struct smp_irk *irk;
3038
6cfc9988
JH
3039 /* Identity Address must be public or static random */
3040 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
3041 return NULL;
3042
970c4e46
JH
3043 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
3044 if (addr_type == irk->addr_type &&
3045 bacmp(bdaddr, &irk->bdaddr) == 0)
3046 return irk;
3047 }
3048
3049 return NULL;
3050}
3051
/* Store (or update) a BR/EDR link key for @bdaddr and, when @new_key is
 * set, notify mgmt whether the key should be kept persistently.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the persistence logic */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
3104
/* Store (or update) an SMP long term key for @bdaddr/@addr_type.
 * Returns the stored entry, or NULL on allocation failure.
 */
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	/* Reuse an existing entry for the same address/type/role */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}
3133
ca9142b8
JH
3134struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
3135 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
3136{
3137 struct smp_irk *irk;
3138
3139 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
3140 if (!irk) {
3141 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
3142 if (!irk)
ca9142b8 3143 return NULL;
970c4e46
JH
3144
3145 bacpy(&irk->bdaddr, bdaddr);
3146 irk->addr_type = addr_type;
3147
3148 list_add(&irk->list, &hdev->identity_resolving_keys);
3149 }
3150
3151 memcpy(irk->val, val, 16);
3152 bacpy(&irk->rpa, rpa);
3153
ca9142b8 3154 return irk;
970c4e46
JH
3155}
3156
55ed8ca1
JH
3157int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
3158{
3159 struct link_key *key;
3160
3161 key = hci_find_link_key(hdev, bdaddr);
3162 if (!key)
3163 return -ENOENT;
3164
6ed93dc6 3165 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
3166
3167 list_del(&key->list);
3168 kfree(key);
3169
3170 return 0;
3171}
3172
e0b2b27e 3173int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf
VCG
3174{
3175 struct smp_ltk *k, *tmp;
c51ffa0b 3176 int removed = 0;
b899efaf
VCG
3177
3178 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
e0b2b27e 3179 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
3180 continue;
3181
6ed93dc6 3182 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
3183
3184 list_del(&k->list);
3185 kfree(k);
c51ffa0b 3186 removed++;
b899efaf
VCG
3187 }
3188
c51ffa0b 3189 return removed ? 0 : -ENOENT;
b899efaf
VCG
3190}
3191
a7ec7338
JH
3192void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
3193{
3194 struct smp_irk *k, *tmp;
3195
668b7b19 3196 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
a7ec7338
JH
3197 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
3198 continue;
3199
3200 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
3201
3202 list_del(&k->list);
3203 kfree(k);
3204 }
3205}
3206
6bd32326 3207/* HCI command timer function */
/* HCI command timer function: the controller failed to answer the last
 * command in time.  Log it, restore the command credit and kick the
 * command work so the queue does not stall forever.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
3224
2763eda6 3225struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 3226 bdaddr_t *bdaddr)
2763eda6
SJ
3227{
3228 struct oob_data *data;
3229
3230 list_for_each_entry(data, &hdev->remote_oob_data, list)
3231 if (bacmp(bdaddr, &data->bdaddr) == 0)
3232 return data;
3233
3234 return NULL;
3235}
3236
3237int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
3238{
3239 struct oob_data *data;
3240
3241 data = hci_find_remote_oob_data(hdev, bdaddr);
3242 if (!data)
3243 return -ENOENT;
3244
6ed93dc6 3245 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
3246
3247 list_del(&data->list);
3248 kfree(data);
3249
3250 return 0;
3251}
3252
35f7498a 3253void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
3254{
3255 struct oob_data *data, *n;
3256
3257 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
3258 list_del(&data->list);
3259 kfree(data);
3260 }
2763eda6
SJ
3261}
3262
/* Store 192-bit OOB hash and randomizer for bdaddr.
 *
 * Updates an existing entry in place, otherwise allocates a new one
 * and links it into hdev->remote_oob_data. The 256-bit fields are
 * zeroed because this variant only carries P-192 data.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	/* No P-256 values in this variant - make the stale ones unusable */
	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
3288
/* Store extended (P-192 + P-256) OOB hashes and randomizers for bdaddr.
 *
 * Same allocation/update logic as hci_add_remote_oob_data(), but both
 * the 192-bit and 256-bit value pairs are recorded.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
3315
b9ee0a78
MH
3316struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3317 bdaddr_t *bdaddr, u8 type)
b2a66aad 3318{
8035ded4 3319 struct bdaddr_list *b;
b2a66aad 3320
b9ee0a78
MH
3321 list_for_each_entry(b, &hdev->blacklist, list) {
3322 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3323 return b;
b9ee0a78 3324 }
b2a66aad
AJ
3325
3326 return NULL;
3327}
3328
c9507490 3329static void hci_blacklist_clear(struct hci_dev *hdev)
b2a66aad
AJ
3330{
3331 struct list_head *p, *n;
3332
3333 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 3334 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3335
3336 list_del(p);
3337 kfree(b);
3338 }
b2a66aad
AJ
3339}
3340
/* Add bdaddr/type to the device blacklist.
 *
 * BDADDR_ANY is rejected with -EBADF (it is reserved as the wildcard
 * for hci_blacklist_del) and duplicates with -EEXIST. On success the
 * block is reported to the management interface and mgmt's result is
 * returned.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
3362
/* Remove bdaddr/type from the device blacklist.
 *
 * BDADDR_ANY acts as a wildcard and clears the whole list (returning
 * 0 without a mgmt notification). Returns -ENOENT when no matching
 * entry exists, otherwise the result of the mgmt notification.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_blacklist_clear(hdev);
		return 0;
	}

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
3381
d2ab0ac1
MH
3382struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
3383 bdaddr_t *bdaddr, u8 type)
3384{
3385 struct bdaddr_list *b;
3386
3387 list_for_each_entry(b, &hdev->le_white_list, list) {
3388 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
3389 return b;
3390 }
3391
3392 return NULL;
3393}
3394
3395void hci_white_list_clear(struct hci_dev *hdev)
3396{
3397 struct list_head *p, *n;
3398
3399 list_for_each_safe(p, n, &hdev->le_white_list) {
3400 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
3401
3402 list_del(p);
3403 kfree(b);
3404 }
3405}
3406
/* Add bdaddr/type to the LE white list.
 *
 * BDADDR_ANY is rejected with -EBADF. Note that unlike
 * hci_blacklist_add() no duplicate lookup is performed here, so
 * callers are responsible for avoiding double additions.
 */
int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->le_white_list);

	return 0;
}
3425
/* Remove bdaddr/type from the LE white list.
 *
 * Unlike hci_blacklist_del(), BDADDR_ANY is not a clear-all wildcard
 * here; it is simply rejected with -EBADF. Returns -ENOENT when the
 * entry does not exist.
 */
int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = hci_white_list_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}
3442
15819a70
AG
3443/* This function requires the caller holds hdev->lock */
3444struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3445 bdaddr_t *addr, u8 addr_type)
3446{
3447 struct hci_conn_params *params;
3448
3449 list_for_each_entry(params, &hdev->le_conn_params, list) {
3450 if (bacmp(&params->addr, addr) == 0 &&
3451 params->addr_type == addr_type) {
3452 return params;
3453 }
3454 }
3455
3456 return NULL;
3457}
3458
cef952ce
AG
3459static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
3460{
3461 struct hci_conn *conn;
3462
3463 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
3464 if (!conn)
3465 return false;
3466
3467 if (conn->dst_type != type)
3468 return false;
3469
3470 if (conn->state != BT_CONNECTED)
3471 return false;
3472
3473 return true;
3474}
3475
a9b0a04c
AG
3476static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
3477{
3478 if (addr_type == ADDR_LE_DEV_PUBLIC)
3479 return true;
3480
3481 /* Check for Random Static address type */
3482 if ((addr->b[5] & 0xc0) == 0xc0)
3483 return true;
3484
3485 return false;
3486}
3487
/* This function requires the caller holds hdev->lock */
/* Add or update LE connection parameters for addr/addr_type.
 *
 * Only identity addresses (public or random static) are accepted;
 * anything else yields -EINVAL. An existing entry is updated in
 * place (via the "update" label), otherwise a new one is allocated.
 * Depending on auto_connect, the address is also added to or removed
 * from the pending LE connections list.
 *
 * Returns 0 on success, -EINVAL or -ENOMEM on failure.
 */
int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect, u16 conn_min_interval,
			u16 conn_max_interval)
{
	struct hci_conn_params *params;

	if (!is_identity_address(addr, addr_type))
		return -EINVAL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		goto update;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return -ENOMEM;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);

update:
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;
	params->auto_connect = auto_connect;

	/* Keep the pending-connections list in sync with the requested
	 * auto-connect policy. HCI_AUTO_CONN_ALWAYS only queues a pending
	 * connection when no LE link to this address already exists.
	 */
	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_pend_le_conn_del(hdev, addr, addr_type);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_conn_add(hdev, addr, addr_type);
		break;
	}

	BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
	       conn_min_interval, conn_max_interval);

	return 0;
}
3535
/* This function requires the caller holds hdev->lock */
/* Remove the connection parameters entry for addr/addr_type, if any.
 * Any pending LE connection for the address is removed as well.
 */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_pend_le_conn_del(hdev, addr, addr_type);

	list_del(&params->list);
	kfree(params);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}
3552
3553/* This function requires the caller holds hdev->lock */
3554void hci_conn_params_clear(struct hci_dev *hdev)
3555{
3556 struct hci_conn_params *params, *tmp;
3557
3558 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3559 list_del(&params->list);
3560 kfree(params);
3561 }
3562
3563 BT_DBG("All LE connection parameters were removed");
3564}
3565
77a77a30
AG
3566/* This function requires the caller holds hdev->lock */
3567struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
3568 bdaddr_t *addr, u8 addr_type)
3569{
3570 struct bdaddr_list *entry;
3571
3572 list_for_each_entry(entry, &hdev->pend_le_conns, list) {
3573 if (bacmp(&entry->bdaddr, addr) == 0 &&
3574 entry->bdaddr_type == addr_type)
3575 return entry;
3576 }
3577
3578 return NULL;
3579}
3580
/* This function requires the caller holds hdev->lock */
/* Queue addr/addr_type as a pending LE connection.
 *
 * Existing entries are not duplicated; in either case the background
 * scan is re-evaluated so the controller starts looking for the
 * device. On allocation failure the function returns silently after
 * logging (no error code - callers cannot act on it anyway).
 */
void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (entry)
		goto done;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&entry->bdaddr, addr);
	entry->bdaddr_type = addr_type;

	list_add(&entry->list, &hdev->pend_le_conns);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}
3606
/* This function requires the caller holds hdev->lock */
/* Remove addr/addr_type from the pending LE connections list.
 *
 * The background scan is re-evaluated even when no entry was found,
 * mirroring hci_pend_le_conn_add().
 */
void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (!entry)
		goto done;

	list_del(&entry->list);
	kfree(entry);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}
3624
3625/* This function requires the caller holds hdev->lock */
3626void hci_pend_le_conns_clear(struct hci_dev *hdev)
3627{
3628 struct bdaddr_list *entry, *tmp;
3629
3630 list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
3631 list_del(&entry->list);
3632 kfree(entry);
3633 }
3634
3635 BT_DBG("All LE pending connections cleared");
3636}
3637
/* Request-complete callback for the inquiry started from
 * le_scan_disable_work_complete(). On failure the discovery state is
 * reset to STOPPED; on success nothing needs to be done here since
 * inquiry results arrive via events.
 */
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}
3649
/* Request-complete callback run after LE scanning has been disabled.
 *
 * For LE-only discovery, discovery is simply marked STOPPED. For
 * interleaved discovery the BR/EDR inquiry phase is started next:
 * the inquiry command is built, the inquiry cache is flushed under
 * hdev->lock, and the request is submitted with inquiry_complete()
 * as its completion handler.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3692
/* Delayed work that turns off LE scanning after the scan duration has
 * elapsed. Builds a single-command request; the follow-up discovery
 * state handling happens in le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
3710
/* Queue an HCI command on req to program rpa as the controller's
 * random address, unless doing so right now would be unsafe.
 */
static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags) ||
	    hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
		BT_DBG("Deferring random address update");
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}
3733
/* Choose the own-address type for an LE operation and, if needed,
 * queue a command on req to update the controller's random address.
 *
 * Priority order: resolvable private address (privacy enabled),
 * unresolvable private address (require_privacy set), static random
 * address (forced or no public address), public address. The selected
 * type is written to *own_addr_type.
 *
 * Returns 0 on success or a negative error from RPA generation.
 */
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		/* Re-arm the expiry timer for the freshly generated RPA */
		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one.
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
3803
/* Copy the Identity Address of the controller.
 *
 * If the controller has a public BD_ADDR, then by default use that one.
 * If this is a LE only controller without a public address, default to
 * the static random address.
 *
 * For debugging purposes it is possible to force controllers with a
 * public address to use the static random address instead.
 */
void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
			       u8 *bdaddr_type)
{
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		bacpy(bdaddr, &hdev->static_addr);
		*bdaddr_type = ADDR_LE_DEV_RANDOM;
	} else {
		bacpy(bdaddr, &hdev->bdaddr);
		*bdaddr_type = ADDR_LE_DEV_PUBLIC;
	}
}
3825
/* Alloc HCI device
 *
 * Allocates and initializes a struct hci_dev with default packet
 * types, LE parameters, lists, work items and the command timer.
 * Returns NULL on allocation failure. The device is not yet
 * registered; callers follow up with hci_register_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_adv_channel_map = 0x07;
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
	hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
	hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
	hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->identity_resolving_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_white_list);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->pend_le_conns);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3895
/* Free HCI device
 *
 * Drops the device reference; the actual memory is released by the
 * device core via the release callback once the last reference goes.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3903
/* Register HCI device
 *
 * Allocates an index, creates the work queues, debugfs directory,
 * crypto context, sysfs device and rfkill switch, links the device
 * into the global list and schedules the initial power-on. On
 * failure all partially acquired resources are unwound via the
 * goto labels at the bottom.
 *
 * Returns the assigned device id (>= 0) or a negative error.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		/* err_wqueue would destroy req_workqueue too, so unwind
		 * the first workqueue by hand here.
		 */
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	/* AES context used for resolvable private address generation */
	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(hdev->tfm_aes)) {
		BT_ERR("Unable to create crypto context");
		error = PTR_ERR(hdev->tfm_aes);
		hdev->tfm_aes = NULL;
		goto err_wqueue;
	}

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_tfm;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	/* rfkill is optional - registration failure is not fatal */
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_tfm:
	crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
4012
/* Unregister HCI device
 *
 * Reverses hci_register_dev(): unlinks the device, closes it, frees
 * reassembly buffers, notifies mgmt, tears down rfkill/crypto/sysfs/
 * debugfs/workqueues, clears all per-device lists under hdev->lock,
 * drops the registration reference and finally releases the index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_smp_irks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_white_list_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
4080
/* Suspend HCI device
 *
 * Only notifies interested listeners; no device state is changed here.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
4088
/* Resume HCI device
 *
 * Only notifies interested listeners; no device state is changed here.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
4096
/* Receive frame from HCI drivers
 *
 * Frames are only accepted while the device is up or initializing;
 * otherwise the skb is freed and -ENXIO returned (ownership of the
 * skb is taken in all cases). Accepted frames are time-stamped,
 * marked as incoming and handed to the rx worker.
 */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
4118
/* Reassemble a (possibly partial) HCI packet from a driver byte stream.
 *
 * State is kept in hdev->reassembly[index]; a fresh skb is allocated
 * sized for the worst case of the given packet type, and bt_skb_cb's
 * 'expect' tracks how many bytes are still needed. Once a packet
 * header is complete, 'expect' is re-derived from the header's length
 * field. A fully assembled packet is passed to hci_recv_frame().
 *
 * Returns the number of input bytes NOT consumed (>= 0), or a
 * negative error (-EILSEQ for bad type/index, -ENOMEM on allocation
 * failure or when the advertised payload exceeds the skb tailroom).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet - size the skb for the worst
		 * case of this packet type and expect the header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* When exactly the header has been received, read the
		 * payload length from it and sanity-check it against
		 * the remaining tailroom.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
4226
/* Feed a driver-provided fragment of a typed HCI packet into the
 * reassembly machinery. Loops until all input bytes are consumed or
 * an error occurs; reassembly slot (type - 1) is used per packet type.
 *
 * Returns the number of unconsumed bytes (0 when fully consumed) or a
 * negative error from hci_reassembly().
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
4246
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream (packet type byte followed by packet data,
 * e.g. from a UART transport) into the reassembly machinery. When no
 * partial packet is pending, the first byte of the input is consumed
 * as the packet-type indicator; otherwise the type is taken from the
 * pending skb. A single shared reassembly slot is used.
 *
 * Returns the number of unconsumed bytes or a negative error from
 * hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
4281
1da177e4
LT
4282/* ---- Interface to upper protocols ---- */
4283
1da177e4
LT
4284int hci_register_cb(struct hci_cb *cb)
4285{
4286 BT_DBG("%p name %s", cb, cb->name);
4287
f20d09d5 4288 write_lock(&hci_cb_list_lock);
1da177e4 4289 list_add(&cb->list, &hci_cb_list);
f20d09d5 4290 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4291
4292 return 0;
4293}
4294EXPORT_SYMBOL(hci_register_cb);
4295
/* Remove an upper-protocol callback structure from the global
 * hci_cb_list. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
4307
/* Hand an outgoing frame to the driver.
 *
 * The frame is time-stamped and copies are delivered to the monitor
 * socket and (in promiscuous mode) to raw sockets before the driver's
 * send callback is invoked. Send failures are only logged; the skb is
 * owned by the driver at that point.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
4329
/* Initialize an HCI request: empty command queue, bound to hdev,
 * no accumulated build error yet.
 */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
4336
/* Submit a built HCI request for execution.
 *
 * The completion callback is attached to the last command in the
 * request, then all queued commands are spliced onto the device
 * command queue and the cmd worker is kicked.
 *
 * Returns 0 on success, the recorded build error (req->err), or
 * -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occured during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* Only the last command of the request carries the completion
	 * callback; it fires once the whole sequence has finished.
	 */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
4368
1ca3a9d0 4369static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4370 u32 plen, const void *param)
1da177e4
LT
4371{
4372 int len = HCI_COMMAND_HDR_SIZE + plen;
4373 struct hci_command_hdr *hdr;
4374 struct sk_buff *skb;
4375
1da177e4 4376 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4377 if (!skb)
4378 return NULL;
1da177e4
LT
4379
4380 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4381 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4382 hdr->plen = plen;
4383
4384 if (plen)
4385 memcpy(skb_put(skb, plen), param, plen);
4386
4387 BT_DBG("skb len %d", skb->len);
4388
0d48d939 4389 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 4390
1ca3a9d0
JH
4391 return skb;
4392}
4393
4394/* Send HCI command */
07dc93dd
JH
4395int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4396 const void *param)
1ca3a9d0
JH
4397{
4398 struct sk_buff *skb;
4399
4400 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4401
4402 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4403 if (!skb) {
4404 BT_ERR("%s no memory for command", hdev->name);
4405 return -ENOMEM;
4406 }
4407
11714b3d
JH
4408 /* Stand-alone HCI commands must be flaged as
4409 * single-command requests.
4410 */
4411 bt_cb(skb)->req.start = true;
4412
1da177e4 4413 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4414 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4415
4416 return 0;
4417}
1da177e4 4418
71c76a17 4419/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4420void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4421 const void *param, u8 event)
71c76a17
JH
4422{
4423 struct hci_dev *hdev = req->hdev;
4424 struct sk_buff *skb;
4425
4426 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4427
34739c1e
AG
4428 /* If an error occured during request building, there is no point in
4429 * queueing the HCI command. We can simply return.
4430 */
4431 if (req->err)
4432 return;
4433
71c76a17
JH
4434 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4435 if (!skb) {
5d73e034
AG
4436 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4437 hdev->name, opcode);
4438 req->err = -ENOMEM;
e348fe6b 4439 return;
71c76a17
JH
4440 }
4441
4442 if (skb_queue_empty(&req->cmd_q))
4443 bt_cb(skb)->req.start = true;
4444
02350a72
JH
4445 bt_cb(skb)->req.event = event;
4446
71c76a17 4447 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4448}
4449
07dc93dd
JH
4450void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4451 const void *param)
02350a72
JH
4452{
4453 hci_req_add_ev(req, opcode, plen, param, 0);
4454}
4455
1da177e4 4456/* Get data from the previously sent command */
a9de9248 4457void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4458{
4459 struct hci_command_hdr *hdr;
4460
4461 if (!hdev->sent_cmd)
4462 return NULL;
4463
4464 hdr = (void *) hdev->sent_cmd->data;
4465
a9de9248 4466 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4467 return NULL;
4468
f0e09510 4469 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4470
4471 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4472}
4473
4474/* Send ACL data */
4475static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4476{
4477 struct hci_acl_hdr *hdr;
4478 int len = skb->len;
4479
badff6d0
ACM
4480 skb_push(skb, HCI_ACL_HDR_SIZE);
4481 skb_reset_transport_header(skb);
9c70220b 4482 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4483 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4484 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4485}
4486
ee22be7e 4487static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4488 struct sk_buff *skb, __u16 flags)
1da177e4 4489{
ee22be7e 4490 struct hci_conn *conn = chan->conn;
1da177e4
LT
4491 struct hci_dev *hdev = conn->hdev;
4492 struct sk_buff *list;
4493
087bfd99
GP
4494 skb->len = skb_headlen(skb);
4495 skb->data_len = 0;
4496
4497 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4498
4499 switch (hdev->dev_type) {
4500 case HCI_BREDR:
4501 hci_add_acl_hdr(skb, conn->handle, flags);
4502 break;
4503 case HCI_AMP:
4504 hci_add_acl_hdr(skb, chan->handle, flags);
4505 break;
4506 default:
4507 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4508 return;
4509 }
087bfd99 4510
70f23020
AE
4511 list = skb_shinfo(skb)->frag_list;
4512 if (!list) {
1da177e4
LT
4513 /* Non fragmented */
4514 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4515
73d80deb 4516 skb_queue_tail(queue, skb);
1da177e4
LT
4517 } else {
4518 /* Fragmented */
4519 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4520
4521 skb_shinfo(skb)->frag_list = NULL;
4522
4523 /* Queue all fragments atomically */
af3e6359 4524 spin_lock(&queue->lock);
1da177e4 4525
73d80deb 4526 __skb_queue_tail(queue, skb);
e702112f
AE
4527
4528 flags &= ~ACL_START;
4529 flags |= ACL_CONT;
1da177e4
LT
4530 do {
4531 skb = list; list = list->next;
8e87d142 4532
0d48d939 4533 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4534 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4535
4536 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4537
73d80deb 4538 __skb_queue_tail(queue, skb);
1da177e4
LT
4539 } while (list);
4540
af3e6359 4541 spin_unlock(&queue->lock);
1da177e4 4542 }
73d80deb
LAD
4543}
4544
4545void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4546{
ee22be7e 4547 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4548
f0e09510 4549 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4550
ee22be7e 4551 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4552
3eff45ea 4553 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4554}
1da177e4
LT
4555
4556/* Send SCO data */
0d861d8b 4557void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4558{
4559 struct hci_dev *hdev = conn->hdev;
4560 struct hci_sco_hdr hdr;
4561
4562 BT_DBG("%s len %d", hdev->name, skb->len);
4563
aca3192c 4564 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4565 hdr.dlen = skb->len;
4566
badff6d0
ACM
4567 skb_push(skb, HCI_SCO_HDR_SIZE);
4568 skb_reset_transport_header(skb);
9c70220b 4569 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4570
0d48d939 4571 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4572
1da177e4 4573 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4574 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4575}
1da177e4
LT
4576
4577/* ---- HCI TX task (outgoing data) ---- */
4578
4579/* HCI Connection scheduler */
6039aa73
GP
4580static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4581 int *quote)
1da177e4
LT
4582{
4583 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4584 struct hci_conn *conn = NULL, *c;
abc5de8f 4585 unsigned int num = 0, min = ~0;
1da177e4 4586
8e87d142 4587 /* We don't have to lock device here. Connections are always
1da177e4 4588 * added and removed with TX task disabled. */
bf4c6325
GP
4589
4590 rcu_read_lock();
4591
4592 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4593 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4594 continue;
769be974
MH
4595
4596 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4597 continue;
4598
1da177e4
LT
4599 num++;
4600
4601 if (c->sent < min) {
4602 min = c->sent;
4603 conn = c;
4604 }
52087a79
LAD
4605
4606 if (hci_conn_num(hdev, type) == num)
4607 break;
1da177e4
LT
4608 }
4609
bf4c6325
GP
4610 rcu_read_unlock();
4611
1da177e4 4612 if (conn) {
6ed58ec5
VT
4613 int cnt, q;
4614
4615 switch (conn->type) {
4616 case ACL_LINK:
4617 cnt = hdev->acl_cnt;
4618 break;
4619 case SCO_LINK:
4620 case ESCO_LINK:
4621 cnt = hdev->sco_cnt;
4622 break;
4623 case LE_LINK:
4624 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4625 break;
4626 default:
4627 cnt = 0;
4628 BT_ERR("Unknown link type");
4629 }
4630
4631 q = cnt / num;
1da177e4
LT
4632 *quote = q ? q : 1;
4633 } else
4634 *quote = 0;
4635
4636 BT_DBG("conn %p quote %d", conn, *quote);
4637 return conn;
4638}
4639
6039aa73 4640static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4641{
4642 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4643 struct hci_conn *c;
1da177e4 4644
bae1f5d9 4645 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4646
bf4c6325
GP
4647 rcu_read_lock();
4648
1da177e4 4649 /* Kill stalled connections */
bf4c6325 4650 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4651 if (c->type == type && c->sent) {
6ed93dc6
AE
4652 BT_ERR("%s killing stalled connection %pMR",
4653 hdev->name, &c->dst);
bed71748 4654 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4655 }
4656 }
bf4c6325
GP
4657
4658 rcu_read_unlock();
1da177e4
LT
4659}
4660
6039aa73
GP
4661static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4662 int *quote)
1da177e4 4663{
73d80deb
LAD
4664 struct hci_conn_hash *h = &hdev->conn_hash;
4665 struct hci_chan *chan = NULL;
abc5de8f 4666 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4667 struct hci_conn *conn;
73d80deb
LAD
4668 int cnt, q, conn_num = 0;
4669
4670 BT_DBG("%s", hdev->name);
4671
bf4c6325
GP
4672 rcu_read_lock();
4673
4674 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4675 struct hci_chan *tmp;
4676
4677 if (conn->type != type)
4678 continue;
4679
4680 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4681 continue;
4682
4683 conn_num++;
4684
8192edef 4685 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4686 struct sk_buff *skb;
4687
4688 if (skb_queue_empty(&tmp->data_q))
4689 continue;
4690
4691 skb = skb_peek(&tmp->data_q);
4692 if (skb->priority < cur_prio)
4693 continue;
4694
4695 if (skb->priority > cur_prio) {
4696 num = 0;
4697 min = ~0;
4698 cur_prio = skb->priority;
4699 }
4700
4701 num++;
4702
4703 if (conn->sent < min) {
4704 min = conn->sent;
4705 chan = tmp;
4706 }
4707 }
4708
4709 if (hci_conn_num(hdev, type) == conn_num)
4710 break;
4711 }
4712
bf4c6325
GP
4713 rcu_read_unlock();
4714
73d80deb
LAD
4715 if (!chan)
4716 return NULL;
4717
4718 switch (chan->conn->type) {
4719 case ACL_LINK:
4720 cnt = hdev->acl_cnt;
4721 break;
bd1eb66b
AE
4722 case AMP_LINK:
4723 cnt = hdev->block_cnt;
4724 break;
73d80deb
LAD
4725 case SCO_LINK:
4726 case ESCO_LINK:
4727 cnt = hdev->sco_cnt;
4728 break;
4729 case LE_LINK:
4730 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4731 break;
4732 default:
4733 cnt = 0;
4734 BT_ERR("Unknown link type");
4735 }
4736
4737 q = cnt / num;
4738 *quote = q ? q : 1;
4739 BT_DBG("chan %p quote %d", chan, *quote);
4740 return chan;
4741}
4742
02b20f0b
LAD
4743static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4744{
4745 struct hci_conn_hash *h = &hdev->conn_hash;
4746 struct hci_conn *conn;
4747 int num = 0;
4748
4749 BT_DBG("%s", hdev->name);
4750
bf4c6325
GP
4751 rcu_read_lock();
4752
4753 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4754 struct hci_chan *chan;
4755
4756 if (conn->type != type)
4757 continue;
4758
4759 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4760 continue;
4761
4762 num++;
4763
8192edef 4764 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4765 struct sk_buff *skb;
4766
4767 if (chan->sent) {
4768 chan->sent = 0;
4769 continue;
4770 }
4771
4772 if (skb_queue_empty(&chan->data_q))
4773 continue;
4774
4775 skb = skb_peek(&chan->data_q);
4776 if (skb->priority >= HCI_PRIO_MAX - 1)
4777 continue;
4778
4779 skb->priority = HCI_PRIO_MAX - 1;
4780
4781 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4782 skb->priority);
02b20f0b
LAD
4783 }
4784
4785 if (hci_conn_num(hdev, type) == num)
4786 break;
4787 }
bf4c6325
GP
4788
4789 rcu_read_unlock();
4790
02b20f0b
LAD
4791}
4792
b71d385a
AE
4793static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4794{
4795 /* Calculate count of blocks used by this packet */
4796 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4797}
4798
6039aa73 4799static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4800{
1da177e4
LT
4801 if (!test_bit(HCI_RAW, &hdev->flags)) {
4802 /* ACL tx timeout must be longer than maximum
4803 * link supervision timeout (40.9 seconds) */
63d2bc1b 4804 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4805 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4806 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4807 }
63d2bc1b 4808}
1da177e4 4809
6039aa73 4810static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4811{
4812 unsigned int cnt = hdev->acl_cnt;
4813 struct hci_chan *chan;
4814 struct sk_buff *skb;
4815 int quote;
4816
4817 __check_timeout(hdev, cnt);
04837f64 4818
73d80deb 4819 while (hdev->acl_cnt &&
a8c5fb1a 4820 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4821 u32 priority = (skb_peek(&chan->data_q))->priority;
4822 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4823 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4824 skb->len, skb->priority);
73d80deb 4825
ec1cce24
LAD
4826 /* Stop if priority has changed */
4827 if (skb->priority < priority)
4828 break;
4829
4830 skb = skb_dequeue(&chan->data_q);
4831
73d80deb 4832 hci_conn_enter_active_mode(chan->conn,
04124681 4833 bt_cb(skb)->force_active);
04837f64 4834
57d17d70 4835 hci_send_frame(hdev, skb);
1da177e4
LT
4836 hdev->acl_last_tx = jiffies;
4837
4838 hdev->acl_cnt--;
73d80deb
LAD
4839 chan->sent++;
4840 chan->conn->sent++;
1da177e4
LT
4841 }
4842 }
02b20f0b
LAD
4843
4844 if (cnt != hdev->acl_cnt)
4845 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4846}
4847
6039aa73 4848static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4849{
63d2bc1b 4850 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4851 struct hci_chan *chan;
4852 struct sk_buff *skb;
4853 int quote;
bd1eb66b 4854 u8 type;
b71d385a 4855
63d2bc1b 4856 __check_timeout(hdev, cnt);
b71d385a 4857
bd1eb66b
AE
4858 BT_DBG("%s", hdev->name);
4859
4860 if (hdev->dev_type == HCI_AMP)
4861 type = AMP_LINK;
4862 else
4863 type = ACL_LINK;
4864
b71d385a 4865 while (hdev->block_cnt > 0 &&
bd1eb66b 4866 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4867 u32 priority = (skb_peek(&chan->data_q))->priority;
4868 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4869 int blocks;
4870
4871 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4872 skb->len, skb->priority);
b71d385a
AE
4873
4874 /* Stop if priority has changed */
4875 if (skb->priority < priority)
4876 break;
4877
4878 skb = skb_dequeue(&chan->data_q);
4879
4880 blocks = __get_blocks(hdev, skb);
4881 if (blocks > hdev->block_cnt)
4882 return;
4883
4884 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4885 bt_cb(skb)->force_active);
b71d385a 4886
57d17d70 4887 hci_send_frame(hdev, skb);
b71d385a
AE
4888 hdev->acl_last_tx = jiffies;
4889
4890 hdev->block_cnt -= blocks;
4891 quote -= blocks;
4892
4893 chan->sent += blocks;
4894 chan->conn->sent += blocks;
4895 }
4896 }
4897
4898 if (cnt != hdev->block_cnt)
bd1eb66b 4899 hci_prio_recalculate(hdev, type);
b71d385a
AE
4900}
4901
6039aa73 4902static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4903{
4904 BT_DBG("%s", hdev->name);
4905
bd1eb66b
AE
4906 /* No ACL link over BR/EDR controller */
4907 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4908 return;
4909
4910 /* No AMP link over AMP controller */
4911 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4912 return;
4913
4914 switch (hdev->flow_ctl_mode) {
4915 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4916 hci_sched_acl_pkt(hdev);
4917 break;
4918
4919 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4920 hci_sched_acl_blk(hdev);
4921 break;
4922 }
4923}
4924
1da177e4 4925/* Schedule SCO */
6039aa73 4926static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4927{
4928 struct hci_conn *conn;
4929 struct sk_buff *skb;
4930 int quote;
4931
4932 BT_DBG("%s", hdev->name);
4933
52087a79
LAD
4934 if (!hci_conn_num(hdev, SCO_LINK))
4935 return;
4936
1da177e4
LT
4937 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4938 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4939 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4940 hci_send_frame(hdev, skb);
1da177e4
LT
4941
4942 conn->sent++;
4943 if (conn->sent == ~0)
4944 conn->sent = 0;
4945 }
4946 }
4947}
4948
6039aa73 4949static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4950{
4951 struct hci_conn *conn;
4952 struct sk_buff *skb;
4953 int quote;
4954
4955 BT_DBG("%s", hdev->name);
4956
52087a79
LAD
4957 if (!hci_conn_num(hdev, ESCO_LINK))
4958 return;
4959
8fc9ced3
GP
4960 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4961 &quote))) {
b6a0dc82
MH
4962 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4963 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4964 hci_send_frame(hdev, skb);
b6a0dc82
MH
4965
4966 conn->sent++;
4967 if (conn->sent == ~0)
4968 conn->sent = 0;
4969 }
4970 }
4971}
4972
6039aa73 4973static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4974{
73d80deb 4975 struct hci_chan *chan;
6ed58ec5 4976 struct sk_buff *skb;
02b20f0b 4977 int quote, cnt, tmp;
6ed58ec5
VT
4978
4979 BT_DBG("%s", hdev->name);
4980
52087a79
LAD
4981 if (!hci_conn_num(hdev, LE_LINK))
4982 return;
4983
6ed58ec5
VT
4984 if (!test_bit(HCI_RAW, &hdev->flags)) {
4985 /* LE tx timeout must be longer than maximum
4986 * link supervision timeout (40.9 seconds) */
bae1f5d9 4987 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4988 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4989 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4990 }
4991
4992 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4993 tmp = cnt;
73d80deb 4994 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4995 u32 priority = (skb_peek(&chan->data_q))->priority;
4996 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4997 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4998 skb->len, skb->priority);
6ed58ec5 4999
ec1cce24
LAD
5000 /* Stop if priority has changed */
5001 if (skb->priority < priority)
5002 break;
5003
5004 skb = skb_dequeue(&chan->data_q);
5005
57d17d70 5006 hci_send_frame(hdev, skb);
6ed58ec5
VT
5007 hdev->le_last_tx = jiffies;
5008
5009 cnt--;
73d80deb
LAD
5010 chan->sent++;
5011 chan->conn->sent++;
6ed58ec5
VT
5012 }
5013 }
73d80deb 5014
6ed58ec5
VT
5015 if (hdev->le_pkts)
5016 hdev->le_cnt = cnt;
5017 else
5018 hdev->acl_cnt = cnt;
02b20f0b
LAD
5019
5020 if (cnt != tmp)
5021 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
5022}
5023
3eff45ea 5024static void hci_tx_work(struct work_struct *work)
1da177e4 5025{
3eff45ea 5026 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
5027 struct sk_buff *skb;
5028
6ed58ec5 5029 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 5030 hdev->sco_cnt, hdev->le_cnt);
1da177e4 5031
52de599e
MH
5032 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5033 /* Schedule queues and send stuff to HCI driver */
5034 hci_sched_acl(hdev);
5035 hci_sched_sco(hdev);
5036 hci_sched_esco(hdev);
5037 hci_sched_le(hdev);
5038 }
6ed58ec5 5039
1da177e4
LT
5040 /* Send next queued raw (unknown type) packet */
5041 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 5042 hci_send_frame(hdev, skb);
1da177e4
LT
5043}
5044
25985edc 5045/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
5046
5047/* ACL data packet */
6039aa73 5048static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5049{
5050 struct hci_acl_hdr *hdr = (void *) skb->data;
5051 struct hci_conn *conn;
5052 __u16 handle, flags;
5053
5054 skb_pull(skb, HCI_ACL_HDR_SIZE);
5055
5056 handle = __le16_to_cpu(hdr->handle);
5057 flags = hci_flags(handle);
5058 handle = hci_handle(handle);
5059
f0e09510 5060 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5061 handle, flags);
1da177e4
LT
5062
5063 hdev->stat.acl_rx++;
5064
5065 hci_dev_lock(hdev);
5066 conn = hci_conn_hash_lookup_handle(hdev, handle);
5067 hci_dev_unlock(hdev);
8e87d142 5068
1da177e4 5069 if (conn) {
65983fc7 5070 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5071
1da177e4 5072 /* Send to upper protocol */
686ebf28
UF
5073 l2cap_recv_acldata(conn, skb, flags);
5074 return;
1da177e4 5075 } else {
8e87d142 5076 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5077 hdev->name, handle);
1da177e4
LT
5078 }
5079
5080 kfree_skb(skb);
5081}
5082
5083/* SCO data packet */
6039aa73 5084static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5085{
5086 struct hci_sco_hdr *hdr = (void *) skb->data;
5087 struct hci_conn *conn;
5088 __u16 handle;
5089
5090 skb_pull(skb, HCI_SCO_HDR_SIZE);
5091
5092 handle = __le16_to_cpu(hdr->handle);
5093
f0e09510 5094 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5095
5096 hdev->stat.sco_rx++;
5097
5098 hci_dev_lock(hdev);
5099 conn = hci_conn_hash_lookup_handle(hdev, handle);
5100 hci_dev_unlock(hdev);
5101
5102 if (conn) {
1da177e4 5103 /* Send to upper protocol */
686ebf28
UF
5104 sco_recv_scodata(conn, skb);
5105 return;
1da177e4 5106 } else {
8e87d142 5107 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5108 hdev->name, handle);
1da177e4
LT
5109 }
5110
5111 kfree_skb(skb);
5112}
5113
9238f36a
JH
5114static bool hci_req_is_complete(struct hci_dev *hdev)
5115{
5116 struct sk_buff *skb;
5117
5118 skb = skb_peek(&hdev->cmd_q);
5119 if (!skb)
5120 return true;
5121
5122 return bt_cb(skb)->req.start;
5123}
5124
42c6b129
JH
5125static void hci_resend_last(struct hci_dev *hdev)
5126{
5127 struct hci_command_hdr *sent;
5128 struct sk_buff *skb;
5129 u16 opcode;
5130
5131 if (!hdev->sent_cmd)
5132 return;
5133
5134 sent = (void *) hdev->sent_cmd->data;
5135 opcode = __le16_to_cpu(sent->opcode);
5136 if (opcode == HCI_OP_RESET)
5137 return;
5138
5139 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5140 if (!skb)
5141 return;
5142
5143 skb_queue_head(&hdev->cmd_q, skb);
5144 queue_work(hdev->workqueue, &hdev->cmd_work);
5145}
5146
9238f36a
JH
5147void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5148{
5149 hci_req_complete_t req_complete = NULL;
5150 struct sk_buff *skb;
5151 unsigned long flags;
5152
5153 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5154
42c6b129
JH
5155 /* If the completed command doesn't match the last one that was
5156 * sent we need to do special handling of it.
9238f36a 5157 */
42c6b129
JH
5158 if (!hci_sent_cmd_data(hdev, opcode)) {
5159 /* Some CSR based controllers generate a spontaneous
5160 * reset complete event during init and any pending
5161 * command will never be completed. In such a case we
5162 * need to resend whatever was the last sent
5163 * command.
5164 */
5165 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5166 hci_resend_last(hdev);
5167
9238f36a 5168 return;
42c6b129 5169 }
9238f36a
JH
5170
5171 /* If the command succeeded and there's still more commands in
5172 * this request the request is not yet complete.
5173 */
5174 if (!status && !hci_req_is_complete(hdev))
5175 return;
5176
5177 /* If this was the last command in a request the complete
5178 * callback would be found in hdev->sent_cmd instead of the
5179 * command queue (hdev->cmd_q).
5180 */
5181 if (hdev->sent_cmd) {
5182 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5183
5184 if (req_complete) {
5185 /* We must set the complete callback to NULL to
5186 * avoid calling the callback more than once if
5187 * this function gets called again.
5188 */
5189 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5190
9238f36a 5191 goto call_complete;
53e21fbc 5192 }
9238f36a
JH
5193 }
5194
5195 /* Remove all pending commands belonging to this request */
5196 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5197 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5198 if (bt_cb(skb)->req.start) {
5199 __skb_queue_head(&hdev->cmd_q, skb);
5200 break;
5201 }
5202
5203 req_complete = bt_cb(skb)->req.complete;
5204 kfree_skb(skb);
5205 }
5206 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5207
5208call_complete:
5209 if (req_complete)
5210 req_complete(hdev, status);
5211}
5212
b78752cc 5213static void hci_rx_work(struct work_struct *work)
1da177e4 5214{
b78752cc 5215 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5216 struct sk_buff *skb;
5217
5218 BT_DBG("%s", hdev->name);
5219
1da177e4 5220 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5221 /* Send copy to monitor */
5222 hci_send_to_monitor(hdev, skb);
5223
1da177e4
LT
5224 if (atomic_read(&hdev->promisc)) {
5225 /* Send copy to the sockets */
470fe1b5 5226 hci_send_to_sock(hdev, skb);
1da177e4
LT
5227 }
5228
0736cfa8
MH
5229 if (test_bit(HCI_RAW, &hdev->flags) ||
5230 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5231 kfree_skb(skb);
5232 continue;
5233 }
5234
5235 if (test_bit(HCI_INIT, &hdev->flags)) {
5236 /* Don't process data packets in this states. */
0d48d939 5237 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5238 case HCI_ACLDATA_PKT:
5239 case HCI_SCODATA_PKT:
5240 kfree_skb(skb);
5241 continue;
3ff50b79 5242 }
1da177e4
LT
5243 }
5244
5245 /* Process frame */
0d48d939 5246 switch (bt_cb(skb)->pkt_type) {
1da177e4 5247 case HCI_EVENT_PKT:
b78752cc 5248 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5249 hci_event_packet(hdev, skb);
5250 break;
5251
5252 case HCI_ACLDATA_PKT:
5253 BT_DBG("%s ACL data packet", hdev->name);
5254 hci_acldata_packet(hdev, skb);
5255 break;
5256
5257 case HCI_SCODATA_PKT:
5258 BT_DBG("%s SCO data packet", hdev->name);
5259 hci_scodata_packet(hdev, skb);
5260 break;
5261
5262 default:
5263 kfree_skb(skb);
5264 break;
5265 }
5266 }
1da177e4
LT
5267}
5268
c347b765 5269static void hci_cmd_work(struct work_struct *work)
1da177e4 5270{
c347b765 5271 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5272 struct sk_buff *skb;
5273
2104786b
AE
5274 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5275 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5276
1da177e4 5277 /* Send queued commands */
5a08ecce
AE
5278 if (atomic_read(&hdev->cmd_cnt)) {
5279 skb = skb_dequeue(&hdev->cmd_q);
5280 if (!skb)
5281 return;
5282
7585b97a 5283 kfree_skb(hdev->sent_cmd);
1da177e4 5284
a675d7f1 5285 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5286 if (hdev->sent_cmd) {
1da177e4 5287 atomic_dec(&hdev->cmd_cnt);
57d17d70 5288 hci_send_frame(hdev, skb);
7bdb8a5c
SJ
5289 if (test_bit(HCI_RESET, &hdev->flags))
5290 del_timer(&hdev->cmd_timer);
5291 else
5292 mod_timer(&hdev->cmd_timer,
5f246e89 5293 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
5294 } else {
5295 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5296 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5297 }
5298 }
5299}
b1efcc28
AG
5300
5301void hci_req_add_le_scan_disable(struct hci_request *req)
5302{
5303 struct hci_cp_le_set_scan_enable cp;
5304
5305 memset(&cp, 0, sizeof(cp));
5306 cp.enable = LE_SCAN_DISABLE;
5307 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5308}
a4790dbd 5309
8ef30fd3
AG
5310void hci_req_add_le_passive_scan(struct hci_request *req)
5311{
5312 struct hci_cp_le_set_scan_param param_cp;
5313 struct hci_cp_le_set_scan_enable enable_cp;
5314 struct hci_dev *hdev = req->hdev;
5315 u8 own_addr_type;
5316
5317 /* Set require_privacy to true to avoid identification from
5318 * unknown peer devices. Since this is passive scanning, no
5319 * SCAN_REQ using the local identity should be sent. Mandating
5320 * privacy is just an extra precaution.
5321 */
5322 if (hci_update_random_address(req, true, &own_addr_type))
5323 return;
5324
5325 memset(&param_cp, 0, sizeof(param_cp));
5326 param_cp.type = LE_SCAN_PASSIVE;
5327 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5328 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5329 param_cp.own_address_type = own_addr_type;
5330 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5331 &param_cp);
5332
5333 memset(&enable_cp, 0, sizeof(enable_cp));
5334 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5335 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
8ef30fd3
AG
5336 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5337 &enable_cp);
5338}
5339
a4790dbd
AG
5340static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5341{
5342 if (status)
5343 BT_DBG("HCI request failed to update background scanning: "
5344 "status 0x%2.2x", status);
5345}
5346
5347/* This function controls the background scanning based on hdev->pend_le_conns
5348 * list. If there are pending LE connection we start the background scanning,
5349 * otherwise we stop it.
5350 *
5351 * This function requires the caller holds hdev->lock.
5352 */
5353void hci_update_background_scan(struct hci_dev *hdev)
5354{
a4790dbd
AG
5355 struct hci_request req;
5356 struct hci_conn *conn;
5357 int err;
5358
5359 hci_req_init(&req, hdev);
5360
5361 if (list_empty(&hdev->pend_le_conns)) {
5362 /* If there is no pending LE connections, we should stop
5363 * the background scanning.
5364 */
5365
5366 /* If controller is not scanning we are done. */
5367 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5368 return;
5369
5370 hci_req_add_le_scan_disable(&req);
5371
5372 BT_DBG("%s stopping background scanning", hdev->name);
5373 } else {
a4790dbd
AG
5374 /* If there is at least one pending LE connection, we should
5375 * keep the background scan running.
5376 */
5377
a4790dbd
AG
5378 /* If controller is connecting, we should not start scanning
5379 * since some controllers are not able to scan and connect at
5380 * the same time.
5381 */
5382 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5383 if (conn)
5384 return;
5385
4340a124
AG
5386 /* If controller is currently scanning, we stop it to ensure we
5387 * don't miss any advertising (due to duplicates filter).
5388 */
5389 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5390 hci_req_add_le_scan_disable(&req);
5391
8ef30fd3 5392 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5393
5394 BT_DBG("%s starting background scanning", hdev->name);
5395 }
5396
5397 err = hci_req_run(&req, update_background_scan_complete);
5398 if (err)
5399 BT_ERR("Failed to run HCI request: err %d", err);
5400}
This page took 1.026071 seconds and 5 git commands to generate.