Bluetooth: Add timer to force power off
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open = simple_open,
	.read = dut_mode_read,
	.write = dut_mode_write,
	.llseek = default_llseek,
};
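
/* Usage sketch (assuming debugfs is mounted at /sys/kernel/debug and the
 * controller is registered as hci0):
 *
 *   echo Y > /sys/kernel/debug/bluetooth/hci0/dut_mode
 *
 * sends HCI_OP_ENABLE_DUT_MODE to the controller; writing N issues
 * HCI_OP_RESET to leave Device Under Test mode again.
 */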

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open = features_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open = blacklist_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open = uuids_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
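
/* Illustrative example of the reversal above: a UUID that %pUb prints as
 * 00001101-0000-1000-8000-00805f9b34fb is held in uuid->uuid[] with
 * uuid->uuid[0] = 0xfb (least significant byte) and uuid->uuid[15] = 0x00,
 * so copying uuid->uuid[15 - i] into val[i] restores the canonical order.
 */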

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open = inquiry_cache_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open = link_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open = dev_class_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
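
/* DEFINE_SIMPLE_ATTRIBUTE() generates the file_operations for a numeric
 * debugfs attribute from a getter/setter pair plus a printf format string;
 * passing NULL for the setter, as above, leaves the file read-only.
 */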

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open = simple_open,
	.read = force_sc_support_read,
	.write = force_sc_support_write,
	.llseek = default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open = simple_open,
	.read = sc_only_mode_read,
	.llseek = default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open = identity_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open = random_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open = static_address_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open = simple_open,
	.read = force_static_address_read,
	.write = force_static_address_write,
	.llseek = default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open = white_list_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open = identity_resolving_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open = long_term_keys_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open = simple_open,
	.read = lowpan_read,
	.write = lowpan_write,
	.llseek = default_llseek,
};

static int le_auto_conn_show(struct seq_file *sf, void *ptr)
{
	struct hci_dev *hdev = sf->private;
	struct hci_conn_params *p;

	hci_dev_lock(hdev);

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(sf, "%pMR %u %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int le_auto_conn_open(struct inode *inode, struct file *file)
{
	return single_open(file, le_auto_conn_show, inode->i_private);
}

static ssize_t le_auto_conn_write(struct file *file, const char __user *data,
				  size_t count, loff_t *offset)
{
	struct seq_file *sf = file->private_data;
	struct hci_dev *hdev = sf->private;
	u8 auto_connect = 0;
	bdaddr_t addr;
	u8 addr_type;
	char *buf;
	int err = 0;
	int n;

	/* Don't allow partial write */
	if (*offset != 0)
		return -EINVAL;

	if (count < 3)
		return -EINVAL;

	/* One extra byte keeps the user data NUL-terminated for sscanf() */
	buf = kzalloc(count + 1, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (copy_from_user(buf, data, count)) {
		err = -EFAULT;
		goto done;
	}

	if (memcmp(buf, "add", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type,
			   &auto_connect);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		err = hci_conn_params_add(hdev, &addr, addr_type, auto_connect,
					  hdev->le_conn_min_interval,
					  hdev->le_conn_max_interval);
		hci_dev_unlock(hdev);

		if (err)
			goto done;
	} else if (memcmp(buf, "del", 3) == 0) {
		n = sscanf(&buf[4], "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu",
			   &addr.b[5], &addr.b[4], &addr.b[3], &addr.b[2],
			   &addr.b[1], &addr.b[0], &addr_type);

		if (n < 7) {
			err = -EINVAL;
			goto done;
		}

		hci_dev_lock(hdev);
		hci_conn_params_del(hdev, &addr, addr_type);
		hci_dev_unlock(hdev);
	} else if (memcmp(buf, "clr", 3) == 0) {
		hci_dev_lock(hdev);
		hci_conn_params_clear(hdev);
		hci_pend_le_conns_clear(hdev);
		hci_update_background_scan(hdev);
		hci_dev_unlock(hdev);
	} else {
		err = -EINVAL;
	}

done:
	kfree(buf);

	if (err)
		return err;
	else
		return count;
}

static const struct file_operations le_auto_conn_fops = {
	.open = le_auto_conn_open,
	.read = seq_read,
	.write = le_auto_conn_write,
	.llseek = seq_lseek,
	.release = single_release,
};
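
/* Accepted write syntax, derived from the sscanf() formats above (the
 * address is illustrative):
 *
 *   add 00:11:22:33:44:55 <addr_type> [<auto_connect>]
 *   del 00:11:22:33:44:55 <addr_type>
 *   clr
 */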

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
1da177e4 1166/* Execute request and wait for completion. */
01178cd4 1167static int __hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
1168 void (*func)(struct hci_request *req,
1169 unsigned long opt),
01178cd4 1170 unsigned long opt, __u32 timeout)
1da177e4 1171{
42c6b129 1172 struct hci_request req;
1da177e4
LT
1173 DECLARE_WAITQUEUE(wait, current);
1174 int err = 0;
1175
1176 BT_DBG("%s start", hdev->name);
1177
42c6b129
JH
1178 hci_req_init(&req, hdev);
1179
1da177e4
LT
1180 hdev->req_status = HCI_REQ_PEND;
1181
42c6b129 1182 func(&req, opt);
53cce22d 1183
42c6b129
JH
1184 err = hci_req_run(&req, hci_req_sync_complete);
1185 if (err < 0) {
53cce22d 1186 hdev->req_status = 0;
920c8300
AG
1187
1188 /* ENODATA means the HCI request command queue is empty.
1189 * This can happen when a request with conditionals doesn't
1190 * trigger any commands to be sent. This is normal behavior
1191 * and should not trigger an error return.
42c6b129 1192 */
920c8300
AG
1193 if (err == -ENODATA)
1194 return 0;
1195
1196 return err;
53cce22d
JH
1197 }
1198
bc4445c7
AG
1199 add_wait_queue(&hdev->req_wait_q, &wait);
1200 set_current_state(TASK_INTERRUPTIBLE);
1201
1da177e4
LT
1202 schedule_timeout(timeout);
1203
1204 remove_wait_queue(&hdev->req_wait_q, &wait);
1205
1206 if (signal_pending(current))
1207 return -EINTR;
1208
1209 switch (hdev->req_status) {
1210 case HCI_REQ_DONE:
e175072f 1211 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
1212 break;
1213
1214 case HCI_REQ_CANCELED:
1215 err = -hdev->req_result;
1216 break;
1217
1218 default:
1219 err = -ETIMEDOUT;
1220 break;
3ff50b79 1221 }
1da177e4 1222
a5040efa 1223 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
1224
1225 BT_DBG("%s end: err %d", hdev->name, err);
1226
1227 return err;
1228}
1229
01178cd4 1230static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
1231 void (*req)(struct hci_request *req,
1232 unsigned long opt),
01178cd4 1233 unsigned long opt, __u32 timeout)
1da177e4
LT
1234{
1235 int ret;
1236
7c6a329e
MH
1237 if (!test_bit(HCI_UP, &hdev->flags))
1238 return -ENETDOWN;
1239
1da177e4
LT
1240 /* Serialize all requests */
1241 hci_req_lock(hdev);
01178cd4 1242 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
1243 hci_req_unlock(hdev);
1244
1245 return ret;
1246}
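
/* Note the split above: hci_req_sync() takes the request lock itself,
 * while __hci_req_sync() relies on the caller already holding it (as
 * __hci_init() below does when invoked from the device-open path), so
 * both share the same wait-for-completion logic.
 */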

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
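
/* Sketch of the mapping used above: bit b of events[n] enables event
 * number (n * 8 + b) of the HCI event mask, so events[0] |= 0x10 is bit 4
 * (Disconnection Complete) and events[1] |= 0x20 is bit 13 (Command
 * Complete), following the Bluetooth Core Specification numbering.
 */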
1508
42c6b129 1509static void hci_init2_req(struct hci_request *req, unsigned long opt)
2177bab5 1510{
42c6b129
JH
1511 struct hci_dev *hdev = req->hdev;
1512
2177bab5 1513 if (lmp_bredr_capable(hdev))
42c6b129 1514 bredr_setup(req);
56f87901
JH
1515 else
1516 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2177bab5
JH
1517
1518 if (lmp_le_capable(hdev))
42c6b129 1519 le_setup(req);
2177bab5 1520
42c6b129 1521 hci_setup_event_mask(req);
2177bab5 1522
3f8e2d75
JH
1523 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1524 * local supported commands HCI command.
1525 */
1526 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
42c6b129 1527 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
2177bab5
JH
1528
1529 if (lmp_ssp_capable(hdev)) {
57af75a8
MH
1530 /* When SSP is available, then the host features page
1531 * should also be available as well. However some
1532 * controllers list the max_page as 0 as long as SSP
1533 * has not been enabled. To achieve proper debugging
1534 * output, force the minimum max_page to 1 at least.
1535 */
1536 hdev->max_page = 0x01;
1537
2177bab5
JH
1538 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1539 u8 mode = 0x01;
42c6b129
JH
1540 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1541 sizeof(mode), &mode);
2177bab5
JH
1542 } else {
1543 struct hci_cp_write_eir cp;
1544
1545 memset(hdev->eir, 0, sizeof(hdev->eir));
1546 memset(&cp, 0, sizeof(cp));
1547
42c6b129 1548 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
2177bab5
JH
1549 }
1550 }
1551
1552 if (lmp_inq_rssi_capable(hdev))
42c6b129 1553 hci_setup_inquiry_mode(req);
2177bab5
JH
1554
1555 if (lmp_inq_tx_pwr_capable(hdev))
42c6b129 1556 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
2177bab5
JH
1557
1558 if (lmp_ext_feat_capable(hdev)) {
1559 struct hci_cp_read_local_ext_features cp;
1560
1561 cp.page = 0x01;
42c6b129
JH
1562 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1563 sizeof(cp), &cp);
2177bab5
JH
1564 }
1565
1566 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1567 u8 enable = 1;
42c6b129
JH
1568 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1569 &enable);
2177bab5
JH
1570 }
1571}
1572
42c6b129 1573static void hci_setup_link_policy(struct hci_request *req)
2177bab5 1574{
42c6b129 1575 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1576 struct hci_cp_write_def_link_policy cp;
1577 u16 link_policy = 0;
1578
1579 if (lmp_rswitch_capable(hdev))
1580 link_policy |= HCI_LP_RSWITCH;
1581 if (lmp_hold_capable(hdev))
1582 link_policy |= HCI_LP_HOLD;
1583 if (lmp_sniff_capable(hdev))
1584 link_policy |= HCI_LP_SNIFF;
1585 if (lmp_park_capable(hdev))
1586 link_policy |= HCI_LP_PARK;
1587
1588 cp.policy = cpu_to_le16(link_policy);
42c6b129 1589 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
1590}
1591
42c6b129 1592static void hci_set_le_support(struct hci_request *req)
2177bab5 1593{
42c6b129 1594 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1595 struct hci_cp_write_le_host_supported cp;
1596
c73eee91
JH
1597 /* LE-only devices do not support explicit enablement */
1598 if (!lmp_bredr_capable(hdev))
1599 return;
1600
2177bab5
JH
1601 memset(&cp, 0, sizeof(cp));
1602
1603 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1604 cp.le = 0x01;
1605 cp.simul = lmp_le_br_capable(hdev);
1606 }
1607
1608 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
1609 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1610 &cp);
2177bab5
JH
1611}
1612
d62e6d67
JH
1613static void hci_set_event_mask_page_2(struct hci_request *req)
1614{
1615 struct hci_dev *hdev = req->hdev;
1616 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1617
1618 /* If Connectionless Slave Broadcast master role is supported
1619 * enable all necessary events for it.
1620 */
53b834d2 1621 if (lmp_csb_master_capable(hdev)) {
d62e6d67
JH
1622 events[1] |= 0x40; /* Triggered Clock Capture */
1623 events[1] |= 0x80; /* Synchronization Train Complete */
1624 events[2] |= 0x10; /* Slave Page Response Timeout */
1625 events[2] |= 0x20; /* CSB Channel Map Change */
1626 }
1627
1628 /* If Connectionless Slave Broadcast slave role is supported
1629 * enable all necessary events for it.
1630 */
53b834d2 1631 if (lmp_csb_slave_capable(hdev)) {
d62e6d67
JH
1632 events[2] |= 0x01; /* Synchronization Train Received */
1633 events[2] |= 0x02; /* CSB Receive */
1634 events[2] |= 0x04; /* CSB Timeout */
1635 events[2] |= 0x08; /* Truncated Page Complete */
1636 }
1637
40c59fcb
MH
1638 /* Enable Authenticated Payload Timeout Expired event if supported */
1639 if (lmp_ping_capable(hdev))
1640 events[2] |= 0x80;
1641
d62e6d67
JH
1642 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1643}
1644
42c6b129 1645static void hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 1646{
42c6b129 1647 struct hci_dev *hdev = req->hdev;
d2c5d77f 1648 u8 p;
42c6b129 1649
b8f4e068
GP
1650 /* Some Broadcom based Bluetooth controllers do not support the
1651 * Delete Stored Link Key command. They are clearly indicating its
1652 * absence in the bit mask of supported commands.
1653 *
1654 * Check the supported commands and only if the the command is marked
1655 * as supported send it. If not supported assume that the controller
1656 * does not have actual support for stored link keys which makes this
1657 * command redundant anyway.
f9f462fa
MH
1658 *
1659 * Some controllers indicate that they support handling deleting
1660 * stored link keys, but they don't. The quirk lets a driver
1661 * just disable this command.
637b4cae 1662 */
f9f462fa
MH
1663 if (hdev->commands[6] & 0x80 &&
1664 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
59f45d57
JH
1665 struct hci_cp_delete_stored_link_key cp;
1666
1667 bacpy(&cp.bdaddr, BDADDR_ANY);
1668 cp.delete_all = 0x01;
1669 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1670 sizeof(cp), &cp);
1671 }
1672
2177bab5 1673 if (hdev->commands[5] & 0x10)
42c6b129 1674 hci_setup_link_policy(req);
2177bab5 1675
7bf32048 1676 if (lmp_le_capable(hdev))
42c6b129 1677 hci_set_le_support(req);
d2c5d77f
JH
1678
1679 /* Read features beyond page 1 if available */
1680 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1681 struct hci_cp_read_local_ext_features cp;
1682
1683 cp.page = p;
1684 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1685 sizeof(cp), &cp);
1686 }
2177bab5
JH
1687}
1688
5d4e7e8d
JH
1689static void hci_init4_req(struct hci_request *req, unsigned long opt)
1690{
1691 struct hci_dev *hdev = req->hdev;
1692
d62e6d67
JH
1693 /* Set event mask page 2 if the HCI command for it is supported */
1694 if (hdev->commands[22] & 0x04)
1695 hci_set_event_mask_page_2(req);
1696
5d4e7e8d 1697 /* Check for Synchronization Train support */
53b834d2 1698 if (lmp_sync_train_capable(hdev))
5d4e7e8d 1699 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
a6d0d690
MH
1700
1701 /* Enable Secure Connections if supported and configured */
5afeac14
MH
1702 if ((lmp_sc_capable(hdev) ||
1703 test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
a6d0d690
MH
1704 test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
1705 u8 support = 0x01;
1706 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
1707 sizeof(support), &support);
1708 }
5d4e7e8d
JH
1709}
1710
2177bab5
JH
1711static int __hci_init(struct hci_dev *hdev)
1712{
1713 int err;
1714
1715 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1716 if (err < 0)
1717 return err;
1718
4b4148e9
MH
1719 /* The Device Under Test (DUT) mode is special and available for
1720 * all controller types. So just create it early on.
1721 */
1722 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1723 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1724 &dut_mode_fops);
1725 }
1726
2177bab5
JH
1727 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1728 * BR/EDR/LE type controllers. AMP controllers only need the
1729 * first stage init.
1730 */
1731 if (hdev->dev_type != HCI_BREDR)
1732 return 0;
1733
1734 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1735 if (err < 0)
1736 return err;
1737
5d4e7e8d
JH
1738 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1739 if (err < 0)
1740 return err;
1741
baf27f6e
MH
1742 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1743 if (err < 0)
1744 return err;
1745
1746 /* Only create debugfs entries during the initial setup
1747 * phase and not every time the controller gets powered on.
1748 */
1749 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1750 return 0;
1751
dfb826a8
MH
1752 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1753 &features_fops);
ceeb3bc0
MH
1754 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1755 &hdev->manufacturer);
1756 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1757 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
70afe0b8
MH
1758 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1759 &blacklist_fops);
47219839
MH
1760 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1761
baf27f6e
MH
1762 if (lmp_bredr_capable(hdev)) {
1763 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1764 hdev, &inquiry_cache_fops);
02d08d15
MH
1765 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1766 hdev, &link_keys_fops);
babdbb3c
MH
1767 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1768 hdev, &dev_class_fops);
041000b9
MH
1769 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1770 hdev, &voice_setting_fops);
baf27f6e
MH
1771 }
1772
06f5b778 1773 if (lmp_ssp_capable(hdev)) {
ebd1e33b
MH
1774 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1775 hdev, &auto_accept_delay_fops);
06f5b778
MH
1776 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1777 hdev, &ssp_debug_mode_fops);
5afeac14
MH
1778 debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
1779 hdev, &force_sc_support_fops);
134c2a89
MH
1780 debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
1781 hdev, &sc_only_mode_fops);
06f5b778 1782 }
ebd1e33b 1783
2bfa3531
MH
1784 if (lmp_sniff_capable(hdev)) {
1785 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1786 hdev, &idle_timeout_fops);
1787 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1788 hdev, &sniff_min_interval_fops);
1789 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1790 hdev, &sniff_max_interval_fops);
1791 }
1792
d0f729b8 1793 if (lmp_le_capable(hdev)) {
ac345813
MH
1794 debugfs_create_file("identity", 0400, hdev->debugfs,
1795 hdev, &identity_fops);
1796 debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
1797 hdev, &rpa_timeout_fops);
7a4cd51d
MH
1798 debugfs_create_file("random_address", 0444, hdev->debugfs,
1799 hdev, &random_address_fops);
b32bba6c
MH
1800 debugfs_create_file("static_address", 0444, hdev->debugfs,
1801 hdev, &static_address_fops);
1802
1803 /* For controllers with a public address, provide a debug
1804 * option to force the usage of the configured static
1805 * address. By default the public address is used.
1806 */
1807 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1808 debugfs_create_file("force_static_address", 0644,
1809 hdev->debugfs, hdev,
1810 &force_static_address_fops);
1811
d0f729b8
MH
1812 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1813 &hdev->le_white_list_size);
d2ab0ac1
MH
1814 debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
1815 &white_list_fops);
3698d704
MH
1816 debugfs_create_file("identity_resolving_keys", 0400,
1817 hdev->debugfs, hdev,
1818 &identity_resolving_keys_fops);
8f8625cd
MH
1819 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1820 hdev, &long_term_keys_fops);
4e70c7e7
MH
1821 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1822 hdev, &conn_min_interval_fops);
1823 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1824 hdev, &conn_max_interval_fops);
3f959d46
MH
1825 debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
1826 hdev, &adv_channel_map_fops);
89863109
JR
1827 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1828 &lowpan_debugfs_fops);
7d474e06
AG
1829 debugfs_create_file("le_auto_conn", 0644, hdev->debugfs, hdev,
1830 &le_auto_conn_fops);
d0f729b8 1831 }
e7b8fc92 1832
baf27f6e 1833 return 0;
2177bab5
JH
1834}
1835
42c6b129 1836static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1837{
1838 __u8 scan = opt;
1839
42c6b129 1840 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1841
1842 /* Inquiry and Page scans */
42c6b129 1843 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1844}
1845
42c6b129 1846static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1847{
1848 __u8 auth = opt;
1849
42c6b129 1850 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1851
1852 /* Authentication */
42c6b129 1853 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1854}
1855
42c6b129 1856static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1857{
1858 __u8 encrypt = opt;
1859
42c6b129 1860 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1861
e4e8e37c 1862 /* Encryption */
42c6b129 1863 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1864}
1865
42c6b129 1866static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1867{
1868 __le16 policy = cpu_to_le16(opt);
1869
42c6b129 1870 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1871
1872 /* Default link policy */
42c6b129 1873 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1874}
1875
8e87d142 1876/* Get HCI device by index.
1da177e4
LT
1877 * Device is held on return. */
1878struct hci_dev *hci_dev_get(int index)
1879{
8035ded4 1880 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1881
1882 BT_DBG("%d", index);
1883
1884 if (index < 0)
1885 return NULL;
1886
1887 read_lock(&hci_dev_list_lock);
8035ded4 1888 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1889 if (d->id == index) {
1890 hdev = hci_dev_hold(d);
1891 break;
1892 }
1893 }
1894 read_unlock(&hci_dev_list_lock);
1895 return hdev;
1896}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
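
/* Illustrative sketch (not in the original source): the usual sequence
 * of states as driven from the mgmt layer. STARTING and STOPPING are
 * transitional, which is why they emit no mgmt_discovering() events
 * above.
 */
static void __maybe_unused example_discovery_cycle(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);	/* scanning */
	hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);	/* name lookup */
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}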

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
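
/* Worked example (added for illustration): the resolve list is kept
 * sorted by ascending abs(rssi), so the strongest responder gets its
 * name resolved first. With cached entries at RSSI -40 and -70, an
 * entry updated to RSSI -55 is re-inserted between them:
 *
 *	resolve: [-40] -> [-55] -> [-70]
 *
 * Entries whose name request is already NAME_PENDING are never broken
 * on, so they keep their position regardless of RSSI.
 */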

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
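
/* Sketch of the matching user space call (illustrative only, not part
 * of this file): hci_inquiry() above backs the HCIINQUIRY ioctl, which
 * takes a struct hci_inquiry_req immediately followed by room for the
 * inquiry_info results. The GIAC lap and 8 x 1.28s length below are
 * conventional example values; hci_socket_fd is a hypothetical raw
 * HCI socket descriptor.
 */
#if 0
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info info[255];
	} buf = { { 0 } };

	buf.ir.dev_id  = 0;			/* hci0 */
	buf.ir.flags   = IREQ_CACHE_FLUSH;
	buf.ir.lap[0]  = 0x33;			/* GIAC 0x9e8b33 */
	buf.ir.lap[1]  = 0x8b;
	buf.ir.lap[2]  = 0x9e;
	buf.ir.length  = 8;			/* 8 * 1.28s */
	buf.ir.num_rsp = 255;

	if (ioctl(hci_socket_fd, HCIINQUIRY, &buf) < 0)
		perror("HCIINQUIRY");
#endif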

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_pend_le_conns_clear(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		list_del(&k->list);
		kfree(k);
	}
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
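
/* Worked examples for the rules above (added for illustration):
 *
 *	legacy key (type < 0x03)                    -> always stored
 *	HCI_LK_DEBUG_COMBINATION                    -> never stored
 *	changed combination key with no previous
 *	key (old_key_type == 0xff)                  -> not stored
 *	both sides asked for some form of bonding
 *	(auth_type and remote_auth > 0x01)          -> stored
 *	both sides no-bonding (0x00/0x01), no
 *	dedicated bonding on either side            -> not stored
 */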

static bool ltk_type_master(u8 type)
{
	if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
		return true;

	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_type_master(k->type) == master)
			return k;

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa))
			return irk;
	}

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			return irk;
		}
	}

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0)
			return irk;
	}

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k, *tmp;
	int removed = 0;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
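
/* Sketch (an assumption based on how hci_cmd_work() elsewhere in this
 * file arms the timeout): the timer above is armed each time a command
 * is handed to the driver and deleted again when the matching command
 * complete/status event arrives, so it only fires when the controller
 * stops responding.
 */
static inline void __maybe_unused example_arm_cmd_timer(struct hci_dev *hdev)
{
	mod_timer(&hdev->cmd_timer, jiffies + HCI_CMD_TIMEOUT);
}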

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

static void hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_blacklist_clear(hdev);
		return 0;
	}

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

struct bdaddr_list *hci_white_list_lookup(struct hci_dev *hdev,
					  bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->le_white_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_white_list_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->le_white_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_white_list_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->le_white_list);

	return 0;
}

int hci_white_list_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	entry = hci_white_list_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

static bool is_identity_address(bdaddr_t *addr, u8 addr_type)
{
	if (addr_type == ADDR_LE_DEV_PUBLIC)
		return true;

	/* Check for Random Static address type */
	if ((addr->b[5] & 0xc0) == 0xc0)
		return true;

	return false;
}
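
/* Worked example (added for illustration): bdaddr_t stores the address
 * little-endian, so b[5] holds the most significant byte. A static
 * random address must have its two top bits set:
 *
 *	C0:11:22:33:44:55 -> b[5] = 0xc0, (0xc0 & 0xc0) == 0xc0 -> identity
 *	40:11:22:33:44:55 -> b[5] = 0x40 -> resolvable private, rejected
 */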

/* This function requires the caller holds hdev->lock */
int hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			u8 auto_connect, u16 conn_min_interval,
			u16 conn_max_interval)
{
	struct hci_conn_params *params;

	if (!is_identity_address(addr, addr_type))
		return -EINVAL;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params)
		goto update;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return -ENOMEM;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;

	list_add(&params->list, &hdev->le_conn_params);

update:
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;
	params->auto_connect = auto_connect;

	switch (auto_connect) {
	case HCI_AUTO_CONN_DISABLED:
	case HCI_AUTO_CONN_LINK_LOSS:
		hci_pend_le_conn_del(hdev, addr, addr_type);
		break;
	case HCI_AUTO_CONN_ALWAYS:
		if (!is_connected(hdev, addr, addr_type))
			hci_pend_le_conn_add(hdev, addr, addr_type);
		break;
	}

	BT_DBG("addr %pMR (type %u) auto_connect %u conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, auto_connect,
	       conn_min_interval, conn_max_interval);

	return 0;
}
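
/* Usage sketch (illustrative; the values are examples only): ask the
 * kernel to autonomously reconnect a bonded LE device, with a 30-50 ms
 * connection interval (the interval parameters are in units of
 * 1.25 ms, so 0x0018 = 30 ms and 0x0028 = 50 ms).
 */
static int __maybe_unused example_add_auto_conn(struct hci_dev *hdev,
						bdaddr_t *addr)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_conn_params_add(hdev, addr, ADDR_LE_DEV_PUBLIC,
				  HCI_AUTO_CONN_ALWAYS, 0x0018, 0x0028);
	hci_dev_unlock(hdev);

	return err;
}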

/* This function requires the caller holds hdev->lock */
void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (!params)
		return;

	hci_pend_le_conn_del(hdev, addr, addr_type);

	list_del(&params->list);
	kfree(params);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);
}

/* This function requires the caller holds hdev->lock */
void hci_conn_params_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *params, *tmp;

	list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
		list_del(&params->list);
		kfree(params);
	}

	BT_DBG("All LE connection parameters were removed");
}

/* This function requires the caller holds hdev->lock */
struct bdaddr_list *hci_pend_le_conn_lookup(struct hci_dev *hdev,
					    bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	list_for_each_entry(entry, &hdev->pend_le_conns, list) {
		if (bacmp(&entry->bdaddr, addr) == 0 &&
		    entry->bdaddr_type == addr_type)
			return entry;
	}

	return NULL;
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (entry)
		goto done;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&entry->bdaddr, addr);
	entry->bdaddr_type = addr_type;

	list_add(&entry->list, &hdev->pend_le_conns);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conn_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
{
	struct bdaddr_list *entry;

	entry = hci_pend_le_conn_lookup(hdev, addr, addr_type);
	if (!entry)
		goto done;

	list_del(&entry->list);
	kfree(entry);

	BT_DBG("addr %pMR (type %u)", addr, addr_type);

done:
	hci_update_background_scan(hdev);
}

/* This function requires the caller holds hdev->lock */
void hci_pend_le_conns_clear(struct hci_dev *hdev)
{
	struct bdaddr_list *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->pend_le_conns, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("All LE pending connections cleared");
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_le_scan_disable(&req);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or there is something other than
	 * the current RPA in use, then generate a new one.
	 */
	if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev->tfm_aes, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use an unresolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t urpa;

		get_random_bytes(&urpa, 6);
		urpa.b[5] &= 0x3f;	/* Clear two most significant bits */

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, &urpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 */
	if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}
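
/* Summary of the selection above (added for illustration):
 *
 *	HCI_PRIVACY set            -> resolvable private address (RPA),
 *	                              regenerated once the old one expires
 *	require_privacy, no RPA    -> fresh non-resolvable private address
 *	forced static / no public  -> static random address
 *	otherwise                  -> public address
 */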
3722
a1f4c318
JH
3723/* Copy the Identity Address of the controller.
3724 *
3725 * If the controller has a public BD_ADDR, then by default use that one.
3726 * If this is a LE only controller without a public address, default to
3727 * the static random address.
3728 *
3729 * For debugging purposes it is possible to force controllers with a
3730 * public address to use the static random address instead.
3731 */
3732void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3733 u8 *bdaddr_type)
3734{
3735 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
3736 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3737 bacpy(bdaddr, &hdev->static_addr);
3738 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3739 } else {
3740 bacpy(bdaddr, &hdev->bdaddr);
3741 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3742 }
3743}
3744
9be0dab7
DH
3745/* Alloc HCI device */
3746struct hci_dev *hci_alloc_dev(void)
3747{
3748 struct hci_dev *hdev;
3749
3750 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3751 if (!hdev)
3752 return NULL;
3753
b1b813d4
DH
3754 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3755 hdev->esco_type = (ESCO_HV1);
3756 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3757 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3758 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
3759 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3760 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3761
b1b813d4
DH
3762 hdev->sniff_max_interval = 800;
3763 hdev->sniff_min_interval = 80;
3764
3f959d46 3765 hdev->le_adv_channel_map = 0x07;
bef64738
MH
3766 hdev->le_scan_interval = 0x0060;
3767 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3768 hdev->le_conn_min_interval = 0x0028;
3769 hdev->le_conn_max_interval = 0x0038;
bef64738 3770
d6bfd59c
JH
3771 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
3772
b1b813d4
DH
3773 mutex_init(&hdev->lock);
3774 mutex_init(&hdev->req_lock);
3775
3776 INIT_LIST_HEAD(&hdev->mgmt_pending);
3777 INIT_LIST_HEAD(&hdev->blacklist);
3778 INIT_LIST_HEAD(&hdev->uuids);
3779 INIT_LIST_HEAD(&hdev->link_keys);
3780 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3781 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3782 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 3783 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 3784 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 3785 INIT_LIST_HEAD(&hdev->pend_le_conns);
6b536b5e 3786 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3787
3788 INIT_WORK(&hdev->rx_work, hci_rx_work);
3789 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3790 INIT_WORK(&hdev->tx_work, hci_tx_work);
3791 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 3792
b1b813d4
DH
3793 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3794 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3795 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3796
b1b813d4
DH
3797 skb_queue_head_init(&hdev->rx_q);
3798 skb_queue_head_init(&hdev->cmd_q);
3799 skb_queue_head_init(&hdev->raw_q);
3800
3801 init_waitqueue_head(&hdev->req_wait_q);
3802
bda4f23a 3803 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 3804
b1b813d4
DH
3805 hci_init_sysfs(hdev);
3806 discovery_init(hdev);
9be0dab7
DH
3807
3808 return hdev;
3809}
3810EXPORT_SYMBOL(hci_alloc_dev);
3811
3812/* Free HCI device */
3813void hci_free_dev(struct hci_dev *hdev)
3814{
9be0dab7
DH
3815 /* will free via device release */
3816 put_device(&hdev->dev);
3817}
3818EXPORT_SYMBOL(hci_free_dev);
3819
1da177e4
LT
3820/* Register HCI device */
3821int hci_register_dev(struct hci_dev *hdev)
3822{
b1b813d4 3823 int id, error;
1da177e4 3824
010666a1 3825 if (!hdev->open || !hdev->close)
1da177e4
LT
3826 return -EINVAL;
3827
08add513
MM
3828 /* Do not allow HCI_AMP devices to register at index 0,
3829 * so the index can be used as the AMP controller ID.
3830 */
3df92b31
SL
3831 switch (hdev->dev_type) {
3832 case HCI_BREDR:
3833 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3834 break;
3835 case HCI_AMP:
3836 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3837 break;
3838 default:
3839 return -EINVAL;
1da177e4 3840 }
8e87d142 3841
3df92b31
SL
3842 if (id < 0)
3843 return id;
3844
1da177e4
LT
3845 sprintf(hdev->name, "hci%d", id);
3846 hdev->id = id;
2d8b3a11
AE
3847
3848 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3849
d8537548
KC
3850 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3851 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3852 if (!hdev->workqueue) {
3853 error = -ENOMEM;
3854 goto err;
3855 }
f48fd9c8 3856
d8537548
KC
3857 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3858 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3859 if (!hdev->req_workqueue) {
3860 destroy_workqueue(hdev->workqueue);
3861 error = -ENOMEM;
3862 goto err;
3863 }
3864
0153e2ec
MH
3865 if (!IS_ERR_OR_NULL(bt_debugfs))
3866 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3867
bdc3e0f1
MH
3868 dev_set_name(&hdev->dev, "%s", hdev->name);
3869
99780a7b
JH
3870 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3871 CRYPTO_ALG_ASYNC);
3872 if (IS_ERR(hdev->tfm_aes)) {
3873 BT_ERR("Unable to create crypto context");
3874 error = PTR_ERR(hdev->tfm_aes);
3875 hdev->tfm_aes = NULL;
3876 goto err_wqueue;
3877 }
3878
bdc3e0f1 3879 error = device_add(&hdev->dev);
33ca954d 3880 if (error < 0)
99780a7b 3881 goto err_tfm;
1da177e4 3882
611b30f7 3883 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3884 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3885 hdev);
611b30f7
MH
3886 if (hdev->rfkill) {
3887 if (rfkill_register(hdev->rfkill) < 0) {
3888 rfkill_destroy(hdev->rfkill);
3889 hdev->rfkill = NULL;
3890 }
3891 }
3892
5e130367
JH
3893 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3894 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3895
a8b2d5c2 3896 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 3897 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 3898
01cd3404 3899 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3900 /* Assume BR/EDR support until proven otherwise (such as
3901		 * through reading supported features during init).
3902 */
3903 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3904 }
ce2be9ac 3905
fcee3377
GP
3906 write_lock(&hci_dev_list_lock);
3907 list_add(&hdev->list, &hci_dev_list);
3908 write_unlock(&hci_dev_list_lock);
3909
1da177e4 3910 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3911 hci_dev_hold(hdev);
1da177e4 3912
19202573 3913 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3914
1da177e4 3915 return id;
f48fd9c8 3916
99780a7b
JH
3917err_tfm:
3918 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
3919err_wqueue:
3920 destroy_workqueue(hdev->workqueue);
6ead1bbc 3921 destroy_workqueue(hdev->req_workqueue);
33ca954d 3922err:
3df92b31 3923 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3924
33ca954d 3925 return error;
1da177e4
LT
3926}
3927EXPORT_SYMBOL(hci_register_dev);
3928
3929/* Unregister HCI device */
59735631 3930void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3931{
3df92b31 3932 int i, id;
ef222013 3933
c13854ce 3934 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3935
94324962
JH
3936 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3937
3df92b31
SL
3938 id = hdev->id;
3939
f20d09d5 3940 write_lock(&hci_dev_list_lock);
1da177e4 3941 list_del(&hdev->list);
f20d09d5 3942 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3943
3944 hci_dev_do_close(hdev);
3945
cd4c5391 3946 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
3947 kfree_skb(hdev->reassembly[i]);
3948
b9b5ef18
GP
3949 cancel_work_sync(&hdev->power_on);
3950
ab81cbf9 3951 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 3952 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 3953 hci_dev_lock(hdev);
744cf19e 3954 mgmt_index_removed(hdev);
09fd0de5 3955 hci_dev_unlock(hdev);
56e5cb86 3956 }
ab81cbf9 3957
2e58ef3e
JH
3958 /* mgmt_index_removed should take care of emptying the
3959 * pending list */
3960 BUG_ON(!list_empty(&hdev->mgmt_pending));
3961
1da177e4
LT
3962 hci_notify(hdev, HCI_DEV_UNREG);
3963
611b30f7
MH
3964 if (hdev->rfkill) {
3965 rfkill_unregister(hdev->rfkill);
3966 rfkill_destroy(hdev->rfkill);
3967 }
3968
99780a7b
JH
3969 if (hdev->tfm_aes)
3970 crypto_free_blkcipher(hdev->tfm_aes);
3971
bdc3e0f1 3972 device_del(&hdev->dev);
147e2d59 3973
0153e2ec
MH
3974 debugfs_remove_recursive(hdev->debugfs);
3975
f48fd9c8 3976 destroy_workqueue(hdev->workqueue);
6ead1bbc 3977 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3978
09fd0de5 3979 hci_dev_lock(hdev);
e2e0cacb 3980 hci_blacklist_clear(hdev);
2aeb9a1a 3981 hci_uuids_clear(hdev);
55ed8ca1 3982 hci_link_keys_clear(hdev);
b899efaf 3983 hci_smp_ltks_clear(hdev);
970c4e46 3984 hci_smp_irks_clear(hdev);
2763eda6 3985 hci_remote_oob_data_clear(hdev);
d2ab0ac1 3986 hci_white_list_clear(hdev);
15819a70 3987 hci_conn_params_clear(hdev);
77a77a30 3988 hci_pend_le_conns_clear(hdev);
09fd0de5 3989 hci_dev_unlock(hdev);
e2e0cacb 3990
dc946bd8 3991 hci_dev_put(hdev);
3df92b31
SL
3992
3993 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
3994}
3995EXPORT_SYMBOL(hci_unregister_dev);
3996
3997/* Suspend HCI device */
3998int hci_suspend_dev(struct hci_dev *hdev)
3999{
4000 hci_notify(hdev, HCI_DEV_SUSPEND);
4001 return 0;
4002}
4003EXPORT_SYMBOL(hci_suspend_dev);
4004
4005/* Resume HCI device */
4006int hci_resume_dev(struct hci_dev *hdev)
4007{
4008 hci_notify(hdev, HCI_DEV_RESUME);
4009 return 0;
4010}
4011EXPORT_SYMBOL(hci_resume_dev);
4012
76bca880 4013/* Receive frame from HCI drivers */
e1a26170 4014int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4015{
76bca880 4016 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4017 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4018 kfree_skb(skb);
4019 return -ENXIO;
4020 }
4021
d82603c6 4022 /* Incoming skb */
76bca880
MH
4023 bt_cb(skb)->incoming = 1;
4024
4025 /* Time stamp */
4026 __net_timestamp(skb);
4027
76bca880 4028 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4029 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4030
76bca880
MH
4031 return 0;
4032}
4033EXPORT_SYMBOL(hci_recv_frame);
4034
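/* Editorial sketch, not part of the original file: a driver delivering
 * a complete HCI event to the core tags the skb with its packet type
 * first; my_deliver_event and its buf/len arguments are hypothetical.
 */
static int my_deliver_event(struct hci_dev *hdev, const void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	memcpy(skb_put(skb, len), buf, len);

	/* hci_recv_frame() consumes the skb on success and on error */
	return hci_recv_frame(hdev, skb);
}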
33e882a5 4035static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4036 int count, __u8 index)
33e882a5
SS
4037{
4038 int len = 0;
4039 int hlen = 0;
4040 int remain = count;
4041 struct sk_buff *skb;
4042 struct bt_skb_cb *scb;
4043
4044 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4045 index >= NUM_REASSEMBLY)
33e882a5
SS
4046 return -EILSEQ;
4047
4048 skb = hdev->reassembly[index];
4049
4050 if (!skb) {
4051 switch (type) {
4052 case HCI_ACLDATA_PKT:
4053 len = HCI_MAX_FRAME_SIZE;
4054 hlen = HCI_ACL_HDR_SIZE;
4055 break;
4056 case HCI_EVENT_PKT:
4057 len = HCI_MAX_EVENT_SIZE;
4058 hlen = HCI_EVENT_HDR_SIZE;
4059 break;
4060 case HCI_SCODATA_PKT:
4061 len = HCI_MAX_SCO_SIZE;
4062 hlen = HCI_SCO_HDR_SIZE;
4063 break;
4064 }
4065
1e429f38 4066 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4067 if (!skb)
4068 return -ENOMEM;
4069
4070 scb = (void *) skb->cb;
4071 scb->expect = hlen;
4072 scb->pkt_type = type;
4073
33e882a5
SS
4074 hdev->reassembly[index] = skb;
4075 }
4076
4077 while (count) {
4078 scb = (void *) skb->cb;
89bb46d0 4079 len = min_t(uint, scb->expect, count);
33e882a5
SS
4080
4081 memcpy(skb_put(skb, len), data, len);
4082
4083 count -= len;
4084 data += len;
4085 scb->expect -= len;
4086 remain = count;
4087
4088 switch (type) {
4089 case HCI_EVENT_PKT:
4090 if (skb->len == HCI_EVENT_HDR_SIZE) {
4091 struct hci_event_hdr *h = hci_event_hdr(skb);
4092 scb->expect = h->plen;
4093
4094 if (skb_tailroom(skb) < scb->expect) {
4095 kfree_skb(skb);
4096 hdev->reassembly[index] = NULL;
4097 return -ENOMEM;
4098 }
4099 }
4100 break;
4101
4102 case HCI_ACLDATA_PKT:
4103 if (skb->len == HCI_ACL_HDR_SIZE) {
4104 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4105 scb->expect = __le16_to_cpu(h->dlen);
4106
4107 if (skb_tailroom(skb) < scb->expect) {
4108 kfree_skb(skb);
4109 hdev->reassembly[index] = NULL;
4110 return -ENOMEM;
4111 }
4112 }
4113 break;
4114
4115 case HCI_SCODATA_PKT:
4116 if (skb->len == HCI_SCO_HDR_SIZE) {
4117 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4118 scb->expect = h->dlen;
4119
4120 if (skb_tailroom(skb) < scb->expect) {
4121 kfree_skb(skb);
4122 hdev->reassembly[index] = NULL;
4123 return -ENOMEM;
4124 }
4125 }
4126 break;
4127 }
4128
4129 if (scb->expect == 0) {
4130 /* Complete frame */
4131
4132 bt_cb(skb)->pkt_type = type;
e1a26170 4133 hci_recv_frame(hdev, skb);
33e882a5
SS
4134
4135 hdev->reassembly[index] = NULL;
4136 return remain;
4137 }
4138 }
4139
4140 return remain;
4141}
4142
ef222013
MH
4143int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
4144{
f39a3c06
SS
4145 int rem = 0;
4146
ef222013
MH
4147 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
4148 return -EILSEQ;
4149
da5f6c37 4150 while (count) {
1e429f38 4151 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
4152 if (rem < 0)
4153 return rem;
ef222013 4154
f39a3c06
SS
4155 data += (count - rem);
4156 count = rem;
f81c6224 4157 }
ef222013 4158
f39a3c06 4159 return rem;
ef222013
MH
4160}
4161EXPORT_SYMBOL(hci_recv_fragment);
4162
99811510
SS
4163#define STREAM_REASSEMBLY 0
4164
4165int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4166{
4167 int type;
4168 int rem = 0;
4169
da5f6c37 4170 while (count) {
99811510
SS
4171 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4172
4173 if (!skb) {
4174 struct { char type; } *pkt;
4175
4176 /* Start of the frame */
4177 pkt = data;
4178 type = pkt->type;
4179
4180 data++;
4181 count--;
4182 } else
4183 type = bt_cb(skb)->pkt_type;
4184
1e429f38 4185 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4186 STREAM_REASSEMBLY);
99811510
SS
4187 if (rem < 0)
4188 return rem;
4189
4190 data += (count - rem);
4191 count = rem;
f81c6224 4192 }
99811510
SS
4193
4194 return rem;
4195}
4196EXPORT_SYMBOL(hci_recv_stream_fragment);
4197
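/* Editorial sketch, not part of the original file: a UART-style driver
 * that receives raw bytes with a leading packet type indicator can feed
 * them to hci_recv_stream_fragment() as they arrive; partial packets
 * are held in the reassembly slot above until complete.
 */
static void my_uart_rx(struct hci_dev *hdev, void *data, int count)
{
	if (hci_recv_stream_fragment(hdev, data, count) < 0)
		BT_ERR("%s stream reassembly failed", hdev->name);
}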
1da177e4
LT
4198/* ---- Interface to upper protocols ---- */
4199
1da177e4
LT
4200int hci_register_cb(struct hci_cb *cb)
4201{
4202 BT_DBG("%p name %s", cb, cb->name);
4203
f20d09d5 4204 write_lock(&hci_cb_list_lock);
1da177e4 4205 list_add(&cb->list, &hci_cb_list);
f20d09d5 4206 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4207
4208 return 0;
4209}
4210EXPORT_SYMBOL(hci_register_cb);
4211
4212int hci_unregister_cb(struct hci_cb *cb)
4213{
4214 BT_DBG("%p name %s", cb, cb->name);
4215
f20d09d5 4216 write_lock(&hci_cb_list_lock);
1da177e4 4217 list_del(&cb->list);
f20d09d5 4218 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4219
4220 return 0;
4221}
4222EXPORT_SYMBOL(hci_unregister_cb);
4223
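/* Editorial sketch, not part of the original file: an upper protocol
 * registers for connection events with a struct hci_cb. The fields
 * assumed here (name, security_cfm) follow this kernel's definition;
 * my_proto_cb and its handler are hypothetical.
 */
static void my_security_cfm(struct hci_conn *conn, __u8 status, __u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt 0x%2.2x", conn, status, encrypt);
}

static struct hci_cb my_proto_cb = {
	.name		= "my_proto",
	.security_cfm	= my_security_cfm,
};

/* module init/exit would call hci_register_cb(&my_proto_cb) and
 * hci_unregister_cb(&my_proto_cb) respectively.
 */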
51086991 4224static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4225{
0d48d939 4226 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4227
cd82e61c
MH
4228 /* Time stamp */
4229 __net_timestamp(skb);
1da177e4 4230
cd82e61c
MH
4231 /* Send copy to monitor */
4232 hci_send_to_monitor(hdev, skb);
4233
4234 if (atomic_read(&hdev->promisc)) {
4235 /* Send copy to the sockets */
470fe1b5 4236 hci_send_to_sock(hdev, skb);
1da177e4
LT
4237 }
4238
4239 /* Get rid of skb owner, prior to sending to the driver. */
4240 skb_orphan(skb);
4241
7bd8f09f 4242 if (hdev->send(hdev, skb) < 0)
51086991 4243 BT_ERR("%s sending frame failed", hdev->name);
1da177e4
LT
4244}
4245
3119ae95
JH
4246void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4247{
4248 skb_queue_head_init(&req->cmd_q);
4249 req->hdev = hdev;
5d73e034 4250 req->err = 0;
3119ae95
JH
4251}
4252
4253int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4254{
4255 struct hci_dev *hdev = req->hdev;
4256 struct sk_buff *skb;
4257 unsigned long flags;
4258
4259 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4260
5d73e034
AG
4261	/* If an error occurred during request building, remove all HCI
4262 * commands queued on the HCI request queue.
4263 */
4264 if (req->err) {
4265 skb_queue_purge(&req->cmd_q);
4266 return req->err;
4267 }
4268
3119ae95
JH
4269 /* Do not allow empty requests */
4270 if (skb_queue_empty(&req->cmd_q))
382b0c39 4271 return -ENODATA;
3119ae95
JH
4272
4273 skb = skb_peek_tail(&req->cmd_q);
4274 bt_cb(skb)->req.complete = complete;
4275
4276 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4277 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4278 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4279
4280 queue_work(hdev->workqueue, &hdev->cmd_work);
4281
4282 return 0;
4283}
4284
1ca3a9d0 4285static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4286 u32 plen, const void *param)
1da177e4
LT
4287{
4288 int len = HCI_COMMAND_HDR_SIZE + plen;
4289 struct hci_command_hdr *hdr;
4290 struct sk_buff *skb;
4291
1da177e4 4292 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4293 if (!skb)
4294 return NULL;
1da177e4
LT
4295
4296 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4297 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4298 hdr->plen = plen;
4299
4300 if (plen)
4301 memcpy(skb_put(skb, plen), param, plen);
4302
4303 BT_DBG("skb len %d", skb->len);
4304
0d48d939 4305 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 4306
1ca3a9d0
JH
4307 return skb;
4308}
4309
4310/* Send HCI command */
07dc93dd
JH
4311int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4312 const void *param)
1ca3a9d0
JH
4313{
4314 struct sk_buff *skb;
4315
4316 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4317
4318 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4319 if (!skb) {
4320 BT_ERR("%s no memory for command", hdev->name);
4321 return -ENOMEM;
4322 }
4323
11714b3d
JH
4324	/* Stand-alone HCI commands must be flagged as
4325 * single-command requests.
4326 */
4327 bt_cb(skb)->req.start = true;
4328
1da177e4 4329 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4330 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4331
4332 return 0;
4333}
1da177e4 4334
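/* Editorial sketch, not part of the original file: a one-shot command
 * such as HCI reset needs no request context and can be queued
 * directly; my_reset is a hypothetical wrapper.
 */
static int my_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}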
71c76a17 4335/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4336void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4337 const void *param, u8 event)
71c76a17
JH
4338{
4339 struct hci_dev *hdev = req->hdev;
4340 struct sk_buff *skb;
4341
4342 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4343
34739c1e
AG
4344	/* If an error occurred during request building, there is no point in
4345 * queueing the HCI command. We can simply return.
4346 */
4347 if (req->err)
4348 return;
4349
71c76a17
JH
4350 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4351 if (!skb) {
5d73e034
AG
4352 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4353 hdev->name, opcode);
4354 req->err = -ENOMEM;
e348fe6b 4355 return;
71c76a17
JH
4356 }
4357
4358 if (skb_queue_empty(&req->cmd_q))
4359 bt_cb(skb)->req.start = true;
4360
02350a72
JH
4361 bt_cb(skb)->req.event = event;
4362
71c76a17 4363 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4364}
4365
07dc93dd
JH
4366void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4367 const void *param)
02350a72
JH
4368{
4369 hci_req_add_ev(req, opcode, plen, param, 0);
4370}
4371
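/* Editorial sketch, not part of the original file: several commands can
 * be batched between hci_req_init() and hci_req_run(), with a single
 * completion callback for the whole batch. The scan-disable payload
 * mirrors hci_req_add_le_scan_disable() later in this file;
 * my_req_complete and my_disable_scan are hypothetical.
 */
static void my_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s request done, status 0x%2.2x", hdev->name, status);
}

static int my_disable_scan(struct hci_dev *hdev)
{
	struct hci_request req;
	struct hci_cp_le_set_scan_enable cp;

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return hci_req_run(&req, my_req_complete);
}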
1da177e4 4372/* Get data from the previously sent command */
a9de9248 4373void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4374{
4375 struct hci_command_hdr *hdr;
4376
4377 if (!hdev->sent_cmd)
4378 return NULL;
4379
4380 hdr = (void *) hdev->sent_cmd->data;
4381
a9de9248 4382 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4383 return NULL;
4384
f0e09510 4385 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4386
4387 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4388}
4389
4390/* Send ACL data */
4391static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4392{
4393 struct hci_acl_hdr *hdr;
4394 int len = skb->len;
4395
badff6d0
ACM
4396 skb_push(skb, HCI_ACL_HDR_SIZE);
4397 skb_reset_transport_header(skb);
9c70220b 4398 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4399 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4400 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4401}
4402
ee22be7e 4403static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4404 struct sk_buff *skb, __u16 flags)
1da177e4 4405{
ee22be7e 4406 struct hci_conn *conn = chan->conn;
1da177e4
LT
4407 struct hci_dev *hdev = conn->hdev;
4408 struct sk_buff *list;
4409
087bfd99
GP
4410 skb->len = skb_headlen(skb);
4411 skb->data_len = 0;
4412
4413 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4414
4415 switch (hdev->dev_type) {
4416 case HCI_BREDR:
4417 hci_add_acl_hdr(skb, conn->handle, flags);
4418 break;
4419 case HCI_AMP:
4420 hci_add_acl_hdr(skb, chan->handle, flags);
4421 break;
4422 default:
4423 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4424 return;
4425 }
087bfd99 4426
70f23020
AE
4427 list = skb_shinfo(skb)->frag_list;
4428 if (!list) {
1da177e4
LT
4429		/* Non-fragmented */
4430 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4431
73d80deb 4432 skb_queue_tail(queue, skb);
1da177e4
LT
4433 } else {
4434 /* Fragmented */
4435 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4436
4437 skb_shinfo(skb)->frag_list = NULL;
4438
4439 /* Queue all fragments atomically */
af3e6359 4440 spin_lock(&queue->lock);
1da177e4 4441
73d80deb 4442 __skb_queue_tail(queue, skb);
e702112f
AE
4443
4444 flags &= ~ACL_START;
4445 flags |= ACL_CONT;
1da177e4
LT
4446 do {
4447 skb = list; list = list->next;
8e87d142 4448
0d48d939 4449 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4450 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4451
4452 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4453
73d80deb 4454 __skb_queue_tail(queue, skb);
1da177e4
LT
4455 } while (list);
4456
af3e6359 4457 spin_unlock(&queue->lock);
1da177e4 4458 }
73d80deb
LAD
4459}
4460
4461void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4462{
ee22be7e 4463 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4464
f0e09510 4465 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4466
ee22be7e 4467 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4468
3eff45ea 4469 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4470}
1da177e4
LT
4471
4472/* Send SCO data */
0d861d8b 4473void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4474{
4475 struct hci_dev *hdev = conn->hdev;
4476 struct hci_sco_hdr hdr;
4477
4478 BT_DBG("%s len %d", hdev->name, skb->len);
4479
aca3192c 4480 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4481 hdr.dlen = skb->len;
4482
badff6d0
ACM
4483 skb_push(skb, HCI_SCO_HDR_SIZE);
4484 skb_reset_transport_header(skb);
9c70220b 4485 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4486
0d48d939 4487 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4488
1da177e4 4489 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4490 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4491}
1da177e4
LT
4492
4493/* ---- HCI TX task (outgoing data) ---- */
4494
4495/* HCI Connection scheduler */
6039aa73
GP
4496static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4497 int *quote)
1da177e4
LT
4498{
4499 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4500 struct hci_conn *conn = NULL, *c;
abc5de8f 4501 unsigned int num = 0, min = ~0;
1da177e4 4502
8e87d142 4503 /* We don't have to lock device here. Connections are always
1da177e4 4504 * added and removed with TX task disabled. */
bf4c6325
GP
4505
4506 rcu_read_lock();
4507
4508 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4509 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4510 continue;
769be974
MH
4511
4512 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4513 continue;
4514
1da177e4
LT
4515 num++;
4516
4517 if (c->sent < min) {
4518 min = c->sent;
4519 conn = c;
4520 }
52087a79
LAD
4521
4522 if (hci_conn_num(hdev, type) == num)
4523 break;
1da177e4
LT
4524 }
4525
bf4c6325
GP
4526 rcu_read_unlock();
4527
1da177e4 4528 if (conn) {
6ed58ec5
VT
4529 int cnt, q;
4530
4531 switch (conn->type) {
4532 case ACL_LINK:
4533 cnt = hdev->acl_cnt;
4534 break;
4535 case SCO_LINK:
4536 case ESCO_LINK:
4537 cnt = hdev->sco_cnt;
4538 break;
4539 case LE_LINK:
4540 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4541 break;
4542 default:
4543 cnt = 0;
4544 BT_ERR("Unknown link type");
4545 }
4546
4547 q = cnt / num;
1da177e4
LT
4548 *quote = q ? q : 1;
4549 } else
4550 *quote = 0;
4551
4552 BT_DBG("conn %p quote %d", conn, *quote);
4553 return conn;
4554}
4555
6039aa73 4556static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4557{
4558 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4559 struct hci_conn *c;
1da177e4 4560
bae1f5d9 4561 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4562
bf4c6325
GP
4563 rcu_read_lock();
4564
1da177e4 4565 /* Kill stalled connections */
bf4c6325 4566 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4567 if (c->type == type && c->sent) {
6ed93dc6
AE
4568 BT_ERR("%s killing stalled connection %pMR",
4569 hdev->name, &c->dst);
bed71748 4570 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4571 }
4572 }
bf4c6325
GP
4573
4574 rcu_read_unlock();
1da177e4
LT
4575}
4576
6039aa73
GP
4577static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4578 int *quote)
1da177e4 4579{
73d80deb
LAD
4580 struct hci_conn_hash *h = &hdev->conn_hash;
4581 struct hci_chan *chan = NULL;
abc5de8f 4582 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4583 struct hci_conn *conn;
73d80deb
LAD
4584 int cnt, q, conn_num = 0;
4585
4586 BT_DBG("%s", hdev->name);
4587
bf4c6325
GP
4588 rcu_read_lock();
4589
4590 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4591 struct hci_chan *tmp;
4592
4593 if (conn->type != type)
4594 continue;
4595
4596 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4597 continue;
4598
4599 conn_num++;
4600
8192edef 4601 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4602 struct sk_buff *skb;
4603
4604 if (skb_queue_empty(&tmp->data_q))
4605 continue;
4606
4607 skb = skb_peek(&tmp->data_q);
4608 if (skb->priority < cur_prio)
4609 continue;
4610
4611 if (skb->priority > cur_prio) {
4612 num = 0;
4613 min = ~0;
4614 cur_prio = skb->priority;
4615 }
4616
4617 num++;
4618
4619 if (conn->sent < min) {
4620 min = conn->sent;
4621 chan = tmp;
4622 }
4623 }
4624
4625 if (hci_conn_num(hdev, type) == conn_num)
4626 break;
4627 }
4628
bf4c6325
GP
4629 rcu_read_unlock();
4630
73d80deb
LAD
4631 if (!chan)
4632 return NULL;
4633
4634 switch (chan->conn->type) {
4635 case ACL_LINK:
4636 cnt = hdev->acl_cnt;
4637 break;
bd1eb66b
AE
4638 case AMP_LINK:
4639 cnt = hdev->block_cnt;
4640 break;
73d80deb
LAD
4641 case SCO_LINK:
4642 case ESCO_LINK:
4643 cnt = hdev->sco_cnt;
4644 break;
4645 case LE_LINK:
4646 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4647 break;
4648 default:
4649 cnt = 0;
4650 BT_ERR("Unknown link type");
4651 }
4652
4653 q = cnt / num;
4654 *quote = q ? q : 1;
4655 BT_DBG("chan %p quote %d", chan, *quote);
4656 return chan;
4657}
4658
02b20f0b
LAD
4659static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4660{
4661 struct hci_conn_hash *h = &hdev->conn_hash;
4662 struct hci_conn *conn;
4663 int num = 0;
4664
4665 BT_DBG("%s", hdev->name);
4666
bf4c6325
GP
4667 rcu_read_lock();
4668
4669 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4670 struct hci_chan *chan;
4671
4672 if (conn->type != type)
4673 continue;
4674
4675 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4676 continue;
4677
4678 num++;
4679
8192edef 4680 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4681 struct sk_buff *skb;
4682
4683 if (chan->sent) {
4684 chan->sent = 0;
4685 continue;
4686 }
4687
4688 if (skb_queue_empty(&chan->data_q))
4689 continue;
4690
4691 skb = skb_peek(&chan->data_q);
4692 if (skb->priority >= HCI_PRIO_MAX - 1)
4693 continue;
4694
4695 skb->priority = HCI_PRIO_MAX - 1;
4696
4697 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4698 skb->priority);
02b20f0b
LAD
4699 }
4700
4701 if (hci_conn_num(hdev, type) == num)
4702 break;
4703 }
bf4c6325
GP
4704
4705 rcu_read_unlock();
4706
02b20f0b
LAD
4707}
4708
b71d385a
AE
4709static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4710{
4711 /* Calculate count of blocks used by this packet */
4712 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4713}
4714
6039aa73 4715static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4716{
1da177e4
LT
4717 if (!test_bit(HCI_RAW, &hdev->flags)) {
4718 /* ACL tx timeout must be longer than maximum
4719 * link supervision timeout (40.9 seconds) */
63d2bc1b 4720 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4721 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4722 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4723 }
63d2bc1b 4724}
1da177e4 4725
6039aa73 4726static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4727{
4728 unsigned int cnt = hdev->acl_cnt;
4729 struct hci_chan *chan;
4730 struct sk_buff *skb;
4731 int quote;
4732
4733 __check_timeout(hdev, cnt);
04837f64 4734
73d80deb 4735 while (hdev->acl_cnt &&
a8c5fb1a 4736 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4737 u32 priority = (skb_peek(&chan->data_q))->priority;
4738 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4739 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4740 skb->len, skb->priority);
73d80deb 4741
ec1cce24
LAD
4742 /* Stop if priority has changed */
4743 if (skb->priority < priority)
4744 break;
4745
4746 skb = skb_dequeue(&chan->data_q);
4747
73d80deb 4748 hci_conn_enter_active_mode(chan->conn,
04124681 4749 bt_cb(skb)->force_active);
04837f64 4750
57d17d70 4751 hci_send_frame(hdev, skb);
1da177e4
LT
4752 hdev->acl_last_tx = jiffies;
4753
4754 hdev->acl_cnt--;
73d80deb
LAD
4755 chan->sent++;
4756 chan->conn->sent++;
1da177e4
LT
4757 }
4758 }
02b20f0b
LAD
4759
4760 if (cnt != hdev->acl_cnt)
4761 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4762}
4763
6039aa73 4764static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4765{
63d2bc1b 4766 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4767 struct hci_chan *chan;
4768 struct sk_buff *skb;
4769 int quote;
bd1eb66b 4770 u8 type;
b71d385a 4771
63d2bc1b 4772 __check_timeout(hdev, cnt);
b71d385a 4773
bd1eb66b
AE
4774 BT_DBG("%s", hdev->name);
4775
4776 if (hdev->dev_type == HCI_AMP)
4777 type = AMP_LINK;
4778 else
4779 type = ACL_LINK;
4780
b71d385a 4781 while (hdev->block_cnt > 0 &&
bd1eb66b 4782 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4783 u32 priority = (skb_peek(&chan->data_q))->priority;
4784 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4785 int blocks;
4786
4787 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4788 skb->len, skb->priority);
b71d385a
AE
4789
4790 /* Stop if priority has changed */
4791 if (skb->priority < priority)
4792 break;
4793
4794 skb = skb_dequeue(&chan->data_q);
4795
4796 blocks = __get_blocks(hdev, skb);
4797 if (blocks > hdev->block_cnt)
4798 return;
4799
4800 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4801 bt_cb(skb)->force_active);
b71d385a 4802
57d17d70 4803 hci_send_frame(hdev, skb);
b71d385a
AE
4804 hdev->acl_last_tx = jiffies;
4805
4806 hdev->block_cnt -= blocks;
4807 quote -= blocks;
4808
4809 chan->sent += blocks;
4810 chan->conn->sent += blocks;
4811 }
4812 }
4813
4814 if (cnt != hdev->block_cnt)
bd1eb66b 4815 hci_prio_recalculate(hdev, type);
b71d385a
AE
4816}
4817
6039aa73 4818static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4819{
4820 BT_DBG("%s", hdev->name);
4821
bd1eb66b
AE
4822 /* No ACL link over BR/EDR controller */
4823 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4824 return;
4825
4826 /* No AMP link over AMP controller */
4827 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4828 return;
4829
4830 switch (hdev->flow_ctl_mode) {
4831 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4832 hci_sched_acl_pkt(hdev);
4833 break;
4834
4835 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4836 hci_sched_acl_blk(hdev);
4837 break;
4838 }
4839}
4840
1da177e4 4841/* Schedule SCO */
6039aa73 4842static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4843{
4844 struct hci_conn *conn;
4845 struct sk_buff *skb;
4846 int quote;
4847
4848 BT_DBG("%s", hdev->name);
4849
52087a79
LAD
4850 if (!hci_conn_num(hdev, SCO_LINK))
4851 return;
4852
1da177e4
LT
4853 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4854 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4855 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4856 hci_send_frame(hdev, skb);
1da177e4
LT
4857
4858 conn->sent++;
4859 if (conn->sent == ~0)
4860 conn->sent = 0;
4861 }
4862 }
4863}
4864
6039aa73 4865static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4866{
4867 struct hci_conn *conn;
4868 struct sk_buff *skb;
4869 int quote;
4870
4871 BT_DBG("%s", hdev->name);
4872
52087a79
LAD
4873 if (!hci_conn_num(hdev, ESCO_LINK))
4874 return;
4875
8fc9ced3
GP
4876 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4877 &quote))) {
b6a0dc82
MH
4878 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4879 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4880 hci_send_frame(hdev, skb);
b6a0dc82
MH
4881
4882 conn->sent++;
4883 if (conn->sent == ~0)
4884 conn->sent = 0;
4885 }
4886 }
4887}
4888
6039aa73 4889static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 4890{
73d80deb 4891 struct hci_chan *chan;
6ed58ec5 4892 struct sk_buff *skb;
02b20f0b 4893 int quote, cnt, tmp;
6ed58ec5
VT
4894
4895 BT_DBG("%s", hdev->name);
4896
52087a79
LAD
4897 if (!hci_conn_num(hdev, LE_LINK))
4898 return;
4899
6ed58ec5
VT
4900 if (!test_bit(HCI_RAW, &hdev->flags)) {
4901 /* LE tx timeout must be longer than maximum
4902 * link supervision timeout (40.9 seconds) */
bae1f5d9 4903 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 4904 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 4905 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
4906 }
4907
4908 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 4909 tmp = cnt;
73d80deb 4910 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
4911 u32 priority = (skb_peek(&chan->data_q))->priority;
4912 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4913 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4914 skb->len, skb->priority);
6ed58ec5 4915
ec1cce24
LAD
4916 /* Stop if priority has changed */
4917 if (skb->priority < priority)
4918 break;
4919
4920 skb = skb_dequeue(&chan->data_q);
4921
57d17d70 4922 hci_send_frame(hdev, skb);
6ed58ec5
VT
4923 hdev->le_last_tx = jiffies;
4924
4925 cnt--;
73d80deb
LAD
4926 chan->sent++;
4927 chan->conn->sent++;
6ed58ec5
VT
4928 }
4929 }
73d80deb 4930
6ed58ec5
VT
4931 if (hdev->le_pkts)
4932 hdev->le_cnt = cnt;
4933 else
4934 hdev->acl_cnt = cnt;
02b20f0b
LAD
4935
4936 if (cnt != tmp)
4937 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
4938}
4939
3eff45ea 4940static void hci_tx_work(struct work_struct *work)
1da177e4 4941{
3eff45ea 4942 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4943 struct sk_buff *skb;
4944
6ed58ec5 4945 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4946 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4947
52de599e
MH
4948 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4949 /* Schedule queues and send stuff to HCI driver */
4950 hci_sched_acl(hdev);
4951 hci_sched_sco(hdev);
4952 hci_sched_esco(hdev);
4953 hci_sched_le(hdev);
4954 }
6ed58ec5 4955
1da177e4
LT
4956 /* Send next queued raw (unknown type) packet */
4957 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4958 hci_send_frame(hdev, skb);
1da177e4
LT
4959}
4960
25985edc 4961/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4962
4963/* ACL data packet */
6039aa73 4964static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4965{
4966 struct hci_acl_hdr *hdr = (void *) skb->data;
4967 struct hci_conn *conn;
4968 __u16 handle, flags;
4969
4970 skb_pull(skb, HCI_ACL_HDR_SIZE);
4971
4972 handle = __le16_to_cpu(hdr->handle);
4973 flags = hci_flags(handle);
4974 handle = hci_handle(handle);
4975
f0e09510 4976 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4977 handle, flags);
1da177e4
LT
4978
4979 hdev->stat.acl_rx++;
4980
4981 hci_dev_lock(hdev);
4982 conn = hci_conn_hash_lookup_handle(hdev, handle);
4983 hci_dev_unlock(hdev);
8e87d142 4984
1da177e4 4985 if (conn) {
65983fc7 4986 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4987
1da177e4 4988 /* Send to upper protocol */
686ebf28
UF
4989 l2cap_recv_acldata(conn, skb, flags);
4990 return;
1da177e4 4991 } else {
8e87d142 4992 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 4993 hdev->name, handle);
1da177e4
LT
4994 }
4995
4996 kfree_skb(skb);
4997}
4998
4999/* SCO data packet */
6039aa73 5000static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5001{
5002 struct hci_sco_hdr *hdr = (void *) skb->data;
5003 struct hci_conn *conn;
5004 __u16 handle;
5005
5006 skb_pull(skb, HCI_SCO_HDR_SIZE);
5007
5008 handle = __le16_to_cpu(hdr->handle);
5009
f0e09510 5010 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5011
5012 hdev->stat.sco_rx++;
5013
5014 hci_dev_lock(hdev);
5015 conn = hci_conn_hash_lookup_handle(hdev, handle);
5016 hci_dev_unlock(hdev);
5017
5018 if (conn) {
1da177e4 5019 /* Send to upper protocol */
686ebf28
UF
5020 sco_recv_scodata(conn, skb);
5021 return;
1da177e4 5022 } else {
8e87d142 5023 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5024 hdev->name, handle);
1da177e4
LT
5025 }
5026
5027 kfree_skb(skb);
5028}
5029
9238f36a
JH
5030static bool hci_req_is_complete(struct hci_dev *hdev)
5031{
5032 struct sk_buff *skb;
5033
5034 skb = skb_peek(&hdev->cmd_q);
5035 if (!skb)
5036 return true;
5037
5038 return bt_cb(skb)->req.start;
5039}
5040
42c6b129
JH
5041static void hci_resend_last(struct hci_dev *hdev)
5042{
5043 struct hci_command_hdr *sent;
5044 struct sk_buff *skb;
5045 u16 opcode;
5046
5047 if (!hdev->sent_cmd)
5048 return;
5049
5050 sent = (void *) hdev->sent_cmd->data;
5051 opcode = __le16_to_cpu(sent->opcode);
5052 if (opcode == HCI_OP_RESET)
5053 return;
5054
5055 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5056 if (!skb)
5057 return;
5058
5059 skb_queue_head(&hdev->cmd_q, skb);
5060 queue_work(hdev->workqueue, &hdev->cmd_work);
5061}
5062
9238f36a
JH
5063void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5064{
5065 hci_req_complete_t req_complete = NULL;
5066 struct sk_buff *skb;
5067 unsigned long flags;
5068
5069 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5070
42c6b129
JH
5071 /* If the completed command doesn't match the last one that was
5072 * sent we need to do special handling of it.
9238f36a 5073 */
42c6b129
JH
5074 if (!hci_sent_cmd_data(hdev, opcode)) {
5075 /* Some CSR based controllers generate a spontaneous
5076 * reset complete event during init and any pending
5077 * command will never be completed. In such a case we
5078 * need to resend whatever was the last sent
5079 * command.
5080 */
5081 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5082 hci_resend_last(hdev);
5083
9238f36a 5084 return;
42c6b129 5085 }
9238f36a
JH
5086
5087 /* If the command succeeded and there's still more commands in
5088 * this request the request is not yet complete.
5089 */
5090 if (!status && !hci_req_is_complete(hdev))
5091 return;
5092
5093 /* If this was the last command in a request the complete
5094 * callback would be found in hdev->sent_cmd instead of the
5095 * command queue (hdev->cmd_q).
5096 */
5097 if (hdev->sent_cmd) {
5098 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5099
5100 if (req_complete) {
5101 /* We must set the complete callback to NULL to
5102 * avoid calling the callback more than once if
5103 * this function gets called again.
5104 */
5105 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5106
9238f36a 5107 goto call_complete;
53e21fbc 5108 }
9238f36a
JH
5109 }
5110
5111 /* Remove all pending commands belonging to this request */
5112 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5113 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5114 if (bt_cb(skb)->req.start) {
5115 __skb_queue_head(&hdev->cmd_q, skb);
5116 break;
5117 }
5118
5119 req_complete = bt_cb(skb)->req.complete;
5120 kfree_skb(skb);
5121 }
5122 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5123
5124call_complete:
5125 if (req_complete)
5126 req_complete(hdev, status);
5127}
5128
b78752cc 5129static void hci_rx_work(struct work_struct *work)
1da177e4 5130{
b78752cc 5131 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5132 struct sk_buff *skb;
5133
5134 BT_DBG("%s", hdev->name);
5135
1da177e4 5136 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5137 /* Send copy to monitor */
5138 hci_send_to_monitor(hdev, skb);
5139
1da177e4
LT
5140 if (atomic_read(&hdev->promisc)) {
5141 /* Send copy to the sockets */
470fe1b5 5142 hci_send_to_sock(hdev, skb);
1da177e4
LT
5143 }
5144
0736cfa8
MH
5145 if (test_bit(HCI_RAW, &hdev->flags) ||
5146 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5147 kfree_skb(skb);
5148 continue;
5149 }
5150
5151 if (test_bit(HCI_INIT, &hdev->flags)) {
5152			/* Don't process data packets in this state. */
0d48d939 5153 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5154 case HCI_ACLDATA_PKT:
5155 case HCI_SCODATA_PKT:
5156 kfree_skb(skb);
5157 continue;
3ff50b79 5158 }
1da177e4
LT
5159 }
5160
5161 /* Process frame */
0d48d939 5162 switch (bt_cb(skb)->pkt_type) {
1da177e4 5163 case HCI_EVENT_PKT:
b78752cc 5164 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5165 hci_event_packet(hdev, skb);
5166 break;
5167
5168 case HCI_ACLDATA_PKT:
5169 BT_DBG("%s ACL data packet", hdev->name);
5170 hci_acldata_packet(hdev, skb);
5171 break;
5172
5173 case HCI_SCODATA_PKT:
5174 BT_DBG("%s SCO data packet", hdev->name);
5175 hci_scodata_packet(hdev, skb);
5176 break;
5177
5178 default:
5179 kfree_skb(skb);
5180 break;
5181 }
5182 }
1da177e4
LT
5183}
5184
c347b765 5185static void hci_cmd_work(struct work_struct *work)
1da177e4 5186{
c347b765 5187 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5188 struct sk_buff *skb;
5189
2104786b
AE
5190 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5191 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5192
1da177e4 5193 /* Send queued commands */
5a08ecce
AE
5194 if (atomic_read(&hdev->cmd_cnt)) {
5195 skb = skb_dequeue(&hdev->cmd_q);
5196 if (!skb)
5197 return;
5198
7585b97a 5199 kfree_skb(hdev->sent_cmd);
1da177e4 5200
a675d7f1 5201 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5202 if (hdev->sent_cmd) {
1da177e4 5203 atomic_dec(&hdev->cmd_cnt);
57d17d70 5204 hci_send_frame(hdev, skb);
7bdb8a5c
SJ
5205 if (test_bit(HCI_RESET, &hdev->flags))
5206 del_timer(&hdev->cmd_timer);
5207 else
5208 mod_timer(&hdev->cmd_timer,
5f246e89 5209 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
5210 } else {
5211 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5212 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5213 }
5214 }
5215}
b1efcc28
AG
5216
5217void hci_req_add_le_scan_disable(struct hci_request *req)
5218{
5219 struct hci_cp_le_set_scan_enable cp;
5220
5221 memset(&cp, 0, sizeof(cp));
5222 cp.enable = LE_SCAN_DISABLE;
5223 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5224}
a4790dbd 5225
8ef30fd3
AG
5226void hci_req_add_le_passive_scan(struct hci_request *req)
5227{
5228 struct hci_cp_le_set_scan_param param_cp;
5229 struct hci_cp_le_set_scan_enable enable_cp;
5230 struct hci_dev *hdev = req->hdev;
5231 u8 own_addr_type;
5232
5233 /* Set require_privacy to true to avoid identification from
5234 * unknown peer devices. Since this is passive scanning, no
5235 * SCAN_REQ using the local identity should be sent. Mandating
5236 * privacy is just an extra precaution.
5237 */
5238 if (hci_update_random_address(req, true, &own_addr_type))
5239 return;
5240
5241 memset(&param_cp, 0, sizeof(param_cp));
5242 param_cp.type = LE_SCAN_PASSIVE;
5243 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5244 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5245 param_cp.own_address_type = own_addr_type;
5246 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5247 &param_cp);
5248
5249 memset(&enable_cp, 0, sizeof(enable_cp));
5250 enable_cp.enable = LE_SCAN_ENABLE;
5251 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
5252 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5253 &enable_cp);
5254}
5255
a4790dbd
AG
5256static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5257{
5258 if (status)
5259 BT_DBG("HCI request failed to update background scanning: "
5260 "status 0x%2.2x", status);
5261}
5262
5263/* This function controls the background scanning based on hdev->pend_le_conns
5264 * list. If there are pending LE connections we start the background scanning,
5265 * otherwise we stop it.
5266 *
5267 * This function requires the caller to hold hdev->lock.
5268 */
5269void hci_update_background_scan(struct hci_dev *hdev)
5270{
a4790dbd
AG
5271 struct hci_request req;
5272 struct hci_conn *conn;
5273 int err;
5274
5275 hci_req_init(&req, hdev);
5276
5277 if (list_empty(&hdev->pend_le_conns)) {
5278		/* If there are no pending LE connections, we should stop
5279 * the background scanning.
5280 */
5281
5282 /* If controller is not scanning we are done. */
5283 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5284 return;
5285
5286 hci_req_add_le_scan_disable(&req);
5287
5288 BT_DBG("%s stopping background scanning", hdev->name);
5289 } else {
a4790dbd
AG
5290 /* If there is at least one pending LE connection, we should
5291 * keep the background scan running.
5292 */
5293
5294 /* If controller is already scanning we are done. */
5295 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5296 return;
5297
5298 /* If controller is connecting, we should not start scanning
5299 * since some controllers are not able to scan and connect at
5300 * the same time.
5301 */
5302 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5303 if (conn)
5304 return;
5305
8ef30fd3 5306 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5307
5308 BT_DBG("%s starting background scanning", hdev->name);
5309 }
5310
5311 err = hci_req_run(&req, update_background_scan_complete);
5312 if (err)
5313 BT_ERR("Failed to run HCI request: err %d", err);
5314}
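/* Editorial sketch, not part of the original file: per the comment
 * above, hci_update_background_scan() must run with hdev->lock held,
 * so an external caller would wrap it as below;
 * my_kick_background_scan is hypothetical.
 */
static void my_kick_background_scan(struct hci_dev *hdev)
{
	hci_dev_lock(hdev);
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}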