Bluetooth: Convert link keys list to use RCU
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ----- HCI requests ----- */

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

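/* req_lock serializes synchronous request transactions against the
 * controller; hci_req_sync() takes it around every request it runs.
 */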
#define hci_req_lock(d)		mutex_lock(&d->req_lock)
#define hci_req_unlock(d)	mutex_unlock(&d->req_lock)

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dbg_flags))
		return -EALREADY;

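	/* Entering DUT mode is a dedicated HCI command; there is no
	 * "exit" command, so leaving it is done by resetting the
	 * controller instead.
	 */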
	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dbg_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};

static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct link_key *key;

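	/* The link keys list is RCU-protected, so readers can walk it
	 * under rcu_read_lock() without taking the hdev lock.
	 */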
	rcu_read_lock();
	list_for_each_entry_rcu(key, &hdev->link_keys, list)
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	rcu_read_unlock();

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");

static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};

static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};

static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

static int rpa_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	/* Require the RPA timeout to be at least 30 seconds and at most
	 * 24 hours.
	 */
	if (val < 30 || val > (60 * 60 * 24))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->rpa_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int rpa_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->rpa_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get,
			rpa_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");

static int conn_info_min_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val > hdev->conn_info_max_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_min_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_min_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_min_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get,
			conn_info_min_age_set, "%llu\n");

static int conn_info_max_age_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val < hdev->conn_info_min_age)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->conn_info_max_age = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_info_max_age_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->conn_info_max_age;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get,
			conn_info_max_age_set, "%llu\n");

static int identity_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	bdaddr_t addr;
	u8 addr_type;

	hci_dev_lock(hdev);

	hci_copy_identity_address(hdev, &addr, &addr_type);

	seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type,
		   16, hdev->irk, &hdev->rpa);

	hci_dev_unlock(hdev);

	return 0;
}

static int identity_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_show, inode->i_private);
}

static const struct file_operations identity_fops = {
	.open		= identity_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int random_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->random_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int random_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, random_address_show, inode->i_private);
}

static const struct file_operations random_address_fops = {
	.open		= random_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf) - 1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};

static int white_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->le_white_list, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int white_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, white_list_show, inode->i_private);
}

static const struct file_operations white_list_fops = {
	.open		= white_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	rcu_read_unlock();

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct smp_ltk *ltk;

	rcu_read_lock();
	list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   __le64_to_cpu(ltk->rand), 16, ltk->val);
	rcu_read_unlock();

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

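/* LE connection interval values are expressed in units of 1.25 ms;
 * the valid range is 0x0006 (7.5 ms) to 0x0c80 (4 s).
 */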
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");

static int conn_latency_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val > 0x01f3)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_latency = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_latency_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_latency;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_latency_fops, conn_latency_get,
			conn_latency_set, "%llu\n");

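/* The LE supervision timeout is in units of 10 ms; the valid range is
 * 0x000a (100 ms) to 0x0c80 (32 s).
 */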
static int supervision_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x000a || val > 0x0c80)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_supv_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int supervision_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_supv_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get,
			supervision_timeout_set, "%llu\n");

static int adv_channel_map_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x01 || val > 0x07)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_channel_map = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_channel_map_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_channel_map;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get,
			adv_channel_map_set, "%llu\n");

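/* LE advertising intervals are in units of 0.625 ms; the valid range
 * is 0x0020 (20 ms) to 0x4000 (10.24 s).
 */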
static int adv_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get,
			adv_min_interval_set, "%llu\n");

static int adv_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_adv_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int adv_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_adv_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get,
			adv_max_interval_set, "%llu\n");

static int device_list_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct hci_conn_params *p;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->whitelist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	list_for_each_entry(p, &hdev->le_conn_params, list) {
		seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type,
			   p->auto_connect);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int device_list_open(struct inode *inode, struct file *file)
{
	return single_open(file, device_list_show, inode->i_private);
}

static const struct file_operations device_list_fops = {
	.open		= device_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* ---- HCI requests ---- */

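/* A synchronous request is tracked via hdev->req_status: it moves from
 * HCI_REQ_PEND to HCI_REQ_DONE (or HCI_REQ_CANCELED), and the waiter
 * sleeping on req_wait_q is then woken up.
 */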
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

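/* Consume the last received event (hdev->recv_evt) and hand it to the
 * caller if it is the requested event, or the Command Complete event
 * matching the given opcode.
 */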
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

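/* Send a single HCI command and sleep until the matching completion
 * event arrives or the timeout expires; the caller gets the event skb
 * or an ERR_PTR.
 */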
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Clear LE White List */
	hci_req_add(req, HCI_OP_LE_CLEAR_WHITE_LIST, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

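/* Report the best inquiry result mode the controller can handle: 0x02
 * for extended inquiry, 0x01 for inquiry with RSSI, 0x00 otherwise.
 * A few controllers handle RSSI inquiry results without advertising
 * it, so they are whitelisted by manufacturer id and revision.
 */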
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles don't accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION) {
			events[0] |= 0x80; /* Encryption Change */
			events[5] |= 0x80; /* Encryption Key Refresh Complete */
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = 0x00;
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING)
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	hci_setup_event_mask(req);

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		u8 events[8];

		memset(events, 0, sizeof(events));
		events[0] = 0x0f;

		if (hdev->le_features[0] & HCI_LE_ENCRYPTION)
			events[0] |= 0x10; /* LE Long Term Key Request */

		/* If controller supports the Connection Parameters Request
		 * Link Layer Procedure, enable the corresponding event.
		 */
		if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC)
			events[0] |= 0x20; /* LE Remote Connection
					    * Parameter Request
					    */

		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK, sizeof(events),
			    events);

		if (hdev->commands[25] & 0x40) {
			/* Read LE Advertising Channel TX Power */
			hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Read local codec list if the HCI command is supported */
	if (hdev->commands[29] & 0x20)
		hci_req_add(req, HCI_OP_READ_LOCAL_CODECS, 0, NULL);

	/* Get MWS transport configuration if the HCI command is supported */
	if (hdev->commands[30] & 0x08)
		hci_req_add(req, HCI_OP_GET_MWS_TRANSPORT_CONFIG, 0, NULL);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dbg_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}

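/* Run the staged controller init: stage 1 (reset and basic controller
 * info) applies to all controller types, while stages 2-4 only run for
 * BR/EDR/LE controllers. Debugfs entries are created once, during the
 * initial HCI_SETUP phase.
 */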
2177bab5
JH
1746static int __hci_init(struct hci_dev *hdev)
1747{
1748 int err;
1749
1750 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1751 if (err < 0)
1752 return err;
1753
4b4148e9
MH
1754 /* The Device Under Test (DUT) mode is special and available for
1755 * all controller types. So just create it early on.
1756 */
1757 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1758 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1759 &dut_mode_fops);
1760 }
1761
2177bab5
JH
1762 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1763 * BR/EDR/LE type controllers. AMP controllers only need the
1764 * first stage init.
1765 */
1766 if (hdev->dev_type != HCI_BREDR)
1767 return 0;
1768
1769 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1770 if (err < 0)
1771 return err;
1772
5d4e7e8d
JH
1773 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1774 if (err < 0)
1775 return err;
1776
baf27f6e
MH
1777 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1778 if (err < 0)
1779 return err;
1780
1781 /* Only create debugfs entries during the initial setup
1782 * phase and not every time the controller gets powered on.
1783 */
1784 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1785 return 0;
1786
dfb826a8
MH
1787 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1788 &features_fops);
ceeb3bc0
MH
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("device_list", 0444, hdev->debugfs, hdev,
			    &device_list_fops);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev,
			    &conn_info_min_age_fops);
	debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev,
			    &conn_info_max_age_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_file("identity", 0400, hdev->debugfs,
				    hdev, &identity_fops);
		debugfs_create_file("rpa_timeout", 0644, hdev->debugfs,
				    hdev, &rpa_timeout_fops);
		debugfs_create_file("random_address", 0444, hdev->debugfs,
				    hdev, &random_address_fops);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("white_list", 0444, hdev->debugfs, hdev,
				    &white_list_fops);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("conn_latency", 0644, hdev->debugfs,
				    hdev, &conn_latency_fops);
		debugfs_create_file("supervision_timeout", 0644, hdev->debugfs,
				    hdev, &supervision_timeout_fops);
		debugfs_create_file("adv_channel_map", 0644, hdev->debugfs,
				    hdev, &adv_channel_map_fops);
		debugfs_create_file("adv_min_interval", 0644, hdev->debugfs,
				    hdev, &adv_min_interval_fops);
		debugfs_create_file("adv_max_interval", 0644, hdev->debugfs,
				    hdev, &adv_max_interval_fops);
		debugfs_create_u16("discov_interleaved_timeout", 0644,
				   hdev->debugfs,
				   &hdev->discov_interleaved_timeout);

		smp_register(hdev);
	}

	return 0;
}

static void hci_init0_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	if (hdev->set_bdaddr)
		hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static int __hci_unconf_init(struct hci_dev *hdev)
{
	int err;

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		return 0;

	err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

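/* Illustrative usage sketch (editorial addition, not from the original
 * file): a successful lookup returns the device with a reference held,
 * which the caller must drop with hci_dev_put() when done.
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *	if (hdev) {
 *		... use hdev ...
 *		hci_dev_put(hdev);
 *	}
 */
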
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	int old_state = hdev->discovery.state;

	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (old_state == state)
		return;

	hdev->discovery.state = state;

	switch (state) {
	case DISCOVERY_STOPPED:
		hci_update_background_scan(hdev);

		if (old_state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}
}

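/* Editorial note: as the switch above shows, only two transitions have
 * side effects. Entering DISCOVERY_STOPPED re-evaluates background
 * scanning and, unless we were merely aborting DISCOVERY_STARTING,
 * reports "discovering = 0" to mgmt; entering DISCOVERY_FINDING reports
 * "discovering = 1". STARTING, RESOLVING and STOPPING are bookkeeping
 * states only.
 */
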
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

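/* Editorial note: hci_inquiry_cache_update_resolve() above keeps the
 * resolve list sorted by ascending abs(RSSI), skipping entries whose
 * name resolution is already in flight, so the strongest (typically
 * closest) devices get their names resolved first.
 */
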
u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			     bool name_known)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;
	u32 flags = 0;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (!data->ssp_mode)
		flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (!ie->data.ssp_mode)
			flags |= MGMT_DEV_FOUND_LEGACY_PAIRING;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(*ie), GFP_KERNEL);
	if (!ie) {
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;
		goto done;
	}

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		flags |= MGMT_DEV_FOUND_CONFIRM_NAME;

done:
	return flags;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
	    !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		if (hdev->setup)
			ret = hdev->setup(hdev);

		/* The transport driver can set these quirks before
		 * creating the HCI device or in its setup callback.
		 *
		 * In case any of them is set, the controller has to
		 * start up as unconfigured.
		 */
		if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) ||
		    test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks))
			set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);

		/* For an unconfigured controller it is required to
		 * read at least the version information provided by
		 * the Read Local Version Information command.
		 *
		 * If the set_bdaddr driver callback is provided, then
		 * also the original Bluetooth public device address
		 * will be read using the Read BD Address command.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			ret = __hci_unconf_init(hdev);
	}

	if (test_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* If public address change is configured, ensure that
		 * the address gets programmed. If the driver does not
		 * support changing the public address, fail the power
		 * on procedure.
		 */
		if (bacmp(&hdev->public_addr, BDADDR_ANY) &&
		    hdev->set_bdaddr)
			ret = hdev->set_bdaddr(hdev, &hdev->public_addr);
		else
			ret = -EADDRNOTAVAIL;
	}

	if (!ret) {
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags) &&
		    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags &= BIT(HCI_RAW);
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Devices that are marked as unconfigured can only be powered
	 * up as user channel. Trying to bring them up as normal devices
	 * will result in a failure. Only user channel operation is
	 * possible.
	 *
	 * When this function is called for a user channel, the flag
	 * HCI_USER_CHANNEL will be set first before attempting to
	 * open the device.
	 */
	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	/* For controllers not using the management interface and that
	 * are brought up using legacy ioctl, set the HCI_BONDABLE bit
	 * so that pairing works for them. Once the management interface
	 * is in use this bit will be cleared again and userspace has
	 * to explicitly enable it.
	 */
	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
	    !test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_BONDABLE, &hdev->dev_flags);

	err = hci_dev_do_open(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

/* This function requires the caller holds hdev->lock */
static void hci_pend_le_actions_clear(struct hci_dev *hdev)
{
	struct hci_conn_params *p;

	list_for_each_entry(p, &hdev->le_conn_params, list) {
		if (p->conn) {
			hci_conn_drop(p->conn);
			hci_conn_put(p->conn);
			p->conn = NULL;
		}
		list_del_init(&p->action);
	}

	BT_DBG("All LE pending actions cleared");
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->rpa_expired);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_pend_le_actions_clear(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    !test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		cancel_delayed_work_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags &= BIT(HCI_RAW);
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
	bacpy(&hdev->random_addr, BDADDR_ANY);

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Avoid potential lockdep warnings from the *_flush() calls by
	 * ensuring the workqueue is empty up front.
	 */
	drain_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		ret = -EOPNOTSUPP;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

static void hci_update_scan_state(struct hci_dev *hdev, u8 scan)
{
	bool conn_changed, discov_changed;

	BT_DBG("%s scan 0x%02x", hdev->name, scan);

	if ((scan & SCAN_PAGE))
		conn_changed = !test_and_set_bit(HCI_CONNECTABLE,
						 &hdev->dev_flags);
	else
		conn_changed = test_and_clear_bit(HCI_CONNECTABLE,
						  &hdev->dev_flags);

	if ((scan & SCAN_INQUIRY)) {
		discov_changed = !test_and_set_bit(HCI_DISCOVERABLE,
						   &hdev->dev_flags);
	} else {
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
		discov_changed = test_and_clear_bit(HCI_DISCOVERABLE,
						    &hdev->dev_flags);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	if (conn_changed || discov_changed) {
		/* In case this was disabled through mgmt */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

		if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
			mgmt_update_adv_data(hdev);

		mgmt_new_settings(hdev);
	}
}

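/* Editorial note: the scan parameter above carries the standard HCI
 * Write Scan Enable values, so the flag updates follow:
 *
 *	SCAN_DISABLED            -> neither connectable nor discoverable
 *	SCAN_PAGE                -> connectable only
 *	SCAN_PAGE | SCAN_INQUIRY -> connectable and discoverable
 *
 * (SCAN_INQUIRY alone is valid on the wire but rarely used.)
 */
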
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);

		/* Ensure that the connectable and discoverable states
		 * get correctly modified as this was a non-mgmt change.
		 */
		if (!err)
			hci_update_scan_state(hdev, dr.dev_opt);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

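/* Illustrative sketch (an assumption about userspace, not taken from
 * this file): HCISETACLMTU/HCISETSCOMTU pack two 16-bit values into
 * the 32-bit dev_opt, which the handler above unpacks as two __u16
 * halves. On a little-endian host a caller would set it up roughly
 * like this:
 *
 *	struct hci_dev_req dr;
 *
 *	dr.dev_id  = 0;
 *	dr.dev_opt = (acl_mtu << 16) | acl_pkt_count;
 *	ioctl(hci_sock, HCISETACLMTU, &dr);
 */
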
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		unsigned long flags = hdev->flags;

		/* When the auto-off is configured it means the transport
		 * is running, but in that case still indicate that the
		 * device is actually down.
		 */
		if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			flags &= ~BIT(HCI_UP);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	unsigned long flags;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* When the auto-off is configured it means the transport
	 * is running, but in that case still indicate that the
	 * device is actually down.
	 */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		flags = hdev->flags & ~BIT(HCI_UP);
	else
		flags = hdev->flags;

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_CONFIG, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    test_bit(HCI_UNCONFIGURED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* For unconfigured devices, set the HCI_RAW flag
		 * so that userspace can easily identify them.
		 */
		if (test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			set_bit(HCI_RAW, &hdev->flags);

		/* For fully configured devices, this will send
		 * the Index Added event. For unconfigured devices,
		 * it will send the Unconfigured Index Added event.
		 *
		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
		 * and no event will be sent.
		 */
		mgmt_index_added(hdev);
	} else if (test_and_clear_bit(HCI_CONFIG, &hdev->dev_flags)) {
		/* When the controller is now configured, then it
		 * is important to clear the HCI_RAW flag.
		 */
		if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags))
			clear_bit(HCI_RAW, &hdev->flags);

		/* Powering on the controller with HCI_CONFIG set only
		 * happens with the transition from unconfigured to
		 * configured. This will send the Index Added event.
		 */
		mgmt_index_added(hdev);
	}
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

void hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}
}

void hci_link_keys_clear(struct hci_dev *hdev)
{
	struct link_key *key;

	list_for_each_entry_rcu(key, &hdev->link_keys, list) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
	}
}

void hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

void hci_smp_irks_clear(struct hci_dev *hdev)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

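/* Editorial note: the three key-list *_clear() helpers above use the
 * classic RCU removal pattern. list_del_rcu() unlinks an entry while
 * leaving its forward pointer intact for concurrent readers, and
 * kfree_rcu() defers the actual free until a grace period has elapsed,
 * so lockless readers traversing the list can never touch freed
 * memory. Writers are assumed to be serialized externally by
 * hdev->lock.
 */
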
struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) == 0) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

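/* Minimal reader-side sketch (illustrative only, not part of this
 * file; the function name is made up): any lockless lookup on the
 * RCU-protected key lists follows the same shape as
 * hci_find_link_key() above. Dereferencing a found entry is only safe
 * while rcu_read_lock() is held, unless the caller also holds
 * hdev->lock to keep writers away.
 */
#if 0
static bool example_has_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;
	bool found = false;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->link_keys, list) {
		if (!bacmp(&k->bdaddr, bdaddr)) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}
#endif
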
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as a requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as a requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as a requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

static u8 ltk_role(u8 type)
{
	if (type == SMP_LTK)
		return HCI_ROLE_MASTER;

	return HCI_ROLE_SLAVE;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, __le64 rand,
			     u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv || k->rand != rand)
			continue;

		if (ltk_role(k->type) != role)
			continue;

		rcu_read_unlock();
		return k;
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type, u8 role)
{
	struct smp_ltk *k;

	rcu_read_lock();
	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0 &&
		    ltk_role(k->type) == role) {
			rcu_read_unlock();
			return k;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
{
	struct smp_irk *irk;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (!bacmp(&irk->rpa, rpa)) {
			rcu_read_unlock();
			return irk;
		}
	}

	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (smp_irk_matches(hdev, irk->val, rpa)) {
			bacpy(&irk->rpa, rpa);
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_irk *irk;

	/* Identity Address must be public or static random */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		return NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
		if (addr_type == irk->addr_type &&
		    bacmp(bdaddr, &irk->bdaddr) == 0) {
			rcu_read_unlock();
			return irk;
		}
	}
	rcu_read_unlock();

	return NULL;
}

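/* Editorial note: hci_find_irk_by_rpa() above resolves in two passes.
 * The first pass is a cheap match against the RPA cached in each IRK;
 * only if that fails does the second pass run the cryptographic
 * smp_irk_matches() check, caching the RPA on success so the next
 * lookup for the same address takes the fast path.
 */
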
struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn,
				  bdaddr_t *bdaddr, u8 *val, u8 type,
				  u8 pin_len, bool *persistent)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (persistent)
		*persistent = hci_persistent_key(hdev, conn, type,
						 old_key_type);

	return key;
}

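/* Editorial note: for a brand-new key the entry is published with
 * list_add_rcu() while still zero-initialized (kzalloc) and the
 * bdaddr/val fields are filled in afterwards. A lockless reader may
 * therefore briefly observe an all-zero entry; this is assumed to be
 * harmless for lookups, since a zeroed bdaddr (BDADDR_ANY) does not
 * match a real peer address.
 */
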
struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 type, u8 authenticated,
			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
{
	struct smp_ltk *key, *old_key;
	u8 role = ltk_role(type);

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, role);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_KERNEL);
		if (!key)
			return NULL;
		list_add_rcu(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->rand = rand;
	key->enc_size = enc_size;
	key->type = type;

	return key;
}

struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 addr_type, u8 val[16], bdaddr_t *rpa)
{
	struct smp_irk *irk;

	irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
	if (!irk) {
		irk = kzalloc(sizeof(*irk), GFP_KERNEL);
		if (!irk)
			return NULL;

		bacpy(&irk->bdaddr, bdaddr);
		irk->addr_type = addr_type;

		list_add_rcu(&irk->list, &hdev->identity_resolving_keys);
	}

	memcpy(irk->val, val, 16);
	bacpy(&irk->rpa, rpa);

	return irk;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del_rcu(&key->list);
	kfree_rcu(key, rcu);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
{
	struct smp_ltk *k;
	int removed = 0;

	list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
		removed++;
	}

	return removed ? 0 : -ENOENT;
}

void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
{
	struct smp_irk *k;

	list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del_rcu(&k->list);
		kfree_rcu(k, rcu);
	}
}

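/* Editorial note: the removal helpers above iterate with
 * list_for_each_entry_rcu() but without taking rcu_read_lock(). That
 * relies on the callers holding hdev->lock, which serializes all
 * writers; the _rcu unlink primitives are still used so that removal
 * pairs correctly with concurrent lockless readers, and kfree_rcu()
 * keeps the memory alive until those readers are done.
 */
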
/* HCI command timer function */
static void hci_cmd_timeout(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    cmd_timer.work);

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

void hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *rand)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->rand192, rand, sizeof(data->rand192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->rand256, 0, sizeof(data->rand256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *rand192,
				u8 *hash256, u8 *rand256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_KERNEL);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->rand192, rand192, sizeof(data->rand192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->rand256, rand256, sizeof(data->rand256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list,
					   bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, bdaddr_list, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

void hci_bdaddr_list_clear(struct list_head *bdaddr_list)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, bdaddr_list) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}
}

int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_bdaddr_list_lookup(list, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, list);

	return 0;
}

int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY)) {
		hci_bdaddr_list_clear(list);
		return 0;
	}

	entry = hci_bdaddr_list_lookup(list, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return 0;
}

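/* Minimal usage sketch (illustrative only; the list name is made up).
 * The helpers above work on any plain list of struct bdaddr_list
 * entries, such as the blacklist or the LE white list:
 *
 *	LIST_HEAD(example_list);
 *
 *	hci_bdaddr_list_add(&example_list, &bdaddr, BDADDR_BREDR);
 *	if (hci_bdaddr_list_lookup(&example_list, &bdaddr, BDADDR_BREDR))
 *		hci_bdaddr_list_del(&example_list, &bdaddr, BDADDR_BREDR);
 *	hci_bdaddr_list_clear(&example_list);
 */
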
/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
					       bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *params;

	/* The conn params list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(params, &hdev->le_conn_params, list) {
		if (bacmp(&params->addr, addr) == 0 &&
		    params->addr_type == addr_type) {
			return params;
		}
	}

	return NULL;
}

static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
	if (!conn)
		return false;

	if (conn->dst_type != type)
		return false;

	if (conn->state != BT_CONNECTED)
		return false;

	return true;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
						  bdaddr_t *addr, u8 addr_type)
{
	struct hci_conn_params *param;

	/* The list only contains identity addresses */
	if (!hci_is_identity_address(addr, addr_type))
		return NULL;

	list_for_each_entry(param, list, action) {
		if (bacmp(&param->addr, addr) == 0 &&
		    param->addr_type == addr_type)
			return param;
	}

	return NULL;
}

15819a70 3657/* This function requires the caller holds hdev->lock */
51d167c0
MH
3658struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
3659 bdaddr_t *addr, u8 addr_type)
15819a70
AG
3660{
3661 struct hci_conn_params *params;
3662
c46245b3 3663 if (!hci_is_identity_address(addr, addr_type))
51d167c0 3664 return NULL;
a9b0a04c 3665
15819a70 3666 params = hci_conn_params_lookup(hdev, addr, addr_type);
cef952ce 3667 if (params)
51d167c0 3668 return params;
15819a70
AG
3669
3670 params = kzalloc(sizeof(*params), GFP_KERNEL);
3671 if (!params) {
3672 BT_ERR("Out of memory");
51d167c0 3673 return NULL;
15819a70
AG
3674 }
3675
3676 bacpy(&params->addr, addr);
3677 params->addr_type = addr_type;
cef952ce
AG
3678
3679 list_add(&params->list, &hdev->le_conn_params);
93450c75 3680 INIT_LIST_HEAD(&params->action);
cef952ce 3681
bf5b3c8b
MH
3682 params->conn_min_interval = hdev->le_conn_min_interval;
3683 params->conn_max_interval = hdev->le_conn_max_interval;
3684 params->conn_latency = hdev->le_conn_latency;
3685 params->supervision_timeout = hdev->le_supv_timeout;
3686 params->auto_connect = HCI_AUTO_CONN_DISABLED;
3687
3688 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3689
51d167c0 3690 return params;
bf5b3c8b
MH
3691}
3692
3693/* This function requires the caller holds hdev->lock */
3694int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
d06b50ce 3695 u8 auto_connect)
15819a70
AG
3696{
3697 struct hci_conn_params *params;
3698
8c87aae1
MH
3699 params = hci_conn_params_add(hdev, addr, addr_type);
3700 if (!params)
3701 return -EIO;
cef952ce 3702
42ce26de
JH
3703 if (params->auto_connect == auto_connect)
3704 return 0;
3705
95305baa 3706 list_del_init(&params->action);
15819a70 3707
cef952ce
AG
3708 switch (auto_connect) {
3709 case HCI_AUTO_CONN_DISABLED:
3710 case HCI_AUTO_CONN_LINK_LOSS:
95305baa 3711 hci_update_background_scan(hdev);
cef952ce 3712 break;
851efca8 3713 case HCI_AUTO_CONN_REPORT:
95305baa
JH
3714 list_add(&params->action, &hdev->pend_le_reports);
3715 hci_update_background_scan(hdev);
cef952ce 3716 break;
4b9e7e75 3717 case HCI_AUTO_CONN_DIRECT:
cef952ce 3718 case HCI_AUTO_CONN_ALWAYS:
95305baa
JH
3719 if (!is_connected(hdev, addr, addr_type)) {
3720 list_add(&params->action, &hdev->pend_le_conns);
3721 hci_update_background_scan(hdev);
3722 }
cef952ce
AG
3723 break;
3724 }
15819a70 3725
851efca8
JH
3726 params->auto_connect = auto_connect;
3727
d06b50ce
MH
3728 BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
3729 auto_connect);
a9b0a04c
AG
3730
3731 return 0;
15819a70
AG
3732}
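/* Usage sketch (hypothetical, not part of the original file): marking a
 * peer for automatic reconnection via hci_conn_params_set() above. The
 * caller must hold hdev->lock, and the address must be an identity
 * address (public or static random), since RPAs are rejected.
 */
static int __maybe_unused example_enable_auto_connect(struct hci_dev *hdev,
						      bdaddr_t *addr)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_conn_params_set(hdev, addr, ADDR_LE_DEV_PUBLIC,
				  HCI_AUTO_CONN_ALWAYS);
	hci_dev_unlock(hdev);

	return err;
}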
3733
f6c63249 3734static void hci_conn_params_free(struct hci_conn_params *params)
15819a70 3735{
f8aaf9b6 3736 if (params->conn) {
f161dd41 3737 hci_conn_drop(params->conn);
f8aaf9b6
JH
3738 hci_conn_put(params->conn);
3739 }
f161dd41 3740
95305baa 3741 list_del(&params->action);
15819a70
AG
3742 list_del(&params->list);
3743 kfree(params);
f6c63249
JH
3744}
3745
3746/* This function requires the caller holds hdev->lock */
3747void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3748{
3749 struct hci_conn_params *params;
3750
3751 params = hci_conn_params_lookup(hdev, addr, addr_type);
3752 if (!params)
3753 return;
3754
3755 hci_conn_params_free(params);
15819a70 3756
95305baa
JH
3757 hci_update_background_scan(hdev);
3758
15819a70
AG
3759 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3760}
3761
3762/* This function requires the caller holds hdev->lock */
55af49a8 3763void hci_conn_params_clear_disabled(struct hci_dev *hdev)
15819a70
AG
3764{
3765 struct hci_conn_params *params, *tmp;
3766
3767 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
55af49a8
JH
3768 if (params->auto_connect != HCI_AUTO_CONN_DISABLED)
3769 continue;
15819a70
AG
3770 list_del(&params->list);
3771 kfree(params);
3772 }
3773
55af49a8 3774 BT_DBG("All disabled LE connection parameters were removed");
77a77a30
AG
3775}
3776
3777/* This function requires the caller holds hdev->lock */
373110c5 3778void hci_conn_params_clear_all(struct hci_dev *hdev)
77a77a30 3779{
15819a70 3780 struct hci_conn_params *params, *tmp;
77a77a30 3781
f6c63249
JH
3782 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list)
3783 hci_conn_params_free(params);
77a77a30 3784
a4790dbd 3785 hci_update_background_scan(hdev);
77a77a30 3786
15819a70 3787 BT_DBG("All LE connection parameters were removed");
77a77a30
AG
3788}
3789
4c87eaab 3790static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3791{
4c87eaab
AG
3792 if (status) {
3793 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3794
4c87eaab
AG
3795 hci_dev_lock(hdev);
3796 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3797 hci_dev_unlock(hdev);
3798 return;
3799 }
7ba8b4be
AG
3800}
3801
4c87eaab 3802static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3803{
4c87eaab
AG
3804 /* General inquiry access code (GIAC) */
3805 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3806 struct hci_request req;
3807 struct hci_cp_inquiry cp;
7ba8b4be
AG
3808 int err;
3809
4c87eaab
AG
3810 if (status) {
3811 BT_ERR("Failed to disable LE scanning: status %d", status);
3812 return;
3813 }
7ba8b4be 3814
4c87eaab
AG
3815 switch (hdev->discovery.type) {
3816 case DISCOV_TYPE_LE:
3817 hci_dev_lock(hdev);
3818 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3819 hci_dev_unlock(hdev);
3820 break;
7ba8b4be 3821
4c87eaab
AG
3822 case DISCOV_TYPE_INTERLEAVED:
3823 hci_req_init(&req, hdev);
7ba8b4be 3824
4c87eaab
AG
3825 memset(&cp, 0, sizeof(cp));
3826 memcpy(&cp.lap, lap, sizeof(cp.lap));
3827 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3828 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3829
4c87eaab 3830 hci_dev_lock(hdev);
7dbfac1d 3831
4c87eaab 3832 hci_inquiry_cache_flush(hdev);
7dbfac1d 3833
4c87eaab
AG
3834 err = hci_req_run(&req, inquiry_complete);
3835 if (err) {
3836 BT_ERR("Inquiry request failed: err %d", err);
3837 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3838 }
7dbfac1d 3839
4c87eaab
AG
3840 hci_dev_unlock(hdev);
3841 break;
7dbfac1d 3842 }
7dbfac1d
AG
3843}
3844
7ba8b4be
AG
3845static void le_scan_disable_work(struct work_struct *work)
3846{
3847 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3848 le_scan_disable.work);
4c87eaab
AG
3849 struct hci_request req;
3850 int err;
7ba8b4be
AG
3851
3852 BT_DBG("%s", hdev->name);
3853
4c87eaab 3854 hci_req_init(&req, hdev);
28b75a89 3855
b1efcc28 3856 hci_req_add_le_scan_disable(&req);
28b75a89 3857
4c87eaab
AG
3858 err = hci_req_run(&req, le_scan_disable_work_complete);
3859 if (err)
3860 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3861}
3862
8d97250e
JH
3863static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
3864{
3865 struct hci_dev *hdev = req->hdev;
3866
3867 /* If we're advertising or initiating an LE connection we can't
3868 * go ahead and change the random address at this time. This is
3869 * because the eventual initiator address used for the
3870 * subsequently created connection will be undefined (some
3871 * controllers use the new address and others the one we had
3872 * when the operation started).
3873 *
3874 * In this kind of scenario skip the update and let the random
3875 * address be updated at the next cycle.
3876 */
5ce194c4 3877 if (test_bit(HCI_LE_ADV, &hdev->dev_flags) ||
8d97250e
JH
3878 hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT)) {
3879 BT_DBG("Deferring random address update");
9a783a13 3880 set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);
8d97250e
JH
3881 return;
3882 }
3883
3884 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
3885}
3886
94b1fc92
MH
3887int hci_update_random_address(struct hci_request *req, bool require_privacy,
3888 u8 *own_addr_type)
ebd3a747
JH
3889{
3890 struct hci_dev *hdev = req->hdev;
3891 int err;
3892
 3893 /* If privacy is enabled, use a resolvable private address. If
2b5224dc
MH
 3894 * the current RPA has expired or something other than the
 3895 * current RPA is in use, then generate a new one.
ebd3a747
JH
3896 */
3897 if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
ebd3a747
JH
3898 int to;
3899
3900 *own_addr_type = ADDR_LE_DEV_RANDOM;
3901
3902 if (!test_and_clear_bit(HCI_RPA_EXPIRED, &hdev->dev_flags) &&
2b5224dc 3903 !bacmp(&hdev->random_addr, &hdev->rpa))
ebd3a747
JH
3904 return 0;
3905
defce9e8 3906 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
ebd3a747
JH
3907 if (err < 0) {
3908 BT_ERR("%s failed to generate new RPA", hdev->name);
3909 return err;
3910 }
3911
8d97250e 3912 set_random_addr(req, &hdev->rpa);
ebd3a747
JH
3913
3914 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
3915 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
3916
3917 return 0;
94b1fc92
MH
3918 }
3919
 3920 /* If privacy is required but a resolvable private address is not
 3921 * available, use a non-resolvable private address. This is useful
 3922 * for active scanning and non-connectable advertising.
3923 */
3924 if (require_privacy) {
3925 bdaddr_t urpa;
3926
3927 get_random_bytes(&urpa, 6);
3928 urpa.b[5] &= 0x3f; /* Clear two most significant bits */
3929
3930 *own_addr_type = ADDR_LE_DEV_RANDOM;
8d97250e 3931 set_random_addr(req, &urpa);
94b1fc92 3932 return 0;
ebd3a747
JH
3933 }
3934
 3935 /* If forcing static address is in use or there is no public
 3936 * address, use the static address as the random address (but
 3937 * skip the HCI command if the current random address is
 3938 * already the static one).
3939 */
111902f7 3940 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
ebd3a747
JH
3941 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3942 *own_addr_type = ADDR_LE_DEV_RANDOM;
3943 if (bacmp(&hdev->static_addr, &hdev->random_addr))
3944 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
3945 &hdev->static_addr);
3946 return 0;
3947 }
3948
3949 /* Neither privacy nor static address is being used so use a
3950 * public address.
3951 */
3952 *own_addr_type = ADDR_LE_DEV_PUBLIC;
3953
3954 return 0;
3955}
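/* Usage sketch (hypothetical): picking the own-address type while
 * building a request, as the passive scanning code later in this file
 * does. Assumes req was initialized with hci_req_init().
 */
static void __maybe_unused example_pick_own_address(struct hci_request *req)
{
	u8 own_addr_type;

	/* require_privacy=false keeps a public or static address usable */
	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		return;

	/* own_addr_type now holds ADDR_LE_DEV_PUBLIC or ADDR_LE_DEV_RANDOM,
	 * and any needed HCI_OP_LE_SET_RANDOM_ADDR command has been queued.
	 */
}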
3956
a1f4c318
JH
3957/* Copy the Identity Address of the controller.
3958 *
3959 * If the controller has a public BD_ADDR, then by default use that one.
3960 * If this is a LE only controller without a public address, default to
3961 * the static random address.
3962 *
3963 * For debugging purposes it is possible to force controllers with a
3964 * public address to use the static random address instead.
3965 */
3966void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
3967 u8 *bdaddr_type)
3968{
111902f7 3969 if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dbg_flags) ||
a1f4c318
JH
3970 !bacmp(&hdev->bdaddr, BDADDR_ANY)) {
3971 bacpy(bdaddr, &hdev->static_addr);
3972 *bdaddr_type = ADDR_LE_DEV_RANDOM;
3973 } else {
3974 bacpy(bdaddr, &hdev->bdaddr);
3975 *bdaddr_type = ADDR_LE_DEV_PUBLIC;
3976 }
3977}
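/* Usage sketch (hypothetical): logging the identity address pair that
 * hci_copy_identity_address() resolves for this controller.
 */
static void __maybe_unused example_log_identity(struct hci_dev *hdev)
{
	bdaddr_t bdaddr;
	u8 bdaddr_type;

	hci_copy_identity_address(hdev, &bdaddr, &bdaddr_type);
	BT_DBG("%s identity %pMR (type %u)", hdev->name, &bdaddr, bdaddr_type);
}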
3978
9be0dab7
DH
3979/* Alloc HCI device */
3980struct hci_dev *hci_alloc_dev(void)
3981{
3982 struct hci_dev *hdev;
3983
27f70f3e 3984 hdev = kzalloc(sizeof(*hdev), GFP_KERNEL);
9be0dab7
DH
3985 if (!hdev)
3986 return NULL;
3987
b1b813d4
DH
3988 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3989 hdev->esco_type = (ESCO_HV1);
3990 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3991 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3992 hdev->io_capability = 0x03; /* No Input No Output */
96c2103a 3993 hdev->manufacturer = 0xffff; /* Default to internal use */
bbaf444a
JH
3994 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3995 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3996
b1b813d4
DH
3997 hdev->sniff_max_interval = 800;
3998 hdev->sniff_min_interval = 80;
3999
3f959d46 4000 hdev->le_adv_channel_map = 0x07;
628531c9
GL
4001 hdev->le_adv_min_interval = 0x0800;
4002 hdev->le_adv_max_interval = 0x0800;
bef64738
MH
4003 hdev->le_scan_interval = 0x0060;
4004 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
4005 hdev->le_conn_min_interval = 0x0028;
4006 hdev->le_conn_max_interval = 0x0038;
04fb7d90
MH
4007 hdev->le_conn_latency = 0x0000;
4008 hdev->le_supv_timeout = 0x002a;
bef64738 4009
d6bfd59c 4010 hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT;
b9a7a61e 4011 hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT;
31ad1691
AK
4012 hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE;
4013 hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE;
d6bfd59c 4014
b1b813d4
DH
4015 mutex_init(&hdev->lock);
4016 mutex_init(&hdev->req_lock);
4017
4018 INIT_LIST_HEAD(&hdev->mgmt_pending);
4019 INIT_LIST_HEAD(&hdev->blacklist);
6659358e 4020 INIT_LIST_HEAD(&hdev->whitelist);
b1b813d4
DH
4021 INIT_LIST_HEAD(&hdev->uuids);
4022 INIT_LIST_HEAD(&hdev->link_keys);
4023 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 4024 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 4025 INIT_LIST_HEAD(&hdev->remote_oob_data);
d2ab0ac1 4026 INIT_LIST_HEAD(&hdev->le_white_list);
15819a70 4027 INIT_LIST_HEAD(&hdev->le_conn_params);
77a77a30 4028 INIT_LIST_HEAD(&hdev->pend_le_conns);
66f8455a 4029 INIT_LIST_HEAD(&hdev->pend_le_reports);
6b536b5e 4030 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
4031
4032 INIT_WORK(&hdev->rx_work, hci_rx_work);
4033 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
4034 INIT_WORK(&hdev->tx_work, hci_tx_work);
4035 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 4036
b1b813d4
DH
4037 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
4038 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
4039 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
4040
b1b813d4
DH
4041 skb_queue_head_init(&hdev->rx_q);
4042 skb_queue_head_init(&hdev->cmd_q);
4043 skb_queue_head_init(&hdev->raw_q);
4044
4045 init_waitqueue_head(&hdev->req_wait_q);
4046
65cc2b49 4047 INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
b1b813d4 4048
b1b813d4
DH
4049 hci_init_sysfs(hdev);
4050 discovery_init(hdev);
9be0dab7
DH
4051
4052 return hdev;
4053}
4054EXPORT_SYMBOL(hci_alloc_dev);
4055
4056/* Free HCI device */
4057void hci_free_dev(struct hci_dev *hdev)
4058{
9be0dab7
DH
4059 /* will free via device release */
4060 put_device(&hdev->dev);
4061}
4062EXPORT_SYMBOL(hci_free_dev);
4063
1da177e4
LT
4064/* Register HCI device */
4065int hci_register_dev(struct hci_dev *hdev)
4066{
b1b813d4 4067 int id, error;
1da177e4 4068
74292d5a 4069 if (!hdev->open || !hdev->close || !hdev->send)
1da177e4
LT
4070 return -EINVAL;
4071
08add513
MM
4072 /* Do not allow HCI_AMP devices to register at index 0,
4073 * so the index can be used as the AMP controller ID.
4074 */
3df92b31
SL
4075 switch (hdev->dev_type) {
4076 case HCI_BREDR:
4077 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
4078 break;
4079 case HCI_AMP:
4080 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
4081 break;
4082 default:
4083 return -EINVAL;
1da177e4 4084 }
8e87d142 4085
3df92b31
SL
4086 if (id < 0)
4087 return id;
4088
1da177e4
LT
4089 sprintf(hdev->name, "hci%d", id);
4090 hdev->id = id;
2d8b3a11
AE
4091
4092 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
4093
d8537548
KC
4094 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4095 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
4096 if (!hdev->workqueue) {
4097 error = -ENOMEM;
4098 goto err;
4099 }
f48fd9c8 4100
d8537548
KC
4101 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
4102 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
4103 if (!hdev->req_workqueue) {
4104 destroy_workqueue(hdev->workqueue);
4105 error = -ENOMEM;
4106 goto err;
4107 }
4108
0153e2ec
MH
4109 if (!IS_ERR_OR_NULL(bt_debugfs))
4110 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
4111
bdc3e0f1
MH
4112 dev_set_name(&hdev->dev, "%s", hdev->name);
4113
4114 error = device_add(&hdev->dev);
33ca954d 4115 if (error < 0)
54506918 4116 goto err_wqueue;
1da177e4 4117
611b30f7 4118 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
4119 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
4120 hdev);
611b30f7
MH
4121 if (hdev->rfkill) {
4122 if (rfkill_register(hdev->rfkill) < 0) {
4123 rfkill_destroy(hdev->rfkill);
4124 hdev->rfkill = NULL;
4125 }
4126 }
4127
5e130367
JH
4128 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
4129 set_bit(HCI_RFKILLED, &hdev->dev_flags);
4130
a8b2d5c2 4131 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 4132 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 4133
01cd3404 4134 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
4135 /* Assume BR/EDR support until proven otherwise (such as
4136 * through reading supported features during init.
4137 */
4138 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
4139 }
ce2be9ac 4140
fcee3377
GP
4141 write_lock(&hci_dev_list_lock);
4142 list_add(&hdev->list, &hci_dev_list);
4143 write_unlock(&hci_dev_list_lock);
4144
4a964404
MH
4145 /* Devices that are marked for raw-only usage are unconfigured
4146 * and should not be included in normal operation.
fee746b0
MH
4147 */
4148 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
4a964404 4149 set_bit(HCI_UNCONFIGURED, &hdev->dev_flags);
fee746b0 4150
1da177e4 4151 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 4152 hci_dev_hold(hdev);
1da177e4 4153
19202573 4154 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 4155
1da177e4 4156 return id;
f48fd9c8 4157
33ca954d
DH
4158err_wqueue:
4159 destroy_workqueue(hdev->workqueue);
6ead1bbc 4160 destroy_workqueue(hdev->req_workqueue);
33ca954d 4161err:
3df92b31 4162 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 4163
33ca954d 4164 return error;
1da177e4
LT
4165}
4166EXPORT_SYMBOL(hci_register_dev);
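/* Driver-side sketch (hypothetical; my_open, my_close, my_send and the
 * chosen bus type are placeholders, not real callbacks): the minimal
 * flow a transport driver follows against hci_alloc_dev() and
 * hci_register_dev(). The open, close and send callbacks are mandatory,
 * as checked at the top of hci_register_dev().
 */
static int __maybe_unused example_driver_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;	/* assumption: USB-attached transport */
	hdev->open = my_open;
	hdev->close = my_close;
	hdev->send = my_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}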
4167
4168/* Unregister HCI device */
59735631 4169void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 4170{
3df92b31 4171 int i, id;
ef222013 4172
c13854ce 4173 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 4174
94324962
JH
4175 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
4176
3df92b31
SL
4177 id = hdev->id;
4178
f20d09d5 4179 write_lock(&hci_dev_list_lock);
1da177e4 4180 list_del(&hdev->list);
f20d09d5 4181 write_unlock(&hci_dev_list_lock);
1da177e4
LT
4182
4183 hci_dev_do_close(hdev);
4184
cd4c5391 4185 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
4186 kfree_skb(hdev->reassembly[i]);
4187
b9b5ef18
GP
4188 cancel_work_sync(&hdev->power_on);
4189
ab81cbf9 4190 if (!test_bit(HCI_INIT, &hdev->flags) &&
d603b76b
MH
4191 !test_bit(HCI_SETUP, &hdev->dev_flags) &&
4192 !test_bit(HCI_CONFIG, &hdev->dev_flags)) {
09fd0de5 4193 hci_dev_lock(hdev);
744cf19e 4194 mgmt_index_removed(hdev);
09fd0de5 4195 hci_dev_unlock(hdev);
56e5cb86 4196 }
ab81cbf9 4197
2e58ef3e
JH
4198 /* mgmt_index_removed should take care of emptying the
4199 * pending list */
4200 BUG_ON(!list_empty(&hdev->mgmt_pending));
4201
1da177e4
LT
4202 hci_notify(hdev, HCI_DEV_UNREG);
4203
611b30f7
MH
4204 if (hdev->rfkill) {
4205 rfkill_unregister(hdev->rfkill);
4206 rfkill_destroy(hdev->rfkill);
4207 }
4208
711eafe3 4209 smp_unregister(hdev);
99780a7b 4210
bdc3e0f1 4211 device_del(&hdev->dev);
147e2d59 4212
0153e2ec
MH
4213 debugfs_remove_recursive(hdev->debugfs);
4214
f48fd9c8 4215 destroy_workqueue(hdev->workqueue);
6ead1bbc 4216 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 4217
09fd0de5 4218 hci_dev_lock(hdev);
dcc36c16 4219 hci_bdaddr_list_clear(&hdev->blacklist);
6659358e 4220 hci_bdaddr_list_clear(&hdev->whitelist);
2aeb9a1a 4221 hci_uuids_clear(hdev);
55ed8ca1 4222 hci_link_keys_clear(hdev);
b899efaf 4223 hci_smp_ltks_clear(hdev);
970c4e46 4224 hci_smp_irks_clear(hdev);
2763eda6 4225 hci_remote_oob_data_clear(hdev);
dcc36c16 4226 hci_bdaddr_list_clear(&hdev->le_white_list);
373110c5 4227 hci_conn_params_clear_all(hdev);
09fd0de5 4228 hci_dev_unlock(hdev);
e2e0cacb 4229
dc946bd8 4230 hci_dev_put(hdev);
3df92b31
SL
4231
4232 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
4233}
4234EXPORT_SYMBOL(hci_unregister_dev);
4235
4236/* Suspend HCI device */
4237int hci_suspend_dev(struct hci_dev *hdev)
4238{
4239 hci_notify(hdev, HCI_DEV_SUSPEND);
4240 return 0;
4241}
4242EXPORT_SYMBOL(hci_suspend_dev);
4243
4244/* Resume HCI device */
4245int hci_resume_dev(struct hci_dev *hdev)
4246{
4247 hci_notify(hdev, HCI_DEV_RESUME);
4248 return 0;
4249}
4250EXPORT_SYMBOL(hci_resume_dev);
4251
75e0569f
MH
4252/* Reset HCI device */
4253int hci_reset_dev(struct hci_dev *hdev)
4254{
4255 const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 };
4256 struct sk_buff *skb;
4257
4258 skb = bt_skb_alloc(3, GFP_ATOMIC);
4259 if (!skb)
4260 return -ENOMEM;
4261
4262 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
4263 memcpy(skb_put(skb, 3), hw_err, 3);
4264
4265 /* Send Hardware Error to upper stack */
4266 return hci_recv_frame(hdev, skb);
4267}
4268EXPORT_SYMBOL(hci_reset_dev);
4269
76bca880 4270/* Receive frame from HCI drivers */
e1a26170 4271int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 4272{
76bca880 4273 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 4274 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
4275 kfree_skb(skb);
4276 return -ENXIO;
4277 }
4278
d82603c6 4279 /* Incoming skb */
76bca880
MH
4280 bt_cb(skb)->incoming = 1;
4281
4282 /* Time stamp */
4283 __net_timestamp(skb);
4284
76bca880 4285 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 4286 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 4287
76bca880
MH
4288 return 0;
4289}
4290EXPORT_SYMBOL(hci_recv_frame);
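/* Driver-side sketch (hypothetical): handing one complete event frame to
 * the core. The driver allocates the skb, tags the packet type and calls
 * hci_recv_frame(), which queues it for hci_rx_work().
 */
static int __maybe_unused example_deliver_event(struct hci_dev *hdev,
						const void *data, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), data, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}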
4291
33e882a5 4292static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 4293 int count, __u8 index)
33e882a5
SS
4294{
4295 int len = 0;
4296 int hlen = 0;
4297 int remain = count;
4298 struct sk_buff *skb;
4299 struct bt_skb_cb *scb;
4300
4301 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 4302 index >= NUM_REASSEMBLY)
33e882a5
SS
4303 return -EILSEQ;
4304
4305 skb = hdev->reassembly[index];
4306
4307 if (!skb) {
4308 switch (type) {
4309 case HCI_ACLDATA_PKT:
4310 len = HCI_MAX_FRAME_SIZE;
4311 hlen = HCI_ACL_HDR_SIZE;
4312 break;
4313 case HCI_EVENT_PKT:
4314 len = HCI_MAX_EVENT_SIZE;
4315 hlen = HCI_EVENT_HDR_SIZE;
4316 break;
4317 case HCI_SCODATA_PKT:
4318 len = HCI_MAX_SCO_SIZE;
4319 hlen = HCI_SCO_HDR_SIZE;
4320 break;
4321 }
4322
1e429f38 4323 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
4324 if (!skb)
4325 return -ENOMEM;
4326
4327 scb = (void *) skb->cb;
4328 scb->expect = hlen;
4329 scb->pkt_type = type;
4330
33e882a5
SS
4331 hdev->reassembly[index] = skb;
4332 }
4333
4334 while (count) {
4335 scb = (void *) skb->cb;
89bb46d0 4336 len = min_t(uint, scb->expect, count);
33e882a5
SS
4337
4338 memcpy(skb_put(skb, len), data, len);
4339
4340 count -= len;
4341 data += len;
4342 scb->expect -= len;
4343 remain = count;
4344
4345 switch (type) {
4346 case HCI_EVENT_PKT:
4347 if (skb->len == HCI_EVENT_HDR_SIZE) {
4348 struct hci_event_hdr *h = hci_event_hdr(skb);
4349 scb->expect = h->plen;
4350
4351 if (skb_tailroom(skb) < scb->expect) {
4352 kfree_skb(skb);
4353 hdev->reassembly[index] = NULL;
4354 return -ENOMEM;
4355 }
4356 }
4357 break;
4358
4359 case HCI_ACLDATA_PKT:
4360 if (skb->len == HCI_ACL_HDR_SIZE) {
4361 struct hci_acl_hdr *h = hci_acl_hdr(skb);
4362 scb->expect = __le16_to_cpu(h->dlen);
4363
4364 if (skb_tailroom(skb) < scb->expect) {
4365 kfree_skb(skb);
4366 hdev->reassembly[index] = NULL;
4367 return -ENOMEM;
4368 }
4369 }
4370 break;
4371
4372 case HCI_SCODATA_PKT:
4373 if (skb->len == HCI_SCO_HDR_SIZE) {
4374 struct hci_sco_hdr *h = hci_sco_hdr(skb);
4375 scb->expect = h->dlen;
4376
4377 if (skb_tailroom(skb) < scb->expect) {
4378 kfree_skb(skb);
4379 hdev->reassembly[index] = NULL;
4380 return -ENOMEM;
4381 }
4382 }
4383 break;
4384 }
4385
4386 if (scb->expect == 0) {
4387 /* Complete frame */
4388
4389 bt_cb(skb)->pkt_type = type;
e1a26170 4390 hci_recv_frame(hdev, skb);
33e882a5
SS
4391
4392 hdev->reassembly[index] = NULL;
4393 return remain;
4394 }
4395 }
4396
4397 return remain;
4398}
4399
99811510
SS
4400#define STREAM_REASSEMBLY 0
4401
4402int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
4403{
4404 int type;
4405 int rem = 0;
4406
da5f6c37 4407 while (count) {
99811510
SS
4408 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
4409
4410 if (!skb) {
4411 struct { char type; } *pkt;
4412
4413 /* Start of the frame */
4414 pkt = data;
4415 type = pkt->type;
4416
4417 data++;
4418 count--;
4419 } else
4420 type = bt_cb(skb)->pkt_type;
4421
1e429f38 4422 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 4423 STREAM_REASSEMBLY);
99811510
SS
4424 if (rem < 0)
4425 return rem;
4426
4427 data += (count - rem);
4428 count = rem;
f81c6224 4429 }
99811510
SS
4430
4431 return rem;
4432}
4433EXPORT_SYMBOL(hci_recv_stream_fragment);
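/* Driver-side sketch (hypothetical): feeding raw bytes from a UART-style
 * transport into the stream reassembler above. Partial frames are kept in
 * hdev->reassembly[] between calls; a negative return means the stream
 * got out of sync.
 */
static void __maybe_unused example_uart_rx(struct hci_dev *hdev,
					   const u8 *buf, int count)
{
	int err = hci_recv_stream_fragment(hdev, (void *)buf, count);

	if (err < 0)
		BT_ERR("%s stream reassembly failed (%d)", hdev->name, err);
}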
4434
1da177e4
LT
4435/* ---- Interface to upper protocols ---- */
4436
1da177e4
LT
4437int hci_register_cb(struct hci_cb *cb)
4438{
4439 BT_DBG("%p name %s", cb, cb->name);
4440
f20d09d5 4441 write_lock(&hci_cb_list_lock);
1da177e4 4442 list_add(&cb->list, &hci_cb_list);
f20d09d5 4443 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4444
4445 return 0;
4446}
4447EXPORT_SYMBOL(hci_register_cb);
4448
4449int hci_unregister_cb(struct hci_cb *cb)
4450{
4451 BT_DBG("%p name %s", cb, cb->name);
4452
f20d09d5 4453 write_lock(&hci_cb_list_lock);
1da177e4 4454 list_del(&cb->list);
f20d09d5 4455 write_unlock(&hci_cb_list_lock);
1da177e4
LT
4456
4457 return 0;
4458}
4459EXPORT_SYMBOL(hci_unregister_cb);
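/* Usage sketch (hypothetical): how an upper-layer protocol hooks into the
 * callback list above. The hci_cb structure must stay valid until
 * hci_unregister_cb() is called, so it is typically static.
 */
static struct hci_cb example_cb = {
	.name = "example",
};

static int __maybe_unused example_proto_init(void)
{
	return hci_register_cb(&example_cb);
}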
4460
51086991 4461static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 4462{
cdc52faa
MH
4463 int err;
4464
0d48d939 4465 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 4466
cd82e61c
MH
4467 /* Time stamp */
4468 __net_timestamp(skb);
1da177e4 4469
cd82e61c
MH
4470 /* Send copy to monitor */
4471 hci_send_to_monitor(hdev, skb);
4472
4473 if (atomic_read(&hdev->promisc)) {
4474 /* Send copy to the sockets */
470fe1b5 4475 hci_send_to_sock(hdev, skb);
1da177e4
LT
4476 }
4477
4478 /* Get rid of skb owner, prior to sending to the driver. */
4479 skb_orphan(skb);
4480
cdc52faa
MH
4481 err = hdev->send(hdev, skb);
4482 if (err < 0) {
4483 BT_ERR("%s sending frame failed (%d)", hdev->name, err);
4484 kfree_skb(skb);
4485 }
1da177e4
LT
4486}
4487
3119ae95
JH
4488void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
4489{
4490 skb_queue_head_init(&req->cmd_q);
4491 req->hdev = hdev;
5d73e034 4492 req->err = 0;
3119ae95
JH
4493}
4494
4495int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
4496{
4497 struct hci_dev *hdev = req->hdev;
4498 struct sk_buff *skb;
4499 unsigned long flags;
4500
4501 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
4502
49c922bb 4503 /* If an error occurred during request building, remove all HCI
5d73e034
AG
4504 * commands queued on the HCI request queue.
4505 */
4506 if (req->err) {
4507 skb_queue_purge(&req->cmd_q);
4508 return req->err;
4509 }
4510
3119ae95
JH
4511 /* Do not allow empty requests */
4512 if (skb_queue_empty(&req->cmd_q))
382b0c39 4513 return -ENODATA;
3119ae95
JH
4514
4515 skb = skb_peek_tail(&req->cmd_q);
4516 bt_cb(skb)->req.complete = complete;
4517
4518 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
4519 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
4520 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
4521
4522 queue_work(hdev->workqueue, &hdev->cmd_work);
4523
4524 return 0;
4525}
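/* Usage sketch (hypothetical): batching two commands into one request.
 * Only the last command carries the complete callback, so it runs exactly
 * once for the whole batch.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int __maybe_unused example_run_request(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_DISABLED;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
	hci_req_add_le_scan_disable(&req);

	return hci_req_run(&req, example_req_complete);
}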
4526
899de765
MH
4527bool hci_req_pending(struct hci_dev *hdev)
4528{
4529 return (hdev->req_status == HCI_REQ_PEND);
4530}
4531
1ca3a9d0 4532static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 4533 u32 plen, const void *param)
1da177e4
LT
4534{
4535 int len = HCI_COMMAND_HDR_SIZE + plen;
4536 struct hci_command_hdr *hdr;
4537 struct sk_buff *skb;
4538
1da177e4 4539 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
4540 if (!skb)
4541 return NULL;
1da177e4
LT
4542
4543 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 4544 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
4545 hdr->plen = plen;
4546
4547 if (plen)
4548 memcpy(skb_put(skb, plen), param, plen);
4549
4550 BT_DBG("skb len %d", skb->len);
4551
0d48d939 4552 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
43e73e4e 4553 bt_cb(skb)->opcode = opcode;
c78ae283 4554
1ca3a9d0
JH
4555 return skb;
4556}
4557
4558/* Send HCI command */
07dc93dd
JH
4559int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
4560 const void *param)
1ca3a9d0
JH
4561{
4562 struct sk_buff *skb;
4563
4564 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4565
4566 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4567 if (!skb) {
4568 BT_ERR("%s no memory for command", hdev->name);
4569 return -ENOMEM;
4570 }
4571
49c922bb 4572 /* Stand-alone HCI commands must be flagged as
11714b3d
JH
4573 * single-command requests.
4574 */
4575 bt_cb(skb)->req.start = true;
4576
1da177e4 4577 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 4578 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4579
4580 return 0;
4581}
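/* Usage sketch (hypothetical): issuing a stand-alone command. The
 * parameter block is copied into the skb by hci_prepare_cmd(), so a
 * stack variable is fine here.
 */
static int __maybe_unused example_write_scan_enable(struct hci_dev *hdev,
						    u8 scan)
{
	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}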
1da177e4 4582
71c76a17 4583/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
4584void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
4585 const void *param, u8 event)
71c76a17
JH
4586{
4587 struct hci_dev *hdev = req->hdev;
4588 struct sk_buff *skb;
4589
4590 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
4591
49c922bb 4592 /* If an error occurred during request building, there is no point in
34739c1e
AG
4593 * queueing the HCI command. We can simply return.
4594 */
4595 if (req->err)
4596 return;
4597
71c76a17
JH
4598 skb = hci_prepare_cmd(hdev, opcode, plen, param);
4599 if (!skb) {
5d73e034
AG
4600 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
4601 hdev->name, opcode);
4602 req->err = -ENOMEM;
e348fe6b 4603 return;
71c76a17
JH
4604 }
4605
4606 if (skb_queue_empty(&req->cmd_q))
4607 bt_cb(skb)->req.start = true;
4608
02350a72
JH
4609 bt_cb(skb)->req.event = event;
4610
71c76a17 4611 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
4612}
4613
07dc93dd
JH
4614void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
4615 const void *param)
02350a72
JH
4616{
4617 hci_req_add_ev(req, opcode, plen, param, 0);
4618}
4619
1da177e4 4620/* Get data from the previously sent command */
a9de9248 4621void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
4622{
4623 struct hci_command_hdr *hdr;
4624
4625 if (!hdev->sent_cmd)
4626 return NULL;
4627
4628 hdr = (void *) hdev->sent_cmd->data;
4629
a9de9248 4630 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
4631 return NULL;
4632
f0e09510 4633 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
4634
4635 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
4636}
4637
4638/* Send ACL data */
4639static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
4640{
4641 struct hci_acl_hdr *hdr;
4642 int len = skb->len;
4643
badff6d0
ACM
4644 skb_push(skb, HCI_ACL_HDR_SIZE);
4645 skb_reset_transport_header(skb);
9c70220b 4646 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
4647 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
4648 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
4649}
4650
ee22be7e 4651static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 4652 struct sk_buff *skb, __u16 flags)
1da177e4 4653{
ee22be7e 4654 struct hci_conn *conn = chan->conn;
1da177e4
LT
4655 struct hci_dev *hdev = conn->hdev;
4656 struct sk_buff *list;
4657
087bfd99
GP
4658 skb->len = skb_headlen(skb);
4659 skb->data_len = 0;
4660
4661 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
4662
4663 switch (hdev->dev_type) {
4664 case HCI_BREDR:
4665 hci_add_acl_hdr(skb, conn->handle, flags);
4666 break;
4667 case HCI_AMP:
4668 hci_add_acl_hdr(skb, chan->handle, flags);
4669 break;
4670 default:
4671 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
4672 return;
4673 }
087bfd99 4674
70f23020
AE
4675 list = skb_shinfo(skb)->frag_list;
4676 if (!list) {
1da177e4
LT
4677 /* Non fragmented */
4678 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
4679
73d80deb 4680 skb_queue_tail(queue, skb);
1da177e4
LT
4681 } else {
4682 /* Fragmented */
4683 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4684
4685 skb_shinfo(skb)->frag_list = NULL;
4686
9cfd5a23
JR
4687 /* Queue all fragments atomically. We need to use spin_lock_bh
 4688 * here because of 6LoWPAN links: on those links this function
 4689 * is called from softirq context, and using a normal spin lock
 4690 * could cause deadlocks.
4691 */
4692 spin_lock_bh(&queue->lock);
1da177e4 4693
73d80deb 4694 __skb_queue_tail(queue, skb);
e702112f
AE
4695
4696 flags &= ~ACL_START;
4697 flags |= ACL_CONT;
1da177e4
LT
4698 do {
4699 skb = list; list = list->next;
8e87d142 4700
0d48d939 4701 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 4702 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
4703
4704 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
4705
73d80deb 4706 __skb_queue_tail(queue, skb);
1da177e4
LT
4707 } while (list);
4708
9cfd5a23 4709 spin_unlock_bh(&queue->lock);
1da177e4 4710 }
73d80deb
LAD
4711}
4712
4713void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
4714{
ee22be7e 4715 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 4716
f0e09510 4717 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 4718
ee22be7e 4719 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 4720
3eff45ea 4721 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4722}
1da177e4
LT
4723
4724/* Send SCO data */
0d861d8b 4725void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
4726{
4727 struct hci_dev *hdev = conn->hdev;
4728 struct hci_sco_hdr hdr;
4729
4730 BT_DBG("%s len %d", hdev->name, skb->len);
4731
aca3192c 4732 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
4733 hdr.dlen = skb->len;
4734
badff6d0
ACM
4735 skb_push(skb, HCI_SCO_HDR_SIZE);
4736 skb_reset_transport_header(skb);
9c70220b 4737 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 4738
0d48d939 4739 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 4740
1da177e4 4741 skb_queue_tail(&conn->data_q, skb);
3eff45ea 4742 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 4743}
1da177e4
LT
4744
4745/* ---- HCI TX task (outgoing data) ---- */
4746
4747/* HCI Connection scheduler */
6039aa73
GP
4748static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
4749 int *quote)
1da177e4
LT
4750{
4751 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4752 struct hci_conn *conn = NULL, *c;
abc5de8f 4753 unsigned int num = 0, min = ~0;
1da177e4 4754
8e87d142 4755 /* We don't have to lock the device here. Connections are always
1da177e4 4756 * added and removed with TX task disabled. */
bf4c6325
GP
4757
4758 rcu_read_lock();
4759
4760 list_for_each_entry_rcu(c, &h->list, list) {
769be974 4761 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 4762 continue;
769be974
MH
4763
4764 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
4765 continue;
4766
1da177e4
LT
4767 num++;
4768
4769 if (c->sent < min) {
4770 min = c->sent;
4771 conn = c;
4772 }
52087a79
LAD
4773
4774 if (hci_conn_num(hdev, type) == num)
4775 break;
1da177e4
LT
4776 }
4777
bf4c6325
GP
4778 rcu_read_unlock();
4779
1da177e4 4780 if (conn) {
6ed58ec5
VT
4781 int cnt, q;
4782
4783 switch (conn->type) {
4784 case ACL_LINK:
4785 cnt = hdev->acl_cnt;
4786 break;
4787 case SCO_LINK:
4788 case ESCO_LINK:
4789 cnt = hdev->sco_cnt;
4790 break;
4791 case LE_LINK:
4792 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4793 break;
4794 default:
4795 cnt = 0;
4796 BT_ERR("Unknown link type");
4797 }
4798
4799 q = cnt / num;
1da177e4
LT
4800 *quote = q ? q : 1;
4801 } else
4802 *quote = 0;
4803
4804 BT_DBG("conn %p quote %d", conn, *quote);
4805 return conn;
4806}
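/* Worked example (hypothetical numbers): with hdev->acl_cnt == 8 free
 * controller buffers and 3 eligible ACL connections, the connection with
 * the fewest outstanding packets is picked and receives a quote of
 * 8 / 3 = 2 packets for this scheduling round (never less than 1).
 */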
4807
6039aa73 4808static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4809{
4810 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4811 struct hci_conn *c;
1da177e4 4812
bae1f5d9 4813 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4814
bf4c6325
GP
4815 rcu_read_lock();
4816
1da177e4 4817 /* Kill stalled connections */
bf4c6325 4818 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4819 if (c->type == type && c->sent) {
6ed93dc6
AE
4820 BT_ERR("%s killing stalled connection %pMR",
4821 hdev->name, &c->dst);
bed71748 4822 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4823 }
4824 }
bf4c6325
GP
4825
4826 rcu_read_unlock();
1da177e4
LT
4827}
4828
6039aa73
GP
4829static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4830 int *quote)
1da177e4 4831{
73d80deb
LAD
4832 struct hci_conn_hash *h = &hdev->conn_hash;
4833 struct hci_chan *chan = NULL;
abc5de8f 4834 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4835 struct hci_conn *conn;
73d80deb
LAD
4836 int cnt, q, conn_num = 0;
4837
4838 BT_DBG("%s", hdev->name);
4839
bf4c6325
GP
4840 rcu_read_lock();
4841
4842 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4843 struct hci_chan *tmp;
4844
4845 if (conn->type != type)
4846 continue;
4847
4848 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4849 continue;
4850
4851 conn_num++;
4852
8192edef 4853 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4854 struct sk_buff *skb;
4855
4856 if (skb_queue_empty(&tmp->data_q))
4857 continue;
4858
4859 skb = skb_peek(&tmp->data_q);
4860 if (skb->priority < cur_prio)
4861 continue;
4862
4863 if (skb->priority > cur_prio) {
4864 num = 0;
4865 min = ~0;
4866 cur_prio = skb->priority;
4867 }
4868
4869 num++;
4870
4871 if (conn->sent < min) {
4872 min = conn->sent;
4873 chan = tmp;
4874 }
4875 }
4876
4877 if (hci_conn_num(hdev, type) == conn_num)
4878 break;
4879 }
4880
bf4c6325
GP
4881 rcu_read_unlock();
4882
73d80deb
LAD
4883 if (!chan)
4884 return NULL;
4885
4886 switch (chan->conn->type) {
4887 case ACL_LINK:
4888 cnt = hdev->acl_cnt;
4889 break;
bd1eb66b
AE
4890 case AMP_LINK:
4891 cnt = hdev->block_cnt;
4892 break;
73d80deb
LAD
4893 case SCO_LINK:
4894 case ESCO_LINK:
4895 cnt = hdev->sco_cnt;
4896 break;
4897 case LE_LINK:
4898 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4899 break;
4900 default:
4901 cnt = 0;
4902 BT_ERR("Unknown link type");
4903 }
4904
4905 q = cnt / num;
4906 *quote = q ? q : 1;
4907 BT_DBG("chan %p quote %d", chan, *quote);
4908 return chan;
4909}
4910
02b20f0b
LAD
4911static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4912{
4913 struct hci_conn_hash *h = &hdev->conn_hash;
4914 struct hci_conn *conn;
4915 int num = 0;
4916
4917 BT_DBG("%s", hdev->name);
4918
bf4c6325
GP
4919 rcu_read_lock();
4920
4921 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4922 struct hci_chan *chan;
4923
4924 if (conn->type != type)
4925 continue;
4926
4927 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4928 continue;
4929
4930 num++;
4931
8192edef 4932 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4933 struct sk_buff *skb;
4934
4935 if (chan->sent) {
4936 chan->sent = 0;
4937 continue;
4938 }
4939
4940 if (skb_queue_empty(&chan->data_q))
4941 continue;
4942
4943 skb = skb_peek(&chan->data_q);
4944 if (skb->priority >= HCI_PRIO_MAX - 1)
4945 continue;
4946
4947 skb->priority = HCI_PRIO_MAX - 1;
4948
4949 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4950 skb->priority);
02b20f0b
LAD
4951 }
4952
4953 if (hci_conn_num(hdev, type) == num)
4954 break;
4955 }
bf4c6325
GP
4956
4957 rcu_read_unlock();
4958
02b20f0b
LAD
4959}
4960
b71d385a
AE
4961static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4962{
4963 /* Calculate count of blocks used by this packet */
4964 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4965}
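/* Worked example (hypothetical numbers): with hdev->block_len == 64 and
 * an skb of HCI_ACL_HDR_SIZE + 300 bytes, DIV_ROUND_UP(300, 64) == 5
 * controller blocks are consumed by the packet.
 */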
4966
6039aa73 4967static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4968{
4a964404 4969 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
1da177e4
LT
4970 /* ACL tx timeout must be longer than maximum
4971 * link supervision timeout (40.9 seconds) */
63d2bc1b 4972 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4973 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4974 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4975 }
63d2bc1b 4976}
1da177e4 4977
6039aa73 4978static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4979{
4980 unsigned int cnt = hdev->acl_cnt;
4981 struct hci_chan *chan;
4982 struct sk_buff *skb;
4983 int quote;
4984
4985 __check_timeout(hdev, cnt);
04837f64 4986
73d80deb 4987 while (hdev->acl_cnt &&
a8c5fb1a 4988 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4989 u32 priority = (skb_peek(&chan->data_q))->priority;
4990 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4991 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4992 skb->len, skb->priority);
73d80deb 4993
ec1cce24
LAD
4994 /* Stop if priority has changed */
4995 if (skb->priority < priority)
4996 break;
4997
4998 skb = skb_dequeue(&chan->data_q);
4999
73d80deb 5000 hci_conn_enter_active_mode(chan->conn,
04124681 5001 bt_cb(skb)->force_active);
04837f64 5002
57d17d70 5003 hci_send_frame(hdev, skb);
1da177e4
LT
5004 hdev->acl_last_tx = jiffies;
5005
5006 hdev->acl_cnt--;
73d80deb
LAD
5007 chan->sent++;
5008 chan->conn->sent++;
1da177e4
LT
5009 }
5010 }
02b20f0b
LAD
5011
5012 if (cnt != hdev->acl_cnt)
5013 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
5014}
5015
6039aa73 5016static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 5017{
63d2bc1b 5018 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
5019 struct hci_chan *chan;
5020 struct sk_buff *skb;
5021 int quote;
bd1eb66b 5022 u8 type;
b71d385a 5023
63d2bc1b 5024 __check_timeout(hdev, cnt);
b71d385a 5025
bd1eb66b
AE
5026 BT_DBG("%s", hdev->name);
5027
5028 if (hdev->dev_type == HCI_AMP)
5029 type = AMP_LINK;
5030 else
5031 type = ACL_LINK;
5032
b71d385a 5033 while (hdev->block_cnt > 0 &&
bd1eb66b 5034 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
5035 u32 priority = (skb_peek(&chan->data_q))->priority;
5036 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
5037 int blocks;
5038
5039 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5040 skb->len, skb->priority);
b71d385a
AE
5041
5042 /* Stop if priority has changed */
5043 if (skb->priority < priority)
5044 break;
5045
5046 skb = skb_dequeue(&chan->data_q);
5047
5048 blocks = __get_blocks(hdev, skb);
5049 if (blocks > hdev->block_cnt)
5050 return;
5051
5052 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 5053 bt_cb(skb)->force_active);
b71d385a 5054
57d17d70 5055 hci_send_frame(hdev, skb);
b71d385a
AE
5056 hdev->acl_last_tx = jiffies;
5057
5058 hdev->block_cnt -= blocks;
5059 quote -= blocks;
5060
5061 chan->sent += blocks;
5062 chan->conn->sent += blocks;
5063 }
5064 }
5065
5066 if (cnt != hdev->block_cnt)
bd1eb66b 5067 hci_prio_recalculate(hdev, type);
b71d385a
AE
5068}
5069
6039aa73 5070static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
5071{
5072 BT_DBG("%s", hdev->name);
5073
bd1eb66b
AE
5074 /* No ACL link over BR/EDR controller */
5075 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
5076 return;
5077
5078 /* No AMP link over AMP controller */
5079 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
5080 return;
5081
5082 switch (hdev->flow_ctl_mode) {
5083 case HCI_FLOW_CTL_MODE_PACKET_BASED:
5084 hci_sched_acl_pkt(hdev);
5085 break;
5086
5087 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
5088 hci_sched_acl_blk(hdev);
5089 break;
5090 }
5091}
5092
1da177e4 5093/* Schedule SCO */
6039aa73 5094static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
5095{
5096 struct hci_conn *conn;
5097 struct sk_buff *skb;
5098 int quote;
5099
5100 BT_DBG("%s", hdev->name);
5101
52087a79
LAD
5102 if (!hci_conn_num(hdev, SCO_LINK))
5103 return;
5104
1da177e4
LT
5105 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
5106 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5107 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5108 hci_send_frame(hdev, skb);
1da177e4
LT
5109
5110 conn->sent++;
5111 if (conn->sent == ~0)
5112 conn->sent = 0;
5113 }
5114 }
5115}
5116
6039aa73 5117static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
5118{
5119 struct hci_conn *conn;
5120 struct sk_buff *skb;
5121 int quote;
5122
5123 BT_DBG("%s", hdev->name);
5124
52087a79
LAD
5125 if (!hci_conn_num(hdev, ESCO_LINK))
5126 return;
5127
8fc9ced3
GP
5128 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
5129 &quote))) {
b6a0dc82
MH
5130 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
5131 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 5132 hci_send_frame(hdev, skb);
b6a0dc82
MH
5133
5134 conn->sent++;
5135 if (conn->sent == ~0)
5136 conn->sent = 0;
5137 }
5138 }
5139}
5140
6039aa73 5141static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 5142{
73d80deb 5143 struct hci_chan *chan;
6ed58ec5 5144 struct sk_buff *skb;
02b20f0b 5145 int quote, cnt, tmp;
6ed58ec5
VT
5146
5147 BT_DBG("%s", hdev->name);
5148
52087a79
LAD
5149 if (!hci_conn_num(hdev, LE_LINK))
5150 return;
5151
4a964404 5152 if (!test_bit(HCI_UNCONFIGURED, &hdev->dev_flags)) {
6ed58ec5
VT
5153 /* LE tx timeout must be longer than maximum
5154 * link supervision timeout (40.9 seconds) */
bae1f5d9 5155 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 5156 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 5157 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
5158 }
5159
5160 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 5161 tmp = cnt;
73d80deb 5162 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
5163 u32 priority = (skb_peek(&chan->data_q))->priority;
5164 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 5165 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 5166 skb->len, skb->priority);
6ed58ec5 5167
ec1cce24
LAD
5168 /* Stop if priority has changed */
5169 if (skb->priority < priority)
5170 break;
5171
5172 skb = skb_dequeue(&chan->data_q);
5173
57d17d70 5174 hci_send_frame(hdev, skb);
6ed58ec5
VT
5175 hdev->le_last_tx = jiffies;
5176
5177 cnt--;
73d80deb
LAD
5178 chan->sent++;
5179 chan->conn->sent++;
6ed58ec5
VT
5180 }
5181 }
73d80deb 5182
6ed58ec5
VT
5183 if (hdev->le_pkts)
5184 hdev->le_cnt = cnt;
5185 else
5186 hdev->acl_cnt = cnt;
02b20f0b
LAD
5187
5188 if (cnt != tmp)
5189 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
5190}
5191
3eff45ea 5192static void hci_tx_work(struct work_struct *work)
1da177e4 5193{
3eff45ea 5194 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
5195 struct sk_buff *skb;
5196
6ed58ec5 5197 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 5198 hdev->sco_cnt, hdev->le_cnt);
1da177e4 5199
52de599e
MH
5200 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
5201 /* Schedule queues and send stuff to HCI driver */
5202 hci_sched_acl(hdev);
5203 hci_sched_sco(hdev);
5204 hci_sched_esco(hdev);
5205 hci_sched_le(hdev);
5206 }
6ed58ec5 5207
1da177e4
LT
5208 /* Send next queued raw (unknown type) packet */
5209 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 5210 hci_send_frame(hdev, skb);
1da177e4
LT
5211}
5212
25985edc 5213/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
5214
5215/* ACL data packet */
6039aa73 5216static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5217{
5218 struct hci_acl_hdr *hdr = (void *) skb->data;
5219 struct hci_conn *conn;
5220 __u16 handle, flags;
5221
5222 skb_pull(skb, HCI_ACL_HDR_SIZE);
5223
5224 handle = __le16_to_cpu(hdr->handle);
5225 flags = hci_flags(handle);
5226 handle = hci_handle(handle);
5227
f0e09510 5228 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 5229 handle, flags);
1da177e4
LT
5230
5231 hdev->stat.acl_rx++;
5232
5233 hci_dev_lock(hdev);
5234 conn = hci_conn_hash_lookup_handle(hdev, handle);
5235 hci_dev_unlock(hdev);
8e87d142 5236
1da177e4 5237 if (conn) {
65983fc7 5238 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 5239
1da177e4 5240 /* Send to upper protocol */
686ebf28
UF
5241 l2cap_recv_acldata(conn, skb, flags);
5242 return;
1da177e4 5243 } else {
8e87d142 5244 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 5245 hdev->name, handle);
1da177e4
LT
5246 }
5247
5248 kfree_skb(skb);
5249}
5250
5251/* SCO data packet */
6039aa73 5252static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
5253{
5254 struct hci_sco_hdr *hdr = (void *) skb->data;
5255 struct hci_conn *conn;
5256 __u16 handle;
5257
5258 skb_pull(skb, HCI_SCO_HDR_SIZE);
5259
5260 handle = __le16_to_cpu(hdr->handle);
5261
f0e09510 5262 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
5263
5264 hdev->stat.sco_rx++;
5265
5266 hci_dev_lock(hdev);
5267 conn = hci_conn_hash_lookup_handle(hdev, handle);
5268 hci_dev_unlock(hdev);
5269
5270 if (conn) {
1da177e4 5271 /* Send to upper protocol */
686ebf28
UF
5272 sco_recv_scodata(conn, skb);
5273 return;
1da177e4 5274 } else {
8e87d142 5275 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 5276 hdev->name, handle);
1da177e4
LT
5277 }
5278
5279 kfree_skb(skb);
5280}
5281
9238f36a
JH
5282static bool hci_req_is_complete(struct hci_dev *hdev)
5283{
5284 struct sk_buff *skb;
5285
5286 skb = skb_peek(&hdev->cmd_q);
5287 if (!skb)
5288 return true;
5289
5290 return bt_cb(skb)->req.start;
5291}
5292
42c6b129
JH
5293static void hci_resend_last(struct hci_dev *hdev)
5294{
5295 struct hci_command_hdr *sent;
5296 struct sk_buff *skb;
5297 u16 opcode;
5298
5299 if (!hdev->sent_cmd)
5300 return;
5301
5302 sent = (void *) hdev->sent_cmd->data;
5303 opcode = __le16_to_cpu(sent->opcode);
5304 if (opcode == HCI_OP_RESET)
5305 return;
5306
5307 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
5308 if (!skb)
5309 return;
5310
5311 skb_queue_head(&hdev->cmd_q, skb);
5312 queue_work(hdev->workqueue, &hdev->cmd_work);
5313}
5314
9238f36a
JH
5315void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
5316{
5317 hci_req_complete_t req_complete = NULL;
5318 struct sk_buff *skb;
5319 unsigned long flags;
5320
5321 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
5322
42c6b129
JH
5323 /* If the completed command doesn't match the last one that was
5324 * sent we need to do special handling of it.
9238f36a 5325 */
42c6b129
JH
5326 if (!hci_sent_cmd_data(hdev, opcode)) {
5327 /* Some CSR based controllers generate a spontaneous
5328 * reset complete event during init and any pending
5329 * command will never be completed. In such a case we
5330 * need to resend whatever was the last sent
5331 * command.
5332 */
5333 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
5334 hci_resend_last(hdev);
5335
9238f36a 5336 return;
42c6b129 5337 }
9238f36a
JH
5338
5339 /* If the command succeeded and there's still more commands in
5340 * this request the request is not yet complete.
5341 */
5342 if (!status && !hci_req_is_complete(hdev))
5343 return;
5344
5345 /* If this was the last command in a request the complete
5346 * callback would be found in hdev->sent_cmd instead of the
5347 * command queue (hdev->cmd_q).
5348 */
5349 if (hdev->sent_cmd) {
5350 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
53e21fbc
JH
5351
5352 if (req_complete) {
5353 /* We must set the complete callback to NULL to
5354 * avoid calling the callback more than once if
5355 * this function gets called again.
5356 */
5357 bt_cb(hdev->sent_cmd)->req.complete = NULL;
5358
9238f36a 5359 goto call_complete;
53e21fbc 5360 }
9238f36a
JH
5361 }
5362
5363 /* Remove all pending commands belonging to this request */
5364 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
5365 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
5366 if (bt_cb(skb)->req.start) {
5367 __skb_queue_head(&hdev->cmd_q, skb);
5368 break;
5369 }
5370
5371 req_complete = bt_cb(skb)->req.complete;
5372 kfree_skb(skb);
5373 }
5374 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
5375
5376call_complete:
5377 if (req_complete)
5378 req_complete(hdev, status);
5379}
5380
b78752cc 5381static void hci_rx_work(struct work_struct *work)
1da177e4 5382{
b78752cc 5383 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
5384 struct sk_buff *skb;
5385
5386 BT_DBG("%s", hdev->name);
5387
1da177e4 5388 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
5389 /* Send copy to monitor */
5390 hci_send_to_monitor(hdev, skb);
5391
1da177e4
LT
5392 if (atomic_read(&hdev->promisc)) {
5393 /* Send copy to the sockets */
470fe1b5 5394 hci_send_to_sock(hdev, skb);
1da177e4
LT
5395 }
5396
fee746b0 5397 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
5398 kfree_skb(skb);
5399 continue;
5400 }
5401
5402 if (test_bit(HCI_INIT, &hdev->flags)) {
 5403 /* Don't process data packets in these states. */
0d48d939 5404 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
5405 case HCI_ACLDATA_PKT:
5406 case HCI_SCODATA_PKT:
5407 kfree_skb(skb);
5408 continue;
3ff50b79 5409 }
1da177e4
LT
5410 }
5411
5412 /* Process frame */
0d48d939 5413 switch (bt_cb(skb)->pkt_type) {
1da177e4 5414 case HCI_EVENT_PKT:
b78752cc 5415 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
5416 hci_event_packet(hdev, skb);
5417 break;
5418
5419 case HCI_ACLDATA_PKT:
5420 BT_DBG("%s ACL data packet", hdev->name);
5421 hci_acldata_packet(hdev, skb);
5422 break;
5423
5424 case HCI_SCODATA_PKT:
5425 BT_DBG("%s SCO data packet", hdev->name);
5426 hci_scodata_packet(hdev, skb);
5427 break;
5428
5429 default:
5430 kfree_skb(skb);
5431 break;
5432 }
5433 }
1da177e4
LT
5434}
5435
c347b765 5436static void hci_cmd_work(struct work_struct *work)
1da177e4 5437{
c347b765 5438 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
5439 struct sk_buff *skb;
5440
2104786b
AE
5441 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
5442 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 5443
1da177e4 5444 /* Send queued commands */
5a08ecce
AE
5445 if (atomic_read(&hdev->cmd_cnt)) {
5446 skb = skb_dequeue(&hdev->cmd_q);
5447 if (!skb)
5448 return;
5449
7585b97a 5450 kfree_skb(hdev->sent_cmd);
1da177e4 5451
a675d7f1 5452 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 5453 if (hdev->sent_cmd) {
1da177e4 5454 atomic_dec(&hdev->cmd_cnt);
57d17d70 5455 hci_send_frame(hdev, skb);
7bdb8a5c 5456 if (test_bit(HCI_RESET, &hdev->flags))
65cc2b49 5457 cancel_delayed_work(&hdev->cmd_timer);
7bdb8a5c 5458 else
65cc2b49
MH
5459 schedule_delayed_work(&hdev->cmd_timer,
5460 HCI_CMD_TIMEOUT);
1da177e4
LT
5461 } else {
5462 skb_queue_head(&hdev->cmd_q, skb);
c347b765 5463 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
5464 }
5465 }
5466}
b1efcc28
AG
5467
5468void hci_req_add_le_scan_disable(struct hci_request *req)
5469{
5470 struct hci_cp_le_set_scan_enable cp;
5471
5472 memset(&cp, 0, sizeof(cp));
5473 cp.enable = LE_SCAN_DISABLE;
5474 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
5475}
a4790dbd 5476
8540f6c0
MH
5477static void add_to_white_list(struct hci_request *req,
5478 struct hci_conn_params *params)
5479{
5480 struct hci_cp_le_add_to_white_list cp;
5481
5482 cp.bdaddr_type = params->addr_type;
5483 bacpy(&cp.bdaddr, &params->addr);
5484
5485 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
5486}
5487
5488static u8 update_white_list(struct hci_request *req)
5489{
5490 struct hci_dev *hdev = req->hdev;
5491 struct hci_conn_params *params;
5492 struct bdaddr_list *b;
5493 uint8_t white_list_entries = 0;
5494
5495 /* Go through the current white list programmed into the
5496 * controller one by one and check if that address is still
5497 * in the list of pending connections or list of devices to
5498 * report. If not present in either list, then queue the
5499 * command to remove it from the controller.
5500 */
5501 list_for_each_entry(b, &hdev->le_white_list, list) {
5502 struct hci_cp_le_del_from_white_list cp;
5503
5504 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
5505 &b->bdaddr, b->bdaddr_type) ||
5506 hci_pend_le_action_lookup(&hdev->pend_le_reports,
5507 &b->bdaddr, b->bdaddr_type)) {
5508 white_list_entries++;
5509 continue;
5510 }
5511
5512 cp.bdaddr_type = b->bdaddr_type;
5513 bacpy(&cp.bdaddr, &b->bdaddr);
5514
5515 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
5516 sizeof(cp), &cp);
5517 }
5518
 5519 /* Now that all stale white list entries have been
 5520 * removed, walk through the list of pending connections
5521 * and ensure that any new device gets programmed into
5522 * the controller.
5523 *
 5524 * If the list of devices is larger than the number of
 5525 * available white list entries in the controller, then
 5526 * just abort and return a filter policy value that does
 5527 * not use the white list.
5528 */
5529 list_for_each_entry(params, &hdev->pend_le_conns, action) {
5530 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5531 &params->addr, params->addr_type))
5532 continue;
5533
5534 if (white_list_entries >= hdev->le_white_list_size) {
5535 /* Select filter policy to accept all advertising */
5536 return 0x00;
5537 }
5538
66d8e837
MH
5539 if (hci_find_irk_by_addr(hdev, &params->addr,
5540 params->addr_type)) {
5541 /* White list can not be used with RPAs */
5542 return 0x00;
5543 }
5544
8540f6c0
MH
5545 white_list_entries++;
5546 add_to_white_list(req, params);
5547 }
5548
5549 /* After adding all new pending connections, walk through
5550 * the list of pending reports and also add these to the
5551 * white list if there is still space.
5552 */
5553 list_for_each_entry(params, &hdev->pend_le_reports, action) {
5554 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
5555 &params->addr, params->addr_type))
5556 continue;
5557
5558 if (white_list_entries >= hdev->le_white_list_size) {
5559 /* Select filter policy to accept all advertising */
5560 return 0x00;
5561 }
5562
66d8e837
MH
5563 if (hci_find_irk_by_addr(hdev, &params->addr,
5564 params->addr_type)) {
5565 /* White list can not be used with RPAs */
5566 return 0x00;
5567 }
5568
8540f6c0
MH
5569 white_list_entries++;
5570 add_to_white_list(req, params);
5571 }
5572
5573 /* Select filter policy to use white list */
5574 return 0x01;
5575}
5576
8ef30fd3
AG
5577void hci_req_add_le_passive_scan(struct hci_request *req)
5578{
5579 struct hci_cp_le_set_scan_param param_cp;
5580 struct hci_cp_le_set_scan_enable enable_cp;
5581 struct hci_dev *hdev = req->hdev;
5582 u8 own_addr_type;
8540f6c0 5583 u8 filter_policy;
8ef30fd3 5584
6ab535a7
MH
 5585 /* Set require_privacy to false since no SCAN_REQ is sent
5586 * during passive scanning. Not using an unresolvable address
5587 * here is important so that peer devices using direct
5588 * advertising with our address will be correctly reported
5589 * by the controller.
8ef30fd3 5590 */
6ab535a7 5591 if (hci_update_random_address(req, false, &own_addr_type))
8ef30fd3
AG
5592 return;
5593
8540f6c0
MH
5594 /* Adding or removing entries from the white list must
5595 * happen before enabling scanning. The controller does
5596 * not allow white list modification while scanning.
5597 */
5598 filter_policy = update_white_list(req);
5599
8ef30fd3
AG
5600 memset(&param_cp, 0, sizeof(param_cp));
5601 param_cp.type = LE_SCAN_PASSIVE;
5602 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
5603 param_cp.window = cpu_to_le16(hdev->le_scan_window);
5604 param_cp.own_address_type = own_addr_type;
8540f6c0 5605 param_cp.filter_policy = filter_policy;
8ef30fd3
AG
5606 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
5607 &param_cp);
5608
5609 memset(&enable_cp, 0, sizeof(enable_cp));
5610 enable_cp.enable = LE_SCAN_ENABLE;
4340a124 5611 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
8ef30fd3
AG
5612 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
5613 &enable_cp);
5614}
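/* Illustrative sketch (not part of the original file): a typical caller
 * wraps this helper in an HCI request, mirroring what
 * hci_update_background_scan() does further below:
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add_le_passive_scan(&req);
 *	err = hci_req_run(&req, NULL);
 *	if (err)
 *		BT_ERR("Failed to run HCI request: err %d", err);
 */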
5615
a4790dbd
AG
5616static void update_background_scan_complete(struct hci_dev *hdev, u8 status)
5617{
5618 if (status)
5619 BT_DBG("HCI request failed to update background scanning: "
5620 "status 0x%2.2x", status);
5621}
5622
5623/* This function controls the background scanning based on hdev->pend_le_conns
5624 * list. If there are pending LE connections, we start the background scanning,
5625 * otherwise we stop it.
5626 *
5627 * This function requires the caller to hold hdev->lock.
5628 */
5629void hci_update_background_scan(struct hci_dev *hdev)
5630{
a4790dbd
AG
5631 struct hci_request req;
5632 struct hci_conn *conn;
5633 int err;
5634
c20c02d5
MH
5635 if (!test_bit(HCI_UP, &hdev->flags) ||
5636 test_bit(HCI_INIT, &hdev->flags) ||
5637 test_bit(HCI_SETUP, &hdev->dev_flags) ||
d603b76b 5638 test_bit(HCI_CONFIG, &hdev->dev_flags) ||
b8221770 5639 test_bit(HCI_AUTO_OFF, &hdev->dev_flags) ||
c20c02d5 5640 test_bit(HCI_UNREGISTER, &hdev->dev_flags))
1c1697c0
MH
5641 return;
5642
a70f4b5f
JH
5643 /* No point in doing scanning if LE support hasn't been enabled */
5644 if (!test_bit(HCI_LE_ENABLED, &hdev->dev_flags))
5645 return;
5646
ae23ada4
JH
5647 /* If discovery is active don't interfere with it */
5648 if (hdev->discovery.state != DISCOVERY_STOPPED)
5649 return;
5650
a4790dbd
AG
5651 hci_req_init(&req, hdev);
5652
d1d588c1 5653 if (list_empty(&hdev->pend_le_conns) &&
66f8455a 5654 list_empty(&hdev->pend_le_reports)) {
0d2bf134
JH
5655 /* If there are no pending LE connections or devices
5656 * to be scanned for, we should stop the background
5657 * scanning.
a4790dbd
AG
5658 */
5659
5660 /* If controller is not scanning we are done. */
5661 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5662 return;
5663
5664 hci_req_add_le_scan_disable(&req);
5665
5666 BT_DBG("%s stopping background scanning", hdev->name);
5667 } else {
a4790dbd
AG
5668 /* If there is at least one pending LE connection, we should
5669 * keep the background scan running.
5670 */
5671
a4790dbd
AG
5672 /* If controller is connecting, we should not start scanning
5673 * since some controllers are not able to scan and connect at
5674 * the same time.
5675 */
5676 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
5677 if (conn)
5678 return;
5679
4340a124
AG
5680 /* If controller is currently scanning, we stop it to ensure we
5681 * don't miss any advertising (due to duplicates filter).
5682 */
5683 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
5684 hci_req_add_le_scan_disable(&req);
5685
8ef30fd3 5686 hci_req_add_le_passive_scan(&req);
a4790dbd
AG
5687
5688 BT_DBG("%s starting background scanning", hdev->name);
5689 }
5690
5691 err = hci_req_run(&req, update_background_scan_complete);
5692 if (err)
5693 BT_ERR("Failed to run HCI request: err %d", err);
5694}
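/* Summary of the decision logic above (annotation, not part of the
 * original source):
 *
 *	pend_le_conns and pend_le_reports both empty
 *		-> disable passive scanning (if currently enabled)
 *	an LE connection attempt (BT_CONNECT) is in progress
 *		-> leave scanning untouched
 *	otherwise
 *		-> restart passive scanning, so the controller's
 *		   duplicates filter is cleared and no advertising
 *		   report is missed
 */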
432df05e 5695
22f433dc
JH
5696static bool disconnected_whitelist_entries(struct hci_dev *hdev)
5697{
5698 struct bdaddr_list *b;
5699
5700 list_for_each_entry(b, &hdev->whitelist, list) {
5701 struct hci_conn *conn;
5702
5703 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
5704 if (!conn)
5705 return true;
5706
5707 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
5708 return true;
5709 }
5710
5711 return false;
5712}
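/* Annotation (not part of the original source): the helper above
 * returns true as soon as any BR/EDR whitelist device lacks an
 * established ACL link, which forces page scanning to stay enabled
 * below so that such a device can reconnect on its own.
 */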
5713
432df05e
JH
5714void hci_update_page_scan(struct hci_dev *hdev, struct hci_request *req)
5715{
5716 u8 scan;
5717
5718 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
5719 return;
5720
5721 if (!hdev_is_powered(hdev))
5722 return;
5723
5724 if (mgmt_powering_down(hdev))
5725 return;
5726
5727 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) ||
22f433dc 5728 disconnected_whitelist_entries(hdev))
432df05e
JH
5729 scan = SCAN_PAGE;
5730 else
5731 scan = SCAN_DISABLED;
5732
5733 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
5734 return;
5735
5736 if (test_bit(HCI_DISCOVERABLE, &hdev->dev_flags))
5737 scan |= SCAN_INQUIRY;
5738
5739 if (req)
5740 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5741 else
5742 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
5743}
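/* Annotation (not part of the original source): the scan values
 * written here follow the HCI Write_Scan_Enable encoding, where
 * SCAN_DISABLED is 0x00, SCAN_INQUIRY is 0x01 and SCAN_PAGE is 0x02;
 * OR-ing SCAN_INQUIRY into SCAN_PAGE (0x03) enables both page and
 * inquiry scan for discoverable devices.
 */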