Bluetooth: Replace own_address_type with force_static_address debugfs
[deliverable/linux.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
8c520a59 30#include <linux/rfkill.h>
baf27f6e 31#include <linux/debugfs.h>
99780a7b 32#include <linux/crypto.h>
47219839 33#include <asm/unaligned.h>
1da177e4
LT
34
35#include <net/bluetooth/bluetooth.h>
36#include <net/bluetooth/hci_core.h>
37
970c4e46
JH
38#include "smp.h"
39
b78752cc 40static void hci_rx_work(struct work_struct *work);
c347b765 41static void hci_cmd_work(struct work_struct *work);
3eff45ea 42static void hci_tx_work(struct work_struct *work);
1da177e4 43
1da177e4
LT
44/* HCI device list */
45LIST_HEAD(hci_dev_list);
46DEFINE_RWLOCK(hci_dev_list_lock);
47
48/* HCI callback list */
49LIST_HEAD(hci_cb_list);
50DEFINE_RWLOCK(hci_cb_list_lock);
51
3df92b31
SL
52/* HCI ID Numbering */
53static DEFINE_IDA(hci_index_ida);
54
1da177e4
LT
55/* ---- HCI notifications ---- */
56
/* Forward a device-level event to the HCI socket layer so that bound
 * sockets can be notified of the state change.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
61
baf27f6e
MH
62/* ---- HCI debugfs entries ---- */
63
4b4148e9
MH
/* debugfs "dut_mode" reader: reports 'Y' or 'N' depending on whether
 * Device Under Test mode is currently enabled.
 */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "dut_mode" writer: parses a boolean string and toggles DUT
 * mode by synchronously sending HCI_OP_ENABLE_DUT_MODE (enable) or
 * HCI_OP_RESET (disable).  The adapter must be up.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Reject writes of the current state so the change_bit() below
	 * always matches what the controller was actually told.
	 */
	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the returned event payload is the HCI status. */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
128
dfb826a8
MH
129static int features_show(struct seq_file *f, void *ptr)
130{
131 struct hci_dev *hdev = f->private;
132 u8 p;
133
134 hci_dev_lock(hdev);
135 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
cfbb2b5b 136 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
dfb826a8
MH
137 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
138 hdev->features[p][0], hdev->features[p][1],
139 hdev->features[p][2], hdev->features[p][3],
140 hdev->features[p][4], hdev->features[p][5],
141 hdev->features[p][6], hdev->features[p][7]);
142 }
cfbb2b5b
MH
143 if (lmp_le_capable(hdev))
144 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
145 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
146 hdev->le_features[0], hdev->le_features[1],
147 hdev->le_features[2], hdev->le_features[3],
148 hdev->le_features[4], hdev->le_features[5],
149 hdev->le_features[6], hdev->le_features[7]);
dfb826a8
MH
150 hci_dev_unlock(hdev);
151
152 return 0;
153}
154
155static int features_open(struct inode *inode, struct file *file)
156{
157 return single_open(file, features_show, inode->i_private);
158}
159
160static const struct file_operations features_fops = {
161 .open = features_open,
162 .read = seq_read,
163 .llseek = seq_lseek,
164 .release = single_release,
165};
166
70afe0b8
MH
167static int blacklist_show(struct seq_file *f, void *p)
168{
169 struct hci_dev *hdev = f->private;
170 struct bdaddr_list *b;
171
172 hci_dev_lock(hdev);
173 list_for_each_entry(b, &hdev->blacklist, list)
b25f0785 174 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
70afe0b8
MH
175 hci_dev_unlock(hdev);
176
177 return 0;
178}
179
180static int blacklist_open(struct inode *inode, struct file *file)
181{
182 return single_open(file, blacklist_show, inode->i_private);
183}
184
185static const struct file_operations blacklist_fops = {
186 .open = blacklist_open,
187 .read = seq_read,
188 .llseek = seq_lseek,
189 .release = single_release,
190};
191
47219839
MH
192static int uuids_show(struct seq_file *f, void *p)
193{
194 struct hci_dev *hdev = f->private;
195 struct bt_uuid *uuid;
196
197 hci_dev_lock(hdev);
198 list_for_each_entry(uuid, &hdev->uuids, list) {
58f01aa9
MH
199 u8 i, val[16];
200
201 /* The Bluetooth UUID values are stored in big endian,
202 * but with reversed byte order. So convert them into
203 * the right order for the %pUb modifier.
204 */
205 for (i = 0; i < 16; i++)
206 val[i] = uuid->uuid[15 - i];
207
208 seq_printf(f, "%pUb\n", val);
47219839
MH
209 }
210 hci_dev_unlock(hdev);
211
212 return 0;
213}
214
215static int uuids_open(struct inode *inode, struct file *file)
216{
217 return single_open(file, uuids_show, inode->i_private);
218}
219
220static const struct file_operations uuids_fops = {
221 .open = uuids_open,
222 .read = seq_read,
223 .llseek = seq_lseek,
224 .release = single_release,
225};
226
baf27f6e
MH
/* Dump the inquiry (discovery) cache: one line per remembered remote
 * device with page-scan parameters, class of device, clock offset,
 * RSSI, SSP mode and the entry timestamp.
 */
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
262
02d08d15
MH
/* Dump all stored BR/EDR link keys: address, key type, key value and
 * PIN length.
 */
static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
290
babdbb3c
MH
/* Show the current class of device as a 24-bit hex value (bytes are
 * stored little endian, so print them in reverse).
 */
static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
314
041000b9
MH
/* Read-only debugfs attribute exposing the controller voice setting. */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
328
ebd1e33b
MH
329static int auto_accept_delay_set(void *data, u64 val)
330{
331 struct hci_dev *hdev = data;
332
333 hci_dev_lock(hdev);
334 hdev->auto_accept_delay = val;
335 hci_dev_unlock(hdev);
336
337 return 0;
338}
339
340static int auto_accept_delay_get(void *data, u64 *val)
341{
342 struct hci_dev *hdev = data;
343
344 hci_dev_lock(hdev);
345 *val = hdev->auto_accept_delay;
346 hci_dev_unlock(hdev);
347
348 return 0;
349}
350
351DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
352 auto_accept_delay_set, "%llu\n");
353
06f5b778
MH
/* debugfs writer for SSP debug mode: sends HCI_OP_WRITE_SSP_DEBUG_MODE
 * synchronously and mirrors the new state in hdev->ssp_debug_mode only
 * when the controller accepted the command.  Requires the adapter up.
 */
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the returned event payload is the HCI status. */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");
402
5afeac14
MH
/* debugfs "force_sc_support" reader: 'Y' if Secure Connections support
 * is being forced on, 'N' otherwise.
 */
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "force_sc_support" writer: toggles the HCI_FORCE_SC flag.
 * Only allowed while the adapter is down, since the flag influences
 * controller initialization.
 */
static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};
448
134c2a89
MH
/* Read-only debugfs entry reporting whether Secure Connections Only
 * mode is active ('Y'/'N').
 */
static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};
466
2bfa3531
MH
467static int idle_timeout_set(void *data, u64 val)
468{
469 struct hci_dev *hdev = data;
470
471 if (val != 0 && (val < 500 || val > 3600000))
472 return -EINVAL;
473
474 hci_dev_lock(hdev);
2be48b65 475 hdev->idle_timeout = val;
2bfa3531
MH
476 hci_dev_unlock(hdev);
477
478 return 0;
479}
480
481static int idle_timeout_get(void *data, u64 *val)
482{
483 struct hci_dev *hdev = data;
484
485 hci_dev_lock(hdev);
486 *val = hdev->idle_timeout;
487 hci_dev_unlock(hdev);
488
489 return 0;
490}
491
492DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
493 idle_timeout_set, "%llu\n");
494
/* debugfs accessors for the sniff mode interval bounds.  Each value
 * must be non-zero and even, and each setter validates against the
 * opposite bound so that min <= max always holds.
 */
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
550
e7b8fc92
MH
/* Read-only debugfs entry showing the configured LE static address. */
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
573
b32bba6c
MH
/* debugfs "force_static_address" reader: 'Y' if use of the static
 * address is being forced, 'N' otherwise.
 */
static ssize_t force_static_address_read(struct file *file,
					 char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "force_static_address" writer: toggles the flag from a
 * boolean string.  Only allowed while the adapter is down, since the
 * address choice matters at power-on.
 */
static ssize_t force_static_address_write(struct file *file,
					  const char __user *user_buf,
					  size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_static_address_fops = {
	.open		= simple_open,
	.read		= force_static_address_read,
	.write		= force_static_address_write,
	.llseek		= default_llseek,
};
92202185 620
3698d704
MH
/* Dump all stored Identity Resolving Keys: identity address and type,
 * the 128-bit IRK value and the last known resolvable private address.
 */
static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->identity_resolving_keys) {
		struct smp_irk *irk = list_entry(p, struct smp_irk, list);
		seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
			   &irk->bdaddr, irk->addr_type,
			   16, irk->val, &irk->rpa);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int identity_resolving_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, identity_resolving_keys_show,
			   inode->i_private);
}

static const struct file_operations identity_resolving_keys_fops = {
	.open		= identity_resolving_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
650
8f8625cd
MH
/* Dump all stored LE Long Term Keys with their pairing metadata
 * (address/type, authentication, key type, encryption size, EDIV,
 * random value and key material).
 */
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
680
4e70c7e7
MH
/* debugfs accessors for the LE connection interval bounds (controller
 * units, valid range 0x0006..0x0c80).  Each setter validates against
 * the opposite bound so min <= max always holds.
 */
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
736
89863109
JR
/* debugfs "6lowpan" reader: 'Y' if 6LoWPAN support is enabled. */
static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "6lowpan" writer: toggles the HCI_6LOWPAN_ENABLED flag from
 * a boolean string.  No HCI command is involved, so the adapter state
 * is not checked.
 */
static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};
779
1da177e4
LT
780/* ---- HCI requests ---- */
781
/* Completion callback for synchronous requests: record the result and
 * wake up the waiter sleeping in __hci_req_sync()/__hci_cmd_sync_ev().
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Abort a pending synchronous request with @err and wake the waiter. */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
803
77a63e0a
FW
/* Take ownership of the last received event (hdev->recv_evt) and
 * return it if it is what the caller waited for: the given @event, or
 * (when @event is 0) a Command Complete for @opcode.  On any mismatch
 * the skb is freed and ERR_PTR(-ENODATA) is returned.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	/* Detach the buffered event so it cannot be consumed twice. */
	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event rather than Command Complete. */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
858
/* Send a single HCI command and sleep until the matching completion
 * arrives or @timeout (jiffies) expires.  When @event is non-zero the
 * command is expected to be answered by that event instead of Command
 * Complete.  Returns the response skb (caller frees) or an ERR_PTR.
 * Must be called with hci_req_lock() held.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
912
/* Convenience wrapper around __hci_cmd_sync_ev() for commands answered
 * by a plain Command Complete event.
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
919
/* Execute request and wait for completion.
 *
 * Builds an HCI request via @func, runs it and sleeps interruptibly
 * for up to @timeout jiffies until hci_req_sync_complete() reports the
 * result.  Must be called with hci_req_lock() held.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
983
/* Locked wrapper around __hci_req_sync(): serializes all synchronous
 * requests on the device and rejects them when the adapter is down.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
1001
/* Request builder that queues an HCI_OP_RESET and marks the device as
 * resetting via the HCI_RESET flag.
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
1010
/* First-stage init for BR/EDR controllers: packet-based flow control
 * and the basic identity reads.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
1024
/* First-stage init for AMP controllers: block-based flow control plus
 * the AMP-specific capability reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
1050
/* Stage-1 init request: optional reset followed by the type-specific
 * (BR/EDR or AMP) capability reads.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
1075
/* Stage-2 BR/EDR setup: read basic controller state, clear event
 * filters and set the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1117
/* Stage-2 LE setup: read LE capabilities and enable LE on controllers
 * that have no BR/EDR side.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1141
1142static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1143{
1144 if (lmp_ext_inq_capable(hdev))
1145 return 0x02;
1146
1147 if (lmp_inq_rssi_capable(hdev))
1148 return 0x01;
1149
1150 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1151 hdev->lmp_subver == 0x0757)
1152 return 0x01;
1153
1154 if (hdev->manufacturer == 15) {
1155 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1156 return 0x01;
1157 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1158 return 0x01;
1159 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1160 return 0x01;
1161 }
1162
1163 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1164 hdev->lmp_subver == 0x1805)
1165 return 0x01;
1166
1167 return 0x00;
1168}
1169
42c6b129 1170static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
1171{
1172 u8 mode;
1173
42c6b129 1174 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 1175
42c6b129 1176 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
1177}
1178
/* Build and queue the Set Event Mask command so that only events the
 * controller can actually generate (based on its LMP features) are
 * unmasked. Also programs the LE event mask for LE-capable devices.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	/* LE controllers additionally take an LE event mask; 0x1f in the
	 * first byte unmasks the base LE meta events.
	 */
	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1259
/* Second stage of controller initialization: transport-specific setup
 * (BR/EDR and/or LE), event mask programming and feature-conditional
 * commands (SSP, EIR, inquiry mode, extended features, link security).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: wipe any stale EIR data both in
			 * our local copy and on the controller.
			 */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1323
42c6b129 1324static void hci_setup_link_policy(struct hci_request *req)
2177bab5 1325{
42c6b129 1326 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1327 struct hci_cp_write_def_link_policy cp;
1328 u16 link_policy = 0;
1329
1330 if (lmp_rswitch_capable(hdev))
1331 link_policy |= HCI_LP_RSWITCH;
1332 if (lmp_hold_capable(hdev))
1333 link_policy |= HCI_LP_HOLD;
1334 if (lmp_sniff_capable(hdev))
1335 link_policy |= HCI_LP_SNIFF;
1336 if (lmp_park_capable(hdev))
1337 link_policy |= HCI_LP_PARK;
1338
1339 cp.policy = cpu_to_le16(link_policy);
42c6b129 1340 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
1341}
1342
42c6b129 1343static void hci_set_le_support(struct hci_request *req)
2177bab5 1344{
42c6b129 1345 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1346 struct hci_cp_write_le_host_supported cp;
1347
c73eee91
JH
1348 /* LE-only devices do not support explicit enablement */
1349 if (!lmp_bredr_capable(hdev))
1350 return;
1351
2177bab5
JH
1352 memset(&cp, 0, sizeof(cp));
1353
1354 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1355 cp.le = 0x01;
1356 cp.simul = lmp_le_br_capable(hdev);
1357 }
1358
1359 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
1360 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1361 &cp);
2177bab5
JH
1362}
1363
/* Build and queue the Set Event Mask Page 2 command, unmasking the
 * CSB (Connectionless Slave Broadcast) and authenticated payload
 * timeout events according to the controller's capabilities.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1395
/* Third stage of controller initialization: stored link key cleanup,
 * default link policy, LE own-address-type selection and extended
 * feature page reads beyond page 1.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		/* If the controller has a public BD_ADDR, then by default
		 * use that one. If this is a LE only controller without
		 * a public address, default to the random address.
		 *
		 * For debugging purposes it is possible to force
		 * controllers with a public address to use the
		 * random address instead.
		 */
		if (test_bit(HCI_FORCE_STATIC_ADDR, &hdev->dev_flags) ||
		    !bacmp(&hdev->bdaddr, BDADDR_ANY))
			hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		else
			hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1454
/* Fourth stage of controller initialization: event mask page 2,
 * synchronization train parameters and Secure Connections support.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1476
/* Run the full synchronous initialization sequence for a controller
 * (init stages 1-4) and, during the initial HCI_SETUP phase only,
 * create the per-device debugfs entries. AMP controllers stop after
 * stage 1. Returns 0 on success or a negative errno from any stage.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries common to all controller types */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	/* BR/EDR-only entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Simple Pairing related entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	/* Sniff mode tunables */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE-only entries */
	if (lmp_le_capable(hdev)) {
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);

		/* For controllers with a public address, provide a debug
		 * option to force the usage of the configured static
		 * address. By default the public address is used.
		 */
		if (bacmp(&hdev->bdaddr, BDADDR_ANY))
			debugfs_create_file("force_static_address", 0644,
					    hdev->debugfs, hdev,
					    &force_static_address_fops);

		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("identity_resolving_keys", 0400,
				    hdev->debugfs, hdev,
				    &identity_resolving_keys_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
1589
42c6b129 1590static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1591{
1592 __u8 scan = opt;
1593
42c6b129 1594 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1595
1596 /* Inquiry and Page scans */
42c6b129 1597 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1598}
1599
42c6b129 1600static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1601{
1602 __u8 auth = opt;
1603
42c6b129 1604 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1605
1606 /* Authentication */
42c6b129 1607 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1608}
1609
42c6b129 1610static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1611{
1612 __u8 encrypt = opt;
1613
42c6b129 1614 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1615
e4e8e37c 1616 /* Encryption */
42c6b129 1617 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1618}
1619
42c6b129 1620static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1621{
1622 __le16 policy = cpu_to_le16(opt);
1623
42c6b129 1624 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1625
1626 /* Default link policy */
42c6b129 1627 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1628}
1629
8e87d142 1630/* Get HCI device by index.
1da177e4
LT
1631 * Device is held on return. */
1632struct hci_dev *hci_dev_get(int index)
1633{
8035ded4 1634 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1635
1636 BT_DBG("%d", index);
1637
1638 if (index < 0)
1639 return NULL;
1640
1641 read_lock(&hci_dev_list_lock);
8035ded4 1642 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1643 if (d->id == index) {
1644 hdev = hci_dev_hold(d);
1645 break;
1646 }
1647 }
1648 read_unlock(&hci_dev_list_lock);
1649 return hdev;
1650}
1da177e4
LT
1651
1652/* ---- Inquiry support ---- */
ff9ef578 1653
30dc78e1
JH
1654bool hci_discovery_active(struct hci_dev *hdev)
1655{
1656 struct discovery_state *discov = &hdev->discovery;
1657
6fbe195d 1658 switch (discov->state) {
343f935b 1659 case DISCOVERY_FINDING:
6fbe195d 1660 case DISCOVERY_RESOLVING:
30dc78e1
JH
1661 return true;
1662
6fbe195d
AG
1663 default:
1664 return false;
1665 }
30dc78e1
JH
1666}
1667
/* Transition the discovery state machine to @state and emit the
 * corresponding mgmt "discovering" events to userspace. A transition
 * to the same state is a no-op.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never actually
		 * began, so userspace was never told it started.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
1693
1f9b9a5d 1694void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1695{
30883512 1696 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1697 struct inquiry_entry *p, *n;
1da177e4 1698
561aafbc
JH
1699 list_for_each_entry_safe(p, n, &cache->all, all) {
1700 list_del(&p->all);
b57c1a56 1701 kfree(p);
1da177e4 1702 }
561aafbc
JH
1703
1704 INIT_LIST_HEAD(&cache->unknown);
1705 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1706}
1707
a8c5fb1a
GP
1708struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1709 bdaddr_t *bdaddr)
1da177e4 1710{
30883512 1711 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1712 struct inquiry_entry *e;
1713
6ed93dc6 1714 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1715
561aafbc
JH
1716 list_for_each_entry(e, &cache->all, all) {
1717 if (!bacmp(&e->data.bdaddr, bdaddr))
1718 return e;
1719 }
1720
1721 return NULL;
1722}
1723
1724struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1725 bdaddr_t *bdaddr)
561aafbc 1726{
30883512 1727 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1728 struct inquiry_entry *e;
1729
6ed93dc6 1730 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1731
1732 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1733 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1734 return e;
1735 }
1736
1737 return NULL;
1da177e4
LT
1738}
1739
30dc78e1 1740struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1741 bdaddr_t *bdaddr,
1742 int state)
30dc78e1
JH
1743{
1744 struct discovery_state *cache = &hdev->discovery;
1745 struct inquiry_entry *e;
1746
6ed93dc6 1747 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1748
1749 list_for_each_entry(e, &cache->resolve, list) {
1750 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1751 return e;
1752 if (!bacmp(&e->data.bdaddr, bdaddr))
1753 return e;
1754 }
1755
1756 return NULL;
1757}
1758
/* Re-insert @ie into the resolve list, keeping the list ordered by
 * signal strength (smallest |RSSI| first, i.e. strongest signal
 * first) so the closest devices get their names resolved first.
 * Entries with a name request already in flight (NAME_PENDING) are
 * not displaced.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk until the first non-pending entry with a weaker or equal
	 * signal; @pos trails one step behind as the insertion point.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1777
/* Insert or refresh an inquiry result in the cache.
 *
 * Updates (or creates) the entry for data->bdaddr, tracks whether the
 * remote name is known and keeps the unknown/resolve lists in sync.
 * If @ssp is non-NULL it is set to the remote's SSP mode. Returns
 * true when the entry's name is known (no name resolution needed),
 * false when the name is still unknown or allocation failed.
 *
 * Caller must hold the hdev lock (GFP_ATOMIC allocation is used).
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry result invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Remember SSP support seen in any earlier result */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed while a name request is queued: re-sort
		 * the resolve list so stronger signals go first.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: take the entry off the unknown list */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1835
1836static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1837{
30883512 1838 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1839 struct inquiry_info *info = (struct inquiry_info *) buf;
1840 struct inquiry_entry *e;
1841 int copied = 0;
1842
561aafbc 1843 list_for_each_entry(e, &cache->all, all) {
1da177e4 1844 struct inquiry_data *data = &e->data;
b57c1a56
JH
1845
1846 if (copied >= num)
1847 break;
1848
1da177e4
LT
1849 bacpy(&info->bdaddr, &data->bdaddr);
1850 info->pscan_rep_mode = data->pscan_rep_mode;
1851 info->pscan_period_mode = data->pscan_period_mode;
1852 info->pscan_mode = data->pscan_mode;
1853 memcpy(info->dev_class, data->dev_class, 3);
1854 info->clock_offset = data->clock_offset;
b57c1a56 1855
1da177e4 1856 info++;
b57c1a56 1857 copied++;
1da177e4
LT
1858 }
1859
1860 BT_DBG("cache %p, copied %d", cache, copied);
1861 return copied;
1862}
1863
42c6b129 1864static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1865{
1866 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 1867 struct hci_dev *hdev = req->hdev;
1da177e4
LT
1868 struct hci_cp_inquiry cp;
1869
1870 BT_DBG("%s", hdev->name);
1871
1872 if (test_bit(HCI_INQUIRY, &hdev->flags))
1873 return;
1874
1875 /* Start Inquiry */
1876 memcpy(&cp.lap, &ir->lap, 3);
1877 cp.length = ir->length;
1878 cp.num_rsp = ir->num_rsp;
42c6b129 1879 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
1880}
1881
/* wait_on_bit() action: sleep until woken by the bit holder; a
 * non-zero return (pending signal) makes wait_on_bit() abort.
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1887
/* HCIINQUIRY ioctl handler: run (or reuse a recent) inquiry on the
 * device named in the user-supplied struct hci_inquiry_req and copy
 * the cached results back to userspace.
 *
 * Returns 0 on success; -EFAULT on copy errors, -ENODEV/-EBUSY/
 * -EOPNOTSUPP for missing or unsuitable devices, -EINTR if the wait
 * for inquiry completion is interrupted by a signal.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Device claimed by a user channel: not available for ioctls */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* Inquiry only makes sense on enabled BR/EDR controllers */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* Start a new inquiry if the cache is stale/empty or the caller
	 * explicitly asked for a flush; otherwise serve cached results.
	 */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in units of 1.28s; approximate with 2s here */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Copy the header (with the real num_rsp) then the entries */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
1977
/* Bring a controller up: validate preconditions (not unregistering,
 * not rfkilled, usable address), open the transport, run the driver
 * setup and the HCI init sequence, and notify the stack. On any init
 * failure the transport is fully torn down again.
 *
 * Returns 0 on success or a negative errno. Serialized via the
 * device's request lock.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random adddress, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the low-level transport via the driver */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup only runs during initial device setup */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Skip the HCI init sequence for raw devices and
		 * user-channel access.
		 */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Announce the power-on to mgmt, except during setup,
		 * for user-channel devices and for AMP controllers.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2084
cbed0ca1
JH
2085/* ---- HCI ioctl helpers ---- */
2086
/* HCIDEVUP ioctl entry point: look up device @dev, make sure any
 * pending power work has settled, then bring the device up via
 * hci_dev_do_open(). Returns 0 or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
2116
/* Bring the controller fully down: stop pending work, flush queues and
 * caches, optionally issue an HCI reset, and notify mgmt. Returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	/* A pending auto power-off must not race with this close. */
	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Not up: only the command timer can still be armed. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Cancel a running discoverable timeout and drop the flags. */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	/* Only reset when not in raw mode, not auto-powering off, and the
	 * driver explicitly asked for a reset on close. */
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Tell mgmt only when this is not an automatic power-off. */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2214
2215int hci_dev_close(__u16 dev)
2216{
2217 struct hci_dev *hdev;
2218 int err;
2219
70f23020
AE
2220 hdev = hci_dev_get(dev);
2221 if (!hdev)
1da177e4 2222 return -ENODEV;
8ee56540 2223
0736cfa8
MH
2224 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2225 err = -EBUSY;
2226 goto done;
2227 }
2228
8ee56540
MH
2229 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2230 cancel_delayed_work(&hdev->power_off);
2231
1da177e4 2232 err = hci_dev_do_close(hdev);
8ee56540 2233
0736cfa8 2234done:
1da177e4
LT
2235 hci_dev_put(hdev);
2236 return err;
2237}
2238
/* Reset the controller identified by @dev: drop queued traffic, flush
 * caches and connections, and (unless in raw mode) send HCI_Reset.
 * Returns 0 or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Reset only makes sense while the device is up. */
	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	/* A user channel has exclusive control of the device. */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore flow-control state: one command credit, no data credits. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2283
2284int hci_dev_reset_stat(__u16 dev)
2285{
2286 struct hci_dev *hdev;
2287 int ret = 0;
2288
70f23020
AE
2289 hdev = hci_dev_get(dev);
2290 if (!hdev)
1da177e4
LT
2291 return -ENODEV;
2292
0736cfa8
MH
2293 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2294 ret = -EBUSY;
2295 goto done;
2296 }
2297
1da177e4
LT
2298 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2299
0736cfa8 2300done:
1da177e4 2301 hci_dev_put(hdev);
1da177e4
LT
2302 return ret;
2303}
2304
/* Handle the HCISET* ioctls: copy the request from user space, validate
 * the target device (BR/EDR only, not user-channel owned), and apply the
 * requested setting. Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A user channel has exclusive control of the device. */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* These ioctls only apply to BR/EDR controllers ... */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* ... and only while BR/EDR is enabled on them. */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs the MTU in the high and the packet count
		 * in the low 16 bits. */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2395
/* HCIGETDEVLIST ioctl: copy up to the requested number of device
 * id/flags pairs back to user space. Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation below to a sane size. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy ioctl users take over power management from mgmt. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2442
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the requested
 * device and copy it back to user space. Returns 0 or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy ioctl users take over power management from mgmt. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Pack bus type in the low and device type in the high nibble. */
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE buffer settings in the
	 * ACL fields since the structure has no LE-specific members. */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2491
2492/* ---- Interface to HCI drivers ---- */
2493
611b30f7
MH
2494static int hci_rfkill_set_block(void *data, bool blocked)
2495{
2496 struct hci_dev *hdev = data;
2497
2498 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
2499
0736cfa8
MH
2500 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
2501 return -EBUSY;
2502
5e130367
JH
2503 if (blocked) {
2504 set_bit(HCI_RFKILLED, &hdev->dev_flags);
bf543036
JH
2505 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
2506 hci_dev_do_close(hdev);
5e130367
JH
2507 } else {
2508 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1025c04c 2509 }
611b30f7
MH
2510
2511 return 0;
2512}
2513
2514static const struct rfkill_ops hci_rfkill_ops = {
2515 .set_block = hci_rfkill_set_block,
2516};
2517
/* Work item: power on the controller, re-check error conditions that
 * were ignored during setup, and schedule auto power-off if needed.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		/* RFKILLed, or a BR/EDR device with neither a public nor
		 * a static address: shut it back down. */
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2549
2550static void hci_power_off(struct work_struct *work)
2551{
3243553f 2552 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 2553 power_off.work);
ab81cbf9
JH
2554
2555 BT_DBG("%s", hdev->name);
2556
8ee56540 2557 hci_dev_do_close(hdev);
ab81cbf9
JH
2558}
2559
16ab91ab
JH
2560static void hci_discov_off(struct work_struct *work)
2561{
2562 struct hci_dev *hdev;
16ab91ab
JH
2563
2564 hdev = container_of(work, struct hci_dev, discov_off.work);
2565
2566 BT_DBG("%s", hdev->name);
2567
d1967ff8 2568 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2569}
2570
35f7498a 2571void hci_uuids_clear(struct hci_dev *hdev)
2aeb9a1a 2572{
4821002c 2573 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2574
4821002c
JH
2575 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2576 list_del(&uuid->list);
2aeb9a1a
JH
2577 kfree(uuid);
2578 }
2aeb9a1a
JH
2579}
2580
35f7498a 2581void hci_link_keys_clear(struct hci_dev *hdev)
55ed8ca1
JH
2582{
2583 struct list_head *p, *n;
2584
2585 list_for_each_safe(p, n, &hdev->link_keys) {
2586 struct link_key *key;
2587
2588 key = list_entry(p, struct link_key, list);
2589
2590 list_del(p);
2591 kfree(key);
2592 }
55ed8ca1
JH
2593}
2594
35f7498a 2595void hci_smp_ltks_clear(struct hci_dev *hdev)
b899efaf
VCG
2596{
2597 struct smp_ltk *k, *tmp;
2598
2599 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2600 list_del(&k->list);
2601 kfree(k);
2602 }
b899efaf
VCG
2603}
2604
970c4e46
JH
2605void hci_smp_irks_clear(struct hci_dev *hdev)
2606{
2607 struct smp_irk *k, *tmp;
2608
2609 list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) {
2610 list_del(&k->list);
2611 kfree(k);
2612 }
2613}
2614
55ed8ca1
JH
2615struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2616{
8035ded4 2617 struct link_key *k;
55ed8ca1 2618
8035ded4 2619 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2620 if (bacmp(bdaddr, &k->bdaddr) == 0)
2621 return k;
55ed8ca1
JH
2622
2623 return NULL;
2624}
2625
745c0ce3 2626static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2627 u8 key_type, u8 old_key_type)
d25e28ab
JH
2628{
2629 /* Legacy key */
2630 if (key_type < 0x03)
745c0ce3 2631 return true;
d25e28ab
JH
2632
2633 /* Debug keys are insecure so don't store them persistently */
2634 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2635 return false;
d25e28ab
JH
2636
2637 /* Changed combination key and there's no previous one */
2638 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2639 return false;
d25e28ab
JH
2640
2641 /* Security mode 3 case */
2642 if (!conn)
745c0ce3 2643 return true;
d25e28ab
JH
2644
2645 /* Neither local nor remote side had no-bonding as requirement */
2646 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2647 return true;
d25e28ab
JH
2648
2649 /* Local side had dedicated bonding as requirement */
2650 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2651 return true;
d25e28ab
JH
2652
2653 /* Remote side had dedicated bonding as requirement */
2654 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2655 return true;
d25e28ab
JH
2656
2657 /* If none of the above criteria match, then don't store the key
2658 * persistently */
745c0ce3 2659 return false;
d25e28ab
JH
2660}
2661
98a0b845
JH
2662static bool ltk_type_master(u8 type)
2663{
2664 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2665 return true;
2666
2667 return false;
2668}
2669
2670struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2671 bool master)
75d262c2 2672{
c9839a11 2673 struct smp_ltk *k;
75d262c2 2674
c9839a11
VCG
2675 list_for_each_entry(k, &hdev->long_term_keys, list) {
2676 if (k->ediv != ediv ||
a8c5fb1a 2677 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
2678 continue;
2679
98a0b845
JH
2680 if (ltk_type_master(k->type) != master)
2681 continue;
2682
c9839a11 2683 return k;
75d262c2
VCG
2684 }
2685
2686 return NULL;
2687}
75d262c2 2688
c9839a11 2689struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 2690 u8 addr_type, bool master)
75d262c2 2691{
c9839a11 2692 struct smp_ltk *k;
75d262c2 2693
c9839a11
VCG
2694 list_for_each_entry(k, &hdev->long_term_keys, list)
2695 if (addr_type == k->bdaddr_type &&
98a0b845
JH
2696 bacmp(bdaddr, &k->bdaddr) == 0 &&
2697 ltk_type_master(k->type) == master)
75d262c2
VCG
2698 return k;
2699
2700 return NULL;
2701}
75d262c2 2702
970c4e46
JH
2703struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa)
2704{
2705 struct smp_irk *irk;
2706
2707 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2708 if (!bacmp(&irk->rpa, rpa))
2709 return irk;
2710 }
2711
2712 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2713 if (smp_irk_matches(hdev->tfm_aes, irk->val, rpa)) {
2714 bacpy(&irk->rpa, rpa);
2715 return irk;
2716 }
2717 }
2718
2719 return NULL;
2720}
2721
2722struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
2723 u8 addr_type)
2724{
2725 struct smp_irk *irk;
2726
6cfc9988
JH
2727 /* Identity Address must be public or static random */
2728 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2729 return NULL;
2730
970c4e46
JH
2731 list_for_each_entry(irk, &hdev->identity_resolving_keys, list) {
2732 if (addr_type == irk->addr_type &&
2733 bacmp(bdaddr, &irk->bdaddr) == 0)
2734 return irk;
2735 }
2736
2737 return NULL;
2738}
2739
d25e28ab 2740int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 2741 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
2742{
2743 struct link_key *key, *old_key;
745c0ce3
VA
2744 u8 old_key_type;
2745 bool persistent;
55ed8ca1
JH
2746
2747 old_key = hci_find_link_key(hdev, bdaddr);
2748 if (old_key) {
2749 old_key_type = old_key->type;
2750 key = old_key;
2751 } else {
12adcf3a 2752 old_key_type = conn ? conn->key_type : 0xff;
0a14ab41 2753 key = kzalloc(sizeof(*key), GFP_KERNEL);
55ed8ca1
JH
2754 if (!key)
2755 return -ENOMEM;
2756 list_add(&key->list, &hdev->link_keys);
2757 }
2758
6ed93dc6 2759 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2760
d25e28ab
JH
2761 /* Some buggy controller combinations generate a changed
2762 * combination key for legacy pairing even when there's no
2763 * previous key */
2764 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2765 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2766 type = HCI_LK_COMBINATION;
655fe6ec
JH
2767 if (conn)
2768 conn->key_type = type;
2769 }
d25e28ab 2770
55ed8ca1 2771 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2772 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2773 key->pin_len = pin_len;
2774
b6020ba0 2775 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2776 key->type = old_key_type;
4748fed2
JH
2777 else
2778 key->type = type;
2779
4df378a1
JH
2780 if (!new_key)
2781 return 0;
2782
2783 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2784
744cf19e 2785 mgmt_new_link_key(hdev, key, persistent);
4df378a1 2786
6ec5bcad
VA
2787 if (conn)
2788 conn->flush_key = !persistent;
55ed8ca1
JH
2789
2790 return 0;
2791}
2792
ca9142b8 2793struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr,
35d70271
JH
2794 u8 addr_type, u8 type, u8 authenticated,
2795 u8 tk[16], u8 enc_size, __le16 ediv, u8 rand[8])
75d262c2 2796{
c9839a11 2797 struct smp_ltk *key, *old_key;
98a0b845 2798 bool master = ltk_type_master(type);
75d262c2 2799
98a0b845 2800 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
c9839a11 2801 if (old_key)
75d262c2 2802 key = old_key;
c9839a11 2803 else {
0a14ab41 2804 key = kzalloc(sizeof(*key), GFP_KERNEL);
75d262c2 2805 if (!key)
ca9142b8 2806 return NULL;
c9839a11 2807 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2808 }
2809
75d262c2 2810 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2811 key->bdaddr_type = addr_type;
2812 memcpy(key->val, tk, sizeof(key->val));
2813 key->authenticated = authenticated;
2814 key->ediv = ediv;
2815 key->enc_size = enc_size;
2816 key->type = type;
2817 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 2818
ca9142b8 2819 return key;
75d262c2
VCG
2820}
2821
ca9142b8
JH
2822struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
2823 u8 addr_type, u8 val[16], bdaddr_t *rpa)
970c4e46
JH
2824{
2825 struct smp_irk *irk;
2826
2827 irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type);
2828 if (!irk) {
2829 irk = kzalloc(sizeof(*irk), GFP_KERNEL);
2830 if (!irk)
ca9142b8 2831 return NULL;
970c4e46
JH
2832
2833 bacpy(&irk->bdaddr, bdaddr);
2834 irk->addr_type = addr_type;
2835
2836 list_add(&irk->list, &hdev->identity_resolving_keys);
2837 }
2838
2839 memcpy(irk->val, val, 16);
2840 bacpy(&irk->rpa, rpa);
2841
ca9142b8 2842 return irk;
970c4e46
JH
2843}
2844
55ed8ca1
JH
2845int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2846{
2847 struct link_key *key;
2848
2849 key = hci_find_link_key(hdev, bdaddr);
2850 if (!key)
2851 return -ENOENT;
2852
6ed93dc6 2853 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
2854
2855 list_del(&key->list);
2856 kfree(key);
2857
2858 return 0;
2859}
2860
e0b2b27e 2861int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type)
b899efaf
VCG
2862{
2863 struct smp_ltk *k, *tmp;
c51ffa0b 2864 int removed = 0;
b899efaf
VCG
2865
2866 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
e0b2b27e 2867 if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type)
b899efaf
VCG
2868 continue;
2869
6ed93dc6 2870 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
2871
2872 list_del(&k->list);
2873 kfree(k);
c51ffa0b 2874 removed++;
b899efaf
VCG
2875 }
2876
c51ffa0b 2877 return removed ? 0 : -ENOENT;
b899efaf
VCG
2878}
2879
a7ec7338
JH
2880void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type)
2881{
2882 struct smp_irk *k, *tmp;
2883
2884 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2885 if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type)
2886 continue;
2887
2888 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2889
2890 list_del(&k->list);
2891 kfree(k);
2892 }
2893}
2894
6bd32326 2895/* HCI command timer function */
bda4f23a 2896static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
2897{
2898 struct hci_dev *hdev = (void *) arg;
2899
bda4f23a
AE
2900 if (hdev->sent_cmd) {
2901 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2902 u16 opcode = __le16_to_cpu(sent->opcode);
2903
2904 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2905 } else {
2906 BT_ERR("%s command tx timeout", hdev->name);
2907 }
2908
6bd32326 2909 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2910 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2911}
2912
2763eda6 2913struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 2914 bdaddr_t *bdaddr)
2763eda6
SJ
2915{
2916 struct oob_data *data;
2917
2918 list_for_each_entry(data, &hdev->remote_oob_data, list)
2919 if (bacmp(bdaddr, &data->bdaddr) == 0)
2920 return data;
2921
2922 return NULL;
2923}
2924
2925int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2926{
2927 struct oob_data *data;
2928
2929 data = hci_find_remote_oob_data(hdev, bdaddr);
2930 if (!data)
2931 return -ENOENT;
2932
6ed93dc6 2933 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
2934
2935 list_del(&data->list);
2936 kfree(data);
2937
2938 return 0;
2939}
2940
35f7498a 2941void hci_remote_oob_data_clear(struct hci_dev *hdev)
2763eda6
SJ
2942{
2943 struct oob_data *data, *n;
2944
2945 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2946 list_del(&data->list);
2947 kfree(data);
2948 }
2763eda6
SJ
2949}
2950
0798872e
MH
2951int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2952 u8 *hash, u8 *randomizer)
2763eda6
SJ
2953{
2954 struct oob_data *data;
2955
2956 data = hci_find_remote_oob_data(hdev, bdaddr);
2763eda6 2957 if (!data) {
0a14ab41 2958 data = kmalloc(sizeof(*data), GFP_KERNEL);
2763eda6
SJ
2959 if (!data)
2960 return -ENOMEM;
2961
2962 bacpy(&data->bdaddr, bdaddr);
2963 list_add(&data->list, &hdev->remote_oob_data);
2964 }
2965
519ca9d0
MH
2966 memcpy(data->hash192, hash, sizeof(data->hash192));
2967 memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));
2763eda6 2968
0798872e
MH
2969 memset(data->hash256, 0, sizeof(data->hash256));
2970 memset(data->randomizer256, 0, sizeof(data->randomizer256));
2971
2972 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2973
2974 return 0;
2975}
2976
2977int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
2978 u8 *hash192, u8 *randomizer192,
2979 u8 *hash256, u8 *randomizer256)
2980{
2981 struct oob_data *data;
2982
2983 data = hci_find_remote_oob_data(hdev, bdaddr);
2984 if (!data) {
0a14ab41 2985 data = kmalloc(sizeof(*data), GFP_KERNEL);
0798872e
MH
2986 if (!data)
2987 return -ENOMEM;
2988
2989 bacpy(&data->bdaddr, bdaddr);
2990 list_add(&data->list, &hdev->remote_oob_data);
2991 }
2992
2993 memcpy(data->hash192, hash192, sizeof(data->hash192));
2994 memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));
2995
2996 memcpy(data->hash256, hash256, sizeof(data->hash256));
2997 memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));
2998
6ed93dc6 2999 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
3000
3001 return 0;
3002}
3003
b9ee0a78
MH
3004struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
3005 bdaddr_t *bdaddr, u8 type)
b2a66aad 3006{
8035ded4 3007 struct bdaddr_list *b;
b2a66aad 3008
b9ee0a78
MH
3009 list_for_each_entry(b, &hdev->blacklist, list) {
3010 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 3011 return b;
b9ee0a78 3012 }
b2a66aad
AJ
3013
3014 return NULL;
3015}
3016
35f7498a 3017void hci_blacklist_clear(struct hci_dev *hdev)
b2a66aad
AJ
3018{
3019 struct list_head *p, *n;
3020
3021 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 3022 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
3023
3024 list_del(p);
3025 kfree(b);
3026 }
b2a66aad
AJ
3027}
3028
88c1fe4b 3029int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3030{
3031 struct bdaddr_list *entry;
b2a66aad 3032
b9ee0a78 3033 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
3034 return -EBADF;
3035
b9ee0a78 3036 if (hci_blacklist_lookup(hdev, bdaddr, type))
5e762444 3037 return -EEXIST;
b2a66aad
AJ
3038
3039 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
3040 if (!entry)
3041 return -ENOMEM;
b2a66aad
AJ
3042
3043 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 3044 entry->bdaddr_type = type;
b2a66aad
AJ
3045
3046 list_add(&entry->list, &hdev->blacklist);
3047
88c1fe4b 3048 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
3049}
3050
88c1fe4b 3051int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
3052{
3053 struct bdaddr_list *entry;
b2a66aad 3054
35f7498a
JH
3055 if (!bacmp(bdaddr, BDADDR_ANY)) {
3056 hci_blacklist_clear(hdev);
3057 return 0;
3058 }
b2a66aad 3059
b9ee0a78 3060 entry = hci_blacklist_lookup(hdev, bdaddr, type);
1ec918ce 3061 if (!entry)
5e762444 3062 return -ENOENT;
b2a66aad
AJ
3063
3064 list_del(&entry->list);
3065 kfree(entry);
3066
88c1fe4b 3067 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
3068}
3069
15819a70
AG
3070/* This function requires the caller holds hdev->lock */
3071struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
3072 bdaddr_t *addr, u8 addr_type)
3073{
3074 struct hci_conn_params *params;
3075
3076 list_for_each_entry(params, &hdev->le_conn_params, list) {
3077 if (bacmp(&params->addr, addr) == 0 &&
3078 params->addr_type == addr_type) {
3079 return params;
3080 }
3081 }
3082
3083 return NULL;
3084}
3085
/* Add (or update) the preferred LE connection interval range for
 * @addr/@addr_type.
 *
 * This function requires the caller holds hdev->lock */
void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			 u16 conn_min_interval, u16 conn_max_interval)
{
	struct hci_conn_params *params;

	/* An existing entry just gets its interval range refreshed. */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params) {
		params->conn_min_interval = conn_min_interval;
		params->conn_max_interval = conn_max_interval;
		return;
	}

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;

	list_add(&params->list, &hdev->le_conn_params);

	BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
	       conn_max_interval);
}
3116
3117/* This function requires the caller holds hdev->lock */
3118void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
3119{
3120 struct hci_conn_params *params;
3121
3122 params = hci_conn_params_lookup(hdev, addr, addr_type);
3123 if (!params)
3124 return;
3125
3126 list_del(&params->list);
3127 kfree(params);
3128
3129 BT_DBG("addr %pMR (type %u)", addr, addr_type);
3130}
3131
3132/* This function requires the caller holds hdev->lock */
3133void hci_conn_params_clear(struct hci_dev *hdev)
3134{
3135 struct hci_conn_params *params, *tmp;
3136
3137 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3138 list_del(&params->list);
3139 kfree(params);
3140 }
3141
3142 BT_DBG("All LE connection parameters were removed");
3143}
3144
4c87eaab 3145static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3146{
4c87eaab
AG
3147 if (status) {
3148 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3149
4c87eaab
AG
3150 hci_dev_lock(hdev);
3151 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3152 hci_dev_unlock(hdev);
3153 return;
3154 }
7ba8b4be
AG
3155}
3156
4c87eaab 3157static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3158{
4c87eaab
AG
3159 /* General inquiry access code (GIAC) */
3160 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3161 struct hci_request req;
3162 struct hci_cp_inquiry cp;
7ba8b4be
AG
3163 int err;
3164
4c87eaab
AG
3165 if (status) {
3166 BT_ERR("Failed to disable LE scanning: status %d", status);
3167 return;
3168 }
7ba8b4be 3169
4c87eaab
AG
3170 switch (hdev->discovery.type) {
3171 case DISCOV_TYPE_LE:
3172 hci_dev_lock(hdev);
3173 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3174 hci_dev_unlock(hdev);
3175 break;
7ba8b4be 3176
4c87eaab
AG
3177 case DISCOV_TYPE_INTERLEAVED:
3178 hci_req_init(&req, hdev);
7ba8b4be 3179
4c87eaab
AG
3180 memset(&cp, 0, sizeof(cp));
3181 memcpy(&cp.lap, lap, sizeof(cp.lap));
3182 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3183 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3184
4c87eaab 3185 hci_dev_lock(hdev);
7dbfac1d 3186
4c87eaab 3187 hci_inquiry_cache_flush(hdev);
7dbfac1d 3188
4c87eaab
AG
3189 err = hci_req_run(&req, inquiry_complete);
3190 if (err) {
3191 BT_ERR("Inquiry request failed: err %d", err);
3192 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3193 }
7dbfac1d 3194
4c87eaab
AG
3195 hci_dev_unlock(hdev);
3196 break;
7dbfac1d 3197 }
7dbfac1d
AG
3198}
3199
/* Delayed work item: send LE_Set_Scan_Enable(disable); the follow-up
 * handling happens in le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
3220
9be0dab7
DH
3221/* Alloc HCI device */
3222struct hci_dev *hci_alloc_dev(void)
3223{
3224 struct hci_dev *hdev;
3225
3226 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
3227 if (!hdev)
3228 return NULL;
3229
b1b813d4
DH
3230 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
3231 hdev->esco_type = (ESCO_HV1);
3232 hdev->link_mode = (HCI_LM_ACCEPT);
b4cb9fb2
MH
3233 hdev->num_iac = 0x01; /* One IAC support is mandatory */
3234 hdev->io_capability = 0x03; /* No Input No Output */
bbaf444a
JH
3235 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
3236 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
b1b813d4 3237
b1b813d4
DH
3238 hdev->sniff_max_interval = 800;
3239 hdev->sniff_min_interval = 80;
3240
bef64738
MH
3241 hdev->le_scan_interval = 0x0060;
3242 hdev->le_scan_window = 0x0030;
4e70c7e7
MH
3243 hdev->le_conn_min_interval = 0x0028;
3244 hdev->le_conn_max_interval = 0x0038;
bef64738 3245
b1b813d4
DH
3246 mutex_init(&hdev->lock);
3247 mutex_init(&hdev->req_lock);
3248
3249 INIT_LIST_HEAD(&hdev->mgmt_pending);
3250 INIT_LIST_HEAD(&hdev->blacklist);
3251 INIT_LIST_HEAD(&hdev->uuids);
3252 INIT_LIST_HEAD(&hdev->link_keys);
3253 INIT_LIST_HEAD(&hdev->long_term_keys);
970c4e46 3254 INIT_LIST_HEAD(&hdev->identity_resolving_keys);
b1b813d4 3255 INIT_LIST_HEAD(&hdev->remote_oob_data);
15819a70 3256 INIT_LIST_HEAD(&hdev->le_conn_params);
6b536b5e 3257 INIT_LIST_HEAD(&hdev->conn_hash.list);
b1b813d4
DH
3258
3259 INIT_WORK(&hdev->rx_work, hci_rx_work);
3260 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3261 INIT_WORK(&hdev->tx_work, hci_tx_work);
3262 INIT_WORK(&hdev->power_on, hci_power_on);
b1b813d4 3263
b1b813d4
DH
3264 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
3265 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
3266 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
3267
b1b813d4
DH
3268 skb_queue_head_init(&hdev->rx_q);
3269 skb_queue_head_init(&hdev->cmd_q);
3270 skb_queue_head_init(&hdev->raw_q);
3271
3272 init_waitqueue_head(&hdev->req_wait_q);
3273
bda4f23a 3274 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
b1b813d4 3275
b1b813d4
DH
3276 hci_init_sysfs(hdev);
3277 discovery_init(hdev);
9be0dab7
DH
3278
3279 return hdev;
3280}
3281EXPORT_SYMBOL(hci_alloc_dev);
3282
3283/* Free HCI device */
3284void hci_free_dev(struct hci_dev *hdev)
3285{
9be0dab7
DH
3286 /* will free via device release */
3287 put_device(&hdev->dev);
3288}
3289EXPORT_SYMBOL(hci_free_dev);
3290
1da177e4
LT
3291/* Register HCI device */
3292int hci_register_dev(struct hci_dev *hdev)
3293{
b1b813d4 3294 int id, error;
1da177e4 3295
010666a1 3296 if (!hdev->open || !hdev->close)
1da177e4
LT
3297 return -EINVAL;
3298
08add513
MM
3299 /* Do not allow HCI_AMP devices to register at index 0,
3300 * so the index can be used as the AMP controller ID.
3301 */
3df92b31
SL
3302 switch (hdev->dev_type) {
3303 case HCI_BREDR:
3304 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
3305 break;
3306 case HCI_AMP:
3307 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
3308 break;
3309 default:
3310 return -EINVAL;
1da177e4 3311 }
8e87d142 3312
3df92b31
SL
3313 if (id < 0)
3314 return id;
3315
1da177e4
LT
3316 sprintf(hdev->name, "hci%d", id);
3317 hdev->id = id;
2d8b3a11
AE
3318
3319 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
3320
d8537548
KC
3321 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3322 WQ_MEM_RECLAIM, 1, hdev->name);
33ca954d
DH
3323 if (!hdev->workqueue) {
3324 error = -ENOMEM;
3325 goto err;
3326 }
f48fd9c8 3327
d8537548
KC
3328 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
3329 WQ_MEM_RECLAIM, 1, hdev->name);
6ead1bbc
JH
3330 if (!hdev->req_workqueue) {
3331 destroy_workqueue(hdev->workqueue);
3332 error = -ENOMEM;
3333 goto err;
3334 }
3335
0153e2ec
MH
3336 if (!IS_ERR_OR_NULL(bt_debugfs))
3337 hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);
3338
bdc3e0f1
MH
3339 dev_set_name(&hdev->dev, "%s", hdev->name);
3340
99780a7b
JH
3341 hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
3342 CRYPTO_ALG_ASYNC);
3343 if (IS_ERR(hdev->tfm_aes)) {
3344 BT_ERR("Unable to create crypto context");
3345 error = PTR_ERR(hdev->tfm_aes);
3346 hdev->tfm_aes = NULL;
3347 goto err_wqueue;
3348 }
3349
bdc3e0f1 3350 error = device_add(&hdev->dev);
33ca954d 3351 if (error < 0)
99780a7b 3352 goto err_tfm;
1da177e4 3353
611b30f7 3354 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
a8c5fb1a
GP
3355 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
3356 hdev);
611b30f7
MH
3357 if (hdev->rfkill) {
3358 if (rfkill_register(hdev->rfkill) < 0) {
3359 rfkill_destroy(hdev->rfkill);
3360 hdev->rfkill = NULL;
3361 }
3362 }
3363
5e130367
JH
3364 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
3365 set_bit(HCI_RFKILLED, &hdev->dev_flags);
3366
a8b2d5c2 3367 set_bit(HCI_SETUP, &hdev->dev_flags);
004b0258 3368 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
ce2be9ac 3369
01cd3404 3370 if (hdev->dev_type == HCI_BREDR) {
56f87901
JH
3371 /* Assume BR/EDR support until proven otherwise (such as
3372 * through reading supported features during init.
3373 */
3374 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
3375 }
ce2be9ac 3376
fcee3377
GP
3377 write_lock(&hci_dev_list_lock);
3378 list_add(&hdev->list, &hci_dev_list);
3379 write_unlock(&hci_dev_list_lock);
3380
1da177e4 3381 hci_notify(hdev, HCI_DEV_REG);
dc946bd8 3382 hci_dev_hold(hdev);
1da177e4 3383
19202573 3384 queue_work(hdev->req_workqueue, &hdev->power_on);
fbe96d6f 3385
1da177e4 3386 return id;
f48fd9c8 3387
99780a7b
JH
3388err_tfm:
3389 crypto_free_blkcipher(hdev->tfm_aes);
33ca954d
DH
3390err_wqueue:
3391 destroy_workqueue(hdev->workqueue);
6ead1bbc 3392 destroy_workqueue(hdev->req_workqueue);
33ca954d 3393err:
3df92b31 3394 ida_simple_remove(&hci_index_ida, hdev->id);
f48fd9c8 3395
33ca954d 3396 return error;
1da177e4
LT
3397}
3398EXPORT_SYMBOL(hci_register_dev);
3399
3400/* Unregister HCI device */
59735631 3401void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 3402{
3df92b31 3403 int i, id;
ef222013 3404
c13854ce 3405 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 3406
94324962
JH
3407 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
3408
3df92b31
SL
3409 id = hdev->id;
3410
f20d09d5 3411 write_lock(&hci_dev_list_lock);
1da177e4 3412 list_del(&hdev->list);
f20d09d5 3413 write_unlock(&hci_dev_list_lock);
1da177e4
LT
3414
3415 hci_dev_do_close(hdev);
3416
cd4c5391 3417 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
3418 kfree_skb(hdev->reassembly[i]);
3419
b9b5ef18
GP
3420 cancel_work_sync(&hdev->power_on);
3421
ab81cbf9 3422 if (!test_bit(HCI_INIT, &hdev->flags) &&
a8c5fb1a 3423 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
09fd0de5 3424 hci_dev_lock(hdev);
744cf19e 3425 mgmt_index_removed(hdev);
09fd0de5 3426 hci_dev_unlock(hdev);
56e5cb86 3427 }
ab81cbf9 3428
2e58ef3e
JH
3429 /* mgmt_index_removed should take care of emptying the
3430 * pending list */
3431 BUG_ON(!list_empty(&hdev->mgmt_pending));
3432
1da177e4
LT
3433 hci_notify(hdev, HCI_DEV_UNREG);
3434
611b30f7
MH
3435 if (hdev->rfkill) {
3436 rfkill_unregister(hdev->rfkill);
3437 rfkill_destroy(hdev->rfkill);
3438 }
3439
99780a7b
JH
3440 if (hdev->tfm_aes)
3441 crypto_free_blkcipher(hdev->tfm_aes);
3442
bdc3e0f1 3443 device_del(&hdev->dev);
147e2d59 3444
0153e2ec
MH
3445 debugfs_remove_recursive(hdev->debugfs);
3446
f48fd9c8 3447 destroy_workqueue(hdev->workqueue);
6ead1bbc 3448 destroy_workqueue(hdev->req_workqueue);
f48fd9c8 3449
09fd0de5 3450 hci_dev_lock(hdev);
e2e0cacb 3451 hci_blacklist_clear(hdev);
2aeb9a1a 3452 hci_uuids_clear(hdev);
55ed8ca1 3453 hci_link_keys_clear(hdev);
b899efaf 3454 hci_smp_ltks_clear(hdev);
970c4e46 3455 hci_smp_irks_clear(hdev);
2763eda6 3456 hci_remote_oob_data_clear(hdev);
15819a70 3457 hci_conn_params_clear(hdev);
09fd0de5 3458 hci_dev_unlock(hdev);
e2e0cacb 3459
dc946bd8 3460 hci_dev_put(hdev);
3df92b31
SL
3461
3462 ida_simple_remove(&hci_index_ida, id);
1da177e4
LT
3463}
3464EXPORT_SYMBOL(hci_unregister_dev);
3465
3466/* Suspend HCI device */
3467int hci_suspend_dev(struct hci_dev *hdev)
3468{
3469 hci_notify(hdev, HCI_DEV_SUSPEND);
3470 return 0;
3471}
3472EXPORT_SYMBOL(hci_suspend_dev);
3473
3474/* Resume HCI device */
3475int hci_resume_dev(struct hci_dev *hdev)
3476{
3477 hci_notify(hdev, HCI_DEV_RESUME);
3478 return 0;
3479}
3480EXPORT_SYMBOL(hci_resume_dev);
3481
76bca880 3482/* Receive frame from HCI drivers */
e1a26170 3483int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3484{
76bca880 3485 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3486 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3487 kfree_skb(skb);
3488 return -ENXIO;
3489 }
3490
d82603c6 3491 /* Incoming skb */
76bca880
MH
3492 bt_cb(skb)->incoming = 1;
3493
3494 /* Time stamp */
3495 __net_timestamp(skb);
3496
76bca880 3497 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3498 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3499
76bca880
MH
3500 return 0;
3501}
3502EXPORT_SYMBOL(hci_recv_frame);
3503
33e882a5 3504static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 3505 int count, __u8 index)
33e882a5
SS
3506{
3507 int len = 0;
3508 int hlen = 0;
3509 int remain = count;
3510 struct sk_buff *skb;
3511 struct bt_skb_cb *scb;
3512
3513 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 3514 index >= NUM_REASSEMBLY)
33e882a5
SS
3515 return -EILSEQ;
3516
3517 skb = hdev->reassembly[index];
3518
3519 if (!skb) {
3520 switch (type) {
3521 case HCI_ACLDATA_PKT:
3522 len = HCI_MAX_FRAME_SIZE;
3523 hlen = HCI_ACL_HDR_SIZE;
3524 break;
3525 case HCI_EVENT_PKT:
3526 len = HCI_MAX_EVENT_SIZE;
3527 hlen = HCI_EVENT_HDR_SIZE;
3528 break;
3529 case HCI_SCODATA_PKT:
3530 len = HCI_MAX_SCO_SIZE;
3531 hlen = HCI_SCO_HDR_SIZE;
3532 break;
3533 }
3534
1e429f38 3535 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
3536 if (!skb)
3537 return -ENOMEM;
3538
3539 scb = (void *) skb->cb;
3540 scb->expect = hlen;
3541 scb->pkt_type = type;
3542
33e882a5
SS
3543 hdev->reassembly[index] = skb;
3544 }
3545
3546 while (count) {
3547 scb = (void *) skb->cb;
89bb46d0 3548 len = min_t(uint, scb->expect, count);
33e882a5
SS
3549
3550 memcpy(skb_put(skb, len), data, len);
3551
3552 count -= len;
3553 data += len;
3554 scb->expect -= len;
3555 remain = count;
3556
3557 switch (type) {
3558 case HCI_EVENT_PKT:
3559 if (skb->len == HCI_EVENT_HDR_SIZE) {
3560 struct hci_event_hdr *h = hci_event_hdr(skb);
3561 scb->expect = h->plen;
3562
3563 if (skb_tailroom(skb) < scb->expect) {
3564 kfree_skb(skb);
3565 hdev->reassembly[index] = NULL;
3566 return -ENOMEM;
3567 }
3568 }
3569 break;
3570
3571 case HCI_ACLDATA_PKT:
3572 if (skb->len == HCI_ACL_HDR_SIZE) {
3573 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3574 scb->expect = __le16_to_cpu(h->dlen);
3575
3576 if (skb_tailroom(skb) < scb->expect) {
3577 kfree_skb(skb);
3578 hdev->reassembly[index] = NULL;
3579 return -ENOMEM;
3580 }
3581 }
3582 break;
3583
3584 case HCI_SCODATA_PKT:
3585 if (skb->len == HCI_SCO_HDR_SIZE) {
3586 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3587 scb->expect = h->dlen;
3588
3589 if (skb_tailroom(skb) < scb->expect) {
3590 kfree_skb(skb);
3591 hdev->reassembly[index] = NULL;
3592 return -ENOMEM;
3593 }
3594 }
3595 break;
3596 }
3597
3598 if (scb->expect == 0) {
3599 /* Complete frame */
3600
3601 bt_cb(skb)->pkt_type = type;
e1a26170 3602 hci_recv_frame(hdev, skb);
33e882a5
SS
3603
3604 hdev->reassembly[index] = NULL;
3605 return remain;
3606 }
3607 }
3608
3609 return remain;
3610}
3611
ef222013
MH
3612int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3613{
f39a3c06
SS
3614 int rem = 0;
3615
ef222013
MH
3616 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3617 return -EILSEQ;
3618
da5f6c37 3619 while (count) {
1e429f38 3620 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
3621 if (rem < 0)
3622 return rem;
ef222013 3623
f39a3c06
SS
3624 data += (count - rem);
3625 count = rem;
f81c6224 3626 }
ef222013 3627
f39a3c06 3628 return rem;
ef222013
MH
3629}
3630EXPORT_SYMBOL(hci_recv_fragment);
3631
99811510
SS
3632#define STREAM_REASSEMBLY 0
3633
3634int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3635{
3636 int type;
3637 int rem = 0;
3638
da5f6c37 3639 while (count) {
99811510
SS
3640 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3641
3642 if (!skb) {
3643 struct { char type; } *pkt;
3644
3645 /* Start of the frame */
3646 pkt = data;
3647 type = pkt->type;
3648
3649 data++;
3650 count--;
3651 } else
3652 type = bt_cb(skb)->pkt_type;
3653
1e429f38 3654 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 3655 STREAM_REASSEMBLY);
99811510
SS
3656 if (rem < 0)
3657 return rem;
3658
3659 data += (count - rem);
3660 count = rem;
f81c6224 3661 }
99811510
SS
3662
3663 return rem;
3664}
3665EXPORT_SYMBOL(hci_recv_stream_fragment);
3666
1da177e4
LT
3667/* ---- Interface to upper protocols ---- */
3668
1da177e4
LT
3669int hci_register_cb(struct hci_cb *cb)
3670{
3671 BT_DBG("%p name %s", cb, cb->name);
3672
f20d09d5 3673 write_lock(&hci_cb_list_lock);
1da177e4 3674 list_add(&cb->list, &hci_cb_list);
f20d09d5 3675 write_unlock(&hci_cb_list_lock);
1da177e4
LT
3676
3677 return 0;
3678}
3679EXPORT_SYMBOL(hci_register_cb);
3680
3681int hci_unregister_cb(struct hci_cb *cb)
3682{
3683 BT_DBG("%p name %s", cb, cb->name);
3684
f20d09d5 3685 write_lock(&hci_cb_list_lock);
1da177e4 3686 list_del(&cb->list);
f20d09d5 3687 write_unlock(&hci_cb_list_lock);
1da177e4
LT
3688
3689 return 0;
3690}
3691EXPORT_SYMBOL(hci_unregister_cb);
3692
51086991 3693static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4 3694{
0d48d939 3695 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4 3696
cd82e61c
MH
3697 /* Time stamp */
3698 __net_timestamp(skb);
1da177e4 3699
cd82e61c
MH
3700 /* Send copy to monitor */
3701 hci_send_to_monitor(hdev, skb);
3702
3703 if (atomic_read(&hdev->promisc)) {
3704 /* Send copy to the sockets */
470fe1b5 3705 hci_send_to_sock(hdev, skb);
1da177e4
LT
3706 }
3707
3708 /* Get rid of skb owner, prior to sending to the driver. */
3709 skb_orphan(skb);
3710
7bd8f09f 3711 if (hdev->send(hdev, skb) < 0)
51086991 3712 BT_ERR("%s sending frame failed", hdev->name);
1da177e4
LT
3713}
3714
3119ae95
JH
3715void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3716{
3717 skb_queue_head_init(&req->cmd_q);
3718 req->hdev = hdev;
5d73e034 3719 req->err = 0;
3119ae95
JH
3720}
3721
3722int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
3723{
3724 struct hci_dev *hdev = req->hdev;
3725 struct sk_buff *skb;
3726 unsigned long flags;
3727
3728 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
3729
5d73e034
AG
3730 /* If an error occured during request building, remove all HCI
3731 * commands queued on the HCI request queue.
3732 */
3733 if (req->err) {
3734 skb_queue_purge(&req->cmd_q);
3735 return req->err;
3736 }
3737
3119ae95
JH
3738 /* Do not allow empty requests */
3739 if (skb_queue_empty(&req->cmd_q))
382b0c39 3740 return -ENODATA;
3119ae95
JH
3741
3742 skb = skb_peek_tail(&req->cmd_q);
3743 bt_cb(skb)->req.complete = complete;
3744
3745 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3746 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
3747 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3748
3749 queue_work(hdev->workqueue, &hdev->cmd_work);
3750
3751 return 0;
3752}
3753
1ca3a9d0 3754static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 3755 u32 plen, const void *param)
1da177e4
LT
3756{
3757 int len = HCI_COMMAND_HDR_SIZE + plen;
3758 struct hci_command_hdr *hdr;
3759 struct sk_buff *skb;
3760
1da177e4 3761 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
3762 if (!skb)
3763 return NULL;
1da177e4
LT
3764
3765 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 3766 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
3767 hdr->plen = plen;
3768
3769 if (plen)
3770 memcpy(skb_put(skb, plen), param, plen);
3771
3772 BT_DBG("skb len %d", skb->len);
3773
0d48d939 3774 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 3775
1ca3a9d0
JH
3776 return skb;
3777}
3778
3779/* Send HCI command */
07dc93dd
JH
3780int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3781 const void *param)
1ca3a9d0
JH
3782{
3783 struct sk_buff *skb;
3784
3785 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3786
3787 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3788 if (!skb) {
3789 BT_ERR("%s no memory for command", hdev->name);
3790 return -ENOMEM;
3791 }
3792
11714b3d
JH
3793 /* Stand-alone HCI commands must be flaged as
3794 * single-command requests.
3795 */
3796 bt_cb(skb)->req.start = true;
3797
1da177e4 3798 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3799 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3800
3801 return 0;
3802}
1da177e4 3803
71c76a17 3804/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
3805void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
3806 const void *param, u8 event)
71c76a17
JH
3807{
3808 struct hci_dev *hdev = req->hdev;
3809 struct sk_buff *skb;
3810
3811 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3812
34739c1e
AG
3813 /* If an error occured during request building, there is no point in
3814 * queueing the HCI command. We can simply return.
3815 */
3816 if (req->err)
3817 return;
3818
71c76a17
JH
3819 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3820 if (!skb) {
5d73e034
AG
3821 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
3822 hdev->name, opcode);
3823 req->err = -ENOMEM;
e348fe6b 3824 return;
71c76a17
JH
3825 }
3826
3827 if (skb_queue_empty(&req->cmd_q))
3828 bt_cb(skb)->req.start = true;
3829
02350a72
JH
3830 bt_cb(skb)->req.event = event;
3831
71c76a17 3832 skb_queue_tail(&req->cmd_q, skb);
71c76a17
JH
3833}
3834
07dc93dd
JH
3835void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3836 const void *param)
02350a72
JH
3837{
3838 hci_req_add_ev(req, opcode, plen, param, 0);
3839}
3840
1da177e4 3841/* Get data from the previously sent command */
a9de9248 3842void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3843{
3844 struct hci_command_hdr *hdr;
3845
3846 if (!hdev->sent_cmd)
3847 return NULL;
3848
3849 hdr = (void *) hdev->sent_cmd->data;
3850
a9de9248 3851 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3852 return NULL;
3853
f0e09510 3854 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3855
3856 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3857}
3858
3859/* Send ACL data */
3860static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3861{
3862 struct hci_acl_hdr *hdr;
3863 int len = skb->len;
3864
badff6d0
ACM
3865 skb_push(skb, HCI_ACL_HDR_SIZE);
3866 skb_reset_transport_header(skb);
9c70220b 3867 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3868 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3869 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3870}
3871
ee22be7e 3872static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 3873 struct sk_buff *skb, __u16 flags)
1da177e4 3874{
ee22be7e 3875 struct hci_conn *conn = chan->conn;
1da177e4
LT
3876 struct hci_dev *hdev = conn->hdev;
3877 struct sk_buff *list;
3878
087bfd99
GP
3879 skb->len = skb_headlen(skb);
3880 skb->data_len = 0;
3881
3882 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
3883
3884 switch (hdev->dev_type) {
3885 case HCI_BREDR:
3886 hci_add_acl_hdr(skb, conn->handle, flags);
3887 break;
3888 case HCI_AMP:
3889 hci_add_acl_hdr(skb, chan->handle, flags);
3890 break;
3891 default:
3892 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3893 return;
3894 }
087bfd99 3895
70f23020
AE
3896 list = skb_shinfo(skb)->frag_list;
3897 if (!list) {
1da177e4
LT
3898 /* Non fragmented */
3899 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
3900
73d80deb 3901 skb_queue_tail(queue, skb);
1da177e4
LT
3902 } else {
3903 /* Fragmented */
3904 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3905
3906 skb_shinfo(skb)->frag_list = NULL;
3907
3908 /* Queue all fragments atomically */
af3e6359 3909 spin_lock(&queue->lock);
1da177e4 3910
73d80deb 3911 __skb_queue_tail(queue, skb);
e702112f
AE
3912
3913 flags &= ~ACL_START;
3914 flags |= ACL_CONT;
1da177e4
LT
3915 do {
3916 skb = list; list = list->next;
8e87d142 3917
0d48d939 3918 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 3919 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
3920
3921 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
3922
73d80deb 3923 __skb_queue_tail(queue, skb);
1da177e4
LT
3924 } while (list);
3925
af3e6359 3926 spin_unlock(&queue->lock);
1da177e4 3927 }
73d80deb
LAD
3928}
3929
3930void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3931{
ee22be7e 3932 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3933
f0e09510 3934 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3935
ee22be7e 3936 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3937
3eff45ea 3938 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3939}
1da177e4
LT
3940
3941/* Send SCO data */
0d861d8b 3942void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3943{
3944 struct hci_dev *hdev = conn->hdev;
3945 struct hci_sco_hdr hdr;
3946
3947 BT_DBG("%s len %d", hdev->name, skb->len);
3948
aca3192c 3949 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3950 hdr.dlen = skb->len;
3951
badff6d0
ACM
3952 skb_push(skb, HCI_SCO_HDR_SIZE);
3953 skb_reset_transport_header(skb);
9c70220b 3954 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3955
0d48d939 3956 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 3957
1da177e4 3958 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3959 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3960}
1da177e4
LT
3961
3962/* ---- HCI TX task (outgoing data) ---- */
3963
3964/* HCI Connection scheduler */
6039aa73
GP
3965static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
3966 int *quote)
1da177e4
LT
3967{
3968 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 3969 struct hci_conn *conn = NULL, *c;
abc5de8f 3970 unsigned int num = 0, min = ~0;
1da177e4 3971
8e87d142 3972 /* We don't have to lock device here. Connections are always
1da177e4 3973 * added and removed with TX task disabled. */
bf4c6325
GP
3974
3975 rcu_read_lock();
3976
3977 list_for_each_entry_rcu(c, &h->list, list) {
769be974 3978 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 3979 continue;
769be974
MH
3980
3981 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
3982 continue;
3983
1da177e4
LT
3984 num++;
3985
3986 if (c->sent < min) {
3987 min = c->sent;
3988 conn = c;
3989 }
52087a79
LAD
3990
3991 if (hci_conn_num(hdev, type) == num)
3992 break;
1da177e4
LT
3993 }
3994
bf4c6325
GP
3995 rcu_read_unlock();
3996
1da177e4 3997 if (conn) {
6ed58ec5
VT
3998 int cnt, q;
3999
4000 switch (conn->type) {
4001 case ACL_LINK:
4002 cnt = hdev->acl_cnt;
4003 break;
4004 case SCO_LINK:
4005 case ESCO_LINK:
4006 cnt = hdev->sco_cnt;
4007 break;
4008 case LE_LINK:
4009 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4010 break;
4011 default:
4012 cnt = 0;
4013 BT_ERR("Unknown link type");
4014 }
4015
4016 q = cnt / num;
1da177e4
LT
4017 *quote = q ? q : 1;
4018 } else
4019 *quote = 0;
4020
4021 BT_DBG("conn %p quote %d", conn, *quote);
4022 return conn;
4023}
4024
6039aa73 4025static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
4026{
4027 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 4028 struct hci_conn *c;
1da177e4 4029
bae1f5d9 4030 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 4031
bf4c6325
GP
4032 rcu_read_lock();
4033
1da177e4 4034 /* Kill stalled connections */
bf4c6325 4035 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 4036 if (c->type == type && c->sent) {
6ed93dc6
AE
4037 BT_ERR("%s killing stalled connection %pMR",
4038 hdev->name, &c->dst);
bed71748 4039 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
4040 }
4041 }
bf4c6325
GP
4042
4043 rcu_read_unlock();
1da177e4
LT
4044}
4045
6039aa73
GP
4046static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
4047 int *quote)
1da177e4 4048{
73d80deb
LAD
4049 struct hci_conn_hash *h = &hdev->conn_hash;
4050 struct hci_chan *chan = NULL;
abc5de8f 4051 unsigned int num = 0, min = ~0, cur_prio = 0;
1da177e4 4052 struct hci_conn *conn;
73d80deb
LAD
4053 int cnt, q, conn_num = 0;
4054
4055 BT_DBG("%s", hdev->name);
4056
bf4c6325
GP
4057 rcu_read_lock();
4058
4059 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
4060 struct hci_chan *tmp;
4061
4062 if (conn->type != type)
4063 continue;
4064
4065 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4066 continue;
4067
4068 conn_num++;
4069
8192edef 4070 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
4071 struct sk_buff *skb;
4072
4073 if (skb_queue_empty(&tmp->data_q))
4074 continue;
4075
4076 skb = skb_peek(&tmp->data_q);
4077 if (skb->priority < cur_prio)
4078 continue;
4079
4080 if (skb->priority > cur_prio) {
4081 num = 0;
4082 min = ~0;
4083 cur_prio = skb->priority;
4084 }
4085
4086 num++;
4087
4088 if (conn->sent < min) {
4089 min = conn->sent;
4090 chan = tmp;
4091 }
4092 }
4093
4094 if (hci_conn_num(hdev, type) == conn_num)
4095 break;
4096 }
4097
bf4c6325
GP
4098 rcu_read_unlock();
4099
73d80deb
LAD
4100 if (!chan)
4101 return NULL;
4102
4103 switch (chan->conn->type) {
4104 case ACL_LINK:
4105 cnt = hdev->acl_cnt;
4106 break;
bd1eb66b
AE
4107 case AMP_LINK:
4108 cnt = hdev->block_cnt;
4109 break;
73d80deb
LAD
4110 case SCO_LINK:
4111 case ESCO_LINK:
4112 cnt = hdev->sco_cnt;
4113 break;
4114 case LE_LINK:
4115 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
4116 break;
4117 default:
4118 cnt = 0;
4119 BT_ERR("Unknown link type");
4120 }
4121
4122 q = cnt / num;
4123 *quote = q ? q : 1;
4124 BT_DBG("chan %p quote %d", chan, *quote);
4125 return chan;
4126}
4127
02b20f0b
LAD
4128static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
4129{
4130 struct hci_conn_hash *h = &hdev->conn_hash;
4131 struct hci_conn *conn;
4132 int num = 0;
4133
4134 BT_DBG("%s", hdev->name);
4135
bf4c6325
GP
4136 rcu_read_lock();
4137
4138 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
4139 struct hci_chan *chan;
4140
4141 if (conn->type != type)
4142 continue;
4143
4144 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
4145 continue;
4146
4147 num++;
4148
8192edef 4149 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
4150 struct sk_buff *skb;
4151
4152 if (chan->sent) {
4153 chan->sent = 0;
4154 continue;
4155 }
4156
4157 if (skb_queue_empty(&chan->data_q))
4158 continue;
4159
4160 skb = skb_peek(&chan->data_q);
4161 if (skb->priority >= HCI_PRIO_MAX - 1)
4162 continue;
4163
4164 skb->priority = HCI_PRIO_MAX - 1;
4165
4166 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
a8c5fb1a 4167 skb->priority);
02b20f0b
LAD
4168 }
4169
4170 if (hci_conn_num(hdev, type) == num)
4171 break;
4172 }
bf4c6325
GP
4173
4174 rcu_read_unlock();
4175
02b20f0b
LAD
4176}
4177
b71d385a
AE
4178static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4179{
4180 /* Calculate count of blocks used by this packet */
4181 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4182}
4183
6039aa73 4184static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4185{
1da177e4
LT
4186 if (!test_bit(HCI_RAW, &hdev->flags)) {
4187 /* ACL tx timeout must be longer than maximum
4188 * link supervision timeout (40.9 seconds) */
63d2bc1b 4189 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4190 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4191 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4192 }
63d2bc1b 4193}
1da177e4 4194
6039aa73 4195static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
4196{
4197 unsigned int cnt = hdev->acl_cnt;
4198 struct hci_chan *chan;
4199 struct sk_buff *skb;
4200 int quote;
4201
4202 __check_timeout(hdev, cnt);
04837f64 4203
73d80deb 4204 while (hdev->acl_cnt &&
a8c5fb1a 4205 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
4206 u32 priority = (skb_peek(&chan->data_q))->priority;
4207 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 4208 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4209 skb->len, skb->priority);
73d80deb 4210
ec1cce24
LAD
4211 /* Stop if priority has changed */
4212 if (skb->priority < priority)
4213 break;
4214
4215 skb = skb_dequeue(&chan->data_q);
4216
73d80deb 4217 hci_conn_enter_active_mode(chan->conn,
04124681 4218 bt_cb(skb)->force_active);
04837f64 4219
57d17d70 4220 hci_send_frame(hdev, skb);
1da177e4
LT
4221 hdev->acl_last_tx = jiffies;
4222
4223 hdev->acl_cnt--;
73d80deb
LAD
4224 chan->sent++;
4225 chan->conn->sent++;
1da177e4
LT
4226 }
4227 }
02b20f0b
LAD
4228
4229 if (cnt != hdev->acl_cnt)
4230 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
4231}
4232
6039aa73 4233static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 4234{
63d2bc1b 4235 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
4236 struct hci_chan *chan;
4237 struct sk_buff *skb;
4238 int quote;
bd1eb66b 4239 u8 type;
b71d385a 4240
63d2bc1b 4241 __check_timeout(hdev, cnt);
b71d385a 4242
bd1eb66b
AE
4243 BT_DBG("%s", hdev->name);
4244
4245 if (hdev->dev_type == HCI_AMP)
4246 type = AMP_LINK;
4247 else
4248 type = ACL_LINK;
4249
b71d385a 4250 while (hdev->block_cnt > 0 &&
bd1eb66b 4251 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
4252 u32 priority = (skb_peek(&chan->data_q))->priority;
4253 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
4254 int blocks;
4255
4256 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 4257 skb->len, skb->priority);
b71d385a
AE
4258
4259 /* Stop if priority has changed */
4260 if (skb->priority < priority)
4261 break;
4262
4263 skb = skb_dequeue(&chan->data_q);
4264
4265 blocks = __get_blocks(hdev, skb);
4266 if (blocks > hdev->block_cnt)
4267 return;
4268
4269 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 4270 bt_cb(skb)->force_active);
b71d385a 4271
57d17d70 4272 hci_send_frame(hdev, skb);
b71d385a
AE
4273 hdev->acl_last_tx = jiffies;
4274
4275 hdev->block_cnt -= blocks;
4276 quote -= blocks;
4277
4278 chan->sent += blocks;
4279 chan->conn->sent += blocks;
4280 }
4281 }
4282
4283 if (cnt != hdev->block_cnt)
bd1eb66b 4284 hci_prio_recalculate(hdev, type);
b71d385a
AE
4285}
4286
6039aa73 4287static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4288{
4289 BT_DBG("%s", hdev->name);
4290
bd1eb66b
AE
4291 /* No ACL link over BR/EDR controller */
4292 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4293 return;
4294
4295 /* No AMP link over AMP controller */
4296 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4297 return;
4298
4299 switch (hdev->flow_ctl_mode) {
4300 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4301 hci_sched_acl_pkt(hdev);
4302 break;
4303
4304 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4305 hci_sched_acl_blk(hdev);
4306 break;
4307 }
4308}
4309
1da177e4 4310/* Schedule SCO */
6039aa73 4311static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
4312{
4313 struct hci_conn *conn;
4314 struct sk_buff *skb;
4315 int quote;
4316
4317 BT_DBG("%s", hdev->name);
4318
52087a79
LAD
4319 if (!hci_conn_num(hdev, SCO_LINK))
4320 return;
4321
1da177e4
LT
4322 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
4323 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4324 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4325 hci_send_frame(hdev, skb);
1da177e4
LT
4326
4327 conn->sent++;
4328 if (conn->sent == ~0)
4329 conn->sent = 0;
4330 }
4331 }
4332}
4333
6039aa73 4334static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
4335{
4336 struct hci_conn *conn;
4337 struct sk_buff *skb;
4338 int quote;
4339
4340 BT_DBG("%s", hdev->name);
4341
52087a79
LAD
4342 if (!hci_conn_num(hdev, ESCO_LINK))
4343 return;
4344
8fc9ced3
GP
4345 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
4346 &quote))) {
b6a0dc82
MH
4347 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
4348 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 4349 hci_send_frame(hdev, skb);
b6a0dc82
MH
4350
4351 conn->sent++;
4352 if (conn->sent == ~0)
4353 conn->sent = 0;
4354 }
4355 }
4356}
4357
/* Schedule LE data transmission.
 *
 * Drains queued LE frames across channels as handed out by
 * hci_chan_sent(), consuming either the dedicated LE credits (le_cnt)
 * or, when the controller reports no separate LE buffers (le_pkts == 0),
 * the shared ACL credits (acl_cnt).
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the credit pool: dedicated LE buffers or the shared ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember starting credits to detect any activity */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peeked above; dequeue only once we commit to send */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4408
3eff45ea 4409static void hci_tx_work(struct work_struct *work)
1da177e4 4410{
3eff45ea 4411 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
4412 struct sk_buff *skb;
4413
6ed58ec5 4414 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 4415 hdev->sco_cnt, hdev->le_cnt);
1da177e4 4416
52de599e
MH
4417 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
4418 /* Schedule queues and send stuff to HCI driver */
4419 hci_sched_acl(hdev);
4420 hci_sched_sco(hdev);
4421 hci_sched_esco(hdev);
4422 hci_sched_le(hdev);
4423 }
6ed58ec5 4424
1da177e4
LT
4425 /* Send next queued raw (unknown type) packet */
4426 while ((skb = skb_dequeue(&hdev->raw_q)))
57d17d70 4427 hci_send_frame(hdev, skb);
1da177e4
LT
4428}
4429
25985edc 4430/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4431
4432/* ACL data packet */
6039aa73 4433static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4434{
4435 struct hci_acl_hdr *hdr = (void *) skb->data;
4436 struct hci_conn *conn;
4437 __u16 handle, flags;
4438
4439 skb_pull(skb, HCI_ACL_HDR_SIZE);
4440
4441 handle = __le16_to_cpu(hdr->handle);
4442 flags = hci_flags(handle);
4443 handle = hci_handle(handle);
4444
f0e09510 4445 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 4446 handle, flags);
1da177e4
LT
4447
4448 hdev->stat.acl_rx++;
4449
4450 hci_dev_lock(hdev);
4451 conn = hci_conn_hash_lookup_handle(hdev, handle);
4452 hci_dev_unlock(hdev);
8e87d142 4453
1da177e4 4454 if (conn) {
65983fc7 4455 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 4456
1da177e4 4457 /* Send to upper protocol */
686ebf28
UF
4458 l2cap_recv_acldata(conn, skb, flags);
4459 return;
1da177e4 4460 } else {
8e87d142 4461 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 4462 hdev->name, handle);
1da177e4
LT
4463 }
4464
4465 kfree_skb(skb);
4466}
4467
4468/* SCO data packet */
6039aa73 4469static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
4470{
4471 struct hci_sco_hdr *hdr = (void *) skb->data;
4472 struct hci_conn *conn;
4473 __u16 handle;
4474
4475 skb_pull(skb, HCI_SCO_HDR_SIZE);
4476
4477 handle = __le16_to_cpu(hdr->handle);
4478
f0e09510 4479 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
4480
4481 hdev->stat.sco_rx++;
4482
4483 hci_dev_lock(hdev);
4484 conn = hci_conn_hash_lookup_handle(hdev, handle);
4485 hci_dev_unlock(hdev);
4486
4487 if (conn) {
1da177e4 4488 /* Send to upper protocol */
686ebf28
UF
4489 sco_recv_scodata(conn, skb);
4490 return;
1da177e4 4491 } else {
8e87d142 4492 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 4493 hdev->name, handle);
1da177e4
LT
4494 }
4495
4496 kfree_skb(skb);
4497}
4498
9238f36a
JH
4499static bool hci_req_is_complete(struct hci_dev *hdev)
4500{
4501 struct sk_buff *skb;
4502
4503 skb = skb_peek(&hdev->cmd_q);
4504 if (!skb)
4505 return true;
4506
4507 return bt_cb(skb)->req.start;
4508}
4509
42c6b129
JH
4510static void hci_resend_last(struct hci_dev *hdev)
4511{
4512 struct hci_command_hdr *sent;
4513 struct sk_buff *skb;
4514 u16 opcode;
4515
4516 if (!hdev->sent_cmd)
4517 return;
4518
4519 sent = (void *) hdev->sent_cmd->data;
4520 opcode = __le16_to_cpu(sent->opcode);
4521 if (opcode == HCI_OP_RESET)
4522 return;
4523
4524 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4525 if (!skb)
4526 return;
4527
4528 skb_queue_head(&hdev->cmd_q, skb);
4529 queue_work(hdev->workqueue, &hdev->cmd_work);
4530}
4531
9238f36a
JH
/* Handle completion of the command with the given opcode on behalf of
 * the hci_request framework: run the request's completion callback at
 * most once and, on failure, flush the remaining commands of the same
 * request from the command queue.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			/* Start of the next request: put it back and stop */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
4597
/* RX work: drain hdev->rx_q, copying each packet to the monitor and
 * (in promiscuous mode) to raw sockets, then dispatching it to the
 * matching protocol handler by packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw or user-channel mode the kernel stack does not
		 * process packets itself; drop after the copies above.
		 */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}
}
4653
/* Command work: if the controller has a free command credit, transmit
 * the next queued HCI command, keeping a clone in hdev->sent_cmd for
 * response matching, and manage the command timeout timer.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Release the previous sent_cmd before storing the clone
		 * of the command being sent now.
		 */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* While HCI_RESET is set no command timeout runs */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue the command and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
This page took 1.056951 seconds and 5 git commands to generate.