/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <linux/crypto.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
37
/* Forward declarations of the per-device RX/CMD/TX work handlers. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list, protected by hci_dev_list_lock */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list, protected by hci_cb_list_lock */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering (allocates the hciX index) */
static DEFINE_IDA(hci_index_ida);
1da177e4
LT
/* ---- HCI notifications ---- */

/* Forward a device-level event (e.g. register/unregister/up/down) to the
 * HCI socket layer so listeners can be informed.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
59
baf27f6e
MH
/* ---- HCI debugfs entries ---- */

/* debugfs "dut_mode": report whether Device Under Test mode is active,
 * as a single 'Y'/'N' character plus newline.
 */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "dut_mode" write: parse a boolean string and enable DUT mode
 * (HCI_OP_ENABLE_DUT_MODE) or leave it via a controller reset. Requires
 * the device to be up; returns -EALREADY when no state change is needed.
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First byte of the synchronous response is the HCI status code */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	/* Command succeeded, so flip the cached flag */
	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
126
dfb826a8
MH
/* debugfs "features": dump all local feature pages (up to max_page) and,
 * for LE capable controllers, the LE feature byte array.
 */
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
164
70afe0b8
MH
/* debugfs "blacklist": list all blacklisted addresses with their type. */
static int blacklist_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bdaddr_list *b;

	hci_dev_lock(hdev);
	list_for_each_entry(b, &hdev->blacklist, list)
		seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
	hci_dev_unlock(hdev);

	return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
	return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
	.open		= blacklist_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
189
47219839
MH
/* debugfs "uuids": print all registered service UUIDs, one per line. */
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
224
baf27f6e
MH
/* debugfs "inquiry_cache": dump every entry of the discovery (inquiry)
 * cache, one line per remote device.
 */
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
260
02d08d15
MH
/* debugfs "link_keys": dump all stored BR/EDR link keys (address, type,
 * key value, PIN length).
 */
static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
288
babdbb3c
MH
/* debugfs "dev_class": print the 3-byte Class of Device, MSB first. */
static int dev_class_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
		   hdev->dev_class[1], hdev->dev_class[0]);
	hci_dev_unlock(hdev);

	return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
	return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
	.open		= dev_class_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
312
041000b9
MH
/* debugfs "voice_setting" (read-only): expose hdev->voice_setting. */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");
326
ebd1e33b
MH
/* debugfs "auto_accept_delay": get/set the auto-accept delay value. */
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
351
06f5b778
MH
/* debugfs "ssp_debug_mode": toggle Simple Pairing debug mode on the
 * controller via HCI_OP_WRITE_SSP_DEBUG_MODE. Only 0/1 accepted and the
 * device must be up; the cached value is updated only on success.
 */
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First response byte is the HCI status code */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");
400
5afeac14
MH
/* debugfs "force_sc_support": report the HCI_FORCE_SC flag as 'Y'/'N'. */
static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "force_sc_support" write: toggle the HCI_FORCE_SC flag.
 * Unlike dut_mode this is only a host-side flag, so it may only be
 * changed while the device is down (-EBUSY otherwise).
 */
static ssize_t force_sc_support_write(struct file *file,
				      const char __user *user_buf,
				      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;

	if (test_bit(HCI_UP, &hdev->flags))
		return -EBUSY;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_FORCE_SC, &hdev->dev_flags);

	return count;
}

static const struct file_operations force_sc_support_fops = {
	.open		= simple_open,
	.read		= force_sc_support_read,
	.write		= force_sc_support_write,
	.llseek		= default_llseek,
};
446
134c2a89
MH
/* debugfs "sc_only_mode" (read-only): report HCI_SC_ONLY as 'Y'/'N'. */
static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations sc_only_mode_fops = {
	.open		= simple_open,
	.read		= sc_only_mode_read,
	.llseek		= default_llseek,
};
464
2bfa3531
MH
/* debugfs "idle_timeout": get/set the connection idle timeout in msecs.
 * 0 disables it; otherwise the value must be within 500..3600000.
 */
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");
492
/* debugfs "sniff_min_interval": get/set the minimum sniff interval.
 * Must be non-zero, even, and not above the current maximum.
 */
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");
520
/* debugfs "sniff_max_interval": get/set the maximum sniff interval.
 * Must be non-zero, even, and not below the current minimum.
 */
static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
548
e7b8fc92
MH
/* debugfs "static_address" (read-only): print the LE static address. */
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
571
92202185
MH
/* debugfs "own_address_type": get/set the LE own address type (0 or 1). */
static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");
599
8f8625cd
MH
/* debugfs "long_term_keys": dump all stored SMP long term keys (address,
 * address type, authentication, key type, key size, EDIV, Rand, value).
 */
static int long_term_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->long_term_keys) {
		struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
		seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
			   &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
			   ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
			   8, ltk->rand, 16, ltk->val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
	.open		= long_term_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
629
4e70c7e7
MH
/* debugfs "conn_min_interval": get/set the minimum LE connection
 * interval. Valid range is 0x0006..0x0c80 and it may not exceed the
 * current maximum.
 */
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");
657
/* debugfs "conn_max_interval": get/set the maximum LE connection
 * interval. Valid range is 0x0006..0x0c80 and it may not fall below the
 * current minimum.
 */
static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
685
89863109
JR
/* debugfs "6lowpan": report whether 6LoWPAN support is enabled. */
static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "6lowpan" write: toggle the host-side HCI_6LOWPAN_ENABLED
 * flag from a boolean string.
 */
static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};
728
1da177e4
LT
/* ---- HCI requests ---- */

/* Completion callback for synchronous requests: store the result, mark
 * the request done and wake up the waiter in __hci_req_sync() or
 * __hci_cmd_sync_ev().
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
741
/* Cancel a pending synchronous request with the given error and wake up
 * the waiter.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
752
77a63e0a
FW
/* Take ownership of the last received event (hdev->recv_evt) and validate
 * it against the expected event code, or against a Command Complete for
 * the given opcode when @event is 0. Returns the skb with its header(s)
 * pulled on success, ERR_PTR(-ENODATA) otherwise. Caller owns/frees the
 * returned skb.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	/* Detach the stored event from the device under the lock */
	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event rather than Command Complete */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
807
/* Send a single HCI command and sleep (interruptibly, up to @timeout)
 * until it completes with @event (or Command Complete when @event is 0).
 * Returns the response skb or an ERR_PTR. Caller must hold the request
 * lock (see __hci_cmd_sync callers such as dut_mode_write).
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the wait timed out */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
861
/* Convenience wrapper around __hci_cmd_sync_ev() that waits for the
 * default Command Complete event (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
868
/* Execute request and wait for completion. */

/* Build a request by calling @func(&req, opt), run it, and sleep
 * (interruptibly, up to @timeout) until hci_req_sync_complete() or
 * hci_req_cancel() wakes us. Returns 0 on success or a negative errno.
 * Callers must hold the request lock (see hci_req_sync()).
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the wait timed out */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
932
/* Public entry point for synchronous requests: take the request lock to
 * serialize against other requests and run __hci_req_sync(). Fails with
 * -ENETDOWN when the device is not up.
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
950
/* Request builder: queue an HCI_OP_RESET and flag the device as being
 * reset so event handling can account for it.
 */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}
959
/* Stage-1 init for BR/EDR controllers: packet-based flow control plus
 * the basic identity reads.
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
973
/* Stage-1 init for AMP controllers: block-based flow control plus the
 * AMP-specific capability reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
999
/* First init stage: optionally reset the controller, then dispatch to
 * the BR/EDR or AMP specific init sequence based on the device type.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
1024
/* BR/EDR setup stage: queue the standard capability/configuration reads
 * and the baseline event filter / connection accept timeout writes.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1066
/* LE setup stage: queue the LE capability reads and implicitly enable LE
 * on single-mode (LE-only) controllers.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1090
/* Select the inquiry mode to configure: 0x02 (extended) when supported,
 * 0x01 (with RSSI) when the RSSI feature bit is set, and additionally
 * 0x01 for a set of controllers known to work despite not advertising
 * the feature (matched by manufacturer/revision/LMP subversion),
 * otherwise 0x00 (standard).
 */
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
1118
/* Queue a Write Inquiry Mode command using the mode selected by
 * hci_get_inquiry_mode().
 */
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}
1127
/* Build the HCI event mask from the controller's LMP features and queue
 * a Set Event Mask command (plus an LE Set Event Mask command for LE
 * capable controllers). Only events the controller can actually generate
 * are unmasked.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		/* Reuse the buffer for the LE event mask; 0x1f unmasks
		 * bits 0-4 of the LE meta event mask.
		 */
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1208
/* Second stage of controller initialization: transport-specific base
 * setup (BR/EDR and/or LE), event mask, and optional SSP/EIR, inquiry
 * mode, TX power and authentication configuration.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP supported but not enabled by the host:
			 * clear any stale EIR data instead.
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1272
42c6b129 1273static void hci_setup_link_policy(struct hci_request *req)
2177bab5 1274{
42c6b129 1275 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1276 struct hci_cp_write_def_link_policy cp;
1277 u16 link_policy = 0;
1278
1279 if (lmp_rswitch_capable(hdev))
1280 link_policy |= HCI_LP_RSWITCH;
1281 if (lmp_hold_capable(hdev))
1282 link_policy |= HCI_LP_HOLD;
1283 if (lmp_sniff_capable(hdev))
1284 link_policy |= HCI_LP_SNIFF;
1285 if (lmp_park_capable(hdev))
1286 link_policy |= HCI_LP_PARK;
1287
1288 cp.policy = cpu_to_le16(link_policy);
42c6b129 1289 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
1290}
1291
42c6b129 1292static void hci_set_le_support(struct hci_request *req)
2177bab5 1293{
42c6b129 1294 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1295 struct hci_cp_write_le_host_supported cp;
1296
c73eee91
JH
1297 /* LE-only devices do not support explicit enablement */
1298 if (!lmp_bredr_capable(hdev))
1299 return;
1300
2177bab5
JH
1301 memset(&cp, 0, sizeof(cp));
1302
1303 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1304 cp.le = 0x01;
1305 cp.simul = lmp_le_br_capable(hdev);
1306 }
1307
1308 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
1309 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1310 &cp);
2177bab5
JH
1311}
1312
/* Build and queue the Set Event Mask Page 2 command: unmask the CSB
 * (Connectionless Slave Broadcast) and Authenticated Payload Timeout
 * events that match the controller's capabilities.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1344
/* Third stage of controller initialization: stored link key cleanup,
 * default link policy, LE address/host configuration and reading the
 * remaining extended feature pages.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Write Default Link Policy only if the command is supported */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
			/* If the controller has a public BD_ADDR, then
			 * by default use that one. If this is a LE only
			 * controller without a public address, default
			 * to the random address.
			 */
			if (bacmp(&hdev->bdaddr, BDADDR_ANY))
				hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
			else
				hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1401
/* Fourth stage of controller initialization: second event mask page,
 * synchronization train parameters and Secure Connections support.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1423
/* Run the staged controller initialization (init1..init4) synchronously
 * and, during the initial HCI_SETUP phase only, create the per-device
 * debugfs entries. AMP controllers stop after the first stage.
 *
 * Returns 0 on success or a negative errno from the request machinery.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries common to all controller types */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	/* BR/EDR-only entries */
	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	/* Simple Pairing / Secure Connections entries */
	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	/* Sniff mode tuning entries */
	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	/* LE-only entries */
	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
1525
42c6b129 1526static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1527{
1528 __u8 scan = opt;
1529
42c6b129 1530 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1531
1532 /* Inquiry and Page scans */
42c6b129 1533 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1534}
1535
42c6b129 1536static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1537{
1538 __u8 auth = opt;
1539
42c6b129 1540 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1541
1542 /* Authentication */
42c6b129 1543 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1544}
1545
42c6b129 1546static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1547{
1548 __u8 encrypt = opt;
1549
42c6b129 1550 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1551
e4e8e37c 1552 /* Encryption */
42c6b129 1553 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1554}
1555
42c6b129 1556static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1557{
1558 __le16 policy = cpu_to_le16(opt);
1559
42c6b129 1560 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1561
1562 /* Default link policy */
42c6b129 1563 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1564}
1565
8e87d142 1566/* Get HCI device by index.
1da177e4
LT
1567 * Device is held on return. */
1568struct hci_dev *hci_dev_get(int index)
1569{
8035ded4 1570 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1571
1572 BT_DBG("%d", index);
1573
1574 if (index < 0)
1575 return NULL;
1576
1577 read_lock(&hci_dev_list_lock);
8035ded4 1578 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1579 if (d->id == index) {
1580 hdev = hci_dev_hold(d);
1581 break;
1582 }
1583 }
1584 read_unlock(&hci_dev_list_lock);
1585 return hdev;
1586}
1da177e4
LT
1587
1588/* ---- Inquiry support ---- */
ff9ef578 1589
30dc78e1
JH
1590bool hci_discovery_active(struct hci_dev *hdev)
1591{
1592 struct discovery_state *discov = &hdev->discovery;
1593
6fbe195d 1594 switch (discov->state) {
343f935b 1595 case DISCOVERY_FINDING:
6fbe195d 1596 case DISCOVERY_RESOLVING:
30dc78e1
JH
1597 return true;
1598
6fbe195d
AG
1599 default:
1600 return false;
1601 }
30dc78e1
JH
1602}
1603
ff9ef578
JH
1604void hci_discovery_set_state(struct hci_dev *hdev, int state)
1605{
1606 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1607
1608 if (hdev->discovery.state == state)
1609 return;
1610
1611 switch (state) {
1612 case DISCOVERY_STOPPED:
7b99b659
AG
1613 if (hdev->discovery.state != DISCOVERY_STARTING)
1614 mgmt_discovering(hdev, 0);
ff9ef578
JH
1615 break;
1616 case DISCOVERY_STARTING:
1617 break;
343f935b 1618 case DISCOVERY_FINDING:
ff9ef578
JH
1619 mgmt_discovering(hdev, 1);
1620 break;
30dc78e1
JH
1621 case DISCOVERY_RESOLVING:
1622 break;
ff9ef578
JH
1623 case DISCOVERY_STOPPING:
1624 break;
1625 }
1626
1627 hdev->discovery.state = state;
1628}
1629
1f9b9a5d 1630void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1631{
30883512 1632 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1633 struct inquiry_entry *p, *n;
1da177e4 1634
561aafbc
JH
1635 list_for_each_entry_safe(p, n, &cache->all, all) {
1636 list_del(&p->all);
b57c1a56 1637 kfree(p);
1da177e4 1638 }
561aafbc
JH
1639
1640 INIT_LIST_HEAD(&cache->unknown);
1641 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1642}
1643
a8c5fb1a
GP
1644struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1645 bdaddr_t *bdaddr)
1da177e4 1646{
30883512 1647 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1648 struct inquiry_entry *e;
1649
6ed93dc6 1650 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1651
561aafbc
JH
1652 list_for_each_entry(e, &cache->all, all) {
1653 if (!bacmp(&e->data.bdaddr, bdaddr))
1654 return e;
1655 }
1656
1657 return NULL;
1658}
1659
1660struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1661 bdaddr_t *bdaddr)
561aafbc 1662{
30883512 1663 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1664 struct inquiry_entry *e;
1665
6ed93dc6 1666 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1667
1668 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1669 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1670 return e;
1671 }
1672
1673 return NULL;
1da177e4
LT
1674}
1675
30dc78e1 1676struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1677 bdaddr_t *bdaddr,
1678 int state)
30dc78e1
JH
1679{
1680 struct discovery_state *cache = &hdev->discovery;
1681 struct inquiry_entry *e;
1682
6ed93dc6 1683 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1684
1685 list_for_each_entry(e, &cache->resolve, list) {
1686 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1687 return e;
1688 if (!bacmp(&e->data.bdaddr, bdaddr))
1689 return e;
1690 }
1691
1692 return NULL;
1693}
1694
/* Re-insert @ie into the resolve list so that the list stays ordered by
 * descending signal strength (smaller abs(rssi) first), while entries
 * whose name resolution is already pending keep their position at the
 * front.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink first; the entry is re-added at its new position below */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1713
/* Insert or refresh the inquiry cache entry for @data->bdaddr.
 *
 * @name_known: caller already knows the remote name (no resolve needed)
 * @ssp: optional out parameter, set when the remote indicated SSP support
 *
 * Returns true when the entry's name is (now) known, false when the name
 * still needs resolving or the entry could not be allocated.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry result invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Report SSP support remembered from an earlier result */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed: keep the resolve list sorted */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote an entry whose name just became known, unless a name
	 * request is already in flight for it.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1771
1772static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1773{
30883512 1774 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1775 struct inquiry_info *info = (struct inquiry_info *) buf;
1776 struct inquiry_entry *e;
1777 int copied = 0;
1778
561aafbc 1779 list_for_each_entry(e, &cache->all, all) {
1da177e4 1780 struct inquiry_data *data = &e->data;
b57c1a56
JH
1781
1782 if (copied >= num)
1783 break;
1784
1da177e4
LT
1785 bacpy(&info->bdaddr, &data->bdaddr);
1786 info->pscan_rep_mode = data->pscan_rep_mode;
1787 info->pscan_period_mode = data->pscan_period_mode;
1788 info->pscan_mode = data->pscan_mode;
1789 memcpy(info->dev_class, data->dev_class, 3);
1790 info->clock_offset = data->clock_offset;
b57c1a56 1791
1da177e4 1792 info++;
b57c1a56 1793 copied++;
1da177e4
LT
1794 }
1795
1796 BT_DBG("cache %p, copied %d", cache, copied);
1797 return copied;
1798}
1799
42c6b129 1800static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1801{
1802 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 1803 struct hci_dev *hdev = req->hdev;
1da177e4
LT
1804 struct hci_cp_inquiry cp;
1805
1806 BT_DBG("%s", hdev->name);
1807
1808 if (test_bit(HCI_INQUIRY, &hdev->flags))
1809 return;
1810
1811 /* Start Inquiry */
1812 memcpy(&cp.lap, &ir->lap, 3);
1813 cp.length = ir->length;
1814 cp.num_rsp = ir->num_rsp;
42c6b129 1815 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
1816}
1817
3e13fa1e
AG
1818static int wait_inquiry(void *word)
1819{
1820 schedule();
1821 return signal_pending(current);
1822}
1823
1da177e4
LT
1824int hci_inquiry(void __user *arg)
1825{
1826 __u8 __user *ptr = arg;
1827 struct hci_inquiry_req ir;
1828 struct hci_dev *hdev;
1829 int err = 0, do_inquiry = 0, max_rsp;
1830 long timeo;
1831 __u8 *buf;
1832
1833 if (copy_from_user(&ir, ptr, sizeof(ir)))
1834 return -EFAULT;
1835
5a08ecce
AE
1836 hdev = hci_dev_get(ir.dev_id);
1837 if (!hdev)
1da177e4
LT
1838 return -ENODEV;
1839
0736cfa8
MH
1840 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1841 err = -EBUSY;
1842 goto done;
1843 }
1844
5b69bef5
MH
1845 if (hdev->dev_type != HCI_BREDR) {
1846 err = -EOPNOTSUPP;
1847 goto done;
1848 }
1849
56f87901
JH
1850 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1851 err = -EOPNOTSUPP;
1852 goto done;
1853 }
1854
09fd0de5 1855 hci_dev_lock(hdev);
8e87d142 1856 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1857 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1858 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1859 do_inquiry = 1;
1860 }
09fd0de5 1861 hci_dev_unlock(hdev);
1da177e4 1862
04837f64 1863 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1864
1865 if (do_inquiry) {
01178cd4
JH
1866 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1867 timeo);
70f23020
AE
1868 if (err < 0)
1869 goto done;
3e13fa1e
AG
1870
1871 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1872 * cleared). If it is interrupted by a signal, return -EINTR.
1873 */
1874 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1875 TASK_INTERRUPTIBLE))
1876 return -EINTR;
70f23020 1877 }
1da177e4 1878
8fc9ced3
GP
1879 /* for unlimited number of responses we will use buffer with
1880 * 255 entries
1881 */
1da177e4
LT
1882 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1883
1884 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1885 * copy it to the user space.
1886 */
01df8c31 1887 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 1888 if (!buf) {
1da177e4
LT
1889 err = -ENOMEM;
1890 goto done;
1891 }
1892
09fd0de5 1893 hci_dev_lock(hdev);
1da177e4 1894 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1895 hci_dev_unlock(hdev);
1da177e4
LT
1896
1897 BT_DBG("num_rsp %d", ir.num_rsp);
1898
1899 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1900 ptr += sizeof(ir);
1901 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1902 ir.num_rsp))
1da177e4 1903 err = -EFAULT;
8e87d142 1904 } else
1da177e4
LT
1905 err = -EFAULT;
1906
1907 kfree(buf);
1908
1909done:
1910 hci_dev_put(hdev);
1911 return err;
1912}
1913
/* Bring the device up: validate preconditions (not unregistering, not
 * rfkilled, has a usable address), call the driver open callback, run
 * the optional driver setup and the staged HCI init, and announce the
 * powered state. On init failure all queues/works are cleaned up and the
 * transport is closed again.
 *
 * Returns 0 on success or a negative errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the transport via the driver callback */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only during the initial SETUP phase */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw devices and user-channel devices skip the staged
		 * HCI initialization.
		 */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2020
/* ---- HCI ioctl helpers ---- */

/* HCIDEVUP handler: look up the device by index, make sure any pending
 * auto-power-off work is flushed, then perform the actual power on.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
2052
/* Bring the device down: cancel pending work, flush the RX/TX paths,
 * tear down discoverability and timers, flush connection and inquiry
 * state, optionally reset the controller, drain all queues and finally
 * close the transport. Safe to call on a device that is already down.
 *
 * Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Cancel a pending discoverable timeout and clear the flags */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* Drop a partially received event, if any */
	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2150
2151int hci_dev_close(__u16 dev)
2152{
2153 struct hci_dev *hdev;
2154 int err;
2155
70f23020
AE
2156 hdev = hci_dev_get(dev);
2157 if (!hdev)
1da177e4 2158 return -ENODEV;
8ee56540 2159
0736cfa8
MH
2160 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2161 err = -EBUSY;
2162 goto done;
2163 }
2164
8ee56540
MH
2165 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2166 cancel_delayed_work(&hdev->power_off);
2167
1da177e4 2168 err = hci_dev_do_close(hdev);
8ee56540 2169
0736cfa8 2170done:
1da177e4
LT
2171 hci_dev_put(hdev);
2172 return err;
2173}
2174
/* ioctl entry point for resetting a running HCI device: flushes queues,
 * clears the inquiry cache and connection hash, resets flow-control
 * counters and (unless in raw mode) issues an HCI Reset command.
 * Returns 0 or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Reset only makes sense on a powered-up device */
	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	/* Device is exclusively owned by a user channel socket */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Give the driver a chance to flush its own queues */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore command/packet flow-control counters to their defaults */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2219
2220int hci_dev_reset_stat(__u16 dev)
2221{
2222 struct hci_dev *hdev;
2223 int ret = 0;
2224
70f23020
AE
2225 hdev = hci_dev_get(dev);
2226 if (!hdev)
1da177e4
LT
2227 return -ENODEV;
2228
0736cfa8
MH
2229 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2230 ret = -EBUSY;
2231 goto done;
2232 }
2233
1da177e4
LT
2234 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2235
0736cfa8 2236done:
1da177e4 2237 hci_dev_put(hdev);
1da177e4
LT
2238 return ret;
2239}
2240
/* Handle the legacy per-device ioctls (HCISETAUTH, HCISETSCAN, ...).
 * Copies the request from user space, validates that the target is an
 * enabled BR/EDR controller not owned by a user channel, then either
 * runs the matching synchronous HCI request or updates local settings
 * directly. Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Device is exclusively owned by a user channel socket */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* These ioctls are only defined for BR/EDR controllers ... */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	/* ... with BR/EDR actually enabled */
	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs the MTU in the upper 16 bits and the
		 * packet count in the lower 16 bits */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2331
/* HCIGETDEVLIST ioctl helper: copy id/flags for up to dev_num registered
 * controllers back to user space. Also opportunistically cancels pending
 * auto-power-off work and marks non-mgmt devices pairable while walking
 * the list. Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kzalloc below stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) users expect devices to be pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2378
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info snapshot for the
 * requested device and copy it to user space. For LE-only controllers the
 * ACL fields carry the LE buffer parameters instead. Returns 0 or a
 * negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A legacy user querying the device takes over power management */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Encode bus type in the low nibble and device type in bits 4-5 */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only: report LE buffer info in the ACL fields */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2427
2428/* ---- Interface to HCI drivers ---- */
2429
/* rfkill callback: track the RFKILLED flag and, when blocked outside of
 * the setup phase, power the controller down. User-channel devices are
 * left alone (-EBUSY). Returns 0 on success.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		/* During setup the close is deferred until setup completes */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}
2449
/* rfkill operations registered for every HCI device in hci_register_dev() */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2453
/* Deferred power-on work: open the device and then re-check conditions
 * (rfkill, missing address) that were deliberately ignored during setup.
 * If auto-off is armed, schedule the delayed power-off; finally announce
 * the controller to the management interface once setup is done.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Power back off unless someone claims the device in time */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2485
/* Deferred power-off work (scheduled e.g. by the auto-off timeout):
 * simply performs a full device close.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2495
16ab91ab
JH
2496static void hci_discov_off(struct work_struct *work)
2497{
2498 struct hci_dev *hdev;
16ab91ab
JH
2499
2500 hdev = container_of(work, struct hci_dev, discov_off.work);
2501
2502 BT_DBG("%s", hdev->name);
2503
d1967ff8 2504 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2505}
2506
2aeb9a1a
JH
2507int hci_uuids_clear(struct hci_dev *hdev)
2508{
4821002c 2509 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2510
4821002c
JH
2511 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2512 list_del(&uuid->list);
2aeb9a1a
JH
2513 kfree(uuid);
2514 }
2515
2516 return 0;
2517}
2518
55ed8ca1
JH
2519int hci_link_keys_clear(struct hci_dev *hdev)
2520{
2521 struct list_head *p, *n;
2522
2523 list_for_each_safe(p, n, &hdev->link_keys) {
2524 struct link_key *key;
2525
2526 key = list_entry(p, struct link_key, list);
2527
2528 list_del(p);
2529 kfree(key);
2530 }
2531
2532 return 0;
2533}
2534
b899efaf
VCG
2535int hci_smp_ltks_clear(struct hci_dev *hdev)
2536{
2537 struct smp_ltk *k, *tmp;
2538
2539 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2540 list_del(&k->list);
2541 kfree(k);
2542 }
2543
2544 return 0;
2545}
2546
55ed8ca1
JH
2547struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2548{
8035ded4 2549 struct link_key *k;
55ed8ca1 2550
8035ded4 2551 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2552 if (bacmp(bdaddr, &k->bdaddr) == 0)
2553 return k;
55ed8ca1
JH
2554
2555 return NULL;
2556}
2557
745c0ce3 2558static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2559 u8 key_type, u8 old_key_type)
d25e28ab
JH
2560{
2561 /* Legacy key */
2562 if (key_type < 0x03)
745c0ce3 2563 return true;
d25e28ab
JH
2564
2565 /* Debug keys are insecure so don't store them persistently */
2566 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2567 return false;
d25e28ab
JH
2568
2569 /* Changed combination key and there's no previous one */
2570 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2571 return false;
d25e28ab
JH
2572
2573 /* Security mode 3 case */
2574 if (!conn)
745c0ce3 2575 return true;
d25e28ab
JH
2576
2577 /* Neither local nor remote side had no-bonding as requirement */
2578 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2579 return true;
d25e28ab
JH
2580
2581 /* Local side had dedicated bonding as requirement */
2582 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2583 return true;
d25e28ab
JH
2584
2585 /* Remote side had dedicated bonding as requirement */
2586 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2587 return true;
d25e28ab
JH
2588
2589 /* If none of the above criteria match, then don't store the key
2590 * persistently */
745c0ce3 2591 return false;
d25e28ab
JH
2592}
2593
98a0b845
JH
2594static bool ltk_type_master(u8 type)
2595{
2596 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2597 return true;
2598
2599 return false;
2600}
2601
/* Look up a long term key by its EDiv/Rand pair and role (master vs
 * slave key). Returns the matching entry or NULL.
 */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
			     bool master)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		/* The same ediv/rand may exist for both roles */
		if (ltk_type_master(k->type) != master)
			continue;

		return k;
	}

	return NULL;
}
75d262c2 2620
c9839a11 2621struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 2622 u8 addr_type, bool master)
75d262c2 2623{
c9839a11 2624 struct smp_ltk *k;
75d262c2 2625
c9839a11
VCG
2626 list_for_each_entry(k, &hdev->long_term_keys, list)
2627 if (addr_type == k->bdaddr_type &&
98a0b845
JH
2628 bacmp(bdaddr, &k->bdaddr) == 0 &&
2629 ltk_type_master(k->type) == master)
75d262c2
VCG
2630 return k;
2631
2632 return NULL;
2633}
75d262c2 2634
d25e28ab 2635int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 2636 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
2637{
2638 struct link_key *key, *old_key;
745c0ce3
VA
2639 u8 old_key_type;
2640 bool persistent;
55ed8ca1
JH
2641
2642 old_key = hci_find_link_key(hdev, bdaddr);
2643 if (old_key) {
2644 old_key_type = old_key->type;
2645 key = old_key;
2646 } else {
12adcf3a 2647 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
2648 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2649 if (!key)
2650 return -ENOMEM;
2651 list_add(&key->list, &hdev->link_keys);
2652 }
2653
6ed93dc6 2654 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2655
d25e28ab
JH
2656 /* Some buggy controller combinations generate a changed
2657 * combination key for legacy pairing even when there's no
2658 * previous key */
2659 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2660 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2661 type = HCI_LK_COMBINATION;
655fe6ec
JH
2662 if (conn)
2663 conn->key_type = type;
2664 }
d25e28ab 2665
55ed8ca1 2666 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2667 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2668 key->pin_len = pin_len;
2669
b6020ba0 2670 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2671 key->type = old_key_type;
4748fed2
JH
2672 else
2673 key->type = type;
2674
4df378a1
JH
2675 if (!new_key)
2676 return 0;
2677
2678 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2679
744cf19e 2680 mgmt_new_link_key(hdev, key, persistent);
4df378a1 2681
6ec5bcad
VA
2682 if (conn)
2683 conn->flush_key = !persistent;
55ed8ca1
JH
2684
2685 return 0;
2686}
2687
c9839a11 2688int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 2689 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 2690 ediv, u8 rand[8])
75d262c2 2691{
c9839a11 2692 struct smp_ltk *key, *old_key;
98a0b845 2693 bool master = ltk_type_master(type);
0fe442ff 2694 u8 persistent;
75d262c2 2695
98a0b845 2696 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
c9839a11 2697 if (old_key)
75d262c2 2698 key = old_key;
c9839a11
VCG
2699 else {
2700 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
2701 if (!key)
2702 return -ENOMEM;
c9839a11 2703 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2704 }
2705
75d262c2 2706 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2707 key->bdaddr_type = addr_type;
2708 memcpy(key->val, tk, sizeof(key->val));
2709 key->authenticated = authenticated;
2710 key->ediv = ediv;
2711 key->enc_size = enc_size;
2712 key->type = type;
2713 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 2714
c9839a11
VCG
2715 if (!new_key)
2716 return 0;
75d262c2 2717
0fe442ff
MH
2718 if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
2719 persistent = 0;
2720 else
2721 persistent = 1;
2722
21b93b75 2723 if (type == HCI_SMP_LTK || type == HCI_SMP_LTK_SLAVE)
0fe442ff 2724 mgmt_new_ltk(hdev, key, persistent);
261cc5aa 2725
75d262c2
VCG
2726 return 0;
2727}
2728
55ed8ca1
JH
2729int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2730{
2731 struct link_key *key;
2732
2733 key = hci_find_link_key(hdev, bdaddr);
2734 if (!key)
2735 return -ENOENT;
2736
6ed93dc6 2737 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
2738
2739 list_del(&key->list);
2740 kfree(key);
2741
2742 return 0;
2743}
2744
/* Delete every long term key stored for bdaddr (both roles and all
 * address types match). Always returns 0, even if nothing was removed.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
2761
6bd32326 2762/* HCI command timer function */
bda4f23a 2763static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
2764{
2765 struct hci_dev *hdev = (void *) arg;
2766
bda4f23a
AE
2767 if (hdev->sent_cmd) {
2768 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2769 u16 opcode = __le16_to_cpu(sent->opcode);
2770
2771 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2772 } else {
2773 BT_ERR("%s command tx timeout", hdev->name);
2774 }
2775
6bd32326 2776 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2777 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2778}
2779
2763eda6 2780struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 2781 bdaddr_t *bdaddr)
2763eda6
SJ
2782{
2783 struct oob_data *data;
2784
2785 list_for_each_entry(data, &hdev->remote_oob_data, list)
2786 if (bacmp(bdaddr, &data->bdaddr) == 0)
2787 return data;
2788
2789 return NULL;
2790}
2791
2792int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2793{
2794 struct oob_data *data;
2795
2796 data = hci_find_remote_oob_data(hdev, bdaddr);
2797 if (!data)
2798 return -ENOENT;
2799
6ed93dc6 2800 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
2801
2802 list_del(&data->list);
2803 kfree(data);
2804
2805 return 0;
2806}
2807
2808int hci_remote_oob_data_clear(struct hci_dev *hdev)
2809{
2810 struct oob_data *data, *n;
2811
2812 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2813 list_del(&data->list);
2814 kfree(data);
2815 }
2816
2817 return 0;
2818}
2819
/* Store (or refresh) P-192 remote OOB data for bdaddr. The P-256 fields
 * are zeroed since this variant carries legacy-only data; use
 * hci_add_remote_oob_ext_data() for Secure Connections controllers.
 * Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	/* No P-256 values in the legacy variant */
	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2845
/* Store (or refresh) extended remote OOB data for bdaddr carrying both
 * the P-192 and P-256 hash/randomizer pairs (Secure Connections).
 * Returns 0 or -ENOMEM.
 */
int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2872
b9ee0a78
MH
2873struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2874 bdaddr_t *bdaddr, u8 type)
b2a66aad 2875{
8035ded4 2876 struct bdaddr_list *b;
b2a66aad 2877
b9ee0a78
MH
2878 list_for_each_entry(b, &hdev->blacklist, list) {
2879 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2880 return b;
b9ee0a78 2881 }
b2a66aad
AJ
2882
2883 return NULL;
2884}
2885
2886int hci_blacklist_clear(struct hci_dev *hdev)
2887{
2888 struct list_head *p, *n;
2889
2890 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 2891 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
2892
2893 list_del(p);
2894 kfree(b);
2895 }
2896
2897 return 0;
2898}
2899
88c1fe4b 2900int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2901{
2902 struct bdaddr_list *entry;
b2a66aad 2903
b9ee0a78 2904 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2905 return -EBADF;
2906
b9ee0a78 2907 if (hci_blacklist_lookup(hdev, bdaddr, type))
5e762444 2908 return -EEXIST;
b2a66aad
AJ
2909
2910 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
2911 if (!entry)
2912 return -ENOMEM;
b2a66aad
AJ
2913
2914 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2915 entry->bdaddr_type = type;
b2a66aad
AJ
2916
2917 list_add(&entry->list, &hdev->blacklist);
2918
88c1fe4b 2919 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
2920}
2921
88c1fe4b 2922int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2923{
2924 struct bdaddr_list *entry;
b2a66aad 2925
b9ee0a78 2926 if (!bacmp(bdaddr, BDADDR_ANY))
5e762444 2927 return hci_blacklist_clear(hdev);
b2a66aad 2928
b9ee0a78 2929 entry = hci_blacklist_lookup(hdev, bdaddr, type);
1ec918ce 2930 if (!entry)
5e762444 2931 return -ENOENT;
b2a66aad
AJ
2932
2933 list_del(&entry->list);
2934 kfree(entry);
2935
88c1fe4b 2936 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
2937}
2938
15819a70
AG
2939/* This function requires the caller holds hdev->lock */
2940struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2941 bdaddr_t *addr, u8 addr_type)
2942{
2943 struct hci_conn_params *params;
2944
2945 list_for_each_entry(params, &hdev->le_conn_params, list) {
2946 if (bacmp(&params->addr, addr) == 0 &&
2947 params->addr_type == addr_type) {
2948 return params;
2949 }
2950 }
2951
2952 return NULL;
2953}
2954
/* This function requires the caller holds hdev->lock */
/* Store (or update in place) the preferred LE connection interval range
 * for addr/addr_type. Allocation failure is logged and silently dropped
 * (best effort).
 */
void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			 u16 conn_min_interval, u16 conn_max_interval)
{
	struct hci_conn_params *params;

	/* Update an existing entry in place if there is one */
	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params) {
		params->conn_min_interval = conn_min_interval;
		params->conn_max_interval = conn_max_interval;
		return;
	}

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;

	list_add(&params->list, &hdev->le_conn_params);

	BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
	       conn_max_interval);
}
2985
2986/* This function requires the caller holds hdev->lock */
2987void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2988{
2989 struct hci_conn_params *params;
2990
2991 params = hci_conn_params_lookup(hdev, addr, addr_type);
2992 if (!params)
2993 return;
2994
2995 list_del(&params->list);
2996 kfree(params);
2997
2998 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2999}
3000
3001/* This function requires the caller holds hdev->lock */
3002void hci_conn_params_clear(struct hci_dev *hdev)
3003{
3004 struct hci_conn_params *params, *tmp;
3005
3006 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3007 list_del(&params->list);
3008 kfree(params);
3009 }
3010
3011 BT_DBG("All LE connection parameters were removed");
3012}
3013
4c87eaab 3014static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3015{
4c87eaab
AG
3016 if (status) {
3017 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 3018
4c87eaab
AG
3019 hci_dev_lock(hdev);
3020 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3021 hci_dev_unlock(hdev);
3022 return;
3023 }
7ba8b4be
AG
3024}
3025
4c87eaab 3026static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 3027{
4c87eaab
AG
3028 /* General inquiry access code (GIAC) */
3029 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3030 struct hci_request req;
3031 struct hci_cp_inquiry cp;
7ba8b4be
AG
3032 int err;
3033
4c87eaab
AG
3034 if (status) {
3035 BT_ERR("Failed to disable LE scanning: status %d", status);
3036 return;
3037 }
7ba8b4be 3038
4c87eaab
AG
3039 switch (hdev->discovery.type) {
3040 case DISCOV_TYPE_LE:
3041 hci_dev_lock(hdev);
3042 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3043 hci_dev_unlock(hdev);
3044 break;
7ba8b4be 3045
4c87eaab
AG
3046 case DISCOV_TYPE_INTERLEAVED:
3047 hci_req_init(&req, hdev);
7ba8b4be 3048
4c87eaab
AG
3049 memset(&cp, 0, sizeof(cp));
3050 memcpy(&cp.lap, lap, sizeof(cp.lap));
3051 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
3052 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 3053
4c87eaab 3054 hci_dev_lock(hdev);
7dbfac1d 3055
4c87eaab 3056 hci_inquiry_cache_flush(hdev);
7dbfac1d 3057
4c87eaab
AG
3058 err = hci_req_run(&req, inquiry_complete);
3059 if (err) {
3060 BT_ERR("Inquiry request failed: err %d", err);
3061 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3062 }
7dbfac1d 3063
4c87eaab
AG
3064 hci_dev_unlock(hdev);
3065 break;
7dbfac1d 3066 }
7dbfac1d
AG
3067}
3068
/* Delayed work that turns LE scanning off (e.g. when the scan duration
 * expires); completion is handled by le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
3089
/* Alloc HCI device */
/* Allocate and initialise a struct hci_dev with sane defaults: packet
 * types, TX power, sniff/scan/connection interval defaults, all internal
 * lists, work items, queues and the command timer. The caller registers
 * it with hci_register_dev() and releases it with hci_free_dev().
 * Returns the new device or NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3150
/* Free HCI device */
/* Drop the final device reference; the actual memory is released by the
 * device core through the release callback set up in hci_init_sysfs().
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3158
/* Register HCI device */
/* Register an allocated HCI device with the core: assign an index (AMP
 * controllers may not use index 0, which doubles as the AMP controller
 * ID), create the work queues, debugfs dir, AES crypto context, sysfs
 * device and rfkill switch, add the device to the global list and kick
 * off the initial power-on. Returns the new device id or a negative
 * errno; on failure all partially acquired resources are released in
 * reverse order.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	/* Per-device AES context used e.g. for SMP address resolution */
	hdev->tfm_aes = crypto_alloc_blkcipher("ecb(aes)", 0,
					       CRYPTO_ALG_ASYNC);
	if (IS_ERR(hdev->tfm_aes)) {
		BT_ERR("Unable to create crypto context");
		error = PTR_ERR(hdev->tfm_aes);
		hdev->tfm_aes = NULL;
		goto err_wqueue;
	}

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_tfm;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		/* rfkill is optional; registration failure is non-fatal */
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_tfm:
	crypto_free_blkcipher(hdev->tfm_aes);
err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3267
/* Unregister HCI device
 *
 * Tears down everything hci_register_dev() set up, in reverse order:
 * removes the device from the global list, closes it, cancels pending
 * work, releases sysfs/rfkill/debugfs/crypto resources and finally
 * clears all stored keys and parameters before dropping the reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Marks the device so concurrent paths stop using it */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Save the index; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames from the driver */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to mgmt if the device was fully set up */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	/* Free the AES context allocated in hci_register_dev() */
	if (hdev->tfm_aes)
		crypto_free_blkcipher(hdev->tfm_aes);

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Purge all persistent per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	/* Release the controller index for reuse */
	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3332
3333/* Suspend HCI device */
3334int hci_suspend_dev(struct hci_dev *hdev)
3335{
3336 hci_notify(hdev, HCI_DEV_SUSPEND);
3337 return 0;
3338}
3339EXPORT_SYMBOL(hci_suspend_dev);
3340
3341/* Resume HCI device */
3342int hci_resume_dev(struct hci_dev *hdev)
3343{
3344 hci_notify(hdev, HCI_DEV_RESUME);
3345 return 0;
3346}
3347EXPORT_SYMBOL(hci_resume_dev);
3348
/* Receive frame from HCI drivers
 *
 * Entry point for drivers handing a complete HCI packet to the core.
 * The skb is stamped, marked as incoming and queued on the RX queue;
 * actual processing happens later in hci_rx_work(). Consumes the skb
 * even on error. Returns 0 or -ENXIO if the device is not up/in init.
 */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
3370
/* Reassemble a full HCI packet from a stream of byte fragments.
 *
 * @type:  packet type (ACL/SCO/event) used to size the buffer and to
 *         locate the length field once the header has arrived
 * @data:  fragment bytes from the driver
 * @count: number of bytes available in @data
 * @index: slot in hdev->reassembly[] holding the in-progress skb
 *
 * State lives in the skb's control block: scb->expect counts the bytes
 * still needed to complete the current header or payload. When a frame
 * completes it is handed to hci_recv_frame() and the slot is cleared.
 *
 * Returns the number of unconsumed bytes left in @data, or a negative
 * errno (-EILSEQ on bad type/index, -ENOMEM on allocation failure or
 * if the advertised payload would overflow the buffer).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new frame: allocate the maximum-sized buffer
		 * for this packet type and expect its header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than what the current header/payload needs */
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and validate it against the remaining tailroom.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
3478
ef222013
MH
3479int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3480{
f39a3c06
SS
3481 int rem = 0;
3482
ef222013
MH
3483 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3484 return -EILSEQ;
3485
da5f6c37 3486 while (count) {
1e429f38 3487 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
3488 if (rem < 0)
3489 return rem;
ef222013 3490
f39a3c06
SS
3491 data += (count - rem);
3492 count = rem;
f81c6224 3493 }
ef222013 3494
f39a3c06 3495 return rem;
ef222013
MH
3496}
3497EXPORT_SYMBOL(hci_recv_fragment);
3498
#define STREAM_REASSEMBLY 0

/* Feed an untyped (H4-style) byte stream into the reassembler.
 *
 * Unlike hci_recv_fragment(), the packet type is carried in-band: the
 * first byte of every frame is the type indicator, which is stripped
 * here before the remaining bytes are reassembled in the dedicated
 * STREAM_REASSEMBLY slot.
 *
 * Returns the leftover byte count or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			/* Continue the frame already in progress */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
3533
1da177e4
LT
3534/* ---- Interface to upper protocols ---- */
3535
1da177e4
LT
3536int hci_register_cb(struct hci_cb *cb)
3537{
3538 BT_DBG("%p name %s", cb, cb->name);
3539
f20d09d5 3540 write_lock(&hci_cb_list_lock);
1da177e4 3541 list_add(&cb->list, &hci_cb_list);
f20d09d5 3542 write_unlock(&hci_cb_list_lock);
1da177e4
LT
3543
3544 return 0;
3545}
3546EXPORT_SYMBOL(hci_register_cb);
3547
3548int hci_unregister_cb(struct hci_cb *cb)
3549{
3550 BT_DBG("%p name %s", cb, cb->name);
3551
f20d09d5 3552 write_lock(&hci_cb_list_lock);
1da177e4 3553 list_del(&cb->list);
f20d09d5 3554 write_unlock(&hci_cb_list_lock);
1da177e4
LT
3555
3556 return 0;
3557}
3558EXPORT_SYMBOL(hci_unregister_cb);
3559
/* Hand one outgoing packet to the driver.
 *
 * Also mirrors the packet to the monitor channel and, in promiscuous
 * mode, to raw HCI sockets before passing ownership to hdev->send().
 * Errors from the driver are only logged; the skb is owned by the
 * driver after this call.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
3581
3119ae95
JH
3582void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3583{
3584 skb_queue_head_init(&req->cmd_q);
3585 req->hdev = hdev;
5d73e034 3586 req->err = 0;
3119ae95
JH
3587}
3588
/* Submit a built request for execution.
 *
 * The @complete callback is attached to the last command of the request;
 * the whole batch is spliced onto the device command queue atomically
 * (under cmd_q.lock with IRQs disabled) so commands of one request stay
 * contiguous. Returns 0, the deferred build error, or -ENODATA for an
 * empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occured during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* Completion fires when the final command finishes */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3620
/* Build an skb containing one HCI command packet.
 *
 * Allocates a buffer, writes the little-endian command header (opcode
 * and parameter length) and copies @plen bytes of @param after it.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}
3645
/* Send HCI command
 *
 * Queue a single stand-alone command on the device command queue and
 * schedule the command worker. Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 3670
/* Queue a command to an asynchronous HCI request
 *
 * Adds one command to @req's private queue. @event, when non-zero, is
 * the HCI event expected to complete this command instead of Command
 * Complete/Status. Allocation failures are recorded in req->err and
 * surfaced later by hci_req_run() rather than returned here.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occured during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command of a request marks the batch boundary */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
3701
07dc93dd
JH
3702void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3703 const void *param)
02350a72
JH
3704{
3705 hci_req_add_ev(req, opcode, plen, param, 0);
3706}
3707
1da177e4 3708/* Get data from the previously sent command */
a9de9248 3709void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3710{
3711 struct hci_command_hdr *hdr;
3712
3713 if (!hdev->sent_cmd)
3714 return NULL;
3715
3716 hdr = (void *) hdev->sent_cmd->data;
3717
a9de9248 3718 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3719 return NULL;
3720
f0e09510 3721 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3722
3723 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3724}
3725
/* Send ACL data */
/* Prepend an ACL data header (handle+flags, little-endian length) to an
 * outgoing skb. The transport header is reset to point at the new header.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the push */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
3738
/* Queue an (optionally fragmented) ACL packet on a channel data queue.
 *
 * BR/EDR uses the connection handle while AMP uses the channel handle
 * in the ACL header. If the skb carries a frag_list, the head and all
 * fragments are queued atomically under the queue lock, with the
 * fragments re-marked as ACL_CONT continuation packets.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Fragments after the head are continuation packets */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
3796
3797void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3798{
ee22be7e 3799 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3800
f0e09510 3801 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3802
ee22be7e 3803 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3804
3eff45ea 3805 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3806}
1da177e4
LT
3807
/* Send SCO data */
/* Build the SCO header on the stack, prepend it to the skb and queue
 * the packet on the connection's data queue for the TX worker.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
3828
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
/* Pick the connection of @type with pending data and the fewest
 * outstanding packets (fair scheduling), and compute its quota as the
 * available controller buffer count divided by the number of ready
 * connections (minimum 1). Returns NULL with *quote = 0 when nothing
 * is ready. Traversal is RCU-protected.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen — stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool matching the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3891
/* TX timeout handler: forcibly disconnect every connection of @type
 * that still has unacknowledged packets, since the controller appears
 * to have stopped returning buffer credits.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
3912
/* Channel-level scheduler: among all channels on connections of @type,
 * pick one whose head skb has the highest priority; ties are broken by
 * the fewest packets outstanding on the owning connection. The quota
 * in *quote is the matching buffer pool divided by the number of
 * candidate channels at that priority (minimum 1). RCU-protected.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * accounting at the new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Fairness is per-connection outstanding count */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3994
/* Anti-starvation pass run after a scheduling round: for every channel
 * of @type that was not serviced (chan->sent == 0 after reset) and
 * still has queued data, promote its head skb to HCI_PRIO_MAX - 1 so
 * it wins the next hci_chan_sent() selection. Channels that were
 * serviced just get their per-round counter cleared.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was serviced this round: reset counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4044
b71d385a
AE
4045static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4046{
4047 /* Calculate count of blocks used by this packet */
4048 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4049}
4050
/* Detect a stalled controller: if no ACL credits (@cnt == 0) have been
 * returned for longer than HCI_ACL_TX_TIMEOUT, kill stalled ACL links.
 * Skipped entirely for raw-mode devices.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 4061
/* Packet-based ACL scheduler: drain channels selected by
 * hci_chan_sent() while ACL buffer credits remain, one credit per
 * packet. A channel's drain stops early if a lower-priority skb
 * reaches the head of its queue. Runs a priority recalculation pass
 * if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Credits were consumed: re-balance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4099
/* Block-based ACL scheduler (used by AMP controllers and BR/EDR
 * controllers with block-based flow control): like hci_sched_acl_pkt()
 * but each packet consumes __get_blocks() buffer blocks rather than
 * one credit. Bails out mid-queue if a packet needs more blocks than
 * are currently available.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry AMP_LINK traffic instead of ACL_LINK */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4153
/* Dispatch ACL scheduling to the packet- or block-based variant
 * according to the controller's flow control mode. Skips controllers
 * that have no link of the type they carry.
 */
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
4176
/* Schedule SCO */
/* Send queued SCO packets, one buffer credit each, from the connections
 * chosen by hci_low_sent(). conn->sent wraps to 0 at ~0.
 */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4200
/* eSCO scheduler: identical to hci_sched_sco() but for ESCO_LINK
 * connections; eSCO shares the SCO buffer credit pool (sco_cnt).
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4224
/* LE scheduler: drains LE channels like hci_sched_acl_pkt(), using the
 * LE buffer pool when the controller advertises one (le_pkts) and
 * falling back to the shared ACL pool otherwise. Applies its own
 * 45-second stall timeout for LE links.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Dedicated LE buffers if available, otherwise borrow ACL ones */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4275
/* TX worker: run all per-link-type schedulers (unless the device is in
 * user channel mode, where a userspace process owns the transport) and
 * then flush any raw packets queued directly by HCI sockets.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4296
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
/* Decode the ACL header, look up the owning connection and pass the
 * payload to L2CAP. Packets for unknown handles are logged and freed.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs the handle and the PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol; L2CAP now owns the skb */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
4334
/* SCO data packet */
/* Decode the SCO header, look up the owning connection and pass the
 * payload to the SCO layer. Packets for unknown handles are logged
 * and freed.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol; SCO layer now owns the skb */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
4365
9238f36a
JH
4366static bool hci_req_is_complete(struct hci_dev *hdev)
4367{
4368 struct sk_buff *skb;
4369
4370 skb = skb_peek(&hdev->cmd_q);
4371 if (!skb)
4372 return true;
4373
4374 return bt_cb(skb)->req.start;
4375}
4376
42c6b129
JH
4377static void hci_resend_last(struct hci_dev *hdev)
4378{
4379 struct hci_command_hdr *sent;
4380 struct sk_buff *skb;
4381 u16 opcode;
4382
4383 if (!hdev->sent_cmd)
4384 return;
4385
4386 sent = (void *) hdev->sent_cmd->data;
4387 opcode = __le16_to_cpu(sent->opcode);
4388 if (opcode == HCI_OP_RESET)
4389 return;
4390
4391 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4392 if (!skb)
4393 return;
4394
4395 skb_queue_head(&hdev->cmd_q, skb);
4396 queue_work(hdev->workqueue, &hdev->cmd_work);
4397}
4398
9238f36a
JH
/* Called on command-complete/command-status events so the request
 * framework can decide whether the request containing @opcode is done
 * and, if so, invoke its completion callback with @status.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			/* Start of the next request reached: put it
			 * back and stop flushing.
			 */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Remember the callback of each flushed command; the
		 * last one seen is the one to invoke.
		 */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
4464
b78752cc 4465static void hci_rx_work(struct work_struct *work)
1da177e4 4466{
b78752cc 4467 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
4468 struct sk_buff *skb;
4469
4470 BT_DBG("%s", hdev->name);
4471
1da177e4 4472 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
4473 /* Send copy to monitor */
4474 hci_send_to_monitor(hdev, skb);
4475
1da177e4
LT
4476 if (atomic_read(&hdev->promisc)) {
4477 /* Send copy to the sockets */
470fe1b5 4478 hci_send_to_sock(hdev, skb);
1da177e4
LT
4479 }
4480
0736cfa8
MH
4481 if (test_bit(HCI_RAW, &hdev->flags) ||
4482 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
4483 kfree_skb(skb);
4484 continue;
4485 }
4486
4487 if (test_bit(HCI_INIT, &hdev->flags)) {
4488 /* Don't process data packets in this states. */
0d48d939 4489 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
4490 case HCI_ACLDATA_PKT:
4491 case HCI_SCODATA_PKT:
4492 kfree_skb(skb);
4493 continue;
3ff50b79 4494 }
1da177e4
LT
4495 }
4496
4497 /* Process frame */
0d48d939 4498 switch (bt_cb(skb)->pkt_type) {
1da177e4 4499 case HCI_EVENT_PKT:
b78752cc 4500 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
4501 hci_event_packet(hdev, skb);
4502 break;
4503
4504 case HCI_ACLDATA_PKT:
4505 BT_DBG("%s ACL data packet", hdev->name);
4506 hci_acldata_packet(hdev, skb);
4507 break;
4508
4509 case HCI_SCODATA_PKT:
4510 BT_DBG("%s SCO data packet", hdev->name);
4511 hci_scodata_packet(hdev, skb);
4512 break;
4513
4514 default:
4515 kfree_skb(skb);
4516 break;
4517 }
4518 }
1da177e4
LT
4519}
4520
c347b765 4521static void hci_cmd_work(struct work_struct *work)
1da177e4 4522{
c347b765 4523 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
4524 struct sk_buff *skb;
4525
2104786b
AE
4526 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
4527 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 4528
1da177e4 4529 /* Send queued commands */
5a08ecce
AE
4530 if (atomic_read(&hdev->cmd_cnt)) {
4531 skb = skb_dequeue(&hdev->cmd_q);
4532 if (!skb)
4533 return;
4534
7585b97a 4535 kfree_skb(hdev->sent_cmd);
1da177e4 4536
a675d7f1 4537 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
70f23020 4538 if (hdev->sent_cmd) {
1da177e4 4539 atomic_dec(&hdev->cmd_cnt);
57d17d70 4540 hci_send_frame(hdev, skb);
7bdb8a5c
SJ
4541 if (test_bit(HCI_RESET, &hdev->flags))
4542 del_timer(&hdev->cmd_timer);
4543 else
4544 mod_timer(&hdev->cmd_timer,
5f246e89 4545 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
4546 } else {
4547 skb_queue_head(&hdev->cmd_q, skb);
c347b765 4548 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
4549 }
4550 }
4551}
This page took 1.044379 seconds and 5 git commands to generate.