Bluetooth: Add smp_irk_matches helper function
[deliverable/linux.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
8c520a59 30#include <linux/rfkill.h>
baf27f6e 31#include <linux/debugfs.h>
47219839 32#include <asm/unaligned.h>
1da177e4
LT
33
34#include <net/bluetooth/bluetooth.h>
35#include <net/bluetooth/hci_core.h>
36
b78752cc 37static void hci_rx_work(struct work_struct *work);
c347b765 38static void hci_cmd_work(struct work_struct *work);
3eff45ea 39static void hci_tx_work(struct work_struct *work);
1da177e4 40
1da177e4
LT
41/* HCI device list */
42LIST_HEAD(hci_dev_list);
43DEFINE_RWLOCK(hci_dev_list_lock);
44
45/* HCI callback list */
46LIST_HEAD(hci_cb_list);
47DEFINE_RWLOCK(hci_cb_list_lock);
48
3df92b31
SL
49/* HCI ID Numbering */
50static DEFINE_IDA(hci_index_ida);
51
1da177e4
LT
52/* ---- HCI notifications ---- */
53
/* Forward a device event to the HCI socket layer so monitors see it. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
58
baf27f6e
MH
59/* ---- HCI debugfs entries ---- */
60
4b4148e9
MH
61static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
62 size_t count, loff_t *ppos)
63{
64 struct hci_dev *hdev = file->private_data;
65 char buf[3];
66
67 buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
68 buf[1] = '\n';
69 buf[2] = '\0';
70 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
71}
72
73static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
74 size_t count, loff_t *ppos)
75{
76 struct hci_dev *hdev = file->private_data;
77 struct sk_buff *skb;
78 char buf[32];
79 size_t buf_size = min(count, (sizeof(buf)-1));
80 bool enable;
81 int err;
82
83 if (!test_bit(HCI_UP, &hdev->flags))
84 return -ENETDOWN;
85
86 if (copy_from_user(buf, user_buf, buf_size))
87 return -EFAULT;
88
89 buf[buf_size] = '\0';
90 if (strtobool(buf, &enable))
91 return -EINVAL;
92
93 if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
94 return -EALREADY;
95
96 hci_req_lock(hdev);
97 if (enable)
98 skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
99 HCI_CMD_TIMEOUT);
100 else
101 skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
102 HCI_CMD_TIMEOUT);
103 hci_req_unlock(hdev);
104
105 if (IS_ERR(skb))
106 return PTR_ERR(skb);
107
108 err = -bt_to_errno(skb->data[0]);
109 kfree_skb(skb);
110
111 if (err < 0)
112 return err;
113
114 change_bit(HCI_DUT_MODE, &hdev->dev_flags);
115
116 return count;
117}
118
119static const struct file_operations dut_mode_fops = {
120 .open = simple_open,
121 .read = dut_mode_read,
122 .write = dut_mode_write,
123 .llseek = default_llseek,
124};
125
dfb826a8
MH
126static int features_show(struct seq_file *f, void *ptr)
127{
128 struct hci_dev *hdev = f->private;
129 u8 p;
130
131 hci_dev_lock(hdev);
132 for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
cfbb2b5b 133 seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
dfb826a8
MH
134 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
135 hdev->features[p][0], hdev->features[p][1],
136 hdev->features[p][2], hdev->features[p][3],
137 hdev->features[p][4], hdev->features[p][5],
138 hdev->features[p][6], hdev->features[p][7]);
139 }
cfbb2b5b
MH
140 if (lmp_le_capable(hdev))
141 seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
142 "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
143 hdev->le_features[0], hdev->le_features[1],
144 hdev->le_features[2], hdev->le_features[3],
145 hdev->le_features[4], hdev->le_features[5],
146 hdev->le_features[6], hdev->le_features[7]);
dfb826a8
MH
147 hci_dev_unlock(hdev);
148
149 return 0;
150}
151
152static int features_open(struct inode *inode, struct file *file)
153{
154 return single_open(file, features_show, inode->i_private);
155}
156
157static const struct file_operations features_fops = {
158 .open = features_open,
159 .read = seq_read,
160 .llseek = seq_lseek,
161 .release = single_release,
162};
163
70afe0b8
MH
164static int blacklist_show(struct seq_file *f, void *p)
165{
166 struct hci_dev *hdev = f->private;
167 struct bdaddr_list *b;
168
169 hci_dev_lock(hdev);
170 list_for_each_entry(b, &hdev->blacklist, list)
b25f0785 171 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
70afe0b8
MH
172 hci_dev_unlock(hdev);
173
174 return 0;
175}
176
177static int blacklist_open(struct inode *inode, struct file *file)
178{
179 return single_open(file, blacklist_show, inode->i_private);
180}
181
182static const struct file_operations blacklist_fops = {
183 .open = blacklist_open,
184 .read = seq_read,
185 .llseek = seq_lseek,
186 .release = single_release,
187};
188
47219839
MH
189static int uuids_show(struct seq_file *f, void *p)
190{
191 struct hci_dev *hdev = f->private;
192 struct bt_uuid *uuid;
193
194 hci_dev_lock(hdev);
195 list_for_each_entry(uuid, &hdev->uuids, list) {
58f01aa9
MH
196 u8 i, val[16];
197
198 /* The Bluetooth UUID values are stored in big endian,
199 * but with reversed byte order. So convert them into
200 * the right order for the %pUb modifier.
201 */
202 for (i = 0; i < 16; i++)
203 val[i] = uuid->uuid[15 - i];
204
205 seq_printf(f, "%pUb\n", val);
47219839
MH
206 }
207 hci_dev_unlock(hdev);
208
209 return 0;
210}
211
212static int uuids_open(struct inode *inode, struct file *file)
213{
214 return single_open(file, uuids_show, inode->i_private);
215}
216
217static const struct file_operations uuids_fops = {
218 .open = uuids_open,
219 .read = seq_read,
220 .llseek = seq_lseek,
221 .release = single_release,
222};
223
baf27f6e
MH
224static int inquiry_cache_show(struct seq_file *f, void *p)
225{
226 struct hci_dev *hdev = f->private;
227 struct discovery_state *cache = &hdev->discovery;
228 struct inquiry_entry *e;
229
230 hci_dev_lock(hdev);
231
232 list_for_each_entry(e, &cache->all, all) {
233 struct inquiry_data *data = &e->data;
234 seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
235 &data->bdaddr,
236 data->pscan_rep_mode, data->pscan_period_mode,
237 data->pscan_mode, data->dev_class[2],
238 data->dev_class[1], data->dev_class[0],
239 __le16_to_cpu(data->clock_offset),
240 data->rssi, data->ssp_mode, e->timestamp);
241 }
242
243 hci_dev_unlock(hdev);
244
245 return 0;
246}
247
248static int inquiry_cache_open(struct inode *inode, struct file *file)
249{
250 return single_open(file, inquiry_cache_show, inode->i_private);
251}
252
253static const struct file_operations inquiry_cache_fops = {
254 .open = inquiry_cache_open,
255 .read = seq_read,
256 .llseek = seq_lseek,
257 .release = single_release,
258};
259
02d08d15
MH
260static int link_keys_show(struct seq_file *f, void *ptr)
261{
262 struct hci_dev *hdev = f->private;
263 struct list_head *p, *n;
264
265 hci_dev_lock(hdev);
266 list_for_each_safe(p, n, &hdev->link_keys) {
267 struct link_key *key = list_entry(p, struct link_key, list);
268 seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
269 HCI_LINK_KEY_SIZE, key->val, key->pin_len);
270 }
271 hci_dev_unlock(hdev);
272
273 return 0;
274}
275
276static int link_keys_open(struct inode *inode, struct file *file)
277{
278 return single_open(file, link_keys_show, inode->i_private);
279}
280
281static const struct file_operations link_keys_fops = {
282 .open = link_keys_open,
283 .read = seq_read,
284 .llseek = seq_lseek,
285 .release = single_release,
286};
287
babdbb3c
MH
288static int dev_class_show(struct seq_file *f, void *ptr)
289{
290 struct hci_dev *hdev = f->private;
291
292 hci_dev_lock(hdev);
293 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
294 hdev->dev_class[1], hdev->dev_class[0]);
295 hci_dev_unlock(hdev);
296
297 return 0;
298}
299
300static int dev_class_open(struct inode *inode, struct file *file)
301{
302 return single_open(file, dev_class_show, inode->i_private);
303}
304
305static const struct file_operations dev_class_fops = {
306 .open = dev_class_open,
307 .read = seq_read,
308 .llseek = seq_lseek,
309 .release = single_release,
310};
311
041000b9
MH
312static int voice_setting_get(void *data, u64 *val)
313{
314 struct hci_dev *hdev = data;
315
316 hci_dev_lock(hdev);
317 *val = hdev->voice_setting;
318 hci_dev_unlock(hdev);
319
320 return 0;
321}
322
323DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
324 NULL, "0x%4.4llx\n");
325
ebd1e33b
MH
326static int auto_accept_delay_set(void *data, u64 val)
327{
328 struct hci_dev *hdev = data;
329
330 hci_dev_lock(hdev);
331 hdev->auto_accept_delay = val;
332 hci_dev_unlock(hdev);
333
334 return 0;
335}
336
337static int auto_accept_delay_get(void *data, u64 *val)
338{
339 struct hci_dev *hdev = data;
340
341 hci_dev_lock(hdev);
342 *val = hdev->auto_accept_delay;
343 hci_dev_unlock(hdev);
344
345 return 0;
346}
347
348DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
349 auto_accept_delay_set, "%llu\n");
350
06f5b778
MH
351static int ssp_debug_mode_set(void *data, u64 val)
352{
353 struct hci_dev *hdev = data;
354 struct sk_buff *skb;
355 __u8 mode;
356 int err;
357
358 if (val != 0 && val != 1)
359 return -EINVAL;
360
361 if (!test_bit(HCI_UP, &hdev->flags))
362 return -ENETDOWN;
363
364 hci_req_lock(hdev);
365 mode = val;
366 skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
367 &mode, HCI_CMD_TIMEOUT);
368 hci_req_unlock(hdev);
369
370 if (IS_ERR(skb))
371 return PTR_ERR(skb);
372
373 err = -bt_to_errno(skb->data[0]);
374 kfree_skb(skb);
375
376 if (err < 0)
377 return err;
378
379 hci_dev_lock(hdev);
380 hdev->ssp_debug_mode = val;
381 hci_dev_unlock(hdev);
382
383 return 0;
384}
385
386static int ssp_debug_mode_get(void *data, u64 *val)
387{
388 struct hci_dev *hdev = data;
389
390 hci_dev_lock(hdev);
391 *val = hdev->ssp_debug_mode;
392 hci_dev_unlock(hdev);
393
394 return 0;
395}
396
397DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
398 ssp_debug_mode_set, "%llu\n");
399
5afeac14
MH
400static ssize_t force_sc_support_read(struct file *file, char __user *user_buf,
401 size_t count, loff_t *ppos)
402{
403 struct hci_dev *hdev = file->private_data;
404 char buf[3];
405
406 buf[0] = test_bit(HCI_FORCE_SC, &hdev->dev_flags) ? 'Y': 'N';
407 buf[1] = '\n';
408 buf[2] = '\0';
409 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
410}
411
412static ssize_t force_sc_support_write(struct file *file,
413 const char __user *user_buf,
414 size_t count, loff_t *ppos)
415{
416 struct hci_dev *hdev = file->private_data;
417 char buf[32];
418 size_t buf_size = min(count, (sizeof(buf)-1));
419 bool enable;
420
421 if (test_bit(HCI_UP, &hdev->flags))
422 return -EBUSY;
423
424 if (copy_from_user(buf, user_buf, buf_size))
425 return -EFAULT;
426
427 buf[buf_size] = '\0';
428 if (strtobool(buf, &enable))
429 return -EINVAL;
430
431 if (enable == test_bit(HCI_FORCE_SC, &hdev->dev_flags))
432 return -EALREADY;
433
434 change_bit(HCI_FORCE_SC, &hdev->dev_flags);
435
436 return count;
437}
438
439static const struct file_operations force_sc_support_fops = {
440 .open = simple_open,
441 .read = force_sc_support_read,
442 .write = force_sc_support_write,
443 .llseek = default_llseek,
444};
445
134c2a89
MH
446static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf,
447 size_t count, loff_t *ppos)
448{
449 struct hci_dev *hdev = file->private_data;
450 char buf[3];
451
452 buf[0] = test_bit(HCI_SC_ONLY, &hdev->dev_flags) ? 'Y': 'N';
453 buf[1] = '\n';
454 buf[2] = '\0';
455 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
456}
457
458static const struct file_operations sc_only_mode_fops = {
459 .open = simple_open,
460 .read = sc_only_mode_read,
461 .llseek = default_llseek,
462};
463
2bfa3531
MH
464static int idle_timeout_set(void *data, u64 val)
465{
466 struct hci_dev *hdev = data;
467
468 if (val != 0 && (val < 500 || val > 3600000))
469 return -EINVAL;
470
471 hci_dev_lock(hdev);
2be48b65 472 hdev->idle_timeout = val;
2bfa3531
MH
473 hci_dev_unlock(hdev);
474
475 return 0;
476}
477
478static int idle_timeout_get(void *data, u64 *val)
479{
480 struct hci_dev *hdev = data;
481
482 hci_dev_lock(hdev);
483 *val = hdev->idle_timeout;
484 hci_dev_unlock(hdev);
485
486 return 0;
487}
488
489DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
490 idle_timeout_set, "%llu\n");
491
492static int sniff_min_interval_set(void *data, u64 val)
493{
494 struct hci_dev *hdev = data;
495
496 if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
497 return -EINVAL;
498
499 hci_dev_lock(hdev);
2be48b65 500 hdev->sniff_min_interval = val;
2bfa3531
MH
501 hci_dev_unlock(hdev);
502
503 return 0;
504}
505
506static int sniff_min_interval_get(void *data, u64 *val)
507{
508 struct hci_dev *hdev = data;
509
510 hci_dev_lock(hdev);
511 *val = hdev->sniff_min_interval;
512 hci_dev_unlock(hdev);
513
514 return 0;
515}
516
517DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
518 sniff_min_interval_set, "%llu\n");
519
520static int sniff_max_interval_set(void *data, u64 val)
521{
522 struct hci_dev *hdev = data;
523
524 if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
525 return -EINVAL;
526
527 hci_dev_lock(hdev);
2be48b65 528 hdev->sniff_max_interval = val;
2bfa3531
MH
529 hci_dev_unlock(hdev);
530
531 return 0;
532}
533
534static int sniff_max_interval_get(void *data, u64 *val)
535{
536 struct hci_dev *hdev = data;
537
538 hci_dev_lock(hdev);
539 *val = hdev->sniff_max_interval;
540 hci_dev_unlock(hdev);
541
542 return 0;
543}
544
545DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
546 sniff_max_interval_set, "%llu\n");
547
e7b8fc92
MH
548static int static_address_show(struct seq_file *f, void *p)
549{
550 struct hci_dev *hdev = f->private;
551
552 hci_dev_lock(hdev);
553 seq_printf(f, "%pMR\n", &hdev->static_addr);
554 hci_dev_unlock(hdev);
555
556 return 0;
557}
558
559static int static_address_open(struct inode *inode, struct file *file)
560{
561 return single_open(file, static_address_show, inode->i_private);
562}
563
564static const struct file_operations static_address_fops = {
565 .open = static_address_open,
566 .read = seq_read,
567 .llseek = seq_lseek,
568 .release = single_release,
569};
570
92202185
MH
571static int own_address_type_set(void *data, u64 val)
572{
573 struct hci_dev *hdev = data;
574
575 if (val != 0 && val != 1)
576 return -EINVAL;
577
578 hci_dev_lock(hdev);
579 hdev->own_addr_type = val;
580 hci_dev_unlock(hdev);
581
582 return 0;
583}
584
585static int own_address_type_get(void *data, u64 *val)
586{
587 struct hci_dev *hdev = data;
588
589 hci_dev_lock(hdev);
590 *val = hdev->own_addr_type;
591 hci_dev_unlock(hdev);
592
593 return 0;
594}
595
596DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
597 own_address_type_set, "%llu\n");
598
8f8625cd
MH
599static int long_term_keys_show(struct seq_file *f, void *ptr)
600{
601 struct hci_dev *hdev = f->private;
602 struct list_head *p, *n;
603
604 hci_dev_lock(hdev);
f813f1be 605 list_for_each_safe(p, n, &hdev->long_term_keys) {
8f8625cd 606 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
f813f1be 607 seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %*phN %*phN\n",
8f8625cd
MH
608 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
609 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
610 8, ltk->rand, 16, ltk->val);
611 }
612 hci_dev_unlock(hdev);
613
614 return 0;
615}
616
617static int long_term_keys_open(struct inode *inode, struct file *file)
618{
619 return single_open(file, long_term_keys_show, inode->i_private);
620}
621
622static const struct file_operations long_term_keys_fops = {
623 .open = long_term_keys_open,
624 .read = seq_read,
625 .llseek = seq_lseek,
626 .release = single_release,
627};
628
4e70c7e7
MH
629static int conn_min_interval_set(void *data, u64 val)
630{
631 struct hci_dev *hdev = data;
632
633 if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
634 return -EINVAL;
635
636 hci_dev_lock(hdev);
2be48b65 637 hdev->le_conn_min_interval = val;
4e70c7e7
MH
638 hci_dev_unlock(hdev);
639
640 return 0;
641}
642
643static int conn_min_interval_get(void *data, u64 *val)
644{
645 struct hci_dev *hdev = data;
646
647 hci_dev_lock(hdev);
648 *val = hdev->le_conn_min_interval;
649 hci_dev_unlock(hdev);
650
651 return 0;
652}
653
654DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
655 conn_min_interval_set, "%llu\n");
656
657static int conn_max_interval_set(void *data, u64 val)
658{
659 struct hci_dev *hdev = data;
660
661 if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
662 return -EINVAL;
663
664 hci_dev_lock(hdev);
2be48b65 665 hdev->le_conn_max_interval = val;
4e70c7e7
MH
666 hci_dev_unlock(hdev);
667
668 return 0;
669}
670
671static int conn_max_interval_get(void *data, u64 *val)
672{
673 struct hci_dev *hdev = data;
674
675 hci_dev_lock(hdev);
676 *val = hdev->le_conn_max_interval;
677 hci_dev_unlock(hdev);
678
679 return 0;
680}
681
682DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
683 conn_max_interval_set, "%llu\n");
684
89863109
JR
685static ssize_t lowpan_read(struct file *file, char __user *user_buf,
686 size_t count, loff_t *ppos)
687{
688 struct hci_dev *hdev = file->private_data;
689 char buf[3];
690
691 buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
692 buf[1] = '\n';
693 buf[2] = '\0';
694 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
695}
696
697static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
698 size_t count, loff_t *position)
699{
700 struct hci_dev *hdev = fp->private_data;
701 bool enable;
702 char buf[32];
703 size_t buf_size = min(count, (sizeof(buf)-1));
704
705 if (copy_from_user(buf, user_buffer, buf_size))
706 return -EFAULT;
707
708 buf[buf_size] = '\0';
709
710 if (strtobool(buf, &enable) < 0)
711 return -EINVAL;
712
713 if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
714 return -EALREADY;
715
716 change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);
717
718 return count;
719}
720
721static const struct file_operations lowpan_debugfs_fops = {
722 .open = simple_open,
723 .read = lowpan_read,
724 .write = lowpan_write,
725 .llseek = default_llseek,
726};
727
1da177e4
LT
728/* ---- HCI requests ---- */
729
42c6b129 730static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
1da177e4 731{
42c6b129 732 BT_DBG("%s result 0x%2.2x", hdev->name, result);
1da177e4
LT
733
734 if (hdev->req_status == HCI_REQ_PEND) {
735 hdev->req_result = result;
736 hdev->req_status = HCI_REQ_DONE;
737 wake_up_interruptible(&hdev->req_wait_q);
738 }
739}
740
741static void hci_req_cancel(struct hci_dev *hdev, int err)
742{
743 BT_DBG("%s err 0x%2.2x", hdev->name, err);
744
745 if (hdev->req_status == HCI_REQ_PEND) {
746 hdev->req_result = err;
747 hdev->req_status = HCI_REQ_CANCELED;
748 wake_up_interruptible(&hdev->req_wait_q);
749 }
750}
751
77a63e0a
FW
752static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
753 u8 event)
75e84b7c
JH
754{
755 struct hci_ev_cmd_complete *ev;
756 struct hci_event_hdr *hdr;
757 struct sk_buff *skb;
758
759 hci_dev_lock(hdev);
760
761 skb = hdev->recv_evt;
762 hdev->recv_evt = NULL;
763
764 hci_dev_unlock(hdev);
765
766 if (!skb)
767 return ERR_PTR(-ENODATA);
768
769 if (skb->len < sizeof(*hdr)) {
770 BT_ERR("Too short HCI event");
771 goto failed;
772 }
773
774 hdr = (void *) skb->data;
775 skb_pull(skb, HCI_EVENT_HDR_SIZE);
776
7b1abbbe
JH
777 if (event) {
778 if (hdr->evt != event)
779 goto failed;
780 return skb;
781 }
782
75e84b7c
JH
783 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
784 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
785 goto failed;
786 }
787
788 if (skb->len < sizeof(*ev)) {
789 BT_ERR("Too short cmd_complete event");
790 goto failed;
791 }
792
793 ev = (void *) skb->data;
794 skb_pull(skb, sizeof(*ev));
795
796 if (opcode == __le16_to_cpu(ev->opcode))
797 return skb;
798
799 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
800 __le16_to_cpu(ev->opcode));
801
802failed:
803 kfree_skb(skb);
804 return ERR_PTR(-ENODATA);
805}
806
7b1abbbe 807struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 808 const void *param, u8 event, u32 timeout)
75e84b7c
JH
809{
810 DECLARE_WAITQUEUE(wait, current);
811 struct hci_request req;
812 int err = 0;
813
814 BT_DBG("%s", hdev->name);
815
816 hci_req_init(&req, hdev);
817
7b1abbbe 818 hci_req_add_ev(&req, opcode, plen, param, event);
75e84b7c
JH
819
820 hdev->req_status = HCI_REQ_PEND;
821
822 err = hci_req_run(&req, hci_req_sync_complete);
823 if (err < 0)
824 return ERR_PTR(err);
825
826 add_wait_queue(&hdev->req_wait_q, &wait);
827 set_current_state(TASK_INTERRUPTIBLE);
828
829 schedule_timeout(timeout);
830
831 remove_wait_queue(&hdev->req_wait_q, &wait);
832
833 if (signal_pending(current))
834 return ERR_PTR(-EINTR);
835
836 switch (hdev->req_status) {
837 case HCI_REQ_DONE:
838 err = -bt_to_errno(hdev->req_result);
839 break;
840
841 case HCI_REQ_CANCELED:
842 err = -hdev->req_result;
843 break;
844
845 default:
846 err = -ETIMEDOUT;
847 break;
848 }
849
850 hdev->req_status = hdev->req_result = 0;
851
852 BT_DBG("%s end: err %d", hdev->name, err);
853
854 if (err < 0)
855 return ERR_PTR(err);
856
7b1abbbe
JH
857 return hci_get_cmd_complete(hdev, opcode, event);
858}
859EXPORT_SYMBOL(__hci_cmd_sync_ev);
860
861struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
07dc93dd 862 const void *param, u32 timeout)
7b1abbbe
JH
863{
864 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
75e84b7c
JH
865}
866EXPORT_SYMBOL(__hci_cmd_sync);
867
1da177e4 868/* Execute request and wait for completion. */
01178cd4 869static int __hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
870 void (*func)(struct hci_request *req,
871 unsigned long opt),
01178cd4 872 unsigned long opt, __u32 timeout)
1da177e4 873{
42c6b129 874 struct hci_request req;
1da177e4
LT
875 DECLARE_WAITQUEUE(wait, current);
876 int err = 0;
877
878 BT_DBG("%s start", hdev->name);
879
42c6b129
JH
880 hci_req_init(&req, hdev);
881
1da177e4
LT
882 hdev->req_status = HCI_REQ_PEND;
883
42c6b129 884 func(&req, opt);
53cce22d 885
42c6b129
JH
886 err = hci_req_run(&req, hci_req_sync_complete);
887 if (err < 0) {
53cce22d 888 hdev->req_status = 0;
920c8300
AG
889
890 /* ENODATA means the HCI request command queue is empty.
891 * This can happen when a request with conditionals doesn't
892 * trigger any commands to be sent. This is normal behavior
893 * and should not trigger an error return.
42c6b129 894 */
920c8300
AG
895 if (err == -ENODATA)
896 return 0;
897
898 return err;
53cce22d
JH
899 }
900
bc4445c7
AG
901 add_wait_queue(&hdev->req_wait_q, &wait);
902 set_current_state(TASK_INTERRUPTIBLE);
903
1da177e4
LT
904 schedule_timeout(timeout);
905
906 remove_wait_queue(&hdev->req_wait_q, &wait);
907
908 if (signal_pending(current))
909 return -EINTR;
910
911 switch (hdev->req_status) {
912 case HCI_REQ_DONE:
e175072f 913 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
914 break;
915
916 case HCI_REQ_CANCELED:
917 err = -hdev->req_result;
918 break;
919
920 default:
921 err = -ETIMEDOUT;
922 break;
3ff50b79 923 }
1da177e4 924
a5040efa 925 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
926
927 BT_DBG("%s end: err %d", hdev->name, err);
928
929 return err;
930}
931
01178cd4 932static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
933 void (*req)(struct hci_request *req,
934 unsigned long opt),
01178cd4 935 unsigned long opt, __u32 timeout)
1da177e4
LT
936{
937 int ret;
938
7c6a329e
MH
939 if (!test_bit(HCI_UP, &hdev->flags))
940 return -ENETDOWN;
941
1da177e4
LT
942 /* Serialize all requests */
943 hci_req_lock(hdev);
01178cd4 944 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
945 hci_req_unlock(hdev);
946
947 return ret;
948}
949
42c6b129 950static void hci_reset_req(struct hci_request *req, unsigned long opt)
1da177e4 951{
42c6b129 952 BT_DBG("%s %ld", req->hdev->name, opt);
1da177e4
LT
953
954 /* Reset device */
42c6b129
JH
955 set_bit(HCI_RESET, &req->hdev->flags);
956 hci_req_add(req, HCI_OP_RESET, 0, NULL);
1da177e4
LT
957}
958
42c6b129 959static void bredr_init(struct hci_request *req)
1da177e4 960{
42c6b129 961 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
2455a3ea 962
1da177e4 963 /* Read Local Supported Features */
42c6b129 964 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 965
1143e5a6 966 /* Read Local Version */
42c6b129 967 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
2177bab5
JH
968
969 /* Read BD Address */
42c6b129 970 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
1da177e4
LT
971}
972
42c6b129 973static void amp_init(struct hci_request *req)
e61ef499 974{
42c6b129 975 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
2455a3ea 976
e61ef499 977 /* Read Local Version */
42c6b129 978 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489 979
f6996cfe
MH
980 /* Read Local Supported Commands */
981 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
982
983 /* Read Local Supported Features */
984 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
985
6bcbc489 986 /* Read Local AMP Info */
42c6b129 987 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
988
989 /* Read Data Blk size */
42c6b129 990 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
7528ca1c 991
f38ba941
MH
992 /* Read Flow Control Mode */
993 hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);
994
7528ca1c
MH
995 /* Read Location Data */
996 hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
e61ef499
AE
997}
998
42c6b129 999static void hci_init1_req(struct hci_request *req, unsigned long opt)
e61ef499 1000{
42c6b129 1001 struct hci_dev *hdev = req->hdev;
e61ef499
AE
1002
1003 BT_DBG("%s %ld", hdev->name, opt);
1004
11778716
AE
1005 /* Reset */
1006 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
42c6b129 1007 hci_reset_req(req, 0);
11778716 1008
e61ef499
AE
1009 switch (hdev->dev_type) {
1010 case HCI_BREDR:
42c6b129 1011 bredr_init(req);
e61ef499
AE
1012 break;
1013
1014 case HCI_AMP:
42c6b129 1015 amp_init(req);
e61ef499
AE
1016 break;
1017
1018 default:
1019 BT_ERR("Unknown device type %d", hdev->dev_type);
1020 break;
1021 }
e61ef499
AE
1022}
1023
42c6b129 1024static void bredr_setup(struct hci_request *req)
2177bab5 1025{
4ca048e3
MH
1026 struct hci_dev *hdev = req->hdev;
1027
2177bab5
JH
1028 __le16 param;
1029 __u8 flt_type;
1030
1031 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
42c6b129 1032 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
1033
1034 /* Read Class of Device */
42c6b129 1035 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
2177bab5
JH
1036
1037 /* Read Local Name */
42c6b129 1038 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
2177bab5
JH
1039
1040 /* Read Voice Setting */
42c6b129 1041 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
2177bab5 1042
b4cb9fb2
MH
1043 /* Read Number of Supported IAC */
1044 hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);
1045
4b836f39
MH
1046 /* Read Current IAC LAP */
1047 hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);
1048
2177bab5
JH
1049 /* Clear Event Filters */
1050 flt_type = HCI_FLT_CLEAR_ALL;
42c6b129 1051 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
2177bab5
JH
1052
1053 /* Connection accept timeout ~20 secs */
1054 param = __constant_cpu_to_le16(0x7d00);
42c6b129 1055 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
2177bab5 1056
4ca048e3
MH
1057 /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
1058 * but it does not support page scan related HCI commands.
1059 */
1060 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
f332ec66
JH
1061 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
1062 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
1063 }
2177bab5
JH
1064}
1065
42c6b129 1066static void le_setup(struct hci_request *req)
2177bab5 1067{
c73eee91
JH
1068 struct hci_dev *hdev = req->hdev;
1069
2177bab5 1070 /* Read LE Buffer Size */
42c6b129 1071 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
2177bab5
JH
1072
1073 /* Read LE Local Supported Features */
42c6b129 1074 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
2177bab5
JH
1075
1076 /* Read LE Advertising Channel TX Power */
42c6b129 1077 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
2177bab5
JH
1078
1079 /* Read LE White List Size */
42c6b129 1080 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
2177bab5
JH
1081
1082 /* Read LE Supported States */
42c6b129 1083 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
c73eee91
JH
1084
1085 /* LE-only controllers have LE implicitly enabled */
1086 if (!lmp_bredr_capable(hdev))
1087 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
2177bab5
JH
1088}
1089
1090static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1091{
1092 if (lmp_ext_inq_capable(hdev))
1093 return 0x02;
1094
1095 if (lmp_inq_rssi_capable(hdev))
1096 return 0x01;
1097
1098 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1099 hdev->lmp_subver == 0x0757)
1100 return 0x01;
1101
1102 if (hdev->manufacturer == 15) {
1103 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1104 return 0x01;
1105 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1106 return 0x01;
1107 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1108 return 0x01;
1109 }
1110
1111 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1112 hdev->lmp_subver == 0x1805)
1113 return 0x01;
1114
1115 return 0x00;
1116}
1117
42c6b129 1118static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
1119{
1120 u8 mode;
1121
42c6b129 1122 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 1123
42c6b129 1124 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
1125}
1126
/* Configure the controller's HCI event mask so that only events the
 * local feature set can produce (and the host can handle) are enabled.
 * Queues Set Event Mask and, for LE capable controllers, LE Set Event
 * Mask onto @req.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		/* 0x1f enables the five LE meta events in the first byte
		 * of the LE event mask.
		 */
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1207
/* Second stage of controller initialization: transport-specific setup
 * (BR/EDR and/or LE), event mask configuration and feature-dependent
 * commands such as SSP mode, inquiry mode and extended features.
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		/* When SSP is available, then the host features page
		 * should also be available as well. However some
		 * controllers list the max_page as 0 as long as SSP
		 * has not been enabled. To achieve proper debugging
		 * output, force the minimum max_page to 1 at least.
		 */
		hdev->max_page = 0x01;

		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stale EIR data both
			 * locally and on the controller.
			 */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
1271
42c6b129 1272static void hci_setup_link_policy(struct hci_request *req)
2177bab5 1273{
42c6b129 1274 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1275 struct hci_cp_write_def_link_policy cp;
1276 u16 link_policy = 0;
1277
1278 if (lmp_rswitch_capable(hdev))
1279 link_policy |= HCI_LP_RSWITCH;
1280 if (lmp_hold_capable(hdev))
1281 link_policy |= HCI_LP_HOLD;
1282 if (lmp_sniff_capable(hdev))
1283 link_policy |= HCI_LP_SNIFF;
1284 if (lmp_park_capable(hdev))
1285 link_policy |= HCI_LP_PARK;
1286
1287 cp.policy = cpu_to_le16(link_policy);
42c6b129 1288 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
1289}
1290
/* Queue a Write LE Host Supported command when the host-side LE enable
 * state differs from what the controller currently reports.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Only send the command if it would actually change the setting */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
1311
d62e6d67
JH
/* Configure the second page of the HCI event mask based on supported
 * Connectionless Slave Broadcast roles and ping capability.
 */
static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_master_capable(hdev)) {
		events[1] |= 0x40;	/* Triggered Clock Capture */
		events[1] |= 0x80;	/* Synchronization Train Complete */
		events[2] |= 0x10;	/* Slave Page Response Timeout */
		events[2] |= 0x20;	/* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (lmp_csb_slave_capable(hdev)) {
		events[2] |= 0x01;	/* Synchronization Train Received */
		events[2] |= 0x02;	/* CSB Receive */
		events[2] |= 0x04;	/* CSB Timeout */
		events[2] |= 0x08;	/* Truncated Page Complete */
	}

	/* Enable Authenticated Payload Timeout Expired event if supported */
	if (lmp_ping_capable(hdev))
		events[2] |= 0x80;

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}
1343
/* Third stage of controller initialization: stored link key cleanup,
 * default link policy, LE address type selection and reading extended
 * feature pages beyond page 1.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 *
	 * Some controllers indicate that they support handling deleting
	 * stored link keys, but they don't. The quirk lets a driver
	 * just disable this command.
	 */
	if (hdev->commands[6] & 0x80 &&
	    !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
			/* If the controller has a public BD_ADDR, then
			 * by default use that one. If this is a LE only
			 * controller without a public address, default
			 * to the random address.
			 */
			if (bacmp(&hdev->bdaddr, BDADDR_ANY))
				hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
			else
				hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
		}

		hci_set_le_support(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
1400
5d4e7e8d
JH
/* Fourth stage of controller initialization: event mask page 2,
 * synchronization train parameters and Secure Connections support.
 */
static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (lmp_sync_train_capable(hdev))
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);

	/* Enable Secure Connections if supported and configured */
	if ((lmp_sc_capable(hdev) ||
	     test_bit(HCI_FORCE_SC, &hdev->dev_flags)) &&
	    test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
		u8 support = 0x01;
		hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
			    sizeof(support), &support);
	}
}
1422
2177bab5
JH
/* Run the staged HCI initialization sequence for @hdev and, on first
 * setup, create the debugfs entries matching the controller's feature
 * set. Returns 0 on success or a negative error from __hci_req_sync().
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* The Device Under Test (DUT) mode is special and available for
	 * all controller types. So just create it early on.
	 */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
				    &dut_mode_fops);
	}

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* Only create debugfs entries during the initial setup
	 * phase and not every time the controller gets powered on.
	 */
	if (!test_bit(HCI_SETUP, &hdev->dev_flags))
		return 0;

	/* Entries common to every controller type */
	debugfs_create_file("features", 0444, hdev->debugfs, hdev,
			    &features_fops);
	debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
			   &hdev->manufacturer);
	debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
	debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
	debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
			    &blacklist_fops);
	debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

	if (lmp_bredr_capable(hdev)) {
		debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
				    hdev, &inquiry_cache_fops);
		debugfs_create_file("link_keys", 0400, hdev->debugfs,
				    hdev, &link_keys_fops);
		debugfs_create_file("dev_class", 0444, hdev->debugfs,
				    hdev, &dev_class_fops);
		debugfs_create_file("voice_setting", 0444, hdev->debugfs,
				    hdev, &voice_setting_fops);
	}

	if (lmp_ssp_capable(hdev)) {
		debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
				    hdev, &auto_accept_delay_fops);
		debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
				    hdev, &ssp_debug_mode_fops);
		debugfs_create_file("force_sc_support", 0644, hdev->debugfs,
				    hdev, &force_sc_support_fops);
		debugfs_create_file("sc_only_mode", 0444, hdev->debugfs,
				    hdev, &sc_only_mode_fops);
	}

	if (lmp_sniff_capable(hdev)) {
		debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
				    hdev, &idle_timeout_fops);
		debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
				    hdev, &sniff_min_interval_fops);
		debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
				    hdev, &sniff_max_interval_fops);
	}

	if (lmp_le_capable(hdev)) {
		debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
				  &hdev->le_white_list_size);
		debugfs_create_file("static_address", 0444, hdev->debugfs,
				    hdev, &static_address_fops);
		debugfs_create_file("own_address_type", 0644, hdev->debugfs,
				    hdev, &own_address_type_fops);
		debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
				    hdev, &long_term_keys_fops);
		debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
				    hdev, &conn_min_interval_fops);
		debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
				    hdev, &conn_max_interval_fops);
		debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
				    &lowpan_debugfs_fops);
	}

	return 0;
}
1524
42c6b129 1525static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1526{
1527 __u8 scan = opt;
1528
42c6b129 1529 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1530
1531 /* Inquiry and Page scans */
42c6b129 1532 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1533}
1534
42c6b129 1535static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1536{
1537 __u8 auth = opt;
1538
42c6b129 1539 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1540
1541 /* Authentication */
42c6b129 1542 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1543}
1544
42c6b129 1545static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1546{
1547 __u8 encrypt = opt;
1548
42c6b129 1549 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1550
e4e8e37c 1551 /* Encryption */
42c6b129 1552 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1553}
1554
42c6b129 1555static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1556{
1557 __le16 policy = cpu_to_le16(opt);
1558
42c6b129 1559 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1560
1561 /* Default link policy */
42c6b129 1562 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1563}
1564
8e87d142 1565/* Get HCI device by index.
1da177e4
LT
1566 * Device is held on return. */
1567struct hci_dev *hci_dev_get(int index)
1568{
8035ded4 1569 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1570
1571 BT_DBG("%d", index);
1572
1573 if (index < 0)
1574 return NULL;
1575
1576 read_lock(&hci_dev_list_lock);
8035ded4 1577 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1578 if (d->id == index) {
1579 hdev = hci_dev_hold(d);
1580 break;
1581 }
1582 }
1583 read_unlock(&hci_dev_list_lock);
1584 return hdev;
1585}
1da177e4
LT
1586
1587/* ---- Inquiry support ---- */
ff9ef578 1588
30dc78e1
JH
1589bool hci_discovery_active(struct hci_dev *hdev)
1590{
1591 struct discovery_state *discov = &hdev->discovery;
1592
6fbe195d 1593 switch (discov->state) {
343f935b 1594 case DISCOVERY_FINDING:
6fbe195d 1595 case DISCOVERY_RESOLVING:
30dc78e1
JH
1596 return true;
1597
6fbe195d
AG
1598 default:
1599 return false;
1600 }
30dc78e1
JH
1601}
1602
ff9ef578
JH
/* Transition the discovery state machine to @state and emit the mgmt
 * "discovering" events on the transitions user space cares about.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* No-op if we are already in the requested state */
	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Only report "stopped" if discovery actually ran; a
		 * STARTING -> STOPPED transition means it never started.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
1628
1f9b9a5d 1629void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1630{
30883512 1631 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1632 struct inquiry_entry *p, *n;
1da177e4 1633
561aafbc
JH
1634 list_for_each_entry_safe(p, n, &cache->all, all) {
1635 list_del(&p->all);
b57c1a56 1636 kfree(p);
1da177e4 1637 }
561aafbc
JH
1638
1639 INIT_LIST_HEAD(&cache->unknown);
1640 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1641}
1642
a8c5fb1a
GP
1643struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1644 bdaddr_t *bdaddr)
1da177e4 1645{
30883512 1646 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1647 struct inquiry_entry *e;
1648
6ed93dc6 1649 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1650
561aafbc
JH
1651 list_for_each_entry(e, &cache->all, all) {
1652 if (!bacmp(&e->data.bdaddr, bdaddr))
1653 return e;
1654 }
1655
1656 return NULL;
1657}
1658
1659struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1660 bdaddr_t *bdaddr)
561aafbc 1661{
30883512 1662 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1663 struct inquiry_entry *e;
1664
6ed93dc6 1665 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1666
1667 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1668 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1669 return e;
1670 }
1671
1672 return NULL;
1da177e4
LT
1673}
1674
30dc78e1 1675struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1676 bdaddr_t *bdaddr,
1677 int state)
30dc78e1
JH
1678{
1679 struct discovery_state *cache = &hdev->discovery;
1680 struct inquiry_entry *e;
1681
6ed93dc6 1682 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1683
1684 list_for_each_entry(e, &cache->resolve, list) {
1685 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1686 return e;
1687 if (!bacmp(&e->data.bdaddr, bdaddr))
1688 return e;
1689 }
1690
1691 return NULL;
1692}
1693
/* Re-position @ie within the resolve list after its RSSI changed, so the
 * list stays ordered by signal strength (smaller absolute RSSI, i.e.
 * stronger signal, earlier). Entries already in NAME_PENDING state are
 * skipped over and thus keep their place at the front.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Remove first, then find the new insertion point */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
1712
/* Add a new inquiry result to the cache, or refresh the existing entry
 * for @data->bdaddr. @name_known tells whether the remote name came with
 * the result; *@ssp (if non-NULL) is set to the remote's SSP mode.
 *
 * Returns true when the entry's remote name is (now) known, false when
 * name resolution is still outstanding or the entry could not be
 * allocated.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry result invalidates stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Keep reporting SSP if the cached entry saw it before */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed while waiting for name resolution: keep
		 * the resolve list ordered by signal strength.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from the unknown list */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
1770
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info. Returns the number of entries copied. Caller
 * must hold the hdev lock; this function must not sleep.
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
1798
/* Request callback: queue an Inquiry command built from the
 * hci_inquiry_req passed via @opt, unless an inquiry is already
 * in progress.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	/* Nothing to do while an inquiry is already running */
	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
1816
3e13fa1e
AG
/* Bit-wait action for wait_on_bit(): yield the CPU and report whether
 * a signal interrupted the wait (non-zero aborts the wait).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
1822
1da177e4
LT
1823int hci_inquiry(void __user *arg)
1824{
1825 __u8 __user *ptr = arg;
1826 struct hci_inquiry_req ir;
1827 struct hci_dev *hdev;
1828 int err = 0, do_inquiry = 0, max_rsp;
1829 long timeo;
1830 __u8 *buf;
1831
1832 if (copy_from_user(&ir, ptr, sizeof(ir)))
1833 return -EFAULT;
1834
5a08ecce
AE
1835 hdev = hci_dev_get(ir.dev_id);
1836 if (!hdev)
1da177e4
LT
1837 return -ENODEV;
1838
0736cfa8
MH
1839 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1840 err = -EBUSY;
1841 goto done;
1842 }
1843
5b69bef5
MH
1844 if (hdev->dev_type != HCI_BREDR) {
1845 err = -EOPNOTSUPP;
1846 goto done;
1847 }
1848
56f87901
JH
1849 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1850 err = -EOPNOTSUPP;
1851 goto done;
1852 }
1853
09fd0de5 1854 hci_dev_lock(hdev);
8e87d142 1855 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1856 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1857 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1858 do_inquiry = 1;
1859 }
09fd0de5 1860 hci_dev_unlock(hdev);
1da177e4 1861
04837f64 1862 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1863
1864 if (do_inquiry) {
01178cd4
JH
1865 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1866 timeo);
70f23020
AE
1867 if (err < 0)
1868 goto done;
3e13fa1e
AG
1869
1870 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1871 * cleared). If it is interrupted by a signal, return -EINTR.
1872 */
1873 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1874 TASK_INTERRUPTIBLE))
1875 return -EINTR;
70f23020 1876 }
1da177e4 1877
8fc9ced3
GP
1878 /* for unlimited number of responses we will use buffer with
1879 * 255 entries
1880 */
1da177e4
LT
1881 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1882
1883 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1884 * copy it to the user space.
1885 */
01df8c31 1886 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 1887 if (!buf) {
1da177e4
LT
1888 err = -ENOMEM;
1889 goto done;
1890 }
1891
09fd0de5 1892 hci_dev_lock(hdev);
1da177e4 1893 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1894 hci_dev_unlock(hdev);
1da177e4
LT
1895
1896 BT_DBG("num_rsp %d", ir.num_rsp);
1897
1898 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1899 ptr += sizeof(ir);
1900 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1901 ir.num_rsp))
1da177e4 1902 err = -EFAULT;
8e87d142 1903 } else
1da177e4
LT
1904 err = -EFAULT;
1905
1906 kfree(buf);
1907
1908done:
1909 hci_dev_put(hdev);
1910 return err;
1911}
1912
/* Bring the controller up: validate state (not unregistered, not
 * rfkilled, has a usable address), call the driver's open/setup hooks,
 * run the HCI init sequence and announce the device. On init failure
 * all queues and works are torn down again. Returns 0 or a negative
 * errno.
 */
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * In case of user channel usage, it is not important
		 * if a public address or static random address is
		 * available.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup only runs during the initial setup phase */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw devices and user-channel devices skip the HCI
		 * init sequence entirely.
		 */
		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}
2019
cbed0ca1
JH
2020/* ---- HCI ioctl helpers ---- */
2021
/* Power on the controller with index @dev on behalf of an ioctl or
 * mgmt request. Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}
2051
1da177e4
LT
/* Take the controller down: cancel pending work, flush queues and
 * caches, optionally reset the controller, close the driver and clear
 * all volatile state. Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Nothing more to do if the device was not up */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Stop a running discoverable timeout and clear the flags */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Report power-off to mgmt unless auto-off already did */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
2149
/* Handle the HCIDEVDOWN ioctl: power down the given controller.
 *
 * Refuses with -EBUSY while a user channel owns the device. Clearing
 * HCI_AUTO_OFF and cancelling the pending power_off work prevents the
 * delayed auto-power-off from racing with this explicit close.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* A user-channel owner has exclusive control of the device */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}
2173
/* Handle the HCIDEVRESET ioctl: flush queues and connection state and,
 * unless the device is in raw mode, issue an HCI Reset to an already-up
 * controller. Returns -ENETDOWN if the device is not up and -EBUSY if a
 * user channel owns it.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	/* A user-channel owner has exclusive control of the device */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control counters to their post-init defaults */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2218
2219int hci_dev_reset_stat(__u16 dev)
2220{
2221 struct hci_dev *hdev;
2222 int ret = 0;
2223
70f23020
AE
2224 hdev = hci_dev_get(dev);
2225 if (!hdev)
1da177e4
LT
2226 return -ENODEV;
2227
0736cfa8
MH
2228 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2229 ret = -EBUSY;
2230 goto done;
2231 }
2232
1da177e4
LT
2233 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
2234
0736cfa8 2235done:
1da177e4 2236 hci_dev_put(hdev);
1da177e4
LT
2237 return ret;
2238}
2239
/* Handle the legacy HCISET* ioctls. Copies the request from user space,
 * validates that the controller is a BR/EDR one with BR/EDR enabled and
 * not claimed by a user channel, then applies the requested setting.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A user-channel owner has exclusive control of the device */
	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	/* These settings only apply to BR/EDR controllers */
	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 values: index 1 = MTU,
		 * index 0 = packet count.
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2330
/* Handle the HCIGETDEVLIST ioctl: copy the id and flags of up to
 * dev_num registered controllers to user space.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation so the size computation cannot overflow */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Enumerating devices counts as activity: stop the
		 * pending delayed auto-power-off.
		 */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Devices not managed via mgmt default to pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2377
/* Handle the HCIGETDEVINFO ioctl: copy one controller's information and
 * statistics to user space.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as activity: stop the pending
	 * delayed auto-power-off.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Devices not managed via mgmt default to pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* type encodes the bus in the low nibble and dev_type above it */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controller: report LE buffer info in the ACL
		 * fields; SCO does not apply.
		 */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2426
2427/* ---- Interface to HCI drivers ---- */
2428
611b30f7
MH
/* rfkill callback: soft-block or unblock the controller.
 *
 * Returns -EBUSY when a user channel owns the device. While HCI_SETUP
 * is set the device is not closed here; hci_power_on() re-checks the
 * HCI_RFKILLED flag once setup has finished.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}
2448
/* rfkill operations: only block/unblock handling is needed */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2452
ab81cbf9
JH
/* Work callback that powers the controller on and finalizes conditions
 * that were deferred during the setup phase.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		/* Power back off if nothing claims the device in time */
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2484
/* Delayed-work callback that closes the controller (e.g. when the
 * auto-power-off timeout queued by hci_power_on() fires).
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2494
16ab91ab
JH
2495static void hci_discov_off(struct work_struct *work)
2496{
2497 struct hci_dev *hdev;
16ab91ab
JH
2498
2499 hdev = container_of(work, struct hci_dev, discov_off.work);
2500
2501 BT_DBG("%s", hdev->name);
2502
d1967ff8 2503 mgmt_discoverable_timeout(hdev);
16ab91ab
JH
2504}
2505
2aeb9a1a
JH
2506int hci_uuids_clear(struct hci_dev *hdev)
2507{
4821002c 2508 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2509
4821002c
JH
2510 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2511 list_del(&uuid->list);
2aeb9a1a
JH
2512 kfree(uuid);
2513 }
2514
2515 return 0;
2516}
2517
55ed8ca1
JH
2518int hci_link_keys_clear(struct hci_dev *hdev)
2519{
2520 struct list_head *p, *n;
2521
2522 list_for_each_safe(p, n, &hdev->link_keys) {
2523 struct link_key *key;
2524
2525 key = list_entry(p, struct link_key, list);
2526
2527 list_del(p);
2528 kfree(key);
2529 }
2530
2531 return 0;
2532}
2533
b899efaf
VCG
2534int hci_smp_ltks_clear(struct hci_dev *hdev)
2535{
2536 struct smp_ltk *k, *tmp;
2537
2538 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2539 list_del(&k->list);
2540 kfree(k);
2541 }
2542
2543 return 0;
2544}
2545
55ed8ca1
JH
2546struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2547{
8035ded4 2548 struct link_key *k;
55ed8ca1 2549
8035ded4 2550 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2551 if (bacmp(bdaddr, &k->bdaddr) == 0)
2552 return k;
55ed8ca1
JH
2553
2554 return NULL;
2555}
2556
745c0ce3 2557static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2558 u8 key_type, u8 old_key_type)
d25e28ab
JH
2559{
2560 /* Legacy key */
2561 if (key_type < 0x03)
745c0ce3 2562 return true;
d25e28ab
JH
2563
2564 /* Debug keys are insecure so don't store them persistently */
2565 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2566 return false;
d25e28ab
JH
2567
2568 /* Changed combination key and there's no previous one */
2569 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2570 return false;
d25e28ab
JH
2571
2572 /* Security mode 3 case */
2573 if (!conn)
745c0ce3 2574 return true;
d25e28ab
JH
2575
2576 /* Neither local nor remote side had no-bonding as requirement */
2577 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2578 return true;
d25e28ab
JH
2579
2580 /* Local side had dedicated bonding as requirement */
2581 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2582 return true;
d25e28ab
JH
2583
2584 /* Remote side had dedicated bonding as requirement */
2585 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2586 return true;
d25e28ab
JH
2587
2588 /* If none of the above criteria match, then don't store the key
2589 * persistently */
745c0ce3 2590 return false;
d25e28ab
JH
2591}
2592
98a0b845
JH
2593static bool ltk_type_master(u8 type)
2594{
2595 if (type == HCI_SMP_STK || type == HCI_SMP_LTK)
2596 return true;
2597
2598 return false;
2599}
2600
2601struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8],
2602 bool master)
75d262c2 2603{
c9839a11 2604 struct smp_ltk *k;
75d262c2 2605
c9839a11
VCG
2606 list_for_each_entry(k, &hdev->long_term_keys, list) {
2607 if (k->ediv != ediv ||
a8c5fb1a 2608 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
2609 continue;
2610
98a0b845
JH
2611 if (ltk_type_master(k->type) != master)
2612 continue;
2613
c9839a11 2614 return k;
75d262c2
VCG
2615 }
2616
2617 return NULL;
2618}
75d262c2 2619
c9839a11 2620struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
98a0b845 2621 u8 addr_type, bool master)
75d262c2 2622{
c9839a11 2623 struct smp_ltk *k;
75d262c2 2624
c9839a11
VCG
2625 list_for_each_entry(k, &hdev->long_term_keys, list)
2626 if (addr_type == k->bdaddr_type &&
98a0b845
JH
2627 bacmp(bdaddr, &k->bdaddr) == 0 &&
2628 ltk_type_master(k->type) == master)
75d262c2
VCG
2629 return k;
2630
2631 return NULL;
2632}
75d262c2 2633
/* Store (or update in place) a BR/EDR link key for bdaddr. When
 * new_key is set, mgmt is notified and hci_persistent_key() decides
 * whether the key survives beyond the connection.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed" key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
2686
/* Store (or update in place) an SMP long term key. When new_key is
 * set, mgmt is notified for LTK types (STKs are not reported).
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;
	bool master = ltk_type_master(type);
	u8 persistent;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type, master);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Random addresses whose top two bits are not 0b11 (i.e. not
	 * static random) are reported as non-persistent.
	 */
	if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0)
		persistent = 0;
	else
		persistent = 1;

	if (type == HCI_SMP_LTK || type == HCI_SMP_LTK_SLAVE)
		mgmt_new_ltk(hdev, key, persistent);

	return 0;
}
2727
55ed8ca1
JH
2728int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2729{
2730 struct link_key *key;
2731
2732 key = hci_find_link_key(hdev, bdaddr);
2733 if (!key)
2734 return -ENOENT;
2735
6ed93dc6 2736 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
2737
2738 list_del(&key->list);
2739 kfree(key);
2740
2741 return 0;
2742}
2743
b899efaf
VCG
2744int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2745{
2746 struct smp_ltk *k, *tmp;
2747
2748 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2749 if (bacmp(bdaddr, &k->bdaddr))
2750 continue;
2751
6ed93dc6 2752 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
2753
2754 list_del(&k->list);
2755 kfree(k);
2756 }
2757
2758 return 0;
2759}
2760
/* HCI command timer function: fires when the controller did not answer
 * a command in time. Logs the stuck opcode (if one is in flight) and
 * releases the command credit so the next queued command can be sent.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
2778
2763eda6 2779struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 2780 bdaddr_t *bdaddr)
2763eda6
SJ
2781{
2782 struct oob_data *data;
2783
2784 list_for_each_entry(data, &hdev->remote_oob_data, list)
2785 if (bacmp(bdaddr, &data->bdaddr) == 0)
2786 return data;
2787
2788 return NULL;
2789}
2790
2791int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2792{
2793 struct oob_data *data;
2794
2795 data = hci_find_remote_oob_data(hdev, bdaddr);
2796 if (!data)
2797 return -ENOENT;
2798
6ed93dc6 2799 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
2800
2801 list_del(&data->list);
2802 kfree(data);
2803
2804 return 0;
2805}
2806
2807int hci_remote_oob_data_clear(struct hci_dev *hdev)
2808{
2809 struct oob_data *data, *n;
2810
2811 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2812 list_del(&data->list);
2813 kfree(data);
2814 }
2815
2816 return 0;
2817}
2818
0798872e
MH
/* Store P-192 OOB pairing data for bdaddr, creating an entry if needed.
 * Any P-256 values previously stored for the address are zeroed.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
			    u8 *hash, u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer, sizeof(data->randomizer192));

	memset(data->hash256, 0, sizeof(data->hash256));
	memset(data->randomizer256, 0, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2844
/* Store extended OOB pairing data (both P-192 and P-256 values) for
 * bdaddr, creating an entry if needed.
 */
int hci_add_remote_oob_ext_data(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 *hash192, u8 *randomizer192,
				u8 *hash256, u8 *randomizer256)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash192, hash192, sizeof(data->hash192));
	memcpy(data->randomizer192, randomizer192, sizeof(data->randomizer192));

	memcpy(data->hash256, hash256, sizeof(data->hash256));
	memcpy(data->randomizer256, randomizer256, sizeof(data->randomizer256));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2871
b9ee0a78
MH
2872struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
2873 bdaddr_t *bdaddr, u8 type)
b2a66aad 2874{
8035ded4 2875 struct bdaddr_list *b;
b2a66aad 2876
b9ee0a78
MH
2877 list_for_each_entry(b, &hdev->blacklist, list) {
2878 if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
b2a66aad 2879 return b;
b9ee0a78 2880 }
b2a66aad
AJ
2881
2882 return NULL;
2883}
2884
2885int hci_blacklist_clear(struct hci_dev *hdev)
2886{
2887 struct list_head *p, *n;
2888
2889 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 2890 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
2891
2892 list_del(p);
2893 kfree(b);
2894 }
2895
2896 return 0;
2897}
2898
/* Add bdaddr/type to the blacklist and notify mgmt. BDADDR_ANY is
 * rejected with -EBADF; an existing entry returns -EEXIST.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
2920
/* Remove bdaddr/type from the blacklist and notify mgmt. Passing
 * BDADDR_ANY clears the whole list instead.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
2937
15819a70
AG
2938/* This function requires the caller holds hdev->lock */
2939struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev,
2940 bdaddr_t *addr, u8 addr_type)
2941{
2942 struct hci_conn_params *params;
2943
2944 list_for_each_entry(params, &hdev->le_conn_params, list) {
2945 if (bacmp(&params->addr, addr) == 0 &&
2946 params->addr_type == addr_type) {
2947 return params;
2948 }
2949 }
2950
2951 return NULL;
2952}
2953
/* This function requires the caller holds hdev->lock.
 *
 * Store preferred LE connection intervals for addr/addr_type, updating
 * an existing entry in place if one exists. Allocation failure is
 * logged and silently dropped (no return value).
 */
void hci_conn_params_add(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type,
			 u16 conn_min_interval, u16 conn_max_interval)
{
	struct hci_conn_params *params;

	params = hci_conn_params_lookup(hdev, addr, addr_type);
	if (params) {
		params->conn_min_interval = conn_min_interval;
		params->conn_max_interval = conn_max_interval;
		return;
	}

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params) {
		BT_ERR("Out of memory");
		return;
	}

	bacpy(&params->addr, addr);
	params->addr_type = addr_type;
	params->conn_min_interval = conn_min_interval;
	params->conn_max_interval = conn_max_interval;

	list_add(&params->list, &hdev->le_conn_params);

	BT_DBG("addr %pMR (type %u) conn_min_interval 0x%.4x "
	       "conn_max_interval 0x%.4x", addr, addr_type, conn_min_interval,
	       conn_max_interval);
}
2984
2985/* This function requires the caller holds hdev->lock */
2986void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type)
2987{
2988 struct hci_conn_params *params;
2989
2990 params = hci_conn_params_lookup(hdev, addr, addr_type);
2991 if (!params)
2992 return;
2993
2994 list_del(&params->list);
2995 kfree(params);
2996
2997 BT_DBG("addr %pMR (type %u)", addr, addr_type);
2998}
2999
3000/* This function requires the caller holds hdev->lock */
3001void hci_conn_params_clear(struct hci_dev *hdev)
3002{
3003 struct hci_conn_params *params, *tmp;
3004
3005 list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) {
3006 list_del(&params->list);
3007 kfree(params);
3008 }
3009
3010 BT_DBG("All LE connection parameters were removed");
3011}
3012
/* Request-complete callback for the inquiry chained after interleaved
 * LE discovery; on failure, discovery state is reset to stopped.
 */
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}
3024
/* Request-complete callback for the LE scan disable request. For
 * LE-only discovery this ends the discovery session; for interleaved
 * discovery it chains into a classic inquiry.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Start the BR/EDR phase with a clean inquiry cache */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
3067
7ba8b4be
AG
3068static void le_scan_disable_work(struct work_struct *work)
3069{
3070 struct hci_dev *hdev = container_of(work, struct hci_dev,
04124681 3071 le_scan_disable.work);
7ba8b4be 3072 struct hci_cp_le_set_scan_enable cp;
4c87eaab
AG
3073 struct hci_request req;
3074 int err;
7ba8b4be
AG
3075
3076 BT_DBG("%s", hdev->name);
3077
4c87eaab 3078 hci_req_init(&req, hdev);
28b75a89 3079
7ba8b4be 3080 memset(&cp, 0, sizeof(cp));
4c87eaab
AG
3081 cp.enable = LE_SCAN_DISABLE;
3082 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
28b75a89 3083
4c87eaab
AG
3084 err = hci_req_run(&req, le_scan_disable_work_complete);
3085 if (err)
3086 BT_ERR("Disable LE scanning request failed: err %d", err);
28b75a89
AG
3087}
3088
9be0dab7
DH
/* Alloc HCI device.
 *
 * Allocates and initializes a new hci_dev with default settings, lists,
 * work items and the command timer. Returns NULL on allocation failure.
 * Pair with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->le_conn_params);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
3149
/* Free HCI device.
 *
 * Only drops the embedded device reference; the actual memory is freed
 * via the device release callback.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
3157
1da177e4
LT
/* Register HCI device.
 *
 * Assigns an index, creates the work queues, sysfs/debugfs entries and
 * rfkill switch, adds the device to the global list and queues the
 * initial power-on. Returns the new index, or a negative error.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort: the device works without */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3255
3256/* Unregister HCI device */
/* Tear down a previously registered HCI device.
 *
 * The teardown order matters: the device is first marked as
 * unregistering and removed from the global list, then closed, and
 * only afterwards are per-device resources (reassembly buffers,
 * workqueues, sysfs/debugfs entries, key/UUID stores) released.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Block further mgmt/ioctl activity on this device */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only notify mgmt if the device finished its setup phase */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Purge all persistent per-device state under the dev lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_conn_params_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	/* Release the index last, after the final reference is dropped */
	ida_simple_remove(&hci_index_ida, id);
}
3316EXPORT_SYMBOL(hci_unregister_dev);
3317
3318/* Suspend HCI device */
3319int hci_suspend_dev(struct hci_dev *hdev)
3320{
3321 hci_notify(hdev, HCI_DEV_SUSPEND);
3322 return 0;
3323}
3324EXPORT_SYMBOL(hci_suspend_dev);
3325
3326/* Resume HCI device */
3327int hci_resume_dev(struct hci_dev *hdev)
3328{
3329 hci_notify(hdev, HCI_DEV_RESUME);
3330 return 0;
3331}
3332EXPORT_SYMBOL(hci_resume_dev);
3333
76bca880 3334/* Receive frame from HCI drivers */
e1a26170 3335int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
76bca880 3336{
76bca880 3337 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
a8c5fb1a 3338 && !test_bit(HCI_INIT, &hdev->flags))) {
76bca880
MH
3339 kfree_skb(skb);
3340 return -ENXIO;
3341 }
3342
d82603c6 3343 /* Incoming skb */
76bca880
MH
3344 bt_cb(skb)->incoming = 1;
3345
3346 /* Time stamp */
3347 __net_timestamp(skb);
3348
76bca880 3349 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 3350 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 3351
76bca880
MH
3352 return 0;
3353}
3354EXPORT_SYMBOL(hci_recv_frame);
3355
/* Incrementally reassemble one HCI packet of the given type from a
 * byte stream.
 *
 * @hdev:  device owning the reassembly slots
 * @type:  HCI packet type (ACL, SCO or event)
 * @data:  raw bytes to consume
 * @count: number of bytes available in @data
 * @index: reassembly slot to use (must be < NUM_REASSEMBLY)
 *
 * Returns the number of unconsumed bytes (>= 0), or a negative errno
 * on invalid type/index or allocation failure. When a full packet has
 * been assembled it is handed to hci_recv_frame() and the slot is
 * cleared.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Starting a new packet: size the buffer for the worst
		 * case of this packet type and expect the header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than what the current header/payload
		 * still expects.
		 */
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * from it and bail out if it cannot fit the buffer.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
3463
ef222013
MH
3464int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3465{
f39a3c06
SS
3466 int rem = 0;
3467
ef222013
MH
3468 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3469 return -EILSEQ;
3470
da5f6c37 3471 while (count) {
1e429f38 3472 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
3473 if (rem < 0)
3474 return rem;
ef222013 3475
f39a3c06
SS
3476 data += (count - rem);
3477 count = rem;
f81c6224 3478 }
ef222013 3479
f39a3c06 3480 return rem;
ef222013
MH
3481}
3482EXPORT_SYMBOL(hci_recv_fragment);
3483
99811510
SS
3484#define STREAM_REASSEMBLY 0
3485
/* Feed an untyped byte stream (e.g. from a UART driver) into the
 * reassembler. The first byte of each frame carries the packet type;
 * subsequent bytes are handed to hci_reassembly() using the dedicated
 * stream slot. Returns remaining byte count or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Move past consumed bytes and continue with the rest */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
3517EXPORT_SYMBOL(hci_recv_stream_fragment);
3518
1da177e4
LT
3519/* ---- Interface to upper protocols ---- */
3520
1da177e4
LT
3521int hci_register_cb(struct hci_cb *cb)
3522{
3523 BT_DBG("%p name %s", cb, cb->name);
3524
f20d09d5 3525 write_lock(&hci_cb_list_lock);
1da177e4 3526 list_add(&cb->list, &hci_cb_list);
f20d09d5 3527 write_unlock(&hci_cb_list_lock);
1da177e4
LT
3528
3529 return 0;
3530}
3531EXPORT_SYMBOL(hci_register_cb);
3532
3533int hci_unregister_cb(struct hci_cb *cb)
3534{
3535 BT_DBG("%p name %s", cb, cb->name);
3536
f20d09d5 3537 write_lock(&hci_cb_list_lock);
1da177e4 3538 list_del(&cb->list);
f20d09d5 3539 write_unlock(&hci_cb_list_lock);
1da177e4
LT
3540
3541 return 0;
3542}
3543EXPORT_SYMBOL(hci_unregister_cb);
3544
/* Push one outgoing frame to the driver, after duplicating it for the
 * monitor interface and (when in promiscuous mode) the raw sockets.
 * The skb is consumed by the driver; failures are only logged.
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
3566
3119ae95
JH
3567void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3568{
3569 skb_queue_head_init(&req->cmd_q);
3570 req->hdev = hdev;
5d73e034 3571 req->err = 0;
3119ae95
JH
3572}
3573
/* Submit a built request: splice all queued commands onto the device
 * command queue and kick the command worker.
 *
 * @complete is attached to the last command and invoked when the whole
 * request finishes. Returns 0 on success, the builder's error if one
 * occurred, or -ENODATA for an empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occured during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the final command */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice atomically with respect to the cmd_q users */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3605
1ca3a9d0 3606static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 3607 u32 plen, const void *param)
1da177e4
LT
3608{
3609 int len = HCI_COMMAND_HDR_SIZE + plen;
3610 struct hci_command_hdr *hdr;
3611 struct sk_buff *skb;
3612
1da177e4 3613 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
3614 if (!skb)
3615 return NULL;
1da177e4
LT
3616
3617 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 3618 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
3619 hdr->plen = plen;
3620
3621 if (plen)
3622 memcpy(skb_put(skb, plen), param, plen);
3623
3624 BT_DBG("skb len %d", skb->len);
3625
0d48d939 3626 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 3627
1ca3a9d0
JH
3628 return skb;
3629}
3630
3631/* Send HCI command */
07dc93dd
JH
3632int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
3633 const void *param)
1ca3a9d0
JH
3634{
3635 struct sk_buff *skb;
3636
3637 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
3638
3639 skb = hci_prepare_cmd(hdev, opcode, plen, param);
3640 if (!skb) {
3641 BT_ERR("%s no memory for command", hdev->name);
3642 return -ENOMEM;
3643 }
3644
11714b3d
JH
3645 /* Stand-alone HCI commands must be flaged as
3646 * single-command requests.
3647 */
3648 bt_cb(skb)->req.start = true;
3649
1da177e4 3650 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 3651 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
3652
3653 return 0;
3654}
1da177e4 3655
71c76a17 3656/* Queue a command to an asynchronous HCI request */
07dc93dd
JH
/* Queue a command onto an asynchronous HCI request, optionally noting
 * the event expected to complete it. Failures are latched in req->err
 * and reported later by hci_req_run().
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occured during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first queued command marks the start of the request */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
3686
07dc93dd
JH
3687void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
3688 const void *param)
02350a72
JH
3689{
3690 hci_req_add_ev(req, opcode, plen, param, 0);
3691}
3692
1da177e4 3693/* Get data from the previously sent command */
a9de9248 3694void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3695{
3696 struct hci_command_hdr *hdr;
3697
3698 if (!hdev->sent_cmd)
3699 return NULL;
3700
3701 hdr = (void *) hdev->sent_cmd->data;
3702
a9de9248 3703 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3704 return NULL;
3705
f0e09510 3706 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3707
3708 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3709}
3710
3711/* Send ACL data */
3712static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
3713{
3714 struct hci_acl_hdr *hdr;
3715 int len = skb->len;
3716
badff6d0
ACM
3717 skb_push(skb, HCI_ACL_HDR_SIZE);
3718 skb_reset_transport_header(skb);
9c70220b 3719 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
3720 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
3721 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
3722}
3723
/* Queue an outgoing ACL skb (possibly carrying a frag_list) onto the
 * channel's data queue, adding ACL headers to every fragment. The
 * handle used depends on the controller type (connection handle for
 * BR/EDR, channel handle for AMP).
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the skb to its linear head; fragments are handled
	 * individually via the frag_list below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
3781
3782void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
3783{
ee22be7e 3784 struct hci_dev *hdev = chan->conn->hdev;
73d80deb 3785
f0e09510 3786 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
73d80deb 3787
ee22be7e 3788 hci_queue_acl(chan, &chan->data_q, skb, flags);
1da177e4 3789
3eff45ea 3790 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3791}
1da177e4
LT
3792
3793/* Send SCO data */
0d861d8b 3794void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
3795{
3796 struct hci_dev *hdev = conn->hdev;
3797 struct hci_sco_hdr hdr;
3798
3799 BT_DBG("%s len %d", hdev->name, skb->len);
3800
aca3192c 3801 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
3802 hdr.dlen = skb->len;
3803
badff6d0
ACM
3804 skb_push(skb, HCI_SCO_HDR_SIZE);
3805 skb_reset_transport_header(skb);
9c70220b 3806 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4 3807
0d48d939 3808 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 3809
1da177e4 3810 skb_queue_tail(&conn->data_q, skb);
3eff45ea 3811 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4 3812}
1da177e4
LT
3813
3814/* ---- HCI TX task (outgoing data) ---- */
3815
3816/* HCI Connection scheduler */
6039aa73
GP
/* Pick the connection of the given link type with the least
 * outstanding data and compute its fair TX quota.
 *
 * Returns the chosen connection (or NULL) and stores the quota in
 * *quote. The quota is the available controller buffer count divided
 * evenly among eligible connections, with a minimum of one.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-recently-served connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type have been seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Available controller buffers for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3876
/* TX timeout handler: forcibly disconnect every connection of the
 * given link type that still has unacknowledged packets outstanding.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
3897
6039aa73
GP
/* Select the best channel to service for the given link type.
 *
 * Among all connected links of @type, find the channels whose head skb
 * has the highest priority, and of those pick the one on the
 * least-served connection. The fair quota (buffers divided by the
 * number of contenders, minimum one) is returned through *quote.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts the
			 * fairness bookkeeping at that level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Available controller buffers for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3979
02b20f0b
LAD
/* Anti-starvation pass after a scheduling round: promote the head skb
 * of every idle channel (one that sent nothing this round) to just
 * below the maximum priority, and reset per-round sent counters.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channels that got service this round only
			 * have their counter reset.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
4029
b71d385a
AE
4030static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
4031{
4032 /* Calculate count of blocks used by this packet */
4033 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
4034}
4035
6039aa73 4036static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 4037{
1da177e4
LT
4038 if (!test_bit(HCI_RAW, &hdev->flags)) {
4039 /* ACL tx timeout must be longer than maximum
4040 * link supervision timeout (40.9 seconds) */
63d2bc1b 4041 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 4042 HCI_ACL_TX_TIMEOUT))
bae1f5d9 4043 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 4044 }
63d2bc1b 4045}
1da177e4 4046
/* Packet-based ACL scheduler: hand out each channel's fair quota of
 * controller buffers, respecting skb priority, then rebalance
 * priorities if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was transmitted: promote starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
4084
/* Block-based ACL scheduler (AMP-style flow control): like
 * hci_sched_acl_pkt() but quotas and accounting are in buffer blocks
 * rather than whole packets.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry AMP links; BR/EDR carry ACL links */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* A packet larger than the remaining block
			 * budget cannot be sent this round.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
4138
6039aa73 4139static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
4140{
4141 BT_DBG("%s", hdev->name);
4142
bd1eb66b
AE
4143 /* No ACL link over BR/EDR controller */
4144 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
4145 return;
4146
4147 /* No AMP link over AMP controller */
4148 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
4149 return;
4150
4151 switch (hdev->flow_ctl_mode) {
4152 case HCI_FLOW_CTL_MODE_PACKET_BASED:
4153 hci_sched_acl_pkt(hdev);
4154 break;
4155
4156 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
4157 hci_sched_acl_blk(hdev);
4158 break;
4159 }
4160}
4161
1da177e4 4162/* Schedule SCO */
/* Schedule SCO: round-robin the SCO connections, sending each one its
 * fair quota of queued frames. SCO buffer counts are not decremented
 * here; per-connection sent counters simply wrap.
 */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4185
/* Schedule eSCO traffic; identical to hci_sched_sco() but iterates
 * connections of type ESCO_LINK (they share the SCO buffer pool).
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4209
/* Schedule LE traffic with priority-aware fair queueing. Controllers
 * without a dedicated LE buffer pool share the ACL buffers, so the
 * consumed count is written back to whichever pool was used.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Fall back to the ACL buffer pool on shared-buffer controllers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the remaining budget to the pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4260
/* TX worker: run every per-link-type scheduler (unless the device is
 * in user-channel mode) and then flush raw/unknown-type packets
 * straight to the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4281
25985edc 4282/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4283
4284/* ACL data packet */
/* ACL data packet: strip the ACL header, look up the connection by
 * handle and pass the payload to L2CAP. Unknown handles are logged and
 * the skb is dropped.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs both handle and boundary/broadcast flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
4319
4320/* SCO data packet */
/* SCO data packet: strip the SCO header, look up the connection by
 * handle and pass the payload to the SCO layer. Unknown handles are
 * logged and the skb is dropped.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
4350
9238f36a
JH
4351static bool hci_req_is_complete(struct hci_dev *hdev)
4352{
4353 struct sk_buff *skb;
4354
4355 skb = skb_peek(&hdev->cmd_q);
4356 if (!skb)
4357 return true;
4358
4359 return bt_cb(skb)->req.start;
4360}
4361
42c6b129
JH
4362static void hci_resend_last(struct hci_dev *hdev)
4363{
4364 struct hci_command_hdr *sent;
4365 struct sk_buff *skb;
4366 u16 opcode;
4367
4368 if (!hdev->sent_cmd)
4369 return;
4370
4371 sent = (void *) hdev->sent_cmd->data;
4372 opcode = __le16_to_cpu(sent->opcode);
4373 if (opcode == HCI_OP_RESET)
4374 return;
4375
4376 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
4377 if (!skb)
4378 return;
4379
4380 skb_queue_head(&hdev->cmd_q, skb);
4381 queue_work(hdev->workqueue, &hdev->cmd_work);
4382}
4383
/* Called on completion of an HCI command (opcode/status) to decide
 * whether the HCI request it belongs to is finished, and if so to run
 * the request's completion callback exactly once and flush any
 * remaining commands of that request from the command queue.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request.
	 * Dequeue until we hit a command marked as the start of the
	 * next request (which is pushed back), picking up the complete
	 * callback from the last command freed along the way.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
4449
b78752cc 4450static void hci_rx_work(struct work_struct *work)
1da177e4 4451{
b78752cc 4452 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
4453 struct sk_buff *skb;
4454
4455 BT_DBG("%s", hdev->name);
4456
1da177e4 4457 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
4458 /* Send copy to monitor */
4459 hci_send_to_monitor(hdev, skb);
4460
1da177e4
LT
4461 if (atomic_read(&hdev->promisc)) {
4462 /* Send copy to the sockets */
470fe1b5 4463 hci_send_to_sock(hdev, skb);
1da177e4
LT
4464 }
4465
0736cfa8
MH
4466 if (test_bit(HCI_RAW, &hdev->flags) ||
4467 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1da177e4
LT
4468 kfree_skb(skb);
4469 continue;
4470 }
4471
4472 if (test_bit(HCI_INIT, &hdev->flags)) {
4473 /* Don't process data packets in this states. */
0d48d939 4474 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
4475 case HCI_ACLDATA_PKT:
4476 case HCI_SCODATA_PKT:
4477 kfree_skb(skb);
4478 continue;
3ff50b79 4479 }
1da177e4
LT
4480 }
4481
4482 /* Process frame */
0d48d939 4483 switch (bt_cb(skb)->pkt_type) {
1da177e4 4484 case HCI_EVENT_PKT:
b78752cc 4485 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
4486 hci_event_packet(hdev, skb);
4487 break;
4488
4489 case HCI_ACLDATA_PKT:
4490 BT_DBG("%s ACL data packet", hdev->name);
4491 hci_acldata_packet(hdev, skb);
4492 break;
4493
4494 case HCI_SCODATA_PKT:
4495 BT_DBG("%s SCO data packet", hdev->name);
4496 hci_scodata_packet(hdev, skb);
4497 break;
4498
4499 default:
4500 kfree_skb(skb);
4501 break;
4502 }
4503 }
1da177e4
LT
4504}
4505
/* Command work handler: send the next queued HCI command when the
 * controller's command credit count allows it, keeping a clone in
 * hdev->sent_cmd so the completion event can be matched (and the
 * command resent if needed).
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command's clone before
		 * replacing it (kfree_skb(NULL) is a no-op).
		 */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During reset no timeout is enforced; otherwise
			 * (re)arm the command timeout watchdog.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed (OOM): put the command back and
			 * reschedule ourselves to retry later.
			 */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
This page took 1.047438 seconds and 5 git commands to generate.