Bluetooth: Add management command for enabling Secure Connections
[deliverable/linux.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
8c520a59 30#include <linux/rfkill.h>
baf27f6e 31#include <linux/debugfs.h>
47219839 32#include <asm/unaligned.h>
1da177e4
LT
33
34#include <net/bluetooth/bluetooth.h>
35#include <net/bluetooth/hci_core.h>
36
b78752cc 37static void hci_rx_work(struct work_struct *work);
c347b765 38static void hci_cmd_work(struct work_struct *work);
3eff45ea 39static void hci_tx_work(struct work_struct *work);
1da177e4 40
1da177e4
LT
41/* HCI device list */
42LIST_HEAD(hci_dev_list);
43DEFINE_RWLOCK(hci_dev_list_lock);
44
45/* HCI callback list */
46LIST_HEAD(hci_cb_list);
47DEFINE_RWLOCK(hci_cb_list_lock);
48
3df92b31
SL
49/* HCI ID Numbering */
50static DEFINE_IDA(hci_index_ida);
51
1da177e4
LT
52/* ---- HCI notifications ---- */
53
/* Forward a device event (register/unregister/up/down) to the HCI
 * socket layer so monitor/raw sockets can be notified.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
58
baf27f6e
MH
59/* ---- HCI debugfs entries ---- */
60
4b4148e9
MH
/* debugfs "dut_mode" read handler.
 *
 * Reports "Y\n" or "N\n" depending on whether the HCI_DUT_MODE flag is
 * currently set in hdev->dev_flags.
 */
static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
			     size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y': 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "dut_mode" write handler.
 *
 * Parses a boolean string (via strtobool) and toggles Device Under Test
 * mode by sending the corresponding HCI command synchronously.  There is
 * no dedicated "disable DUT mode" command, so disabling is done with a
 * controller reset (HCI_OP_RESET).
 */
static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	struct sk_buff *skb;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));
	bool enable;
	int err;

	/* Commands can only be sent while the device is powered up */
	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';
	if (strtobool(buf, &enable))
		return -EINVAL;

	/* Nothing to do when the requested state is already active */
	if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
		return -EALREADY;

	hci_req_lock(hdev);
	if (enable)
		skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
				     HCI_CMD_TIMEOUT);
	else
		skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
				     HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First parameter byte of the command complete event is the status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	change_bit(HCI_DUT_MODE, &hdev->dev_flags);

	return count;
}

static const struct file_operations dut_mode_fops = {
	.open		= simple_open,
	.read		= dut_mode_read,
	.write		= dut_mode_write,
	.llseek		= default_llseek,
};
125
dfb826a8
MH
/* debugfs "features": dump every supported feature page of the
 * controller (one line per page), plus the LE feature page when the
 * controller is LE capable.
 */
static int features_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	u8 p;

	hci_dev_lock(hdev);
	for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
			   hdev->features[p][0], hdev->features[p][1],
			   hdev->features[p][2], hdev->features[p][3],
			   hdev->features[p][4], hdev->features[p][5],
			   hdev->features[p][6], hdev->features[p][7]);
	}
	if (lmp_le_capable(hdev))
		seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
			   "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
			   hdev->le_features[0], hdev->le_features[1],
			   hdev->le_features[2], hdev->le_features[3],
			   hdev->le_features[4], hdev->le_features[5],
			   hdev->le_features[6], hdev->le_features[7]);
	hci_dev_unlock(hdev);

	return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
	return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
	.open		= features_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
163
70afe0b8
MH
164static int blacklist_show(struct seq_file *f, void *p)
165{
166 struct hci_dev *hdev = f->private;
167 struct bdaddr_list *b;
168
169 hci_dev_lock(hdev);
170 list_for_each_entry(b, &hdev->blacklist, list)
b25f0785 171 seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
70afe0b8
MH
172 hci_dev_unlock(hdev);
173
174 return 0;
175}
176
177static int blacklist_open(struct inode *inode, struct file *file)
178{
179 return single_open(file, blacklist_show, inode->i_private);
180}
181
182static const struct file_operations blacklist_fops = {
183 .open = blacklist_open,
184 .read = seq_read,
185 .llseek = seq_lseek,
186 .release = single_release,
187};
188
47219839
MH
/* debugfs "uuids": print every registered service UUID, one per line,
 * in standard big endian %pUb format.
 */
static int uuids_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct bt_uuid *uuid;

	hci_dev_lock(hdev);
	list_for_each_entry(uuid, &hdev->uuids, list) {
		u8 i, val[16];

		/* The Bluetooth UUID values are stored in big endian,
		 * but with reversed byte order. So convert them into
		 * the right order for the %pUb modifier.
		 */
		for (i = 0; i < 16; i++)
			val[i] = uuid->uuid[15 - i];

		seq_printf(f, "%pUb\n", val);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
	return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
	.open		= uuids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
223
baf27f6e
MH
/* debugfs "inquiry_cache": dump every entry of the discovery cache,
 * one line per discovered device with its page scan parameters, class
 * of device, clock offset, RSSI, SSP mode and entry timestamp.
 */
static int inquiry_cache_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	hci_dev_lock(hdev);

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;
		/* dev_class is stored little endian, so print bytes 2..0 */
		seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
			   &data->bdaddr,
			   data->pscan_rep_mode, data->pscan_period_mode,
			   data->pscan_mode, data->dev_class[2],
			   data->dev_class[1], data->dev_class[0],
			   __le16_to_cpu(data->clock_offset),
			   data->rssi, data->ssp_mode, e->timestamp);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
	return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
	.open		= inquiry_cache_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
259
02d08d15
MH
/* debugfs "link_keys": list all stored BR/EDR link keys with their
 * type, key value and PIN length.
 */
static int link_keys_show(struct seq_file *f, void *ptr)
{
	struct hci_dev *hdev = f->private;
	struct list_head *p, *n;

	hci_dev_lock(hdev);
	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key = list_entry(p, struct link_key, list);
		seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
			   HCI_LINK_KEY_SIZE, key->val, key->pin_len);
	}
	hci_dev_unlock(hdev);

	return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
	return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
	.open		= link_keys_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
287
12c269d7
MH
288static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
289 size_t count, loff_t *ppos)
290{
291 struct hci_dev *hdev = file->private_data;
292 char buf[3];
293
294 buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y': 'N';
295 buf[1] = '\n';
296 buf[2] = '\0';
297 return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
298}
299
300static const struct file_operations use_debug_keys_fops = {
301 .open = simple_open,
302 .read = use_debug_keys_read,
303 .llseek = default_llseek,
304};
305
babdbb3c
MH
306static int dev_class_show(struct seq_file *f, void *ptr)
307{
308 struct hci_dev *hdev = f->private;
309
310 hci_dev_lock(hdev);
311 seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
312 hdev->dev_class[1], hdev->dev_class[0]);
313 hci_dev_unlock(hdev);
314
315 return 0;
316}
317
318static int dev_class_open(struct inode *inode, struct file *file)
319{
320 return single_open(file, dev_class_show, inode->i_private);
321}
322
323static const struct file_operations dev_class_fops = {
324 .open = dev_class_open,
325 .read = seq_read,
326 .llseek = seq_lseek,
327 .release = single_release,
328};
329
041000b9
MH
/* debugfs "voice_setting": read-only view of the controller's current
 * voice setting value.
 */
static int voice_setting_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->voice_setting;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
			NULL, "0x%4.4llx\n");

/* debugfs "auto_accept_delay": set the delay before an incoming
 * connection is automatically accepted.  No range validation is done
 * here; the value is stored as-is.
 */
static int auto_accept_delay_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	hdev->auto_accept_delay = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->auto_accept_delay;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
			auto_accept_delay_set, "%llu\n");
368
06f5b778
MH
/* debugfs "ssp_debug_mode": enable/disable Simple Pairing debug mode by
 * sending HCI_OP_WRITE_SSP_DEBUG_MODE synchronously.  Only 0 and 1 are
 * accepted, and the device must be powered up.
 */
static int ssp_debug_mode_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;
	struct sk_buff *skb;
	__u8 mode;
	int err;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	hci_req_lock(hdev);
	mode = val;
	skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
			     &mode, HCI_CMD_TIMEOUT);
	hci_req_unlock(hdev);

	if (IS_ERR(skb))
		return PTR_ERR(skb);

	/* First parameter byte of the command complete event is the status */
	err = -bt_to_errno(skb->data[0]);
	kfree_skb(skb);

	if (err < 0)
		return err;

	/* Only record the new mode after the controller accepted it */
	hci_dev_lock(hdev);
	hdev->ssp_debug_mode = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->ssp_debug_mode;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
			ssp_debug_mode_set, "%llu\n");
417
2bfa3531
MH
/* debugfs "idle_timeout": idle timeout in milliseconds.  Zero disables
 * the timeout; otherwise the value must be between 500 ms and 1 hour.
 */
static int idle_timeout_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && (val < 500 || val > 3600000))
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->idle_timeout = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->idle_timeout;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
			idle_timeout_set, "%llu\n");

/* debugfs "sniff_min_interval": must be non-zero, even (baseband slots
 * come in pairs) and not larger than the configured maximum.
 */
static int sniff_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
			sniff_min_interval_set, "%llu\n");

/* debugfs "sniff_max_interval": must be non-zero, even and not smaller
 * than the configured minimum.
 */
static int sniff_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->sniff_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->sniff_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
			sniff_max_interval_set, "%llu\n");
501
e7b8fc92
MH
/* debugfs "static_address": read-only view of the configured LE static
 * random address.
 */
static int static_address_show(struct seq_file *f, void *p)
{
	struct hci_dev *hdev = f->private;

	hci_dev_lock(hdev);
	seq_printf(f, "%pMR\n", &hdev->static_addr);
	hci_dev_unlock(hdev);

	return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
	return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
	.open		= static_address_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* debugfs "own_address_type": select the LE own address type.  Only the
 * values 0 (public) and 1 (random) are accepted.
 */
static int own_address_type_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val != 0 && val != 1)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->own_addr_type = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->own_addr_type;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
			own_address_type_set, "%llu\n");
552
8f8625cd
MH
553static int long_term_keys_show(struct seq_file *f, void *ptr)
554{
555 struct hci_dev *hdev = f->private;
556 struct list_head *p, *n;
557
558 hci_dev_lock(hdev);
559 list_for_each_safe(p, n, &hdev->link_keys) {
560 struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
561 seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\\n",
562 &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
563 ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
564 8, ltk->rand, 16, ltk->val);
565 }
566 hci_dev_unlock(hdev);
567
568 return 0;
569}
570
571static int long_term_keys_open(struct inode *inode, struct file *file)
572{
573 return single_open(file, long_term_keys_show, inode->i_private);
574}
575
576static const struct file_operations long_term_keys_fops = {
577 .open = long_term_keys_open,
578 .read = seq_read,
579 .llseek = seq_lseek,
580 .release = single_release,
581};
582
4e70c7e7
MH
/* debugfs "conn_min_interval": minimum LE connection interval.  Valid
 * range per the Bluetooth spec is 0x0006-0x0c80 (units of 1.25 ms) and
 * the minimum may not exceed the configured maximum.
 */
static int conn_min_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_min_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_min_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
			conn_min_interval_set, "%llu\n");

/* debugfs "conn_max_interval": maximum LE connection interval.  Same
 * 0x0006-0x0c80 range, and may not fall below the configured minimum.
 */
static int conn_max_interval_set(void *data, u64 val)
{
	struct hci_dev *hdev = data;

	if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
		return -EINVAL;

	hci_dev_lock(hdev);
	hdev->le_conn_max_interval = val;
	hci_dev_unlock(hdev);

	return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
	struct hci_dev *hdev = data;

	hci_dev_lock(hdev);
	*val = hdev->le_conn_max_interval;
	hci_dev_unlock(hdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
			conn_max_interval_set, "%llu\n");
638
89863109
JR
/* debugfs "6lowpan" read handler: reports whether 6LoWPAN support is
 * enabled ("Y") or disabled ("N").
 */
static ssize_t lowpan_read(struct file *file, char __user *user_buf,
			   size_t count, loff_t *ppos)
{
	struct hci_dev *hdev = file->private_data;
	char buf[3];

	buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
	buf[1] = '\n';
	buf[2] = '\0';
	return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

/* debugfs "6lowpan" write handler: toggle the HCI_6LOWPAN_ENABLED flag
 * from a boolean string.  This only flips the flag; no HCI commands are
 * sent, so the device does not need to be powered up.
 */
static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
			    size_t count, loff_t *position)
{
	struct hci_dev *hdev = fp->private_data;
	bool enable;
	char buf[32];
	size_t buf_size = min(count, (sizeof(buf)-1));

	if (copy_from_user(buf, user_buffer, buf_size))
		return -EFAULT;

	buf[buf_size] = '\0';

	if (strtobool(buf, &enable) < 0)
		return -EINVAL;

	if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		return -EALREADY;

	change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

	return count;
}

static const struct file_operations lowpan_debugfs_fops = {
	.open		= simple_open,
	.read		= lowpan_read,
	.write		= lowpan_write,
	.llseek		= default_llseek,
};
681
1da177e4
LT
682/* ---- HCI requests ---- */
683
/* Completion callback for synchronous HCI requests: record the result
 * status and wake up the thread sleeping in __hci_req_sync() or
 * __hci_cmd_sync_ev().
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Abort a pending synchronous request with the given error and wake up
 * the waiting thread (used e.g. when the device is being closed).
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
705
77a63e0a
FW
/* Retrieve the last received event (hdev->recv_evt) and check that it
 * matches what the synchronous command caller expects.
 *
 * If @event is non-zero, any event of that type is accepted.  Otherwise
 * the event must be a Command Complete for @opcode.  On success the skb
 * is returned with its headers pulled (ownership passes to the caller);
 * on any mismatch the skb is freed and ERR_PTR(-ENODATA) is returned.
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	/* Take over the stashed event; clear it under the lock so it is
	 * consumed exactly once.
	 */
	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event rather than Command Complete */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
760
/* Send a single HCI command and wait (up to @timeout jiffies) for its
 * completion.  If @event is non-zero, wait for that specific event
 * instead of the Command Complete event.
 *
 * Must be called with hci_req_lock held.  Returns the response skb on
 * success or an ERR_PTR on failure/timeout/signal.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	/* Sleep until hci_req_sync_complete() wakes us, the timeout
	 * expires or a signal arrives.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	/* Hand the matching response event (if any) back to the caller */
	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

/* Convenience wrapper around __hci_cmd_sync_ev() that waits for the
 * plain Command Complete event.
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
821
/* Execute request and wait for completion.
 *
 * @func builds the request (queues one or more HCI commands) and the
 * calling thread then sleeps until hci_req_sync_complete() fires, the
 * timeout expires or a signal arrives.  Must be called with
 * hci_req_lock held (see hci_req_sync()).
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
885
01178cd4 886static int hci_req_sync(struct hci_dev *hdev,
42c6b129
JH
887 void (*req)(struct hci_request *req,
888 unsigned long opt),
01178cd4 889 unsigned long opt, __u32 timeout)
1da177e4
LT
890{
891 int ret;
892
7c6a329e
MH
893 if (!test_bit(HCI_UP, &hdev->flags))
894 return -ENETDOWN;
895
1da177e4
LT
896 /* Serialize all requests */
897 hci_req_lock(hdev);
01178cd4 898 ret = __hci_req_sync(hdev, req, opt, timeout);
1da177e4
LT
899 hci_req_unlock(hdev);
900
901 return ret;
902}
903
/* Request builder that queues a controller reset. */
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

/* Stage-one init for BR/EDR controllers: packet-based flow control and
 * the basic identity reads (features, version, address).
 */
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}
926
/* Stage-one init for AMP controllers: block-based flow control plus the
 * AMP-specific capability reads.
 */
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

	/* Read Flow Control Mode */
	hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

	/* Read Location Data */
	hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}
952
/* First stage of controller initialization: optional reset followed by
 * the transport-type specific basic setup (BR/EDR or AMP).
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
977
/* Stage-two BR/EDR setup: read buffer sizes, identity and IAC state,
 * clear event filters and set the connection accept timeout.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Read Number of Supported IAC */
	hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

	/* Read Current IAC LAP */
	hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
	 * but it does not support page scan related HCI commands.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
1019
/* Stage-two LE setup: read LE buffer sizes, features, TX power, white
 * list size and supported states, and implicitly enable LE on LE-only
 * controllers.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
1043
1044static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
1045{
1046 if (lmp_ext_inq_capable(hdev))
1047 return 0x02;
1048
1049 if (lmp_inq_rssi_capable(hdev))
1050 return 0x01;
1051
1052 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
1053 hdev->lmp_subver == 0x0757)
1054 return 0x01;
1055
1056 if (hdev->manufacturer == 15) {
1057 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
1058 return 0x01;
1059 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
1060 return 0x01;
1061 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
1062 return 0x01;
1063 }
1064
1065 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
1066 hdev->lmp_subver == 0x1805)
1067 return 0x01;
1068
1069 return 0x00;
1070}
1071
42c6b129 1072static void hci_setup_inquiry_mode(struct hci_request *req)
2177bab5
JH
1073{
1074 u8 mode;
1075
42c6b129 1076 mode = hci_get_inquiry_mode(req->hdev);
2177bab5 1077
42c6b129 1078 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
2177bab5
JH
1079}
1080
/* Build the Set Event Mask (and, for LE controllers, LE Set Event Mask)
 * command based on which features the controller supports, so that the
 * controller only generates events the host can handle.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
1161
42c6b129 1162static void hci_init2_req(struct hci_request *req, unsigned long opt)
2177bab5 1163{
42c6b129
JH
1164 struct hci_dev *hdev = req->hdev;
1165
2177bab5 1166 if (lmp_bredr_capable(hdev))
42c6b129 1167 bredr_setup(req);
56f87901
JH
1168 else
1169 clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2177bab5
JH
1170
1171 if (lmp_le_capable(hdev))
42c6b129 1172 le_setup(req);
2177bab5 1173
42c6b129 1174 hci_setup_event_mask(req);
2177bab5 1175
3f8e2d75
JH
1176 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
1177 * local supported commands HCI command.
1178 */
1179 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
42c6b129 1180 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
2177bab5
JH
1181
1182 if (lmp_ssp_capable(hdev)) {
57af75a8
MH
1183 /* When SSP is available, then the host features page
1184 * should also be available as well. However some
1185 * controllers list the max_page as 0 as long as SSP
1186 * has not been enabled. To achieve proper debugging
1187 * output, force the minimum max_page to 1 at least.
1188 */
1189 hdev->max_page = 0x01;
1190
2177bab5
JH
1191 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
1192 u8 mode = 0x01;
42c6b129
JH
1193 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
1194 sizeof(mode), &mode);
2177bab5
JH
1195 } else {
1196 struct hci_cp_write_eir cp;
1197
1198 memset(hdev->eir, 0, sizeof(hdev->eir));
1199 memset(&cp, 0, sizeof(cp));
1200
42c6b129 1201 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
2177bab5
JH
1202 }
1203 }
1204
1205 if (lmp_inq_rssi_capable(hdev))
42c6b129 1206 hci_setup_inquiry_mode(req);
2177bab5
JH
1207
1208 if (lmp_inq_tx_pwr_capable(hdev))
42c6b129 1209 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
2177bab5
JH
1210
1211 if (lmp_ext_feat_capable(hdev)) {
1212 struct hci_cp_read_local_ext_features cp;
1213
1214 cp.page = 0x01;
42c6b129
JH
1215 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1216 sizeof(cp), &cp);
2177bab5
JH
1217 }
1218
1219 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
1220 u8 enable = 1;
42c6b129
JH
1221 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
1222 &enable);
2177bab5
JH
1223 }
1224}
1225
42c6b129 1226static void hci_setup_link_policy(struct hci_request *req)
2177bab5 1227{
42c6b129 1228 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1229 struct hci_cp_write_def_link_policy cp;
1230 u16 link_policy = 0;
1231
1232 if (lmp_rswitch_capable(hdev))
1233 link_policy |= HCI_LP_RSWITCH;
1234 if (lmp_hold_capable(hdev))
1235 link_policy |= HCI_LP_HOLD;
1236 if (lmp_sniff_capable(hdev))
1237 link_policy |= HCI_LP_SNIFF;
1238 if (lmp_park_capable(hdev))
1239 link_policy |= HCI_LP_PARK;
1240
1241 cp.policy = cpu_to_le16(link_policy);
42c6b129 1242 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
2177bab5
JH
1243}
1244
42c6b129 1245static void hci_set_le_support(struct hci_request *req)
2177bab5 1246{
42c6b129 1247 struct hci_dev *hdev = req->hdev;
2177bab5
JH
1248 struct hci_cp_write_le_host_supported cp;
1249
c73eee91
JH
1250 /* LE-only devices do not support explicit enablement */
1251 if (!lmp_bredr_capable(hdev))
1252 return;
1253
2177bab5
JH
1254 memset(&cp, 0, sizeof(cp));
1255
1256 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
1257 cp.le = 0x01;
1258 cp.simul = lmp_le_br_capable(hdev);
1259 }
1260
1261 if (cp.le != lmp_host_le_capable(hdev))
42c6b129
JH
1262 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
1263 &cp);
2177bab5
JH
1264}
1265
d62e6d67
JH
1266static void hci_set_event_mask_page_2(struct hci_request *req)
1267{
1268 struct hci_dev *hdev = req->hdev;
1269 u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
1270
1271 /* If Connectionless Slave Broadcast master role is supported
1272 * enable all necessary events for it.
1273 */
53b834d2 1274 if (lmp_csb_master_capable(hdev)) {
d62e6d67
JH
1275 events[1] |= 0x40; /* Triggered Clock Capture */
1276 events[1] |= 0x80; /* Synchronization Train Complete */
1277 events[2] |= 0x10; /* Slave Page Response Timeout */
1278 events[2] |= 0x20; /* CSB Channel Map Change */
1279 }
1280
1281 /* If Connectionless Slave Broadcast slave role is supported
1282 * enable all necessary events for it.
1283 */
53b834d2 1284 if (lmp_csb_slave_capable(hdev)) {
d62e6d67
JH
1285 events[2] |= 0x01; /* Synchronization Train Received */
1286 events[2] |= 0x02; /* CSB Receive */
1287 events[2] |= 0x04; /* CSB Timeout */
1288 events[2] |= 0x08; /* Truncated Page Complete */
1289 }
1290
40c59fcb
MH
1291 /* Enable Authenticated Payload Timeout Expired event if supported */
1292 if (lmp_ping_capable(hdev))
1293 events[2] |= 0x80;
1294
d62e6d67
JH
1295 hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
1296}
1297
42c6b129 1298static void hci_init3_req(struct hci_request *req, unsigned long opt)
2177bab5 1299{
42c6b129 1300 struct hci_dev *hdev = req->hdev;
d2c5d77f 1301 u8 p;
42c6b129 1302
b8f4e068
GP
1303 /* Some Broadcom based Bluetooth controllers do not support the
1304 * Delete Stored Link Key command. They are clearly indicating its
1305 * absence in the bit mask of supported commands.
1306 *
1307 * Check the supported commands and only if the the command is marked
1308 * as supported send it. If not supported assume that the controller
1309 * does not have actual support for stored link keys which makes this
1310 * command redundant anyway.
f9f462fa
MH
1311 *
1312 * Some controllers indicate that they support handling deleting
1313 * stored link keys, but they don't. The quirk lets a driver
1314 * just disable this command.
637b4cae 1315 */
f9f462fa
MH
1316 if (hdev->commands[6] & 0x80 &&
1317 !test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) {
59f45d57
JH
1318 struct hci_cp_delete_stored_link_key cp;
1319
1320 bacpy(&cp.bdaddr, BDADDR_ANY);
1321 cp.delete_all = 0x01;
1322 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
1323 sizeof(cp), &cp);
1324 }
1325
2177bab5 1326 if (hdev->commands[5] & 0x10)
42c6b129 1327 hci_setup_link_policy(req);
2177bab5 1328
79830f66 1329 if (lmp_le_capable(hdev)) {
bef34c0a
MH
1330 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1331 /* If the controller has a public BD_ADDR, then
1332 * by default use that one. If this is a LE only
1333 * controller without a public address, default
1334 * to the random address.
1335 */
1336 if (bacmp(&hdev->bdaddr, BDADDR_ANY))
1337 hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
1338 else
1339 hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
1340 }
79830f66 1341
42c6b129 1342 hci_set_le_support(req);
79830f66 1343 }
d2c5d77f
JH
1344
1345 /* Read features beyond page 1 if available */
1346 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
1347 struct hci_cp_read_local_ext_features cp;
1348
1349 cp.page = p;
1350 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
1351 sizeof(cp), &cp);
1352 }
2177bab5
JH
1353}
1354
5d4e7e8d
JH
1355static void hci_init4_req(struct hci_request *req, unsigned long opt)
1356{
1357 struct hci_dev *hdev = req->hdev;
1358
d62e6d67
JH
1359 /* Set event mask page 2 if the HCI command for it is supported */
1360 if (hdev->commands[22] & 0x04)
1361 hci_set_event_mask_page_2(req);
1362
5d4e7e8d 1363 /* Check for Synchronization Train support */
53b834d2 1364 if (lmp_sync_train_capable(hdev))
5d4e7e8d
JH
1365 hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
1366}
1367
2177bab5
JH
1368static int __hci_init(struct hci_dev *hdev)
1369{
1370 int err;
1371
1372 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
1373 if (err < 0)
1374 return err;
1375
4b4148e9
MH
1376 /* The Device Under Test (DUT) mode is special and available for
1377 * all controller types. So just create it early on.
1378 */
1379 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
1380 debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
1381 &dut_mode_fops);
1382 }
1383
2177bab5
JH
1384 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
1385 * BR/EDR/LE type controllers. AMP controllers only need the
1386 * first stage init.
1387 */
1388 if (hdev->dev_type != HCI_BREDR)
1389 return 0;
1390
1391 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
1392 if (err < 0)
1393 return err;
1394
5d4e7e8d
JH
1395 err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
1396 if (err < 0)
1397 return err;
1398
baf27f6e
MH
1399 err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
1400 if (err < 0)
1401 return err;
1402
1403 /* Only create debugfs entries during the initial setup
1404 * phase and not every time the controller gets powered on.
1405 */
1406 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1407 return 0;
1408
dfb826a8
MH
1409 debugfs_create_file("features", 0444, hdev->debugfs, hdev,
1410 &features_fops);
ceeb3bc0
MH
1411 debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
1412 &hdev->manufacturer);
1413 debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
1414 debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
70afe0b8
MH
1415 debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
1416 &blacklist_fops);
47219839
MH
1417 debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);
1418
baf27f6e
MH
1419 if (lmp_bredr_capable(hdev)) {
1420 debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
1421 hdev, &inquiry_cache_fops);
02d08d15
MH
1422 debugfs_create_file("link_keys", 0400, hdev->debugfs,
1423 hdev, &link_keys_fops);
12c269d7
MH
1424 debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
1425 hdev, &use_debug_keys_fops);
babdbb3c
MH
1426 debugfs_create_file("dev_class", 0444, hdev->debugfs,
1427 hdev, &dev_class_fops);
041000b9
MH
1428 debugfs_create_file("voice_setting", 0444, hdev->debugfs,
1429 hdev, &voice_setting_fops);
baf27f6e
MH
1430 }
1431
06f5b778 1432 if (lmp_ssp_capable(hdev)) {
ebd1e33b
MH
1433 debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
1434 hdev, &auto_accept_delay_fops);
06f5b778
MH
1435 debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
1436 hdev, &ssp_debug_mode_fops);
1437 }
ebd1e33b 1438
2bfa3531
MH
1439 if (lmp_sniff_capable(hdev)) {
1440 debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
1441 hdev, &idle_timeout_fops);
1442 debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
1443 hdev, &sniff_min_interval_fops);
1444 debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
1445 hdev, &sniff_max_interval_fops);
1446 }
1447
d0f729b8
MH
1448 if (lmp_le_capable(hdev)) {
1449 debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
1450 &hdev->le_white_list_size);
e7b8fc92
MH
1451 debugfs_create_file("static_address", 0444, hdev->debugfs,
1452 hdev, &static_address_fops);
92202185
MH
1453 debugfs_create_file("own_address_type", 0644, hdev->debugfs,
1454 hdev, &own_address_type_fops);
8f8625cd
MH
1455 debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
1456 hdev, &long_term_keys_fops);
4e70c7e7
MH
1457 debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
1458 hdev, &conn_min_interval_fops);
1459 debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
1460 hdev, &conn_max_interval_fops);
89863109
JR
1461 debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
1462 &lowpan_debugfs_fops);
d0f729b8 1463 }
e7b8fc92 1464
baf27f6e 1465 return 0;
2177bab5
JH
1466}
1467
42c6b129 1468static void hci_scan_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1469{
1470 __u8 scan = opt;
1471
42c6b129 1472 BT_DBG("%s %x", req->hdev->name, scan);
1da177e4
LT
1473
1474 /* Inquiry and Page scans */
42c6b129 1475 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
1476}
1477
42c6b129 1478static void hci_auth_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1479{
1480 __u8 auth = opt;
1481
42c6b129 1482 BT_DBG("%s %x", req->hdev->name, auth);
1da177e4
LT
1483
1484 /* Authentication */
42c6b129 1485 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
1486}
1487
42c6b129 1488static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1489{
1490 __u8 encrypt = opt;
1491
42c6b129 1492 BT_DBG("%s %x", req->hdev->name, encrypt);
1da177e4 1493
e4e8e37c 1494 /* Encryption */
42c6b129 1495 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
1496}
1497
42c6b129 1498static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
e4e8e37c
MH
1499{
1500 __le16 policy = cpu_to_le16(opt);
1501
42c6b129 1502 BT_DBG("%s %x", req->hdev->name, policy);
e4e8e37c
MH
1503
1504 /* Default link policy */
42c6b129 1505 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
e4e8e37c
MH
1506}
1507
8e87d142 1508/* Get HCI device by index.
1da177e4
LT
1509 * Device is held on return. */
1510struct hci_dev *hci_dev_get(int index)
1511{
8035ded4 1512 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
1513
1514 BT_DBG("%d", index);
1515
1516 if (index < 0)
1517 return NULL;
1518
1519 read_lock(&hci_dev_list_lock);
8035ded4 1520 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
1521 if (d->id == index) {
1522 hdev = hci_dev_hold(d);
1523 break;
1524 }
1525 }
1526 read_unlock(&hci_dev_list_lock);
1527 return hdev;
1528}
1da177e4
LT
1529
1530/* ---- Inquiry support ---- */
ff9ef578 1531
30dc78e1
JH
1532bool hci_discovery_active(struct hci_dev *hdev)
1533{
1534 struct discovery_state *discov = &hdev->discovery;
1535
6fbe195d 1536 switch (discov->state) {
343f935b 1537 case DISCOVERY_FINDING:
6fbe195d 1538 case DISCOVERY_RESOLVING:
30dc78e1
JH
1539 return true;
1540
6fbe195d
AG
1541 default:
1542 return false;
1543 }
30dc78e1
JH
1544}
1545
ff9ef578
JH
1546void hci_discovery_set_state(struct hci_dev *hdev, int state)
1547{
1548 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
1549
1550 if (hdev->discovery.state == state)
1551 return;
1552
1553 switch (state) {
1554 case DISCOVERY_STOPPED:
7b99b659
AG
1555 if (hdev->discovery.state != DISCOVERY_STARTING)
1556 mgmt_discovering(hdev, 0);
ff9ef578
JH
1557 break;
1558 case DISCOVERY_STARTING:
1559 break;
343f935b 1560 case DISCOVERY_FINDING:
ff9ef578
JH
1561 mgmt_discovering(hdev, 1);
1562 break;
30dc78e1
JH
1563 case DISCOVERY_RESOLVING:
1564 break;
ff9ef578
JH
1565 case DISCOVERY_STOPPING:
1566 break;
1567 }
1568
1569 hdev->discovery.state = state;
1570}
1571
1f9b9a5d 1572void hci_inquiry_cache_flush(struct hci_dev *hdev)
1da177e4 1573{
30883512 1574 struct discovery_state *cache = &hdev->discovery;
b57c1a56 1575 struct inquiry_entry *p, *n;
1da177e4 1576
561aafbc
JH
1577 list_for_each_entry_safe(p, n, &cache->all, all) {
1578 list_del(&p->all);
b57c1a56 1579 kfree(p);
1da177e4 1580 }
561aafbc
JH
1581
1582 INIT_LIST_HEAD(&cache->unknown);
1583 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
1584}
1585
a8c5fb1a
GP
1586struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
1587 bdaddr_t *bdaddr)
1da177e4 1588{
30883512 1589 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1590 struct inquiry_entry *e;
1591
6ed93dc6 1592 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 1593
561aafbc
JH
1594 list_for_each_entry(e, &cache->all, all) {
1595 if (!bacmp(&e->data.bdaddr, bdaddr))
1596 return e;
1597 }
1598
1599 return NULL;
1600}
1601
1602struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 1603 bdaddr_t *bdaddr)
561aafbc 1604{
30883512 1605 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
1606 struct inquiry_entry *e;
1607
6ed93dc6 1608 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
1609
1610 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 1611 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
1612 return e;
1613 }
1614
1615 return NULL;
1da177e4
LT
1616}
1617
30dc78e1 1618struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
1619 bdaddr_t *bdaddr,
1620 int state)
30dc78e1
JH
1621{
1622 struct discovery_state *cache = &hdev->discovery;
1623 struct inquiry_entry *e;
1624
6ed93dc6 1625 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
1626
1627 list_for_each_entry(e, &cache->resolve, list) {
1628 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
1629 return e;
1630 if (!bacmp(&e->data.bdaddr, bdaddr))
1631 return e;
1632 }
1633
1634 return NULL;
1635}
1636
a3d4e20a 1637void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 1638 struct inquiry_entry *ie)
a3d4e20a
JH
1639{
1640 struct discovery_state *cache = &hdev->discovery;
1641 struct list_head *pos = &cache->resolve;
1642 struct inquiry_entry *p;
1643
1644 list_del(&ie->list);
1645
1646 list_for_each_entry(p, &cache->resolve, list) {
1647 if (p->name_state != NAME_PENDING &&
a8c5fb1a 1648 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
1649 break;
1650 pos = &p->list;
1651 }
1652
1653 list_add(&ie->list, pos);
1654}
1655
3175405b 1656bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 1657 bool name_known, bool *ssp)
1da177e4 1658{
30883512 1659 struct discovery_state *cache = &hdev->discovery;
70f23020 1660 struct inquiry_entry *ie;
1da177e4 1661
6ed93dc6 1662 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 1663
2b2fec4d
SJ
1664 hci_remove_remote_oob_data(hdev, &data->bdaddr);
1665
388fc8fa
JH
1666 if (ssp)
1667 *ssp = data->ssp_mode;
1668
70f23020 1669 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 1670 if (ie) {
388fc8fa
JH
1671 if (ie->data.ssp_mode && ssp)
1672 *ssp = true;
1673
a3d4e20a 1674 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 1675 data->rssi != ie->data.rssi) {
a3d4e20a
JH
1676 ie->data.rssi = data->rssi;
1677 hci_inquiry_cache_update_resolve(hdev, ie);
1678 }
1679
561aafbc 1680 goto update;
a3d4e20a 1681 }
561aafbc
JH
1682
1683 /* Entry not in the cache. Add new one. */
1684 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
1685 if (!ie)
3175405b 1686 return false;
561aafbc
JH
1687
1688 list_add(&ie->all, &cache->all);
1689
1690 if (name_known) {
1691 ie->name_state = NAME_KNOWN;
1692 } else {
1693 ie->name_state = NAME_NOT_KNOWN;
1694 list_add(&ie->list, &cache->unknown);
1695 }
70f23020 1696
561aafbc
JH
1697update:
1698 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 1699 ie->name_state != NAME_PENDING) {
561aafbc
JH
1700 ie->name_state = NAME_KNOWN;
1701 list_del(&ie->list);
1da177e4
LT
1702 }
1703
70f23020
AE
1704 memcpy(&ie->data, data, sizeof(*data));
1705 ie->timestamp = jiffies;
1da177e4 1706 cache->timestamp = jiffies;
3175405b
JH
1707
1708 if (ie->name_state == NAME_NOT_KNOWN)
1709 return false;
1710
1711 return true;
1da177e4
LT
1712}
1713
1714static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1715{
30883512 1716 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1717 struct inquiry_info *info = (struct inquiry_info *) buf;
1718 struct inquiry_entry *e;
1719 int copied = 0;
1720
561aafbc 1721 list_for_each_entry(e, &cache->all, all) {
1da177e4 1722 struct inquiry_data *data = &e->data;
b57c1a56
JH
1723
1724 if (copied >= num)
1725 break;
1726
1da177e4
LT
1727 bacpy(&info->bdaddr, &data->bdaddr);
1728 info->pscan_rep_mode = data->pscan_rep_mode;
1729 info->pscan_period_mode = data->pscan_period_mode;
1730 info->pscan_mode = data->pscan_mode;
1731 memcpy(info->dev_class, data->dev_class, 3);
1732 info->clock_offset = data->clock_offset;
b57c1a56 1733
1da177e4 1734 info++;
b57c1a56 1735 copied++;
1da177e4
LT
1736 }
1737
1738 BT_DBG("cache %p, copied %d", cache, copied);
1739 return copied;
1740}
1741
42c6b129 1742static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1743{
1744 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 1745 struct hci_dev *hdev = req->hdev;
1da177e4
LT
1746 struct hci_cp_inquiry cp;
1747
1748 BT_DBG("%s", hdev->name);
1749
1750 if (test_bit(HCI_INQUIRY, &hdev->flags))
1751 return;
1752
1753 /* Start Inquiry */
1754 memcpy(&cp.lap, &ir->lap, 3);
1755 cp.length = ir->length;
1756 cp.num_rsp = ir->num_rsp;
42c6b129 1757 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
1758}
1759
3e13fa1e
AG
1760static int wait_inquiry(void *word)
1761{
1762 schedule();
1763 return signal_pending(current);
1764}
1765
1da177e4
LT
1766int hci_inquiry(void __user *arg)
1767{
1768 __u8 __user *ptr = arg;
1769 struct hci_inquiry_req ir;
1770 struct hci_dev *hdev;
1771 int err = 0, do_inquiry = 0, max_rsp;
1772 long timeo;
1773 __u8 *buf;
1774
1775 if (copy_from_user(&ir, ptr, sizeof(ir)))
1776 return -EFAULT;
1777
5a08ecce
AE
1778 hdev = hci_dev_get(ir.dev_id);
1779 if (!hdev)
1da177e4
LT
1780 return -ENODEV;
1781
0736cfa8
MH
1782 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1783 err = -EBUSY;
1784 goto done;
1785 }
1786
5b69bef5
MH
1787 if (hdev->dev_type != HCI_BREDR) {
1788 err = -EOPNOTSUPP;
1789 goto done;
1790 }
1791
56f87901
JH
1792 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1793 err = -EOPNOTSUPP;
1794 goto done;
1795 }
1796
09fd0de5 1797 hci_dev_lock(hdev);
8e87d142 1798 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1799 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1800 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1801 do_inquiry = 1;
1802 }
09fd0de5 1803 hci_dev_unlock(hdev);
1da177e4 1804
04837f64 1805 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1806
1807 if (do_inquiry) {
01178cd4
JH
1808 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1809 timeo);
70f23020
AE
1810 if (err < 0)
1811 goto done;
3e13fa1e
AG
1812
1813 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1814 * cleared). If it is interrupted by a signal, return -EINTR.
1815 */
1816 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1817 TASK_INTERRUPTIBLE))
1818 return -EINTR;
70f23020 1819 }
1da177e4 1820
8fc9ced3
GP
1821 /* for unlimited number of responses we will use buffer with
1822 * 255 entries
1823 */
1da177e4
LT
1824 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1825
1826 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1827 * copy it to the user space.
1828 */
01df8c31 1829 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 1830 if (!buf) {
1da177e4
LT
1831 err = -ENOMEM;
1832 goto done;
1833 }
1834
09fd0de5 1835 hci_dev_lock(hdev);
1da177e4 1836 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1837 hci_dev_unlock(hdev);
1da177e4
LT
1838
1839 BT_DBG("num_rsp %d", ir.num_rsp);
1840
1841 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1842 ptr += sizeof(ir);
1843 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1844 ir.num_rsp))
1da177e4 1845 err = -EFAULT;
8e87d142 1846 } else
1da177e4
LT
1847 err = -EFAULT;
1848
1849 kfree(buf);
1850
1851done:
1852 hci_dev_put(hdev);
1853 return err;
1854}
1855
cbed0ca1 1856static int hci_dev_do_open(struct hci_dev *hdev)
1da177e4 1857{
1da177e4
LT
1858 int ret = 0;
1859
1da177e4
LT
1860 BT_DBG("%s %p", hdev->name, hdev);
1861
1862 hci_req_lock(hdev);
1863
94324962
JH
1864 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1865 ret = -ENODEV;
1866 goto done;
1867 }
1868
a5c8f270
MH
1869 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
1870 /* Check for rfkill but allow the HCI setup stage to
1871 * proceed (which in itself doesn't cause any RF activity).
1872 */
1873 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1874 ret = -ERFKILL;
1875 goto done;
1876 }
1877
1878 /* Check for valid public address or a configured static
1879 * random adddress, but let the HCI setup proceed to
1880 * be able to determine if there is a public address
1881 * or not.
1882 *
1883 * This check is only valid for BR/EDR controllers
1884 * since AMP controllers do not have an address.
1885 */
1886 if (hdev->dev_type == HCI_BREDR &&
1887 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1888 !bacmp(&hdev->static_addr, BDADDR_ANY)) {
1889 ret = -EADDRNOTAVAIL;
1890 goto done;
1891 }
611b30f7
MH
1892 }
1893
1da177e4
LT
1894 if (test_bit(HCI_UP, &hdev->flags)) {
1895 ret = -EALREADY;
1896 goto done;
1897 }
1898
1da177e4
LT
1899 if (hdev->open(hdev)) {
1900 ret = -EIO;
1901 goto done;
1902 }
1903
f41c70c4
MH
1904 atomic_set(&hdev->cmd_cnt, 1);
1905 set_bit(HCI_INIT, &hdev->flags);
1906
1907 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1908 ret = hdev->setup(hdev);
1909
1910 if (!ret) {
f41c70c4
MH
1911 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1912 set_bit(HCI_RAW, &hdev->flags);
1913
0736cfa8
MH
1914 if (!test_bit(HCI_RAW, &hdev->flags) &&
1915 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
f41c70c4 1916 ret = __hci_init(hdev);
1da177e4
LT
1917 }
1918
f41c70c4
MH
1919 clear_bit(HCI_INIT, &hdev->flags);
1920
1da177e4
LT
1921 if (!ret) {
1922 hci_dev_hold(hdev);
1923 set_bit(HCI_UP, &hdev->flags);
1924 hci_notify(hdev, HCI_DEV_UP);
bb4b2a9a 1925 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
0736cfa8 1926 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
1514b892 1927 hdev->dev_type == HCI_BREDR) {
09fd0de5 1928 hci_dev_lock(hdev);
744cf19e 1929 mgmt_powered(hdev, 1);
09fd0de5 1930 hci_dev_unlock(hdev);
56e5cb86 1931 }
8e87d142 1932 } else {
1da177e4 1933 /* Init failed, cleanup */
3eff45ea 1934 flush_work(&hdev->tx_work);
c347b765 1935 flush_work(&hdev->cmd_work);
b78752cc 1936 flush_work(&hdev->rx_work);
1da177e4
LT
1937
1938 skb_queue_purge(&hdev->cmd_q);
1939 skb_queue_purge(&hdev->rx_q);
1940
1941 if (hdev->flush)
1942 hdev->flush(hdev);
1943
1944 if (hdev->sent_cmd) {
1945 kfree_skb(hdev->sent_cmd);
1946 hdev->sent_cmd = NULL;
1947 }
1948
1949 hdev->close(hdev);
1950 hdev->flags = 0;
1951 }
1952
1953done:
1954 hci_req_unlock(hdev);
1da177e4
LT
1955 return ret;
1956}
1957
cbed0ca1
JH
1958/* ---- HCI ioctl helpers ---- */
1959
1960int hci_dev_open(__u16 dev)
1961{
1962 struct hci_dev *hdev;
1963 int err;
1964
1965 hdev = hci_dev_get(dev);
1966 if (!hdev)
1967 return -ENODEV;
1968
e1d08f40
JH
1969 /* We need to ensure that no other power on/off work is pending
1970 * before proceeding to call hci_dev_do_open. This is
1971 * particularly important if the setup procedure has not yet
1972 * completed.
1973 */
1974 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1975 cancel_delayed_work(&hdev->power_off);
1976
a5c8f270
MH
1977 /* After this call it is guaranteed that the setup procedure
1978 * has finished. This means that error conditions like RFKILL
1979 * or no valid public or static random address apply.
1980 */
e1d08f40
JH
1981 flush_workqueue(hdev->req_workqueue);
1982
cbed0ca1
JH
1983 err = hci_dev_do_open(hdev);
1984
1985 hci_dev_put(hdev);
1986
1987 return err;
1988}
1989
1da177e4
LT
1990static int hci_dev_do_close(struct hci_dev *hdev)
1991{
1992 BT_DBG("%s %p", hdev->name, hdev);
1993
78c04c0b
VCG
1994 cancel_delayed_work(&hdev->power_off);
1995
1da177e4
LT
1996 hci_req_cancel(hdev, ENODEV);
1997 hci_req_lock(hdev);
1998
1999 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 2000 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
2001 hci_req_unlock(hdev);
2002 return 0;
2003 }
2004
3eff45ea
GP
2005 /* Flush RX and TX works */
2006 flush_work(&hdev->tx_work);
b78752cc 2007 flush_work(&hdev->rx_work);
1da177e4 2008
16ab91ab 2009 if (hdev->discov_timeout > 0) {
e0f9309f 2010 cancel_delayed_work(&hdev->discov_off);
16ab91ab 2011 hdev->discov_timeout = 0;
5e5282bb 2012 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
310a3d48 2013 clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
2014 }
2015
a8b2d5c2 2016 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
2017 cancel_delayed_work(&hdev->service_cache);
2018
7ba8b4be
AG
2019 cancel_delayed_work_sync(&hdev->le_scan_disable);
2020
09fd0de5 2021 hci_dev_lock(hdev);
1f9b9a5d 2022 hci_inquiry_cache_flush(hdev);
1da177e4 2023 hci_conn_hash_flush(hdev);
09fd0de5 2024 hci_dev_unlock(hdev);
1da177e4
LT
2025
2026 hci_notify(hdev, HCI_DEV_DOWN);
2027
2028 if (hdev->flush)
2029 hdev->flush(hdev);
2030
2031 /* Reset device */
2032 skb_queue_purge(&hdev->cmd_q);
2033 atomic_set(&hdev->cmd_cnt, 1);
8af59467 2034 if (!test_bit(HCI_RAW, &hdev->flags) &&
3a6afbd2 2035 !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
a6c511c6 2036 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 2037 set_bit(HCI_INIT, &hdev->flags);
01178cd4 2038 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
2039 clear_bit(HCI_INIT, &hdev->flags);
2040 }
2041
c347b765
GP
2042 /* flush cmd work */
2043 flush_work(&hdev->cmd_work);
1da177e4
LT
2044
2045 /* Drop queues */
2046 skb_queue_purge(&hdev->rx_q);
2047 skb_queue_purge(&hdev->cmd_q);
2048 skb_queue_purge(&hdev->raw_q);
2049
2050 /* Drop last sent command */
2051 if (hdev->sent_cmd) {
b79f44c1 2052 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
2053 kfree_skb(hdev->sent_cmd);
2054 hdev->sent_cmd = NULL;
2055 }
2056
b6ddb638
JH
2057 kfree_skb(hdev->recv_evt);
2058 hdev->recv_evt = NULL;
2059
1da177e4
LT
2060 /* After this point our queues are empty
2061 * and no tasks are scheduled. */
2062 hdev->close(hdev);
2063
35b973c9
JH
2064 /* Clear flags */
2065 hdev->flags = 0;
2066 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
2067
93c311a0
MH
2068 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
2069 if (hdev->dev_type == HCI_BREDR) {
2070 hci_dev_lock(hdev);
2071 mgmt_powered(hdev, 0);
2072 hci_dev_unlock(hdev);
2073 }
8ee56540 2074 }
5add6af8 2075
ced5c338 2076 /* Controller radio is available but is currently powered down */
536619e8 2077 hdev->amp_status = AMP_STATUS_POWERED_DOWN;
ced5c338 2078
e59fda8d 2079 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 2080 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
e59fda8d 2081
1da177e4
LT
2082 hci_req_unlock(hdev);
2083
2084 hci_dev_put(hdev);
2085 return 0;
2086}
2087
2088int hci_dev_close(__u16 dev)
2089{
2090 struct hci_dev *hdev;
2091 int err;
2092
70f23020
AE
2093 hdev = hci_dev_get(dev);
2094 if (!hdev)
1da177e4 2095 return -ENODEV;
8ee56540 2096
0736cfa8
MH
2097 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
2098 err = -EBUSY;
2099 goto done;
2100 }
2101
8ee56540
MH
2102 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
2103 cancel_delayed_work(&hdev->power_off);
2104
1da177e4 2105 err = hci_dev_do_close(hdev);
8ee56540 2106
0736cfa8 2107done:
1da177e4
LT
2108 hci_dev_put(hdev);
2109 return err;
2110}
2111
/* Reset an HCI device: drop all queued traffic, flush the inquiry
 * cache and connection hash, reset flow-control counters and, unless
 * the device is in raw mode, send an HCI_Reset to the controller.
 *
 * Returns -ENODEV for an unknown index, -ENETDOWN when the device is
 * not up, and -EBUSY when it is owned by a user channel.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other synchronous HCI requests */
	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Give the driver a chance to flush its own queues */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore flow control: one free command slot, no in-flight
	 * ACL/SCO/LE packets.
	 */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
2156
/* Zero the per-device byte/packet/error statistics.  Refused with
 * -EBUSY for devices owned by a user channel.
 */
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}
2177
/* Handle the legacy HCISET* ioctls that tweak a single device setting.
 *
 * All of these commands only make sense for BR/EDR controllers, so
 * AMP devices and controllers with BR/EDR disabled are rejected with
 * -EOPNOTSUPP, and user-channel devices with -EBUSY.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are user controllable */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high and packet count in
		 * the low 16 bits (legacy ioctl ABI).
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}
2268
/* HCIGETDEVLIST ioctl helper: copy a bounded list of registered
 * device ids and flag words to user space.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Reject zero and anything that would make the allocation
	 * below excessively large.
	 */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* A legacy ioctl user takes over power management, so
		 * the pending auto power-off is cancelled.
		 */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Without mgmt the device is treated as pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
2315
/* HCIGETDEVINFO ioctl helper: fill a hci_dev_info snapshot for one
 * device and copy it to user space.  For LE-only controllers the ACL
 * fields carry the LE buffer parameters and the SCO fields are zero.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy ioctl user takes over power management */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	/* hdev->name and di.name are both fixed-size buffers of the
	 * same size, so strcpy cannot overflow here.
	 */
	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; next two bits: device type */
	di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
2364
2365/* ---- Interface to HCI drivers ---- */
2366
/* rfkill set_block callback: hard-block powers the device down unless
 * it is still in the setup phase (hci_power_on handles that case
 * later); unblock just clears the flag — the device is not powered
 * back up automatically.  User-channel devices cannot be blocked.
 */
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}
2386
/* rfkill callbacks for the per-controller Bluetooth kill switch */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
2390
/* Deferred power-on work, queued from hci_register_dev() and mgmt.
 * Opens the device and then re-checks conditions that were ignored
 * during setup (rfkill, missing addresses); schedules the auto
 * power-off timer when the device was only opened for setup.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	/* First successful power-on completes setup: announce the
	 * controller to the management interface.
	 */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
2422
/* Delayed auto power-off work: simply closes the device */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
2432
/* Delayed work ending a timed discoverable period; mgmt performs the
 * actual mode change and event emission.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}
2443
2aeb9a1a
JH
2444int hci_uuids_clear(struct hci_dev *hdev)
2445{
4821002c 2446 struct bt_uuid *uuid, *tmp;
2aeb9a1a 2447
4821002c
JH
2448 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
2449 list_del(&uuid->list);
2aeb9a1a
JH
2450 kfree(uuid);
2451 }
2452
2453 return 0;
2454}
2455
55ed8ca1
JH
2456int hci_link_keys_clear(struct hci_dev *hdev)
2457{
2458 struct list_head *p, *n;
2459
2460 list_for_each_safe(p, n, &hdev->link_keys) {
2461 struct link_key *key;
2462
2463 key = list_entry(p, struct link_key, list);
2464
2465 list_del(p);
2466 kfree(key);
2467 }
2468
2469 return 0;
2470}
2471
b899efaf
VCG
2472int hci_smp_ltks_clear(struct hci_dev *hdev)
2473{
2474 struct smp_ltk *k, *tmp;
2475
2476 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2477 list_del(&k->list);
2478 kfree(k);
2479 }
2480
2481 return 0;
2482}
2483
55ed8ca1
JH
2484struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
2485{
8035ded4 2486 struct link_key *k;
55ed8ca1 2487
8035ded4 2488 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
2489 if (bacmp(bdaddr, &k->bdaddr) == 0)
2490 return k;
55ed8ca1
JH
2491
2492 return NULL;
2493}
2494
745c0ce3 2495static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 2496 u8 key_type, u8 old_key_type)
d25e28ab
JH
2497{
2498 /* Legacy key */
2499 if (key_type < 0x03)
745c0ce3 2500 return true;
d25e28ab
JH
2501
2502 /* Debug keys are insecure so don't store them persistently */
2503 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 2504 return false;
d25e28ab
JH
2505
2506 /* Changed combination key and there's no previous one */
2507 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 2508 return false;
d25e28ab
JH
2509
2510 /* Security mode 3 case */
2511 if (!conn)
745c0ce3 2512 return true;
d25e28ab
JH
2513
2514 /* Neither local nor remote side had no-bonding as requirement */
2515 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 2516 return true;
d25e28ab
JH
2517
2518 /* Local side had dedicated bonding as requirement */
2519 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 2520 return true;
d25e28ab
JH
2521
2522 /* Remote side had dedicated bonding as requirement */
2523 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 2524 return true;
d25e28ab
JH
2525
2526 /* If none of the above criteria match, then don't store the key
2527 * persistently */
745c0ce3 2528 return false;
d25e28ab
JH
2529}
2530
c9839a11 2531struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 2532{
c9839a11 2533 struct smp_ltk *k;
75d262c2 2534
c9839a11
VCG
2535 list_for_each_entry(k, &hdev->long_term_keys, list) {
2536 if (k->ediv != ediv ||
a8c5fb1a 2537 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
2538 continue;
2539
c9839a11 2540 return k;
75d262c2
VCG
2541 }
2542
2543 return NULL;
2544}
75d262c2 2545
c9839a11 2546struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 2547 u8 addr_type)
75d262c2 2548{
c9839a11 2549 struct smp_ltk *k;
75d262c2 2550
c9839a11
VCG
2551 list_for_each_entry(k, &hdev->long_term_keys, list)
2552 if (addr_type == k->bdaddr_type &&
a8c5fb1a 2553 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
2554 return k;
2555
2556 return NULL;
2557}
75d262c2 2558
d25e28ab 2559int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 2560 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
2561{
2562 struct link_key *key, *old_key;
745c0ce3
VA
2563 u8 old_key_type;
2564 bool persistent;
55ed8ca1
JH
2565
2566 old_key = hci_find_link_key(hdev, bdaddr);
2567 if (old_key) {
2568 old_key_type = old_key->type;
2569 key = old_key;
2570 } else {
12adcf3a 2571 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
2572 key = kzalloc(sizeof(*key), GFP_ATOMIC);
2573 if (!key)
2574 return -ENOMEM;
2575 list_add(&key->list, &hdev->link_keys);
2576 }
2577
6ed93dc6 2578 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 2579
d25e28ab
JH
2580 /* Some buggy controller combinations generate a changed
2581 * combination key for legacy pairing even when there's no
2582 * previous key */
2583 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 2584 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 2585 type = HCI_LK_COMBINATION;
655fe6ec
JH
2586 if (conn)
2587 conn->key_type = type;
2588 }
d25e28ab 2589
55ed8ca1 2590 bacpy(&key->bdaddr, bdaddr);
9b3b4460 2591 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
2592 key->pin_len = pin_len;
2593
b6020ba0 2594 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 2595 key->type = old_key_type;
4748fed2
JH
2596 else
2597 key->type = type;
2598
4df378a1
JH
2599 if (!new_key)
2600 return 0;
2601
2602 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
2603
744cf19e 2604 mgmt_new_link_key(hdev, key, persistent);
4df378a1 2605
6ec5bcad
VA
2606 if (conn)
2607 conn->flush_key = !persistent;
55ed8ca1
JH
2608
2609 return 0;
2610}
2611
c9839a11 2612int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 2613 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 2614 ediv, u8 rand[8])
75d262c2 2615{
c9839a11 2616 struct smp_ltk *key, *old_key;
75d262c2 2617
c9839a11
VCG
2618 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
2619 return 0;
75d262c2 2620
c9839a11
VCG
2621 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
2622 if (old_key)
75d262c2 2623 key = old_key;
c9839a11
VCG
2624 else {
2625 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
2626 if (!key)
2627 return -ENOMEM;
c9839a11 2628 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
2629 }
2630
75d262c2 2631 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
2632 key->bdaddr_type = addr_type;
2633 memcpy(key->val, tk, sizeof(key->val));
2634 key->authenticated = authenticated;
2635 key->ediv = ediv;
2636 key->enc_size = enc_size;
2637 key->type = type;
2638 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 2639
c9839a11
VCG
2640 if (!new_key)
2641 return 0;
75d262c2 2642
261cc5aa
VCG
2643 if (type & HCI_SMP_LTK)
2644 mgmt_new_ltk(hdev, key, 1);
2645
75d262c2
VCG
2646 return 0;
2647}
2648
/* Delete the stored link key for a remote address.
 * Returns -ENOENT when no key exists for that address.
 */
int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}
2664
b899efaf
VCG
2665int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2666{
2667 struct smp_ltk *k, *tmp;
2668
2669 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2670 if (bacmp(bdaddr, &k->bdaddr))
2671 continue;
2672
6ed93dc6 2673 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
2674
2675 list_del(&k->list);
2676 kfree(k);
2677 }
2678
2679 return 0;
2680}
2681
6bd32326 2682/* HCI command timer function */
bda4f23a 2683static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
2684{
2685 struct hci_dev *hdev = (void *) arg;
2686
bda4f23a
AE
2687 if (hdev->sent_cmd) {
2688 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2689 u16 opcode = __le16_to_cpu(sent->opcode);
2690
2691 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2692 } else {
2693 BT_ERR("%s command tx timeout", hdev->name);
2694 }
2695
6bd32326 2696 atomic_set(&hdev->cmd_cnt, 1);
c347b765 2697 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
2698}
2699
2763eda6 2700struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 2701 bdaddr_t *bdaddr)
2763eda6
SJ
2702{
2703 struct oob_data *data;
2704
2705 list_for_each_entry(data, &hdev->remote_oob_data, list)
2706 if (bacmp(bdaddr, &data->bdaddr) == 0)
2707 return data;
2708
2709 return NULL;
2710}
2711
/* Delete the stored OOB data for a remote address.
 * Returns -ENOENT when no entry exists.
 */
int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}
2727
2728int hci_remote_oob_data_clear(struct hci_dev *hdev)
2729{
2730 struct oob_data *data, *n;
2731
2732 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2733 list_del(&data->list);
2734 kfree(data);
2735 }
2736
2737 return 0;
2738}
2739
/* Store (or refresh) out-of-band pairing data (hash C / randomizer R)
 * for a remote address.  Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* kmalloc is fine: every field is assigned below */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
2763
/* Find a blacklist entry matching both address and address type.
 * Returns NULL when the address is not blacklisted.
 */
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}
2776
2777int hci_blacklist_clear(struct hci_dev *hdev)
2778{
2779 struct list_head *p, *n;
2780
2781 list_for_each_safe(p, n, &hdev->blacklist) {
b9ee0a78 2782 struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);
b2a66aad
AJ
2783
2784 list_del(p);
2785 kfree(b);
2786 }
2787
2788 return 0;
2789}
2790
88c1fe4b 2791int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2792{
2793 struct bdaddr_list *entry;
b2a66aad 2794
b9ee0a78 2795 if (!bacmp(bdaddr, BDADDR_ANY))
b2a66aad
AJ
2796 return -EBADF;
2797
b9ee0a78 2798 if (hci_blacklist_lookup(hdev, bdaddr, type))
5e762444 2799 return -EEXIST;
b2a66aad
AJ
2800
2801 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
2802 if (!entry)
2803 return -ENOMEM;
b2a66aad
AJ
2804
2805 bacpy(&entry->bdaddr, bdaddr);
b9ee0a78 2806 entry->bdaddr_type = type;
b2a66aad
AJ
2807
2808 list_add(&entry->list, &hdev->blacklist);
2809
88c1fe4b 2810 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
2811}
2812
88c1fe4b 2813int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
2814{
2815 struct bdaddr_list *entry;
b2a66aad 2816
b9ee0a78 2817 if (!bacmp(bdaddr, BDADDR_ANY))
5e762444 2818 return hci_blacklist_clear(hdev);
b2a66aad 2819
b9ee0a78 2820 entry = hci_blacklist_lookup(hdev, bdaddr, type);
1ec918ce 2821 if (!entry)
5e762444 2822 return -ENOENT;
b2a66aad
AJ
2823
2824 list_del(&entry->list);
2825 kfree(entry);
2826
88c1fe4b 2827 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
2828}
2829
4c87eaab 2830static void inquiry_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 2831{
4c87eaab
AG
2832 if (status) {
2833 BT_ERR("Failed to start inquiry: status %d", status);
7ba8b4be 2834
4c87eaab
AG
2835 hci_dev_lock(hdev);
2836 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2837 hci_dev_unlock(hdev);
2838 return;
2839 }
7ba8b4be
AG
2840}
2841
4c87eaab 2842static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
7ba8b4be 2843{
4c87eaab
AG
2844 /* General inquiry access code (GIAC) */
2845 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2846 struct hci_request req;
2847 struct hci_cp_inquiry cp;
7ba8b4be
AG
2848 int err;
2849
4c87eaab
AG
2850 if (status) {
2851 BT_ERR("Failed to disable LE scanning: status %d", status);
2852 return;
2853 }
7ba8b4be 2854
4c87eaab
AG
2855 switch (hdev->discovery.type) {
2856 case DISCOV_TYPE_LE:
2857 hci_dev_lock(hdev);
2858 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2859 hci_dev_unlock(hdev);
2860 break;
7ba8b4be 2861
4c87eaab
AG
2862 case DISCOV_TYPE_INTERLEAVED:
2863 hci_req_init(&req, hdev);
7ba8b4be 2864
4c87eaab
AG
2865 memset(&cp, 0, sizeof(cp));
2866 memcpy(&cp.lap, lap, sizeof(cp.lap));
2867 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2868 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
7ba8b4be 2869
4c87eaab 2870 hci_dev_lock(hdev);
7dbfac1d 2871
4c87eaab 2872 hci_inquiry_cache_flush(hdev);
7dbfac1d 2873
4c87eaab
AG
2874 err = hci_req_run(&req, inquiry_complete);
2875 if (err) {
2876 BT_ERR("Inquiry request failed: err %d", err);
2877 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2878 }
7dbfac1d 2879
4c87eaab
AG
2880 hci_dev_unlock(hdev);
2881 break;
7dbfac1d 2882 }
7dbfac1d
AG
2883}
2884
/* Delayed work that ends a timed LE scan by sending
 * LE_Set_Scan_Enable(disable); the follow-up logic runs in
 * le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
2905
/* Alloc HCI device.
 *
 * Allocates a zeroed hci_dev and initializes defaults, locks, lists,
 * work items and queues.  The returned device is not yet registered;
 * pair with hci_register_dev()/hci_free_dev().  Returns NULL on OOM.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline capabilities every controller must support */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff intervals in slots (0.625 ms units) */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* LE scan/connection defaults in controller units */
	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2965
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2973
/* Register HCI device.
 *
 * Assigns an index (AMP controllers never get index 0, so the index
 * doubles as the AMP controller ID), creates the per-device work
 * queues, sysfs/debugfs entries and rfkill switch, links the device
 * into the global list and queues the initial power-on.  Returns the
 * assigned index or a negative error; on error all partially created
 * resources are unwound.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must provide at least open and close callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure is non-fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init.
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
3071
/* Unregister HCI device.
 *
 * Tears down everything hci_register_dev() set up, in reverse:
 * unlink from the global list, close the device, flush pending work,
 * notify mgmt, remove rfkill/sysfs/debugfs, destroy the work queues,
 * purge stored keys and OOB data, drop the registration reference
 * and finally release the index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Keep a copy: hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
3132
/* Suspend HCI device: just broadcast the event to HCI listeners */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
3140
/* Resume HCI device: just broadcast the event to HCI listeners */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
3148
/* Receive frame from HCI drivers.
 *
 * Marks the skb as incoming, timestamps it and queues it on the RX
 * queue for hci_rx_work.  The skb is consumed in all cases; frames
 * arriving while the device is neither UP nor in INIT are dropped
 * with -ENXIO.
 */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
3170
33e882a5 3171static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 3172 int count, __u8 index)
33e882a5
SS
3173{
3174 int len = 0;
3175 int hlen = 0;
3176 int remain = count;
3177 struct sk_buff *skb;
3178 struct bt_skb_cb *scb;
3179
3180 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 3181 index >= NUM_REASSEMBLY)
33e882a5
SS
3182 return -EILSEQ;
3183
3184 skb = hdev->reassembly[index];
3185
3186 if (!skb) {
3187 switch (type) {
3188 case HCI_ACLDATA_PKT:
3189 len = HCI_MAX_FRAME_SIZE;
3190 hlen = HCI_ACL_HDR_SIZE;
3191 break;
3192 case HCI_EVENT_PKT:
3193 len = HCI_MAX_EVENT_SIZE;
3194 hlen = HCI_EVENT_HDR_SIZE;
3195 break;
3196 case HCI_SCODATA_PKT:
3197 len = HCI_MAX_SCO_SIZE;
3198 hlen = HCI_SCO_HDR_SIZE;
3199 break;
3200 }
3201
1e429f38 3202 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
3203 if (!skb)
3204 return -ENOMEM;
3205
3206 scb = (void *) skb->cb;
3207 scb->expect = hlen;
3208 scb->pkt_type = type;
3209
33e882a5
SS
3210 hdev->reassembly[index] = skb;
3211 }
3212
3213 while (count) {
3214 scb = (void *) skb->cb;
89bb46d0 3215 len = min_t(uint, scb->expect, count);
33e882a5
SS
3216
3217 memcpy(skb_put(skb, len), data, len);
3218
3219 count -= len;
3220 data += len;
3221 scb->expect -= len;
3222 remain = count;
3223
3224 switch (type) {
3225 case HCI_EVENT_PKT:
3226 if (skb->len == HCI_EVENT_HDR_SIZE) {
3227 struct hci_event_hdr *h = hci_event_hdr(skb);
3228 scb->expect = h->plen;
3229
3230 if (skb_tailroom(skb) < scb->expect) {
3231 kfree_skb(skb);
3232 hdev->reassembly[index] = NULL;
3233 return -ENOMEM;
3234 }
3235 }
3236 break;
3237
3238 case HCI_ACLDATA_PKT:
3239 if (skb->len == HCI_ACL_HDR_SIZE) {
3240 struct hci_acl_hdr *h = hci_acl_hdr(skb);
3241 scb->expect = __le16_to_cpu(h->dlen);
3242
3243 if (skb_tailroom(skb) < scb->expect) {
3244 kfree_skb(skb);
3245 hdev->reassembly[index] = NULL;
3246 return -ENOMEM;
3247 }
3248 }
3249 break;
3250
3251 case HCI_SCODATA_PKT:
3252 if (skb->len == HCI_SCO_HDR_SIZE) {
3253 struct hci_sco_hdr *h = hci_sco_hdr(skb);
3254 scb->expect = h->dlen;
3255
3256 if (skb_tailroom(skb) < scb->expect) {
3257 kfree_skb(skb);
3258 hdev->reassembly[index] = NULL;
3259 return -ENOMEM;
3260 }
3261 }
3262 break;
3263 }
3264
3265 if (scb->expect == 0) {
3266 /* Complete frame */
3267
3268 bt_cb(skb)->pkt_type = type;
e1a26170 3269 hci_recv_frame(hdev, skb);
33e882a5
SS
3270
3271 hdev->reassembly[index] = NULL;
3272 return remain;
3273 }
3274 }
3275
3276 return remain;
3277}
3278
ef222013
MH
3279int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
3280{
f39a3c06
SS
3281 int rem = 0;
3282
ef222013
MH
3283 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
3284 return -EILSEQ;
3285
da5f6c37 3286 while (count) {
1e429f38 3287 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
3288 if (rem < 0)
3289 return rem;
ef222013 3290
f39a3c06
SS
3291 data += (count - rem);
3292 count = rem;
f81c6224 3293 }
ef222013 3294
f39a3c06 3295 return rem;
ef222013
MH
3296}
3297EXPORT_SYMBOL(hci_recv_fragment);
3298
99811510
SS
3299#define STREAM_REASSEMBLY 0
3300
3301int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
3302{
3303 int type;
3304 int rem = 0;
3305
da5f6c37 3306 while (count) {
99811510
SS
3307 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
3308
3309 if (!skb) {
3310 struct { char type; } *pkt;
3311
3312 /* Start of the frame */
3313 pkt = data;
3314 type = pkt->type;
3315
3316 data++;
3317 count--;
3318 } else
3319 type = bt_cb(skb)->pkt_type;
3320
1e429f38 3321 rem = hci_reassembly(hdev, type, data, count,
a8c5fb1a 3322 STREAM_REASSEMBLY);
99811510
SS
3323 if (rem < 0)
3324 return rem;
3325
3326 data += (count - rem);
3327 count = rem;
f81c6224 3328 }
99811510
SS
3329
3330 return rem;
3331}
3332EXPORT_SYMBOL(hci_recv_stream_fragment);
3333
1da177e4
LT
3334/* ---- Interface to upper protocols ---- */
3335
1da177e4
LT
/* Register an upper-protocol callback structure on the global hci_cb_list.
 * The list is walked by the core on connection events; the write lock
 * serializes registration against those walks. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
3347
/* Remove a previously registered callback structure from hci_cb_list.
 * Counterpart to hci_register_cb(). Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
3359
/* Hand one fully formed HCI packet to the driver. Consumes @skb.
 * Also mirrors the packet to the monitor channel and, in promiscuous
 * mode, to raw HCI sockets, before ownership passes to hdev->send().
 */
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	/* Driver takes ownership of skb; a negative return is only logged */
	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}
3381
3119ae95
JH
3382void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
3383{
3384 skb_queue_head_init(&req->cmd_q);
3385 req->hdev = hdev;
5d73e034 3386 req->err = 0;
3119ae95
JH
3387}
3388
/* Submit a built HCI request for execution.
 *
 * @req:      request whose cmd_q was filled via hci_req_add*()
 * @complete: callback invoked when the last command completes
 *
 * Returns req->err if building failed (queued commands are discarded),
 * -ENODATA for an empty request, 0 on successful submission. On success
 * the request's commands are spliced onto hdev->cmd_q and the command
 * work is kicked.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* The completion callback rides on the last command's skb */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice atomically so the request's commands stay contiguous
	 * in hdev->cmd_q.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
3420
1ca3a9d0 3421static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
07dc93dd 3422 u32 plen, const void *param)
1da177e4
LT
3423{
3424 int len = HCI_COMMAND_HDR_SIZE + plen;
3425 struct hci_command_hdr *hdr;
3426 struct sk_buff *skb;
3427
1da177e4 3428 skb = bt_skb_alloc(len, GFP_ATOMIC);
1ca3a9d0
JH
3429 if (!skb)
3430 return NULL;
1da177e4
LT
3431
3432 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 3433 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
3434 hdr->plen = plen;
3435
3436 if (plen)
3437 memcpy(skb_put(skb, plen), param, plen);
3438
3439 BT_DBG("skb len %d", skb->len);
3440
0d48d939 3441 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
c78ae283 3442
1ca3a9d0
JH
3443 return skb;
3444}
3445
/* Send HCI command */
/* Queue a stand-alone HCI command for transmission.
 *
 * @hdev:   target controller
 * @opcode: HCI command opcode
 * @plen:   parameter length in bytes
 * @param:  parameter bytes (may be NULL when @plen is 0)
 *
 * Returns 0 on success or -ENOMEM if the command skb could not be built.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4 3470
/* Queue a command to an asynchronous HCI request */
/* Append one HCI command to a request being built.
 *
 * @req:    request context (initialized with hci_req_init())
 * @opcode: HCI command opcode
 * @plen:   parameter length
 * @param:  parameter bytes
 * @event:  event code to wait for instead of Command Complete/Status
 *	    (0 for the default completion event)
 *
 * On allocation failure req->err is set and the request becomes a no-op
 * for subsequent additions; hci_req_run() will report the error.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first queued command marks the start of the request */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
3501
/* Convenience wrapper: queue a command that completes with the default
 * Command Complete/Status event (event == 0).
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
3507
1da177e4 3508/* Get data from the previously sent command */
a9de9248 3509void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
3510{
3511 struct hci_command_hdr *hdr;
3512
3513 if (!hdev->sent_cmd)
3514 return NULL;
3515
3516 hdr = (void *) hdev->sent_cmd->data;
3517
a9de9248 3518 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
3519 return NULL;
3520
f0e09510 3521 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
3522
3523 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
3524}
3525
/* Send ACL data */
/* Prepend an ACL data header to @skb: packed handle+flags and the
 * little-endian data length (skb->len before the push).
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
3538
/* Add ACL headers to @skb (and any frag_list fragments) and place the
 * result on @queue. For BR/EDR the connection handle is used; for AMP
 * the channel's logical-link handle. Fragments after the first are
 * re-flagged as ACL_CONT and queued atomically with the head so the
 * scheduler never interleaves another packet between them.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict skb to its linear head; fragments hang off frag_list */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT instead of ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			/* NOTE(review): continuations always use conn->handle,
			 * unlike the head fragment which uses chan->handle on
			 * AMP controllers — confirm this asymmetry is intended.
			 */
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
3596
/* Queue an ACL packet on the channel's data queue and schedule the TX
 * work to push it to the controller.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
3607
/* Send SCO data */
/* Prepend a SCO header (handle + one-byte length) to @skb, queue it on
 * the connection's data queue and schedule TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
3628
3629/* ---- HCI TX task (outgoing data) ---- */
3630
3631/* HCI Connection scheduler */
/* Connection-level scheduler: pick the connection of @type with the
 * fewest in-flight packets (fairness) and compute its send quota from
 * the controller's free buffer count for that link type.
 *
 * Returns the chosen connection (or NULL) and writes the quota — at
 * least 1 when a connection is found — through @quote.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Only consider connections that can carry data */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free controller buffers for this link type; LE falls
		 * back to the ACL pool when no dedicated LE buffers exist.
		 */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the buffers evenly; guarantee a quota of >= 1 */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
3691
/* TX timeout handler: the controller stopped acknowledging packets on
 * links of @type, so disconnect every such connection that still has
 * unacked data to recover the stalled buffers.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
3712
/* Channel-level scheduler: among all channels on connections of @type,
 * pick one whose head packet has the highest skb->priority, breaking
 * ties by fewest packets in flight on the owning connection. Computes a
 * send quota from the free buffer count, as in hci_low_sent().
 *
 * Returns the chosen channel or NULL; *quote is only written when a
 * channel is found.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart fairness counters
			 * so only channels at this priority compete.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Prefer the least-busy connection at this priority */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free controller buffers for the chosen channel's link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split the buffers evenly; guarantee a quota of >= 1 */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3794
/* Anti-starvation pass, run after a scheduling round on links of @type.
 * Channels that transmitted this round get their per-round counter reset;
 * channels that did not transmit but still have queued data get their
 * head packet's priority promoted to HCI_PRIO_MAX - 1 so they win the
 * next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got serviced: reset its round counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel: bump it just below maximum */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3844
/* Number of controller data blocks consumed by @skb's payload (header
 * excluded), for block-based flow control. Rounds up to whole blocks.
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
3850
/* Detect a stalled ACL link: if no TX buffers are free (@cnt == 0) and
 * nothing has been acked since the last transmit for longer than the
 * ACL TX timeout, kill the stalled connections. Skipped in raw mode.
 */
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}
1da177e4 3861
/* Packet-based ACL scheduler: repeatedly pick the best channel via
 * hci_chan_sent() and drain up to its quota, stopping a channel early if
 * a lower-priority packet reaches the head of its queue. Runs the
 * starvation pass if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Leave sniff mode before transmitting if needed */
			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was transmitted: promote starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3899
/* Block-based ACL scheduler (used by controllers with block flow
 * control, e.g. AMP). Like hci_sched_acl_pkt() but accounting is done
 * in data blocks: a packet needing more blocks than remain aborts the
 * whole round.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers schedule AMP logical links */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Not enough blocks left for this packet: give up */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3953
/* Dispatch ACL scheduling to the packet- or block-based variant
 * depending on the controller's flow-control mode. Bails out early when
 * there are no links of the kind this controller carries.
 */
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}
3976
1da177e4 3977/* Schedule SCO */
6039aa73 3978static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
3979{
3980 struct hci_conn *conn;
3981 struct sk_buff *skb;
3982 int quote;
3983
3984 BT_DBG("%s", hdev->name);
3985
52087a79
LAD
3986 if (!hci_conn_num(hdev, SCO_LINK))
3987 return;
3988
1da177e4
LT
3989 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3990 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3991 BT_DBG("skb %p len %d", skb, skb->len);
57d17d70 3992 hci_send_frame(hdev, skb);
1da177e4
LT
3993
3994 conn->sent++;
3995 if (conn->sent == ~0)
3996 conn->sent = 0;
3997 }
3998 }
3999}
4000
/* eSCO scheduler: identical strategy to hci_sched_sco() but for
 * ESCO_LINK connections, sharing the same SCO buffer pool (sco_cnt).
 */
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
4024
/* LE scheduler: channel-based like ACL, but draws from the LE buffer
 * pool when the controller has one (le_pkts != 0) and otherwise borrows
 * from the ACL pool. Also performs its own stall detection, since
 * __check_timeout() only covers ACL.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the dedicated LE pool if present, else the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
4075
/* TX work item: run all per-link-type schedulers (unless the device is
 * bound to a user channel, which bypasses the schedulers) and then flush
 * any raw, untyped packets queued on raw_q straight to the driver.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}
4096
25985edc 4097/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
4098
/* ACL data packet */
/* Process an inbound ACL data packet: unpack handle/flags from the ACL
 * header, look up the connection and pass the payload to L2CAP. The skb
 * is consumed either by l2cap_recv_acldata() or freed here when no
 * matching connection exists.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs a 12-bit handle with PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
4134
/* SCO data packet */
/* Process an inbound SCO data packet: strip the SCO header, look up the
 * connection by handle and hand the payload to the SCO layer. The skb is
 * consumed by sco_recv_scodata() or freed when the handle is unknown.
 */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
4165
9238f36a
JH
4166static bool hci_req_is_complete(struct hci_dev *hdev)
4167{
4168 struct sk_buff *skb;
4169
4170 skb = skb_peek(&hdev->cmd_q);
4171 if (!skb)
4172 return true;
4173
4174 return bt_cb(skb)->req.start;
4175}
4176
/* Re-queue a clone of the last sent command at the head of cmd_q.
 * Used when a spontaneous reset from the controller voided the pending
 * command. HCI_Reset itself is never resent.
 */
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	/* Head of the queue so it goes out before anything else */
	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
4198
/* Handle completion of the command with @opcode and @status in the
 * context of asynchronous HCI requests. Decides whether the enclosing
 * request is finished and, if so, invokes its completion callback.
 * On failure, all remaining commands of the same request are dropped
 * from cmd_q.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		/* Stop at the start of the next request; put it back */
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
4264
/* RX work item: drain hdev->rx_q, mirroring each packet to the monitor
 * channel and (in promiscuous mode) to raw sockets, then dispatch by
 * packet type to the event / ACL / SCO handlers. Packets are dropped in
 * raw or user-channel mode, and data packets are dropped during init.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw-only or user-channel devices: the stack does not
		 * process packets itself.
		 */
		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
4320
/* Command work item: if the controller can accept a command (cmd_cnt is
 * non-zero), dequeue the next one, keep a clone in hdev->sent_cmd for
 * completion matching/resend, send it, and arm the command timeout.
 * If cloning fails the command is requeued and the work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous sent command's clone */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			/* During reset no timeout is wanted; otherwise
			 * (re)arm the command watchdog.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
This page took 1.00279 seconds and 5 git commands to generate.