net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static ssize_t dut_mode_read(struct file *file, char __user *user_buf,
                             size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DUT_MODE, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t dut_mode_write(struct file *file, const char __user *user_buf,
                              size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        struct sk_buff *skb;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));
        bool enable;
        int err;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';
        if (strtobool(buf, &enable))
                return -EINVAL;

        if (enable == test_bit(HCI_DUT_MODE, &hdev->dev_flags))
                return -EALREADY;

        hci_req_lock(hdev);
        if (enable)
                skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        else
                skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL,
                                     HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        change_bit(HCI_DUT_MODE, &hdev->dev_flags);

        return count;
}

static const struct file_operations dut_mode_fops = {
        .open           = simple_open,
        .read           = dut_mode_read,
        .write          = dut_mode_write,
        .llseek         = default_llseek,
};
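
/* Illustrative sketch, not part of the original file: the "dut_mode"
 * attribute above is driven from userspace through debugfs. The path
 * below assumes debugfs is mounted at /sys/kernel/debug and that the
 * controller is hci0; the helper name is hypothetical.
 */
#if 0 /* example only */
#include <fcntl.h>
#include <unistd.h>

static int example_enable_dut_mode(void)
{
        /* dut_mode_write() parses the buffer with strtobool(), so
         * "1"/"Y"/"y" enable and "0"/"N"/"n" disable.
         */
        int fd = open("/sys/kernel/debug/bluetooth/hci0/dut_mode", O_WRONLY);

        if (fd < 0)
                return -1;

        if (write(fd, "1", 1) != 1) {
                close(fd);
                return -1;
        }

        return close(fd);
}
#endif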

static int features_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        u8 p;

        hci_dev_lock(hdev);
        for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                seq_printf(f, "%2u: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n", p,
                           hdev->features[p][0], hdev->features[p][1],
                           hdev->features[p][2], hdev->features[p][3],
                           hdev->features[p][4], hdev->features[p][5],
                           hdev->features[p][6], hdev->features[p][7]);
        }
        if (lmp_le_capable(hdev))
                seq_printf(f, "LE: 0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x "
                           "0x%2.2x 0x%2.2x 0x%2.2x 0x%2.2x\n",
                           hdev->le_features[0], hdev->le_features[1],
                           hdev->le_features[2], hdev->le_features[3],
                           hdev->le_features[4], hdev->le_features[5],
                           hdev->le_features[6], hdev->le_features[7]);
        hci_dev_unlock(hdev);

        return 0;
}

static int features_open(struct inode *inode, struct file *file)
{
        return single_open(file, features_show, inode->i_private);
}

static const struct file_operations features_fops = {
        .open           = features_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int blacklist_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bdaddr_list *b;

        hci_dev_lock(hdev);
        list_for_each_entry(b, &hdev->blacklist, list)
                seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
        hci_dev_unlock(hdev);

        return 0;
}

static int blacklist_open(struct inode *inode, struct file *file)
{
        return single_open(file, blacklist_show, inode->i_private);
}

static const struct file_operations blacklist_fops = {
        .open           = blacklist_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int uuids_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct bt_uuid *uuid;

        hci_dev_lock(hdev);
        list_for_each_entry(uuid, &hdev->uuids, list) {
                u8 i, val[16];

                /* The Bluetooth UUID values are stored in big endian,
                 * but with reversed byte order. So convert them into
                 * the right order for the %pUb modifier.
                 */
                for (i = 0; i < 16; i++)
                        val[i] = uuid->uuid[15 - i];

                seq_printf(f, "%pUb\n", val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int uuids_open(struct inode *inode, struct file *file)
{
        return single_open(file, uuids_show, inode->i_private);
}

static const struct file_operations uuids_fops = {
        .open           = uuids_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};
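
/* Illustrative sketch, not part of the original file: a worked example of
 * the byte-order conversion done in uuids_show(). The Bluetooth base UUID
 * is stored as uuid->uuid[] = { 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, ... };
 * reversing it yields the bytes that %pUb prints in the canonical
 * "00000000-0000-1000-8000-00805f9b34fb" form.
 */
#if 0 /* example only */
static void example_uuid_byte_order(struct seq_file *f,
                                    const struct bt_uuid *uuid)
{
        u8 i, val[16];

        /* Same reversal as uuids_show(): val[0] takes uuid->uuid[15],
         * val[15] takes uuid->uuid[0].
         */
        for (i = 0; i < 16; i++)
                val[i] = uuid->uuid[15 - i];

        seq_printf(f, "%pUb\n", val);
}
#endif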

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int link_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key = list_entry(p, struct link_key, list);
                seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type,
                           HCI_LINK_KEY_SIZE, key->val, key->pin_len);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int link_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, link_keys_show, inode->i_private);
}

static const struct file_operations link_keys_fops = {
        .open           = link_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf,
                                   size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static const struct file_operations use_debug_keys_fops = {
        .open           = simple_open,
        .read           = use_debug_keys_read,
        .llseek         = default_llseek,
};

static int dev_class_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2],
                   hdev->dev_class[1], hdev->dev_class[0]);
        hci_dev_unlock(hdev);

        return 0;
}

static int dev_class_open(struct inode *inode, struct file *file)
{
        return single_open(file, dev_class_show, inode->i_private);
}

static const struct file_operations dev_class_fops = {
        .open           = dev_class_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int voice_setting_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->voice_setting;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(voice_setting_fops, voice_setting_get,
                        NULL, "0x%4.4llx\n");

static int auto_accept_delay_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        hdev->auto_accept_delay = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int auto_accept_delay_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->auto_accept_delay;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get,
                        auto_accept_delay_set, "%llu\n");
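
/* Illustrative note, not part of the original file: DEFINE_SIMPLE_ATTRIBUTE()
 * generates the file_operations boilerplate for a single u64 value, which is
 * why each attribute above only supplies a get/set pair and a printf format.
 * Hand-written, the auto_accept_delay wiring would look roughly like this:
 */
#if 0 /* example only */
static int auto_accept_delay_open(struct inode *inode, struct file *file)
{
        return simple_attr_open(inode, file, auto_accept_delay_get,
                                auto_accept_delay_set, "%llu\n");
}

static const struct file_operations auto_accept_delay_fops = {
        .owner          = THIS_MODULE,
        .open           = auto_accept_delay_open,
        .release        = simple_attr_release,
        .read           = simple_attr_read,
        .write          = simple_attr_write,
        .llseek         = generic_file_llseek,
};
#endif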

static int ssp_debug_mode_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;
        struct sk_buff *skb;
        __u8 mode;
        int err;

        if (val != 0 && val != 1)
                return -EINVAL;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        hci_req_lock(hdev);
        mode = val;
        skb = __hci_cmd_sync(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, sizeof(mode),
                             &mode, HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        err = -bt_to_errno(skb->data[0]);
        kfree_skb(skb);

        if (err < 0)
                return err;

        hci_dev_lock(hdev);
        hdev->ssp_debug_mode = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int ssp_debug_mode_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->ssp_debug_mode;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssp_debug_mode_fops, ssp_debug_mode_get,
                        ssp_debug_mode_set, "%llu\n");

static int idle_timeout_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && (val < 500 || val > 3600000))
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->idle_timeout = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int idle_timeout_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->idle_timeout;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(idle_timeout_fops, idle_timeout_get,
                        idle_timeout_set, "%llu\n");

static int sniff_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val > hdev->sniff_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get,
                        sniff_min_interval_set, "%llu\n");

static int sniff_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val == 0 || val % 2 || val < hdev->sniff_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->sniff_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int sniff_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->sniff_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get,
                        sniff_max_interval_set, "%llu\n");

static int static_address_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;

        hci_dev_lock(hdev);
        seq_printf(f, "%pMR\n", &hdev->static_addr);
        hci_dev_unlock(hdev);

        return 0;
}

static int static_address_open(struct inode *inode, struct file *file)
{
        return single_open(file, static_address_show, inode->i_private);
}

static const struct file_operations static_address_fops = {
        .open           = static_address_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int own_address_type_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val != 0 && val != 1)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->own_addr_type = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int own_address_type_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->own_addr_type;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(own_address_type_fops, own_address_type_get,
                        own_address_type_set, "%llu\n");

static int long_term_keys_show(struct seq_file *f, void *ptr)
{
        struct hci_dev *hdev = f->private;
        struct list_head *p, *n;

        hci_dev_lock(hdev);
        list_for_each_safe(p, n, &hdev->long_term_keys) {
                struct smp_ltk *ltk = list_entry(p, struct smp_ltk, list);
                seq_printf(f, "%pMR (type %u) %u %u %u %.4x %*phN %*phN\n",
                           &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
                           ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
                           8, ltk->rand, 16, ltk->val);
        }
        hci_dev_unlock(hdev);

        return 0;
}

static int long_term_keys_open(struct inode *inode, struct file *file)
{
        return single_open(file, long_term_keys_show, inode->i_private);
}

static const struct file_operations long_term_keys_fops = {
        .open           = long_term_keys_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int conn_min_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_min_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_min_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_min_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
                        conn_min_interval_set, "%llu\n");

static int conn_max_interval_set(void *data, u64 val)
{
        struct hci_dev *hdev = data;

        if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
                return -EINVAL;

        hci_dev_lock(hdev);
        hdev->le_conn_max_interval = val;
        hci_dev_unlock(hdev);

        return 0;
}

static int conn_max_interval_get(void *data, u64 *val)
{
        struct hci_dev *hdev = data;

        hci_dev_lock(hdev);
        *val = hdev->le_conn_max_interval;
        hci_dev_unlock(hdev);

        return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get,
                        conn_max_interval_set, "%llu\n");

static ssize_t lowpan_read(struct file *file, char __user *user_buf,
                           size_t count, loff_t *ppos)
{
        struct hci_dev *hdev = file->private_data;
        char buf[3];

        buf[0] = test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags) ? 'Y' : 'N';
        buf[1] = '\n';
        buf[2] = '\0';
        return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}

static ssize_t lowpan_write(struct file *fp, const char __user *user_buffer,
                            size_t count, loff_t *position)
{
        struct hci_dev *hdev = fp->private_data;
        bool enable;
        char buf[32];
        size_t buf_size = min(count, (sizeof(buf) - 1));

        if (copy_from_user(buf, user_buffer, buf_size))
                return -EFAULT;

        buf[buf_size] = '\0';

        if (strtobool(buf, &enable) < 0)
                return -EINVAL;

        if (enable == test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
                return -EALREADY;

        change_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags);

        return count;
}

static const struct file_operations lowpan_debugfs_fops = {
        .open           = simple_open,
        .read           = lowpan_read,
        .write          = lowpan_write,
        .llseek         = default_llseek,
};

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
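
/* Illustrative sketch, not part of the original file: a typical caller of
 * __hci_cmd_sync(), mirroring the pattern used by dut_mode_write() above.
 * The function name is hypothetical; the opcode, timeout and reply struct
 * are real. The returned skb carries the Command Complete parameters and
 * must be freed by the caller.
 */
#if 0 /* example only */
static int example_read_bd_addr(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct hci_rp_read_bd_addr *rp;
        struct sk_buff *skb;

        hci_req_lock(hdev);
        skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
                             HCI_CMD_TIMEOUT);
        hci_req_unlock(hdev);

        if (IS_ERR(skb))
                return PTR_ERR(skb);

        rp = (struct hci_rp_read_bd_addr *) skb->data;
        if (rp->status) {
                kfree_skb(skb);
                return -bt_to_errno(rp->status);
        }

        bacpy(bdaddr, &rp->bdaddr);
        kfree_skb(skb);

        return 0;
}
#endif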

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
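
/* Illustrative sketch, not part of the original file: this is roughly how
 * the ioctl helpers later in this file drive hci_req_sync(). A request
 * builder such as hci_scan_req() below queues the HCI commands, and
 * hci_req_sync() runs them and blocks until completion or timeout.
 */
#if 0 /* example only */
        /* e.g. the HCISETSCAN ioctl path: enable inquiry and page scan */
        err = hci_req_sync(hdev, hci_scan_req, SCAN_INQUIRY | SCAN_PAGE,
                           HCI_INIT_TIMEOUT);
#endif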

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield so don't try to set
         * any event mask for pre 1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                /* When SSP is available, then the host features page
                 * should also be available as well. However some
                 * controllers list the max_page as 0 as long as SSP
                 * has not been enabled. To achieve proper debugging
                 * output, force the minimum max_page to 1 at least.
                 */
                hdev->max_page = 0x01;

                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_master_capable(hdev)) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (lmp_csb_slave_capable(hdev)) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported assume that the controller
         * does not have actual support for stored link keys which makes this
         * command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                        /* If the controller has a public BD_ADDR, then
                         * by default use that one. If this is a LE only
                         * controller without a public address, default
                         * to the random address.
                         */
                        if (bacmp(&hdev->bdaddr, BDADDR_ANY))
                                hdev->own_addr_type = ADDR_LE_DEV_PUBLIC;
                        else
                                hdev->own_addr_type = ADDR_LE_DEV_RANDOM;
                }

                hci_set_le_support(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (lmp_sync_train_capable(hdev))
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The Device Under Test (DUT) mode is special and available for
         * all controller types. So just create it early on.
         */
        if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
                debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev,
                                    &dut_mode_fops);
        }

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* Only create debugfs entries during the initial setup
         * phase and not every time the controller gets powered on.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                return 0;

        debugfs_create_file("features", 0444, hdev->debugfs, hdev,
                            &features_fops);
        debugfs_create_u16("manufacturer", 0444, hdev->debugfs,
                           &hdev->manufacturer);
        debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver);
        debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev);
        debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev,
                            &blacklist_fops);
        debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops);

        if (lmp_bredr_capable(hdev)) {
                debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
                                    hdev, &inquiry_cache_fops);
                debugfs_create_file("link_keys", 0400, hdev->debugfs,
                                    hdev, &link_keys_fops);
                debugfs_create_file("use_debug_keys", 0444, hdev->debugfs,
                                    hdev, &use_debug_keys_fops);
                debugfs_create_file("dev_class", 0444, hdev->debugfs,
                                    hdev, &dev_class_fops);
                debugfs_create_file("voice_setting", 0444, hdev->debugfs,
                                    hdev, &voice_setting_fops);
        }

        if (lmp_ssp_capable(hdev)) {
                debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs,
                                    hdev, &auto_accept_delay_fops);
                debugfs_create_file("ssp_debug_mode", 0644, hdev->debugfs,
                                    hdev, &ssp_debug_mode_fops);
        }

        if (lmp_sniff_capable(hdev)) {
                debugfs_create_file("idle_timeout", 0644, hdev->debugfs,
                                    hdev, &idle_timeout_fops);
                debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_min_interval_fops);
                debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs,
                                    hdev, &sniff_max_interval_fops);
        }

        if (lmp_le_capable(hdev)) {
                debugfs_create_u8("white_list_size", 0444, hdev->debugfs,
                                  &hdev->le_white_list_size);
                debugfs_create_file("static_address", 0444, hdev->debugfs,
                                    hdev, &static_address_fops);
                debugfs_create_file("own_address_type", 0644, hdev->debugfs,
                                    hdev, &own_address_type_fops);
                debugfs_create_file("long_term_keys", 0400, hdev->debugfs,
                                    hdev, &long_term_keys_fops);
                debugfs_create_file("conn_min_interval", 0644, hdev->debugfs,
                                    hdev, &conn_min_interval_fops);
                debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
                                    hdev, &conn_max_interval_fops);
                debugfs_create_file("6lowpan", 0644, hdev->debugfs, hdev,
                                    &lowpan_debugfs_fops);
        }

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}
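
/* Illustrative sketch, not part of the original file: roughly how an
 * inquiry-result event handler in hci_event.c feeds this cache. The
 * function name is hypothetical; the return value says whether the remote
 * name is already known, which decides if a name-resolution step is needed.
 */
#if 0 /* example only */
static void example_inquiry_result(struct hci_dev *hdev,
                                   struct inquiry_info *info)
{
        struct inquiry_data data;
        bool name_known, ssp;

        bacpy(&data.bdaddr, &info->bdaddr);
        data.pscan_rep_mode = info->pscan_rep_mode;
        data.pscan_period_mode = info->pscan_period_mode;
        data.pscan_mode = info->pscan_mode;
        memcpy(data.dev_class, info->dev_class, 3);
        data.clock_offset = info->clock_offset;
        data.rssi = 0x00;
        data.ssp_mode = 0x00;

        hci_dev_lock(hdev);
        name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
        hci_dev_unlock(hdev);
}
#endif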
1704
1705static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
1706{
30883512 1707 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
1708 struct inquiry_info *info = (struct inquiry_info *) buf;
1709 struct inquiry_entry *e;
1710 int copied = 0;
1711
561aafbc 1712 list_for_each_entry(e, &cache->all, all) {
1da177e4 1713 struct inquiry_data *data = &e->data;
b57c1a56
JH
1714
1715 if (copied >= num)
1716 break;
1717
1da177e4
LT
1718 bacpy(&info->bdaddr, &data->bdaddr);
1719 info->pscan_rep_mode = data->pscan_rep_mode;
1720 info->pscan_period_mode = data->pscan_period_mode;
1721 info->pscan_mode = data->pscan_mode;
1722 memcpy(info->dev_class, data->dev_class, 3);
1723 info->clock_offset = data->clock_offset;
b57c1a56 1724
1da177e4 1725 info++;
b57c1a56 1726 copied++;
1da177e4
LT
1727 }
1728
1729 BT_DBG("cache %p, copied %d", cache, copied);
1730 return copied;
1731}
1732
42c6b129 1733static void hci_inq_req(struct hci_request *req, unsigned long opt)
1da177e4
LT
1734{
1735 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
42c6b129 1736 struct hci_dev *hdev = req->hdev;
1da177e4
LT
1737 struct hci_cp_inquiry cp;
1738
1739 BT_DBG("%s", hdev->name);
1740
1741 if (test_bit(HCI_INQUIRY, &hdev->flags))
1742 return;
1743
1744 /* Start Inquiry */
1745 memcpy(&cp.lap, &ir->lap, 3);
1746 cp.length = ir->length;
1747 cp.num_rsp = ir->num_rsp;
42c6b129 1748 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
1749}
1750
3e13fa1e
AG
1751static int wait_inquiry(void *word)
1752{
1753 schedule();
1754 return signal_pending(current);
1755}
1756
1da177e4
LT
1757int hci_inquiry(void __user *arg)
1758{
1759 __u8 __user *ptr = arg;
1760 struct hci_inquiry_req ir;
1761 struct hci_dev *hdev;
1762 int err = 0, do_inquiry = 0, max_rsp;
1763 long timeo;
1764 __u8 *buf;
1765
1766 if (copy_from_user(&ir, ptr, sizeof(ir)))
1767 return -EFAULT;
1768
5a08ecce
AE
1769 hdev = hci_dev_get(ir.dev_id);
1770 if (!hdev)
1da177e4
LT
1771 return -ENODEV;
1772
0736cfa8
MH
1773 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1774 err = -EBUSY;
1775 goto done;
1776 }
1777
5b69bef5
MH
1778 if (hdev->dev_type != HCI_BREDR) {
1779 err = -EOPNOTSUPP;
1780 goto done;
1781 }
1782
56f87901
JH
1783 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1784 err = -EOPNOTSUPP;
1785 goto done;
1786 }
1787
09fd0de5 1788 hci_dev_lock(hdev);
8e87d142 1789 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 1790 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1f9b9a5d 1791 hci_inquiry_cache_flush(hdev);
1da177e4
LT
1792 do_inquiry = 1;
1793 }
09fd0de5 1794 hci_dev_unlock(hdev);
1da177e4 1795
04837f64 1796 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
1797
1798 if (do_inquiry) {
01178cd4
JH
1799 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
1800 timeo);
70f23020
AE
1801 if (err < 0)
1802 goto done;
3e13fa1e
AG
1803
1804 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1805 * cleared). If it is interrupted by a signal, return -EINTR.
1806 */
1807 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1808 TASK_INTERRUPTIBLE))
1809 return -EINTR;
70f23020 1810 }
1da177e4 1811
8fc9ced3
GP
1812 /* for unlimited number of responses we will use buffer with
1813 * 255 entries
1814 */
1da177e4
LT
1815 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1816
1817 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1818 * copy it to the user space.
1819 */
01df8c31 1820 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 1821 if (!buf) {
1da177e4
LT
1822 err = -ENOMEM;
1823 goto done;
1824 }
1825
09fd0de5 1826 hci_dev_lock(hdev);
1da177e4 1827 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 1828 hci_dev_unlock(hdev);
1da177e4
LT
1829
1830 BT_DBG("num_rsp %d", ir.num_rsp);
1831
1832 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1833 ptr += sizeof(ir);
1834 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 1835 ir.num_rsp))
1da177e4 1836 err = -EFAULT;
8e87d142 1837 } else
1da177e4
LT
1838 err = -EFAULT;
1839
1840 kfree(buf);
1841
1842done:
1843 hci_dev_put(hdev);
1844 return err;
1845}
1846
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for a valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, clean up */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

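/* A minimal user-space sketch of one of these ioctls (socket setup as in
 * the inquiry example above; SCAN_PAGE/SCAN_INQUIRY are the usual values
 * passed in dev_opt):
 *
 *	struct hci_dev_req dr = {
 *		.dev_id  = 0,
 *		.dev_opt = SCAN_PAGE | SCAN_INQUIRY,
 *	};
 *	if (ioctl(dd, HCISETSCAN, (unsigned long) &dr) < 0)
 *		perror("HCISETSCAN");
 */
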
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

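/* Summarising the decision above (a condensed view of the checks, not
 * additional policy):
 *
 *	key / connection situation		stored persistently?
 *	--------------------------		--------------------
 *	legacy key (type < 0x03)		yes
 *	debug combination key			no
 *	changed combination, no old key		no
 *	no connection (security mode 3)		yes
 *	both sides required bonding		yes
 *	either side dedicated bonding		yes
 *	anything else				no
 */
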
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

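/* Note: cmd_cnt acts as the flow-control credit for the command queue, so
 * resetting it to 1 here hands hci_cmd_work a fresh credit and lets the
 * queue move past the command that timed out instead of stalling forever.
 */
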
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
					 bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list) {
		if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type)
			return b;
	}

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr, type))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);
	entry->bdaddr_type = type;

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (!bacmp(bdaddr, BDADDR_ANY))
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr, type);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;
	hdev->le_conn_min_interval = 0x0028;
	hdev->le_conn_max_interval = 0x0038;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

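/* Typical driver-side usage of the pair above (a condensed sketch; the
 * my_* names are illustrative assumptions and error paths are shortened):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->flush = my_flush;
 *	hdev->send  = my_send;
 *	hci_set_drvdata(hdev, my_dev);
 *
 *	if (hci_register_dev(hdev) < 0) {
 *		hci_free_dev(hdev);
 *		return -ENODEV;
 *	}
 */
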
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	if (!IS_ERR_OR_NULL(bt_debugfs))
		hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs);

	dev_set_name(&hdev->dev, "%s", hdev->name);

	error = device_add(&hdev->dev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	device_del(&hdev->dev);

	debugfs_remove_recursive(hdev->debugfs);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

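/* The matching teardown in a driver's disconnect/remove path is simply
 * (sketch):
 *
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */
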
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

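/* Sketch of how a UART-style driver would feed raw H:4 bytes into the
 * reassembly machinery above (my_uart_rx is an illustrative name; the
 * first byte of each frame is the packet type indicator, which
 * hci_recv_stream_fragment() consumes itself):
 *
 *	static void my_uart_rx(struct hci_dev *hdev, void *data, int count)
 *	{
 *		int err = hci_recv_stream_fragment(hdev, data, count);
 *		if (err < 0)
 *			BT_ERR("Frame reassembly failed (%d)", err);
 *	}
 */
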
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

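/* The request API in a nutshell (a condensed sketch modelled on
 * le_scan_disable_work() above; my_complete is an illustrative name):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete);
 *
 * my_complete(hdev, status) is then invoked once the last queued command
 * completes.
 */
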
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

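/* Typical call site (a sketch; the opcode and its single-byte parameter
 * are as defined in hci.h):
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */
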
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

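/* Fragmented skbs arrive at hci_queue_acl() with the continuation packets
 * chained on skb_shinfo(skb)->frag_list; the loop above re-tags every
 * continuation with ACL_CONT so only the head fragment carries ACL_START.
 */
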
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

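/* The quote computed above is a simple fair share: the controller's free
 * buffer count divided by the number of ready connections of this link
 * type, with the least-recently-served (lowest c->sent) connection picked
 * first. Worked example: with acl_cnt == 8 free ACL buffers and three
 * ready ACL connections, each pick gets a quote of 8 / 3 == 2 packets.
 */
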
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

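/* Worked example for __get_blocks(): with hdev->block_len == 256, an skb
 * whose len is HCI_ACL_HDR_SIZE + 600 occupies DIV_ROUND_UP(600, 256) == 3
 * controller data blocks (the numbers here are purely illustrative).
 */
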
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(hdev, skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

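/* Top-level ACL scheduler: bail out early if the controller has no
 * links of the type it can carry, then dispatch on the flow control
 * mode it advertised, per-packet credits in the common case and
 * per-block credits otherwise (typically AMP controllers).
 */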
static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

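/* SCO and eSCO below are scheduled per connection rather than per
 * hci_chan: frames are dequeued in plain FIFO order with no priority
 * handling, and conn->sent is kept only as a counter that wraps at ~0
 * instead of tracking outstanding credits.
 */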
/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(hdev, skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

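/* LE scheduler. Controllers with a dedicated LE buffer pool report
 * le_pkts/le_cnt and are scheduled from those credits; controllers
 * that share the ACL pool (le_pkts == 0) borrow acl_cnt instead, and
 * the leftover count is written back to whichever pool was used.
 */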
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(hdev, skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

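/* TX work item, run from hdev->workqueue. All four schedulers are
 * skipped while the device is bound to a user channel, because the
 * owning process then drives the controller directly; the raw queue
 * is flushed either way.
 */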
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(hdev, skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

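/* The on-air ACL header packs a 12-bit connection handle together
 * with the 2-bit packet boundary and 2-bit broadcast flags into one
 * 16-bit field; hci_handle() and hci_flags() split that field apart
 * before the connection lookup below.
 */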
/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

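/* Request framing in the command queue: bt_cb(skb)->req.start marks
 * the first command of each request. If the head of cmd_q starts a
 * new request (or the queue is empty), every command belonging to the
 * previous request has been sent, so that request is complete.
 */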
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

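/* Requeue a clone of the last command that was sent, except for
 * HCI_OP_RESET, which is deliberately never retransmitted; see the
 * CSR workaround in hci_req_cmd_complete() below.
 */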
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, it needs special handling.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request, the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

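/* RX work item: drains hdev->rx_q, mirroring every frame to the
 * monitor socket (and to raw sockets when in promiscuous mode) before
 * dispatching by packet type. Frames are consumed without processing
 * while the device is in raw mode or owned by a user channel, and
 * data packets are additionally discarded during init, when only
 * command/event traffic is meaningful.
 */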
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

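/* Command work item: while the controller has command credits
 * (cmd_cnt), send the next queued command and keep a clone in
 * hdev->sent_cmd so the response can be matched and the command
 * retransmitted if necessary. The cmd_timer watchdog is armed for
 * each command unless a reset is in flight (HCI_RESET set), in which
 * case any pending watchdog is cancelled. If cloning fails, the
 * command is put back and the work rescheduled.
 */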
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(hdev, skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}