Bluetooth: Remove magic numbers from le scan cmd
[deliverable/linux.git] / net / bluetooth / hci_core.c
CommitLineData
8e87d142 1/*
1da177e4
LT
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
590051de 4 Copyright (C) 2011 ProFUSION Embedded Systems
1da177e4
LT
5
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
8e87d142
YH
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
1da177e4
LT
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
8e87d142
YH
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
1da177e4
LT
23 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
82453021 28#include <linux/jiffies.h>
1da177e4
LT
29#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
1da177e4
LT
35#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
f48fd9c8 41#include <linux/workqueue.h>
1da177e4
LT
42#include <linux/interrupt.h>
43#include <linux/notifier.h>
611b30f7 44#include <linux/rfkill.h>
6bd32326 45#include <linux/timer.h>
3a0259bb 46#include <linux/crypto.h>
1da177e4
LT
47#include <net/sock.h>
48
49#include <asm/system.h>
70f23020 50#include <linux/uaccess.h>
1da177e4
LT
51#include <asm/unaligned.h>
52
53#include <net/bluetooth/bluetooth.h>
54#include <net/bluetooth/hci_core.h>
55
ab81cbf9
JH
56#define AUTO_OFF_TIMEOUT 2000
57
7784d78f
AE
58int enable_hs;
59
b78752cc 60static void hci_rx_work(struct work_struct *work);
c347b765 61static void hci_cmd_work(struct work_struct *work);
3eff45ea 62static void hci_tx_work(struct work_struct *work);
1da177e4 63
1da177e4
LT
64/* HCI device list */
65LIST_HEAD(hci_dev_list);
66DEFINE_RWLOCK(hci_dev_list_lock);
67
68/* HCI callback list */
69LIST_HEAD(hci_cb_list);
70DEFINE_RWLOCK(hci_cb_list_lock);
71
72/* HCI protocols */
73#define HCI_MAX_PROTO 2
74struct hci_proto *hci_proto[HCI_MAX_PROTO];
75
76/* HCI notifiers list */
e041c683 77static ATOMIC_NOTIFIER_HEAD(hci_notifier);
1da177e4
LT
78
79/* ---- HCI notifications ---- */
80
81int hci_register_notifier(struct notifier_block *nb)
82{
e041c683 83 return atomic_notifier_chain_register(&hci_notifier, nb);
1da177e4
LT
84}
85
86int hci_unregister_notifier(struct notifier_block *nb)
87{
e041c683 88 return atomic_notifier_chain_unregister(&hci_notifier, nb);
1da177e4
LT
89}
90
6516455d 91static void hci_notify(struct hci_dev *hdev, int event)
1da177e4 92{
e041c683 93 atomic_notifier_call_chain(&hci_notifier, event, hdev);
1da177e4
LT
94}
95
96/* ---- HCI requests ---- */
97
23bb5763 98void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 99{
23bb5763
JH
100 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
101
a5040efa
JH
102 /* If this is the init phase check if the completed command matches
103 * the last init command, and if not just return.
104 */
105 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
23bb5763 106 return;
1da177e4
LT
107
108 if (hdev->req_status == HCI_REQ_PEND) {
109 hdev->req_result = result;
110 hdev->req_status = HCI_REQ_DONE;
111 wake_up_interruptible(&hdev->req_wait_q);
112 }
113}
114
115static void hci_req_cancel(struct hci_dev *hdev, int err)
116{
117 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118
119 if (hdev->req_status == HCI_REQ_PEND) {
120 hdev->req_result = err;
121 hdev->req_status = HCI_REQ_CANCELED;
122 wake_up_interruptible(&hdev->req_wait_q);
123 }
124}
125
/* Execute request and wait for completion.
 *
 * Runs @req (which queues one or more HCI commands) and sleeps until
 * hci_req_complete()/hci_req_cancel() wakes us or @timeout (in jiffies)
 * expires. Caller must hold the request lock (see hci_request()).
 * Returns 0 on success, a negative errno on failure/timeout/signal.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Must be on the wait queue and in TASK_INTERRUPTIBLE *before*
	 * issuing the request, so a completion arriving immediately after
	 * req() cannot be lost. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno here */
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the wait timed out */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
168
169static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
01df8c31 170 unsigned long opt, __u32 timeout)
1da177e4
LT
171{
172 int ret;
173
7c6a329e
MH
174 if (!test_bit(HCI_UP, &hdev->flags))
175 return -ENETDOWN;
176
1da177e4
LT
177 /* Serialize all requests */
178 hci_req_lock(hdev);
179 ret = __hci_request(hdev, req, opt, timeout);
180 hci_req_unlock(hdev);
181
182 return ret;
183}
184
185static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
186{
187 BT_DBG("%s %ld", hdev->name, opt);
188
189 /* Reset device */
f630cf0d 190 set_bit(HCI_RESET, &hdev->flags);
a9de9248 191 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
192}
193
e61ef499 194static void bredr_init(struct hci_dev *hdev)
1da177e4 195{
b0916ea0 196 struct hci_cp_delete_stored_link_key cp;
1ebb9252 197 __le16 param;
89f2783d 198 __u8 flt_type;
1da177e4 199
2455a3ea
AE
200 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
201
1da177e4
LT
202 /* Mandatory initialization */
203
204 /* Reset */
f630cf0d 205 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
e61ef499
AE
206 set_bit(HCI_RESET, &hdev->flags);
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
f630cf0d 208 }
1da177e4
LT
209
210 /* Read Local Supported Features */
a9de9248 211 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 212
1143e5a6 213 /* Read Local Version */
a9de9248 214 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1143e5a6 215
1da177e4 216 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
a9de9248 217 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
1da177e4 218
1da177e4 219 /* Read BD Address */
a9de9248
MH
220 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
221
222 /* Read Class of Device */
223 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
224
225 /* Read Local Name */
226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
1da177e4
LT
227
228 /* Read Voice Setting */
a9de9248 229 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
1da177e4
LT
230
231 /* Optional initialization */
232
233 /* Clear Event Filters */
89f2783d 234 flt_type = HCI_FLT_CLEAR_ALL;
a9de9248 235 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
1da177e4 236
1da177e4 237 /* Connection accept timeout ~20 secs */
aca3192c 238 param = cpu_to_le16(0x7d00);
a9de9248 239 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
b0916ea0
JH
240
241 bacpy(&cp.bdaddr, BDADDR_ANY);
242 cp.delete_all = 1;
243 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
1da177e4
LT
244}
245
e61ef499
AE
/* Initial command sequence for AMP (alternate MAC/PHY) controllers.
 * AMP controllers use block-based flow control and need only a reset
 * plus a version query during init.
 */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
256
/* Request callback: run the controller bring-up sequence.
 *
 * First flushes any driver-supplied "special" init commands into the
 * command queue, then dispatches to the type-specific init routine.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open() */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
290
6ed58ec5
VT
/* Request callback: LE-specific init (only buffer size query for now). */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
298
1da177e4
LT
299static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 scan = opt;
302
303 BT_DBG("%s %x", hdev->name, scan);
304
305 /* Inquiry and Page scans */
a9de9248 306 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
307}
308
309static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __u8 auth = opt;
312
313 BT_DBG("%s %x", hdev->name, auth);
314
315 /* Authentication */
a9de9248 316 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
317}
318
319static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
320{
321 __u8 encrypt = opt;
322
323 BT_DBG("%s %x", hdev->name, encrypt);
324
e4e8e37c 325 /* Encryption */
a9de9248 326 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
327}
328
e4e8e37c
MH
329static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
330{
331 __le16 policy = cpu_to_le16(opt);
332
a418b893 333 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
334
335 /* Default link policy */
336 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
337}
338
8e87d142 339/* Get HCI device by index.
1da177e4
LT
340 * Device is held on return. */
341struct hci_dev *hci_dev_get(int index)
342{
8035ded4 343 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
344
345 BT_DBG("%d", index);
346
347 if (index < 0)
348 return NULL;
349
350 read_lock(&hci_dev_list_lock);
8035ded4 351 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
352 if (d->id == index) {
353 hdev = hci_dev_hold(d);
354 break;
355 }
356 }
357 read_unlock(&hci_dev_list_lock);
358 return hdev;
359}
1da177e4
LT
360
361/* ---- Inquiry support ---- */
362static void inquiry_cache_flush(struct hci_dev *hdev)
363{
364 struct inquiry_cache *cache = &hdev->inq_cache;
365 struct inquiry_entry *next = cache->list, *e;
366
367 BT_DBG("cache %p", cache);
368
369 cache->list = NULL;
370 while ((e = next)) {
371 next = e->next;
372 kfree(e);
373 }
374}
375
376struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
377{
378 struct inquiry_cache *cache = &hdev->inq_cache;
379 struct inquiry_entry *e;
380
381 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
382
383 for (e = cache->list; e; e = e->next)
384 if (!bacmp(&e->data.bdaddr, bdaddr))
385 break;
386 return e;
387}
388
389void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
390{
391 struct inquiry_cache *cache = &hdev->inq_cache;
70f23020 392 struct inquiry_entry *ie;
1da177e4
LT
393
394 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
395
70f23020
AE
396 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
397 if (!ie) {
1da177e4 398 /* Entry not in the cache. Add new one. */
70f23020
AE
399 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
400 if (!ie)
1da177e4 401 return;
70f23020
AE
402
403 ie->next = cache->list;
404 cache->list = ie;
1da177e4
LT
405 }
406
70f23020
AE
407 memcpy(&ie->data, data, sizeof(*data));
408 ie->timestamp = jiffies;
1da177e4
LT
409 cache->timestamp = jiffies;
410}
411
412static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
413{
414 struct inquiry_cache *cache = &hdev->inq_cache;
415 struct inquiry_info *info = (struct inquiry_info *) buf;
416 struct inquiry_entry *e;
417 int copied = 0;
418
419 for (e = cache->list; e && copied < num; e = e->next, copied++) {
420 struct inquiry_data *data = &e->data;
421 bacpy(&info->bdaddr, &data->bdaddr);
422 info->pscan_rep_mode = data->pscan_rep_mode;
423 info->pscan_period_mode = data->pscan_period_mode;
424 info->pscan_mode = data->pscan_mode;
425 memcpy(info->dev_class, data->dev_class, 3);
426 info->clock_offset = data->clock_offset;
427 info++;
428 }
429
430 BT_DBG("cache %p, copied %d", cache, copied);
431 return copied;
432}
433
434static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
435{
436 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
437 struct hci_cp_inquiry cp;
438
439 BT_DBG("%s", hdev->name);
440
441 if (test_bit(HCI_INQUIRY, &hdev->flags))
442 return;
443
444 /* Start Inquiry */
445 memcpy(&cp.lap, &ir->lap, 3);
446 cp.length = ir->length;
447 cp.num_rsp = ir->num_rsp;
a9de9248 448 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
449}
450
/* HCIINQUIRY ioctl handler.
 *
 * Copies the request from user space, optionally (re-)runs the inquiry
 * when the cache is stale/empty or a flush was requested, then returns
 * the cached results to user space. Returns 0 or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	/* Re-run the inquiry if the cache is too old, empty, or the
	 * caller explicitly asked for a flush. */
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in units of ~2 seconds */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the (updated) request header, then the results */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
516
517/* ---- HCI ioctl helpers ---- */
518
/* Bring up HCI device @dev.
 *
 * Opens the transport, runs the init request sequence (unless the
 * device is in raw mode) and, on success, marks the device up and
 * notifies listeners. On init failure everything is torn down again.
 * Returns 0 or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on a radio that rfkill has blocked */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* LE-capable hosts additionally run the LE init sequence */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Don't report "powered" while mgmt setup is still pending */
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
605
/* Take @hdev down: cancel pending work, flush queues, reset the
 * controller (non-raw devices) and close the transport. The ordering
 * here is deliberate — works must be flushed before queues are purged,
 * and the final close happens only once nothing is scheduled.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Already down; just stop the command timer */
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
		cancel_delayed_work(&hdev->service_cache);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
685
686int hci_dev_close(__u16 dev)
687{
688 struct hci_dev *hdev;
689 int err;
690
70f23020
AE
691 hdev = hci_dev_get(dev);
692 if (!hdev)
1da177e4
LT
693 return -ENODEV;
694 err = hci_dev_do_close(hdev);
695 hci_dev_put(hdev);
696 return err;
697}
698
/* HCIDEVRESET ioctl: drop all queues/connections and reset the
 * controller without taking the interface down. No-op when the
 * device is not up. Returns 0 or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset the credit counters for every packet type */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
737
738int hci_dev_reset_stat(__u16 dev)
739{
740 struct hci_dev *hdev;
741 int ret = 0;
742
70f23020
AE
743 hdev = hci_dev_get(dev);
744 if (!hdev)
1da177e4
LT
745 return -ENODEV;
746
747 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
748
749 hci_dev_put(hdev);
750
751 return ret;
752}
753
/* Dispatch the HCISET* device-configuration ioctls.
 *
 * NOTE: HCISETACLMTU/HCISETSCOMTU treat dr.dev_opt as two packed
 * __u16 values (mtu in the high half, packet count in the low half);
 * the pointer arithmetic below depends on that layout.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs mtu and packet count into one word */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
828
/* HCIGETDEVLIST ioctl: return id/flags for up to dev_num devices.
 * As a side effect, touching the list cancels any pending auto-off
 * and marks non-mgmt devices pairable (legacy interface behavior).
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation: at most two pages worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	/* Shrink the copy to the number of devices actually found */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
875
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot for one
 * device. Like hci_get_dev_list(), querying cancels pending auto-off
 * and marks non-mgmt devices pairable.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Low nibble: bus type, high nibble: device type */
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
917
918/* ---- Interface to HCI drivers ---- */
919
611b30f7
MH
920static int hci_rfkill_set_block(void *data, bool blocked)
921{
922 struct hci_dev *hdev = data;
923
924 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
925
926 if (!blocked)
927 return 0;
928
929 hci_dev_do_close(hdev);
930
931 return 0;
932}
933
934static const struct rfkill_ops hci_rfkill_ops = {
935 .set_block = hci_rfkill_set_block,
936};
937
1da177e4
LT
938/* Alloc HCI device */
939struct hci_dev *hci_alloc_dev(void)
940{
941 struct hci_dev *hdev;
942
25ea6db0 943 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1da177e4
LT
944 if (!hdev)
945 return NULL;
946
0ac7e700 947 hci_init_sysfs(hdev);
1da177e4
LT
948 skb_queue_head_init(&hdev->driver_init);
949
950 return hdev;
951}
952EXPORT_SYMBOL(hci_alloc_dev);
953
954/* Free HCI device */
955void hci_free_dev(struct hci_dev *hdev)
956{
957 skb_queue_purge(&hdev->driver_init);
958
a91f2e39
MH
959 /* will free via device release */
960 put_device(&hdev->dev);
1da177e4
LT
961}
962EXPORT_SYMBOL(hci_free_dev);
963
ab81cbf9
JH
/* Deferred power-on work: open the device, arm the auto-off timer if
 * requested, and announce the index to mgmt once setup completes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Auto-off: power back down unless userspace claims the device */
	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
980
981static void hci_power_off(struct work_struct *work)
982{
3243553f
JH
983 struct hci_dev *hdev = container_of(work, struct hci_dev,
984 power_off.work);
ab81cbf9
JH
985
986 BT_DBG("%s", hdev->name);
987
988 clear_bit(HCI_AUTO_OFF, &hdev->flags);
989
3243553f 990 hci_dev_close(hdev->id);
ab81cbf9
JH
991}
992
16ab91ab
JH
993static void hci_discov_off(struct work_struct *work)
994{
995 struct hci_dev *hdev;
996 u8 scan = SCAN_PAGE;
997
998 hdev = container_of(work, struct hci_dev, discov_off.work);
999
1000 BT_DBG("%s", hdev->name);
1001
09fd0de5 1002 hci_dev_lock(hdev);
16ab91ab
JH
1003
1004 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1005
1006 hdev->discov_timeout = 0;
1007
09fd0de5 1008 hci_dev_unlock(hdev);
16ab91ab
JH
1009}
1010
2aeb9a1a
JH
1011int hci_uuids_clear(struct hci_dev *hdev)
1012{
1013 struct list_head *p, *n;
1014
1015 list_for_each_safe(p, n, &hdev->uuids) {
1016 struct bt_uuid *uuid;
1017
1018 uuid = list_entry(p, struct bt_uuid, list);
1019
1020 list_del(p);
1021 kfree(uuid);
1022 }
1023
1024 return 0;
1025}
1026
55ed8ca1
JH
1027int hci_link_keys_clear(struct hci_dev *hdev)
1028{
1029 struct list_head *p, *n;
1030
1031 list_for_each_safe(p, n, &hdev->link_keys) {
1032 struct link_key *key;
1033
1034 key = list_entry(p, struct link_key, list);
1035
1036 list_del(p);
1037 kfree(key);
1038 }
1039
1040 return 0;
1041}
1042
1043struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1044{
8035ded4 1045 struct link_key *k;
55ed8ca1 1046
8035ded4 1047 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1048 if (bacmp(bdaddr, &k->bdaddr) == 0)
1049 return k;
55ed8ca1
JH
1050
1051 return NULL;
1052}
1053
d25e28ab
JH
1054static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1055 u8 key_type, u8 old_key_type)
1056{
1057 /* Legacy key */
1058 if (key_type < 0x03)
1059 return 1;
1060
1061 /* Debug keys are insecure so don't store them persistently */
1062 if (key_type == HCI_LK_DEBUG_COMBINATION)
1063 return 0;
1064
1065 /* Changed combination key and there's no previous one */
1066 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1067 return 0;
1068
1069 /* Security mode 3 case */
1070 if (!conn)
1071 return 1;
1072
1073 /* Neither local nor remote side had no-bonding as requirement */
1074 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1075 return 1;
1076
1077 /* Local side had dedicated bonding as requirement */
1078 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1079 return 1;
1080
1081 /* Remote side had dedicated bonding as requirement */
1082 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1083 return 1;
1084
1085 /* If none of the above criteria match, then don't store the key
1086 * persistently */
1087 return 0;
1088}
1089
75d262c2
VCG
1090struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1091{
1092 struct link_key *k;
1093
1094 list_for_each_entry(k, &hdev->link_keys, list) {
1095 struct key_master_id *id;
1096
1097 if (k->type != HCI_LK_SMP_LTK)
1098 continue;
1099
1100 if (k->dlen != sizeof(*id))
1101 continue;
1102
1103 id = (void *) &k->data;
1104 if (id->ediv == ediv &&
1105 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1106 return k;
1107 }
1108
1109 return NULL;
1110}
1111EXPORT_SYMBOL(hci_find_ltk);
1112
1113struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1114 bdaddr_t *bdaddr, u8 type)
1115{
1116 struct link_key *k;
1117
1118 list_for_each_entry(k, &hdev->link_keys, list)
1119 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1120 return k;
1121
1122 return NULL;
1123}
1124EXPORT_SYMBOL(hci_find_link_key_type);
1125
d25e28ab
JH
/* Store (or update) the link key for @bdaddr.
 *
 * Handles the buggy-controller quirk where a "changed combination"
 * key shows up without a predecessor, decides persistence via
 * hci_persistent_key(), and notifies mgmt for newly created keys.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff signals "no previous key" to the logic below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed" key keeps its original type on record */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are reported to mgmt but not kept */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1180
75d262c2 1181int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
726b4ffc 1182 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
75d262c2
VCG
1183{
1184 struct link_key *key, *old_key;
1185 struct key_master_id *id;
1186 u8 old_key_type;
1187
1188 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1189
1190 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1191 if (old_key) {
1192 key = old_key;
1193 old_key_type = old_key->type;
1194 } else {
1195 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1196 if (!key)
1197 return -ENOMEM;
1198 list_add(&key->list, &hdev->link_keys);
1199 old_key_type = 0xff;
1200 }
1201
1202 key->dlen = sizeof(*id);
1203
1204 bacpy(&key->bdaddr, bdaddr);
1205 memcpy(key->val, ltk, sizeof(key->val));
1206 key->type = HCI_LK_SMP_LTK;
726b4ffc 1207 key->pin_len = key_size;
75d262c2
VCG
1208
1209 id = (void *) &key->data;
1210 id->ediv = ediv;
1211 memcpy(id->rand, rand, sizeof(id->rand));
1212
1213 if (new_key)
744cf19e 1214 mgmt_new_link_key(hdev, key, old_key_type);
75d262c2
VCG
1215
1216 return 0;
1217}
1218
55ed8ca1
JH
1219int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1220{
1221 struct link_key *key;
1222
1223 key = hci_find_link_key(hdev, bdaddr);
1224 if (!key)
1225 return -ENOENT;
1226
1227 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1228
1229 list_del(&key->list);
1230 kfree(key);
1231
1232 return 0;
1233}
1234
6bd32326
VT
1235/* HCI command timer function */
1236static void hci_cmd_timer(unsigned long arg)
1237{
1238 struct hci_dev *hdev = (void *) arg;
1239
1240 BT_ERR("%s command tx timeout", hdev->name);
1241 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1242 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1243}
1244
2763eda6
SJ
1245struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1246 bdaddr_t *bdaddr)
1247{
1248 struct oob_data *data;
1249
1250 list_for_each_entry(data, &hdev->remote_oob_data, list)
1251 if (bacmp(bdaddr, &data->bdaddr) == 0)
1252 return data;
1253
1254 return NULL;
1255}
1256
1257int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1258{
1259 struct oob_data *data;
1260
1261 data = hci_find_remote_oob_data(hdev, bdaddr);
1262 if (!data)
1263 return -ENOENT;
1264
1265 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1266
1267 list_del(&data->list);
1268 kfree(data);
1269
1270 return 0;
1271}
1272
1273int hci_remote_oob_data_clear(struct hci_dev *hdev)
1274{
1275 struct oob_data *data, *n;
1276
1277 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1278 list_del(&data->list);
1279 kfree(data);
1280 }
1281
1282 return 0;
1283}
1284
1285int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1286 u8 *randomizer)
1287{
1288 struct oob_data *data;
1289
1290 data = hci_find_remote_oob_data(hdev, bdaddr);
1291
1292 if (!data) {
1293 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1294 if (!data)
1295 return -ENOMEM;
1296
1297 bacpy(&data->bdaddr, bdaddr);
1298 list_add(&data->list, &hdev->remote_oob_data);
1299 }
1300
1301 memcpy(data->hash, hash, sizeof(data->hash));
1302 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1303
1304 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1305
1306 return 0;
1307}
1308
b2a66aad
AJ
1309struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1310 bdaddr_t *bdaddr)
1311{
8035ded4 1312 struct bdaddr_list *b;
b2a66aad 1313
8035ded4 1314 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1315 if (bacmp(bdaddr, &b->bdaddr) == 0)
1316 return b;
b2a66aad
AJ
1317
1318 return NULL;
1319}
1320
1321int hci_blacklist_clear(struct hci_dev *hdev)
1322{
1323 struct list_head *p, *n;
1324
1325 list_for_each_safe(p, n, &hdev->blacklist) {
1326 struct bdaddr_list *b;
1327
1328 b = list_entry(p, struct bdaddr_list, list);
1329
1330 list_del(p);
1331 kfree(b);
1332 }
1333
1334 return 0;
1335}
1336
1337int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1338{
1339 struct bdaddr_list *entry;
b2a66aad
AJ
1340
1341 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1342 return -EBADF;
1343
5e762444
AJ
1344 if (hci_blacklist_lookup(hdev, bdaddr))
1345 return -EEXIST;
b2a66aad
AJ
1346
1347 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1348 if (!entry)
1349 return -ENOMEM;
b2a66aad
AJ
1350
1351 bacpy(&entry->bdaddr, bdaddr);
1352
1353 list_add(&entry->list, &hdev->blacklist);
1354
744cf19e 1355 return mgmt_device_blocked(hdev, bdaddr);
b2a66aad
AJ
1356}
1357
1358int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1359{
1360 struct bdaddr_list *entry;
b2a66aad 1361
1ec918ce 1362 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1363 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1364
1365 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1366 if (!entry)
5e762444 1367 return -ENOENT;
b2a66aad
AJ
1368
1369 list_del(&entry->list);
1370 kfree(entry);
1371
744cf19e 1372 return mgmt_device_unblocked(hdev, bdaddr);
b2a66aad
AJ
1373}
1374
db323f2f 1375static void hci_clear_adv_cache(struct work_struct *work)
35815085 1376{
db323f2f
GP
1377 struct hci_dev *hdev = container_of(work, struct hci_dev,
1378 adv_work.work);
35815085
AG
1379
1380 hci_dev_lock(hdev);
1381
1382 hci_adv_entries_clear(hdev);
1383
1384 hci_dev_unlock(hdev);
1385}
1386
76c8686f
AG
1387int hci_adv_entries_clear(struct hci_dev *hdev)
1388{
1389 struct adv_entry *entry, *tmp;
1390
1391 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1392 list_del(&entry->list);
1393 kfree(entry);
1394 }
1395
1396 BT_DBG("%s adv cache cleared", hdev->name);
1397
1398 return 0;
1399}
1400
1401struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1402{
1403 struct adv_entry *entry;
1404
1405 list_for_each_entry(entry, &hdev->adv_entries, list)
1406 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1407 return entry;
1408
1409 return NULL;
1410}
1411
1412static inline int is_connectable_adv(u8 evt_type)
1413{
1414 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1415 return 1;
1416
1417 return 0;
1418}
1419
1420int hci_add_adv_entry(struct hci_dev *hdev,
1421 struct hci_ev_le_advertising_info *ev)
1422{
1423 struct adv_entry *entry;
1424
1425 if (!is_connectable_adv(ev->evt_type))
1426 return -EINVAL;
1427
1428 /* Only new entries should be added to adv_entries. So, if
1429 * bdaddr was found, don't add it. */
1430 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1431 return 0;
1432
1433 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1434 if (!entry)
1435 return -ENOMEM;
1436
1437 bacpy(&entry->bdaddr, &ev->bdaddr);
1438 entry->bdaddr_type = ev->bdaddr_type;
1439
1440 list_add(&entry->list, &hdev->adv_entries);
1441
1442 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1443 batostr(&entry->bdaddr), entry->bdaddr_type);
1444
1445 return 0;
1446}
1447
1da177e4
LT
1448/* Register HCI device */
1449int hci_register_dev(struct hci_dev *hdev)
1450{
1451 struct list_head *head = &hci_dev_list, *p;
08add513 1452 int i, id, error;
1da177e4 1453
c13854ce
MH
1454 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1455 hdev->bus, hdev->owner);
1da177e4
LT
1456
1457 if (!hdev->open || !hdev->close || !hdev->destruct)
1458 return -EINVAL;
1459
08add513
MM
1460 /* Do not allow HCI_AMP devices to register at index 0,
1461 * so the index can be used as the AMP controller ID.
1462 */
1463 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1464
1da177e4
LT
1465 write_lock_bh(&hci_dev_list_lock);
1466
1467 /* Find first available device id */
1468 list_for_each(p, &hci_dev_list) {
1469 if (list_entry(p, struct hci_dev, list)->id != id)
1470 break;
1471 head = p; id++;
1472 }
8e87d142 1473
1da177e4
LT
1474 sprintf(hdev->name, "hci%d", id);
1475 hdev->id = id;
c6feeb28 1476 list_add_tail(&hdev->list, head);
1da177e4
LT
1477
1478 atomic_set(&hdev->refcnt, 1);
09fd0de5 1479 mutex_init(&hdev->lock);
1da177e4
LT
1480
1481 hdev->flags = 0;
d23264a8 1482 hdev->dev_flags = 0;
1da177e4 1483 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
5b7f9909 1484 hdev->esco_type = (ESCO_HV1);
1da177e4 1485 hdev->link_mode = (HCI_LM_ACCEPT);
17fa4b9d 1486 hdev->io_capability = 0x03; /* No Input No Output */
1da177e4 1487
04837f64
MH
1488 hdev->idle_timeout = 0;
1489 hdev->sniff_max_interval = 800;
1490 hdev->sniff_min_interval = 80;
1491
b78752cc 1492 INIT_WORK(&hdev->rx_work, hci_rx_work);
c347b765 1493 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
3eff45ea 1494 INIT_WORK(&hdev->tx_work, hci_tx_work);
b78752cc 1495
1da177e4
LT
1496
1497 skb_queue_head_init(&hdev->rx_q);
1498 skb_queue_head_init(&hdev->cmd_q);
1499 skb_queue_head_init(&hdev->raw_q);
1500
6bd32326
VT
1501 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1502
cd4c5391 1503 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1504 hdev->reassembly[i] = NULL;
1505
1da177e4 1506 init_waitqueue_head(&hdev->req_wait_q);
a6a67efd 1507 mutex_init(&hdev->req_lock);
1da177e4
LT
1508
1509 inquiry_cache_init(hdev);
1510
1511 hci_conn_hash_init(hdev);
1512
2e58ef3e
JH
1513 INIT_LIST_HEAD(&hdev->mgmt_pending);
1514
ea4bd8ba 1515 INIT_LIST_HEAD(&hdev->blacklist);
f0358568 1516
2aeb9a1a
JH
1517 INIT_LIST_HEAD(&hdev->uuids);
1518
55ed8ca1
JH
1519 INIT_LIST_HEAD(&hdev->link_keys);
1520
2763eda6
SJ
1521 INIT_LIST_HEAD(&hdev->remote_oob_data);
1522
76c8686f
AG
1523 INIT_LIST_HEAD(&hdev->adv_entries);
1524
db323f2f 1525 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
ab81cbf9 1526 INIT_WORK(&hdev->power_on, hci_power_on);
3243553f 1527 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
ab81cbf9 1528
16ab91ab
JH
1529 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1530
1da177e4
LT
1531 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1532
1533 atomic_set(&hdev->promisc, 0);
1534
1535 write_unlock_bh(&hci_dev_list_lock);
1536
32845eb1
GP
1537 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1538 WQ_MEM_RECLAIM, 1);
33ca954d
DH
1539 if (!hdev->workqueue) {
1540 error = -ENOMEM;
1541 goto err;
1542 }
f48fd9c8 1543
33ca954d
DH
1544 error = hci_add_sysfs(hdev);
1545 if (error < 0)
1546 goto err_wqueue;
1da177e4 1547
611b30f7
MH
1548 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1549 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1550 if (hdev->rfkill) {
1551 if (rfkill_register(hdev->rfkill) < 0) {
1552 rfkill_destroy(hdev->rfkill);
1553 hdev->rfkill = NULL;
1554 }
1555 }
1556
ab81cbf9
JH
1557 set_bit(HCI_AUTO_OFF, &hdev->flags);
1558 set_bit(HCI_SETUP, &hdev->flags);
7f971041 1559 schedule_work(&hdev->power_on);
ab81cbf9 1560
1da177e4
LT
1561 hci_notify(hdev, HCI_DEV_REG);
1562
1563 return id;
f48fd9c8 1564
33ca954d
DH
1565err_wqueue:
1566 destroy_workqueue(hdev->workqueue);
1567err:
f48fd9c8
MH
1568 write_lock_bh(&hci_dev_list_lock);
1569 list_del(&hdev->list);
1570 write_unlock_bh(&hci_dev_list_lock);
1571
33ca954d 1572 return error;
1da177e4
LT
1573}
1574EXPORT_SYMBOL(hci_register_dev);
1575
1576/* Unregister HCI device */
59735631 1577void hci_unregister_dev(struct hci_dev *hdev)
1da177e4 1578{
ef222013
MH
1579 int i;
1580
c13854ce 1581 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1da177e4 1582
1da177e4
LT
1583 write_lock_bh(&hci_dev_list_lock);
1584 list_del(&hdev->list);
1585 write_unlock_bh(&hci_dev_list_lock);
1586
1587 hci_dev_do_close(hdev);
1588
cd4c5391 1589 for (i = 0; i < NUM_REASSEMBLY; i++)
ef222013
MH
1590 kfree_skb(hdev->reassembly[i]);
1591
ab81cbf9 1592 if (!test_bit(HCI_INIT, &hdev->flags) &&
56e5cb86 1593 !test_bit(HCI_SETUP, &hdev->flags)) {
09fd0de5 1594 hci_dev_lock(hdev);
744cf19e 1595 mgmt_index_removed(hdev);
09fd0de5 1596 hci_dev_unlock(hdev);
56e5cb86 1597 }
ab81cbf9 1598
2e58ef3e
JH
1599 /* mgmt_index_removed should take care of emptying the
1600 * pending list */
1601 BUG_ON(!list_empty(&hdev->mgmt_pending));
1602
1da177e4
LT
1603 hci_notify(hdev, HCI_DEV_UNREG);
1604
611b30f7
MH
1605 if (hdev->rfkill) {
1606 rfkill_unregister(hdev->rfkill);
1607 rfkill_destroy(hdev->rfkill);
1608 }
1609
ce242970 1610 hci_del_sysfs(hdev);
147e2d59 1611
db323f2f 1612 cancel_delayed_work_sync(&hdev->adv_work);
c6f3c5f7 1613
f48fd9c8
MH
1614 destroy_workqueue(hdev->workqueue);
1615
09fd0de5 1616 hci_dev_lock(hdev);
e2e0cacb 1617 hci_blacklist_clear(hdev);
2aeb9a1a 1618 hci_uuids_clear(hdev);
55ed8ca1 1619 hci_link_keys_clear(hdev);
2763eda6 1620 hci_remote_oob_data_clear(hdev);
76c8686f 1621 hci_adv_entries_clear(hdev);
09fd0de5 1622 hci_dev_unlock(hdev);
e2e0cacb 1623
1da177e4 1624 __hci_dev_put(hdev);
1da177e4
LT
1625}
1626EXPORT_SYMBOL(hci_unregister_dev);
1627
1628/* Suspend HCI device */
1629int hci_suspend_dev(struct hci_dev *hdev)
1630{
1631 hci_notify(hdev, HCI_DEV_SUSPEND);
1632 return 0;
1633}
1634EXPORT_SYMBOL(hci_suspend_dev);
1635
1636/* Resume HCI device */
1637int hci_resume_dev(struct hci_dev *hdev)
1638{
1639 hci_notify(hdev, HCI_DEV_RESUME);
1640 return 0;
1641}
1642EXPORT_SYMBOL(hci_resume_dev);
1643
76bca880
MH
1644/* Receive frame from HCI drivers */
1645int hci_recv_frame(struct sk_buff *skb)
1646{
1647 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1648 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1649 && !test_bit(HCI_INIT, &hdev->flags))) {
1650 kfree_skb(skb);
1651 return -ENXIO;
1652 }
1653
1654 /* Incomming skb */
1655 bt_cb(skb)->incoming = 1;
1656
1657 /* Time stamp */
1658 __net_timestamp(skb);
1659
76bca880 1660 skb_queue_tail(&hdev->rx_q, skb);
b78752cc 1661 queue_work(hdev->workqueue, &hdev->rx_work);
c78ae283 1662
76bca880
MH
1663 return 0;
1664}
1665EXPORT_SYMBOL(hci_recv_frame);
1666
33e882a5 1667static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1e429f38 1668 int count, __u8 index)
33e882a5
SS
1669{
1670 int len = 0;
1671 int hlen = 0;
1672 int remain = count;
1673 struct sk_buff *skb;
1674 struct bt_skb_cb *scb;
1675
1676 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1677 index >= NUM_REASSEMBLY)
1678 return -EILSEQ;
1679
1680 skb = hdev->reassembly[index];
1681
1682 if (!skb) {
1683 switch (type) {
1684 case HCI_ACLDATA_PKT:
1685 len = HCI_MAX_FRAME_SIZE;
1686 hlen = HCI_ACL_HDR_SIZE;
1687 break;
1688 case HCI_EVENT_PKT:
1689 len = HCI_MAX_EVENT_SIZE;
1690 hlen = HCI_EVENT_HDR_SIZE;
1691 break;
1692 case HCI_SCODATA_PKT:
1693 len = HCI_MAX_SCO_SIZE;
1694 hlen = HCI_SCO_HDR_SIZE;
1695 break;
1696 }
1697
1e429f38 1698 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1699 if (!skb)
1700 return -ENOMEM;
1701
1702 scb = (void *) skb->cb;
1703 scb->expect = hlen;
1704 scb->pkt_type = type;
1705
1706 skb->dev = (void *) hdev;
1707 hdev->reassembly[index] = skb;
1708 }
1709
1710 while (count) {
1711 scb = (void *) skb->cb;
1712 len = min(scb->expect, (__u16)count);
1713
1714 memcpy(skb_put(skb, len), data, len);
1715
1716 count -= len;
1717 data += len;
1718 scb->expect -= len;
1719 remain = count;
1720
1721 switch (type) {
1722 case HCI_EVENT_PKT:
1723 if (skb->len == HCI_EVENT_HDR_SIZE) {
1724 struct hci_event_hdr *h = hci_event_hdr(skb);
1725 scb->expect = h->plen;
1726
1727 if (skb_tailroom(skb) < scb->expect) {
1728 kfree_skb(skb);
1729 hdev->reassembly[index] = NULL;
1730 return -ENOMEM;
1731 }
1732 }
1733 break;
1734
1735 case HCI_ACLDATA_PKT:
1736 if (skb->len == HCI_ACL_HDR_SIZE) {
1737 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1738 scb->expect = __le16_to_cpu(h->dlen);
1739
1740 if (skb_tailroom(skb) < scb->expect) {
1741 kfree_skb(skb);
1742 hdev->reassembly[index] = NULL;
1743 return -ENOMEM;
1744 }
1745 }
1746 break;
1747
1748 case HCI_SCODATA_PKT:
1749 if (skb->len == HCI_SCO_HDR_SIZE) {
1750 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1751 scb->expect = h->dlen;
1752
1753 if (skb_tailroom(skb) < scb->expect) {
1754 kfree_skb(skb);
1755 hdev->reassembly[index] = NULL;
1756 return -ENOMEM;
1757 }
1758 }
1759 break;
1760 }
1761
1762 if (scb->expect == 0) {
1763 /* Complete frame */
1764
1765 bt_cb(skb)->pkt_type = type;
1766 hci_recv_frame(skb);
1767
1768 hdev->reassembly[index] = NULL;
1769 return remain;
1770 }
1771 }
1772
1773 return remain;
1774}
1775
ef222013
MH
1776int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1777{
f39a3c06
SS
1778 int rem = 0;
1779
ef222013
MH
1780 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1781 return -EILSEQ;
1782
da5f6c37 1783 while (count) {
1e429f38 1784 rem = hci_reassembly(hdev, type, data, count, type - 1);
f39a3c06
SS
1785 if (rem < 0)
1786 return rem;
ef222013 1787
f39a3c06
SS
1788 data += (count - rem);
1789 count = rem;
f81c6224 1790 }
ef222013 1791
f39a3c06 1792 return rem;
ef222013
MH
1793}
1794EXPORT_SYMBOL(hci_recv_fragment);
1795
99811510
SS
1796#define STREAM_REASSEMBLY 0
1797
1798int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1799{
1800 int type;
1801 int rem = 0;
1802
da5f6c37 1803 while (count) {
99811510
SS
1804 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1805
1806 if (!skb) {
1807 struct { char type; } *pkt;
1808
1809 /* Start of the frame */
1810 pkt = data;
1811 type = pkt->type;
1812
1813 data++;
1814 count--;
1815 } else
1816 type = bt_cb(skb)->pkt_type;
1817
1e429f38
GP
1818 rem = hci_reassembly(hdev, type, data, count,
1819 STREAM_REASSEMBLY);
99811510
SS
1820 if (rem < 0)
1821 return rem;
1822
1823 data += (count - rem);
1824 count = rem;
f81c6224 1825 }
99811510
SS
1826
1827 return rem;
1828}
1829EXPORT_SYMBOL(hci_recv_stream_fragment);
1830
1da177e4
LT
1831/* ---- Interface to upper protocols ---- */
1832
f2d64f6a 1833/* Register/Unregister protocols. */
1da177e4
LT
1834int hci_register_proto(struct hci_proto *hp)
1835{
1836 int err = 0;
1837
1838 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1839
1840 if (hp->id >= HCI_MAX_PROTO)
1841 return -EINVAL;
1842
1da177e4
LT
1843 if (!hci_proto[hp->id])
1844 hci_proto[hp->id] = hp;
1845 else
1846 err = -EEXIST;
1847
1da177e4
LT
1848 return err;
1849}
1850EXPORT_SYMBOL(hci_register_proto);
1851
1852int hci_unregister_proto(struct hci_proto *hp)
1853{
1854 int err = 0;
1855
1856 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1857
1858 if (hp->id >= HCI_MAX_PROTO)
1859 return -EINVAL;
1860
1da177e4
LT
1861 if (hci_proto[hp->id])
1862 hci_proto[hp->id] = NULL;
1863 else
1864 err = -ENOENT;
1865
1da177e4
LT
1866 return err;
1867}
1868EXPORT_SYMBOL(hci_unregister_proto);
1869
1870int hci_register_cb(struct hci_cb *cb)
1871{
1872 BT_DBG("%p name %s", cb, cb->name);
1873
1874 write_lock_bh(&hci_cb_list_lock);
1875 list_add(&cb->list, &hci_cb_list);
1876 write_unlock_bh(&hci_cb_list_lock);
1877
1878 return 0;
1879}
1880EXPORT_SYMBOL(hci_register_cb);
1881
1882int hci_unregister_cb(struct hci_cb *cb)
1883{
1884 BT_DBG("%p name %s", cb, cb->name);
1885
1886 write_lock_bh(&hci_cb_list_lock);
1887 list_del(&cb->list);
1888 write_unlock_bh(&hci_cb_list_lock);
1889
1890 return 0;
1891}
1892EXPORT_SYMBOL(hci_unregister_cb);
1893
1894static int hci_send_frame(struct sk_buff *skb)
1895{
1896 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1897
1898 if (!hdev) {
1899 kfree_skb(skb);
1900 return -ENODEV;
1901 }
1902
0d48d939 1903 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1da177e4
LT
1904
1905 if (atomic_read(&hdev->promisc)) {
1906 /* Time stamp */
a61bbcf2 1907 __net_timestamp(skb);
1da177e4 1908
eec8d2bc 1909 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
1910 }
1911
1912 /* Get rid of skb owner, prior to sending to the driver. */
1913 skb_orphan(skb);
1914
1915 return hdev->send(skb);
1916}
1917
1918/* Send HCI command */
a9de9248 1919int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1da177e4
LT
1920{
1921 int len = HCI_COMMAND_HDR_SIZE + plen;
1922 struct hci_command_hdr *hdr;
1923 struct sk_buff *skb;
1924
a9de9248 1925 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1da177e4
LT
1926
1927 skb = bt_skb_alloc(len, GFP_ATOMIC);
1928 if (!skb) {
ef222013 1929 BT_ERR("%s no memory for command", hdev->name);
1da177e4
LT
1930 return -ENOMEM;
1931 }
1932
1933 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
a9de9248 1934 hdr->opcode = cpu_to_le16(opcode);
1da177e4
LT
1935 hdr->plen = plen;
1936
1937 if (plen)
1938 memcpy(skb_put(skb, plen), param, plen);
1939
1940 BT_DBG("skb len %d", skb->len);
1941
0d48d939 1942 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1da177e4 1943 skb->dev = (void *) hdev;
c78ae283 1944
a5040efa
JH
1945 if (test_bit(HCI_INIT, &hdev->flags))
1946 hdev->init_last_cmd = opcode;
1947
1da177e4 1948 skb_queue_tail(&hdev->cmd_q, skb);
c347b765 1949 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
1950
1951 return 0;
1952}
1da177e4
LT
1953
1954/* Get data from the previously sent command */
a9de9248 1955void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
1956{
1957 struct hci_command_hdr *hdr;
1958
1959 if (!hdev->sent_cmd)
1960 return NULL;
1961
1962 hdr = (void *) hdev->sent_cmd->data;
1963
a9de9248 1964 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
1965 return NULL;
1966
a9de9248 1967 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1da177e4
LT
1968
1969 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1970}
1971
1972/* Send ACL data */
1973static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1974{
1975 struct hci_acl_hdr *hdr;
1976 int len = skb->len;
1977
badff6d0
ACM
1978 skb_push(skb, HCI_ACL_HDR_SIZE);
1979 skb_reset_transport_header(skb);
9c70220b 1980 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
aca3192c
YH
1981 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1982 hdr->dlen = cpu_to_le16(len);
1da177e4
LT
1983}
1984
73d80deb
LAD
1985static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1986 struct sk_buff *skb, __u16 flags)
1da177e4
LT
1987{
1988 struct hci_dev *hdev = conn->hdev;
1989 struct sk_buff *list;
1990
70f23020
AE
1991 list = skb_shinfo(skb)->frag_list;
1992 if (!list) {
1da177e4
LT
1993 /* Non fragmented */
1994 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1995
73d80deb 1996 skb_queue_tail(queue, skb);
1da177e4
LT
1997 } else {
1998 /* Fragmented */
1999 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2000
2001 skb_shinfo(skb)->frag_list = NULL;
2002
2003 /* Queue all fragments atomically */
73d80deb 2004 spin_lock_bh(&queue->lock);
1da177e4 2005
73d80deb 2006 __skb_queue_tail(queue, skb);
e702112f
AE
2007
2008 flags &= ~ACL_START;
2009 flags |= ACL_CONT;
1da177e4
LT
2010 do {
2011 skb = list; list = list->next;
8e87d142 2012
1da177e4 2013 skb->dev = (void *) hdev;
0d48d939 2014 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2015 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2016
2017 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2018
73d80deb 2019 __skb_queue_tail(queue, skb);
1da177e4
LT
2020 } while (list);
2021
73d80deb 2022 spin_unlock_bh(&queue->lock);
1da177e4 2023 }
73d80deb
LAD
2024}
2025
2026void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2027{
2028 struct hci_conn *conn = chan->conn;
2029 struct hci_dev *hdev = conn->hdev;
2030
2031 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2032
2033 skb->dev = (void *) hdev;
2034 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2035 hci_add_acl_hdr(skb, conn->handle, flags);
2036
2037 hci_queue_acl(conn, &chan->data_q, skb, flags);
1da177e4 2038
3eff45ea 2039 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2040}
2041EXPORT_SYMBOL(hci_send_acl);
2042
2043/* Send SCO data */
0d861d8b 2044void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1da177e4
LT
2045{
2046 struct hci_dev *hdev = conn->hdev;
2047 struct hci_sco_hdr hdr;
2048
2049 BT_DBG("%s len %d", hdev->name, skb->len);
2050
aca3192c 2051 hdr.handle = cpu_to_le16(conn->handle);
1da177e4
LT
2052 hdr.dlen = skb->len;
2053
badff6d0
ACM
2054 skb_push(skb, HCI_SCO_HDR_SIZE);
2055 skb_reset_transport_header(skb);
9c70220b 2056 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1da177e4
LT
2057
2058 skb->dev = (void *) hdev;
0d48d939 2059 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
c78ae283 2060
1da177e4 2061 skb_queue_tail(&conn->data_q, skb);
3eff45ea 2062 queue_work(hdev->workqueue, &hdev->tx_work);
1da177e4
LT
2063}
2064EXPORT_SYMBOL(hci_send_sco);
2065
2066/* ---- HCI TX task (outgoing data) ---- */
2067
2068/* HCI Connection scheduler */
2069static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2070{
2071 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2072 struct hci_conn *conn = NULL, *c;
1da177e4 2073 int num = 0, min = ~0;
1da177e4 2074
8e87d142 2075 /* We don't have to lock device here. Connections are always
1da177e4 2076 * added and removed with TX task disabled. */
bf4c6325
GP
2077
2078 rcu_read_lock();
2079
2080 list_for_each_entry_rcu(c, &h->list, list) {
769be974 2081 if (c->type != type || skb_queue_empty(&c->data_q))
1da177e4 2082 continue;
769be974
MH
2083
2084 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2085 continue;
2086
1da177e4
LT
2087 num++;
2088
2089 if (c->sent < min) {
2090 min = c->sent;
2091 conn = c;
2092 }
52087a79
LAD
2093
2094 if (hci_conn_num(hdev, type) == num)
2095 break;
1da177e4
LT
2096 }
2097
bf4c6325
GP
2098 rcu_read_unlock();
2099
1da177e4 2100 if (conn) {
6ed58ec5
VT
2101 int cnt, q;
2102
2103 switch (conn->type) {
2104 case ACL_LINK:
2105 cnt = hdev->acl_cnt;
2106 break;
2107 case SCO_LINK:
2108 case ESCO_LINK:
2109 cnt = hdev->sco_cnt;
2110 break;
2111 case LE_LINK:
2112 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2113 break;
2114 default:
2115 cnt = 0;
2116 BT_ERR("Unknown link type");
2117 }
2118
2119 q = cnt / num;
1da177e4
LT
2120 *quote = q ? q : 1;
2121 } else
2122 *quote = 0;
2123
2124 BT_DBG("conn %p quote %d", conn, *quote);
2125 return conn;
2126}
2127
bae1f5d9 2128static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2129{
2130 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2131 struct hci_conn *c;
1da177e4 2132
bae1f5d9 2133 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2134
bf4c6325
GP
2135 rcu_read_lock();
2136
1da177e4 2137 /* Kill stalled connections */
bf4c6325 2138 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9
VT
2139 if (c->type == type && c->sent) {
2140 BT_ERR("%s killing stalled connection %s",
1da177e4
LT
2141 hdev->name, batostr(&c->dst));
2142 hci_acl_disconn(c, 0x13);
2143 }
2144 }
bf4c6325
GP
2145
2146 rcu_read_unlock();
1da177e4
LT
2147}
2148
73d80deb
LAD
2149static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2150 int *quote)
1da177e4 2151{
73d80deb
LAD
2152 struct hci_conn_hash *h = &hdev->conn_hash;
2153 struct hci_chan *chan = NULL;
2154 int num = 0, min = ~0, cur_prio = 0;
1da177e4 2155 struct hci_conn *conn;
73d80deb
LAD
2156 int cnt, q, conn_num = 0;
2157
2158 BT_DBG("%s", hdev->name);
2159
bf4c6325
GP
2160 rcu_read_lock();
2161
2162 list_for_each_entry_rcu(conn, &h->list, list) {
73d80deb
LAD
2163 struct hci_chan *tmp;
2164
2165 if (conn->type != type)
2166 continue;
2167
2168 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2169 continue;
2170
2171 conn_num++;
2172
8192edef 2173 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
73d80deb
LAD
2174 struct sk_buff *skb;
2175
2176 if (skb_queue_empty(&tmp->data_q))
2177 continue;
2178
2179 skb = skb_peek(&tmp->data_q);
2180 if (skb->priority < cur_prio)
2181 continue;
2182
2183 if (skb->priority > cur_prio) {
2184 num = 0;
2185 min = ~0;
2186 cur_prio = skb->priority;
2187 }
2188
2189 num++;
2190
2191 if (conn->sent < min) {
2192 min = conn->sent;
2193 chan = tmp;
2194 }
2195 }
2196
2197 if (hci_conn_num(hdev, type) == conn_num)
2198 break;
2199 }
2200
bf4c6325
GP
2201 rcu_read_unlock();
2202
73d80deb
LAD
2203 if (!chan)
2204 return NULL;
2205
2206 switch (chan->conn->type) {
2207 case ACL_LINK:
2208 cnt = hdev->acl_cnt;
2209 break;
2210 case SCO_LINK:
2211 case ESCO_LINK:
2212 cnt = hdev->sco_cnt;
2213 break;
2214 case LE_LINK:
2215 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2216 break;
2217 default:
2218 cnt = 0;
2219 BT_ERR("Unknown link type");
2220 }
2221
2222 q = cnt / num;
2223 *quote = q ? q : 1;
2224 BT_DBG("chan %p quote %d", chan, *quote);
2225 return chan;
2226}
2227
02b20f0b
LAD
2228static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2229{
2230 struct hci_conn_hash *h = &hdev->conn_hash;
2231 struct hci_conn *conn;
2232 int num = 0;
2233
2234 BT_DBG("%s", hdev->name);
2235
bf4c6325
GP
2236 rcu_read_lock();
2237
2238 list_for_each_entry_rcu(conn, &h->list, list) {
02b20f0b
LAD
2239 struct hci_chan *chan;
2240
2241 if (conn->type != type)
2242 continue;
2243
2244 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2245 continue;
2246
2247 num++;
2248
8192edef 2249 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
02b20f0b
LAD
2250 struct sk_buff *skb;
2251
2252 if (chan->sent) {
2253 chan->sent = 0;
2254 continue;
2255 }
2256
2257 if (skb_queue_empty(&chan->data_q))
2258 continue;
2259
2260 skb = skb_peek(&chan->data_q);
2261 if (skb->priority >= HCI_PRIO_MAX - 1)
2262 continue;
2263
2264 skb->priority = HCI_PRIO_MAX - 1;
2265
2266 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2267 skb->priority);
2268 }
2269
2270 if (hci_conn_num(hdev, type) == num)
2271 break;
2272 }
bf4c6325
GP
2273
2274 rcu_read_unlock();
2275
02b20f0b
LAD
2276}
2277
73d80deb
LAD
2278static inline void hci_sched_acl(struct hci_dev *hdev)
2279{
2280 struct hci_chan *chan;
1da177e4
LT
2281 struct sk_buff *skb;
2282 int quote;
73d80deb 2283 unsigned int cnt;
1da177e4
LT
2284
2285 BT_DBG("%s", hdev->name);
2286
52087a79
LAD
2287 if (!hci_conn_num(hdev, ACL_LINK))
2288 return;
2289
1da177e4
LT
2290 if (!test_bit(HCI_RAW, &hdev->flags)) {
2291 /* ACL tx timeout must be longer than maximum
2292 * link supervision timeout (40.9 seconds) */
82453021 2293 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
bae1f5d9 2294 hci_link_tx_to(hdev, ACL_LINK);
1da177e4
LT
2295 }
2296
73d80deb 2297 cnt = hdev->acl_cnt;
04837f64 2298
73d80deb
LAD
2299 while (hdev->acl_cnt &&
2300 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2301 u32 priority = (skb_peek(&chan->data_q))->priority;
2302 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb
LAD
2303 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2304 skb->len, skb->priority);
2305
ec1cce24
LAD
2306 /* Stop if priority has changed */
2307 if (skb->priority < priority)
2308 break;
2309
2310 skb = skb_dequeue(&chan->data_q);
2311
73d80deb
LAD
2312 hci_conn_enter_active_mode(chan->conn,
2313 bt_cb(skb)->force_active);
04837f64 2314
1da177e4
LT
2315 hci_send_frame(skb);
2316 hdev->acl_last_tx = jiffies;
2317
2318 hdev->acl_cnt--;
73d80deb
LAD
2319 chan->sent++;
2320 chan->conn->sent++;
1da177e4
LT
2321 }
2322 }
02b20f0b
LAD
2323
2324 if (cnt != hdev->acl_cnt)
2325 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2326}
2327
2328/* Schedule SCO */
2329static inline void hci_sched_sco(struct hci_dev *hdev)
2330{
2331 struct hci_conn *conn;
2332 struct sk_buff *skb;
2333 int quote;
2334
2335 BT_DBG("%s", hdev->name);
2336
52087a79
LAD
2337 if (!hci_conn_num(hdev, SCO_LINK))
2338 return;
2339
1da177e4
LT
2340 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2341 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2342 BT_DBG("skb %p len %d", skb, skb->len);
2343 hci_send_frame(skb);
2344
2345 conn->sent++;
2346 if (conn->sent == ~0)
2347 conn->sent = 0;
2348 }
2349 }
2350}
2351
b6a0dc82
MH
2352static inline void hci_sched_esco(struct hci_dev *hdev)
2353{
2354 struct hci_conn *conn;
2355 struct sk_buff *skb;
2356 int quote;
2357
2358 BT_DBG("%s", hdev->name);
2359
52087a79
LAD
2360 if (!hci_conn_num(hdev, ESCO_LINK))
2361 return;
2362
b6a0dc82
MH
2363 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2364 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2365 BT_DBG("skb %p len %d", skb, skb->len);
2366 hci_send_frame(skb);
2367
2368 conn->sent++;
2369 if (conn->sent == ~0)
2370 conn->sent = 0;
2371 }
2372 }
2373}
2374
6ed58ec5
VT
2375static inline void hci_sched_le(struct hci_dev *hdev)
2376{
73d80deb 2377 struct hci_chan *chan;
6ed58ec5 2378 struct sk_buff *skb;
02b20f0b 2379 int quote, cnt, tmp;
6ed58ec5
VT
2380
2381 BT_DBG("%s", hdev->name);
2382
52087a79
LAD
2383 if (!hci_conn_num(hdev, LE_LINK))
2384 return;
2385
6ed58ec5
VT
2386 if (!test_bit(HCI_RAW, &hdev->flags)) {
2387 /* LE tx timeout must be longer than maximum
2388 * link supervision timeout (40.9 seconds) */
bae1f5d9 2389 if (!hdev->le_cnt && hdev->le_pkts &&
6ed58ec5 2390 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 2391 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
2392 }
2393
2394 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 2395 tmp = cnt;
73d80deb 2396 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
2397 u32 priority = (skb_peek(&chan->data_q))->priority;
2398 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb
LAD
2399 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2400 skb->len, skb->priority);
6ed58ec5 2401
ec1cce24
LAD
2402 /* Stop if priority has changed */
2403 if (skb->priority < priority)
2404 break;
2405
2406 skb = skb_dequeue(&chan->data_q);
2407
6ed58ec5
VT
2408 hci_send_frame(skb);
2409 hdev->le_last_tx = jiffies;
2410
2411 cnt--;
73d80deb
LAD
2412 chan->sent++;
2413 chan->conn->sent++;
6ed58ec5
VT
2414 }
2415 }
73d80deb 2416
6ed58ec5
VT
2417 if (hdev->le_pkts)
2418 hdev->le_cnt = cnt;
2419 else
2420 hdev->acl_cnt = cnt;
02b20f0b
LAD
2421
2422 if (cnt != tmp)
2423 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
2424}
2425
3eff45ea 2426static void hci_tx_work(struct work_struct *work)
1da177e4 2427{
3eff45ea 2428 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2429 struct sk_buff *skb;
2430
6ed58ec5
VT
2431 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2432 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2433
2434 /* Schedule queues and send stuff to HCI driver */
2435
2436 hci_sched_acl(hdev);
2437
2438 hci_sched_sco(hdev);
2439
b6a0dc82
MH
2440 hci_sched_esco(hdev);
2441
6ed58ec5
VT
2442 hci_sched_le(hdev);
2443
1da177e4
LT
2444 /* Send next queued raw (unknown type) packet */
2445 while ((skb = skb_dequeue(&hdev->raw_q)))
2446 hci_send_frame(skb);
1da177e4
LT
2447}
2448
25985edc 2449/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2450
2451/* ACL data packet */
2452static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2453{
2454 struct hci_acl_hdr *hdr = (void *) skb->data;
2455 struct hci_conn *conn;
2456 __u16 handle, flags;
2457
2458 skb_pull(skb, HCI_ACL_HDR_SIZE);
2459
2460 handle = __le16_to_cpu(hdr->handle);
2461 flags = hci_flags(handle);
2462 handle = hci_handle(handle);
2463
2464 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2465
2466 hdev->stat.acl_rx++;
2467
2468 hci_dev_lock(hdev);
2469 conn = hci_conn_hash_lookup_handle(hdev, handle);
2470 hci_dev_unlock(hdev);
8e87d142 2471
1da177e4
LT
2472 if (conn) {
2473 register struct hci_proto *hp;
2474
65983fc7 2475 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2476
1da177e4 2477 /* Send to upper protocol */
70f23020
AE
2478 hp = hci_proto[HCI_PROTO_L2CAP];
2479 if (hp && hp->recv_acldata) {
1da177e4
LT
2480 hp->recv_acldata(conn, skb, flags);
2481 return;
2482 }
2483 } else {
8e87d142 2484 BT_ERR("%s ACL packet for unknown connection handle %d",
1da177e4
LT
2485 hdev->name, handle);
2486 }
2487
2488 kfree_skb(skb);
2489}
2490
2491/* SCO data packet */
2492static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2493{
2494 struct hci_sco_hdr *hdr = (void *) skb->data;
2495 struct hci_conn *conn;
2496 __u16 handle;
2497
2498 skb_pull(skb, HCI_SCO_HDR_SIZE);
2499
2500 handle = __le16_to_cpu(hdr->handle);
2501
2502 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2503
2504 hdev->stat.sco_rx++;
2505
2506 hci_dev_lock(hdev);
2507 conn = hci_conn_hash_lookup_handle(hdev, handle);
2508 hci_dev_unlock(hdev);
2509
2510 if (conn) {
2511 register struct hci_proto *hp;
2512
2513 /* Send to upper protocol */
70f23020
AE
2514 hp = hci_proto[HCI_PROTO_SCO];
2515 if (hp && hp->recv_scodata) {
1da177e4
LT
2516 hp->recv_scodata(conn, skb);
2517 return;
2518 }
2519 } else {
8e87d142 2520 BT_ERR("%s SCO packet for unknown connection handle %d",
1da177e4
LT
2521 hdev->name, handle);
2522 }
2523
2524 kfree_skb(skb);
2525}
2526
b78752cc 2527static void hci_rx_work(struct work_struct *work)
1da177e4 2528{
b78752cc 2529 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
2530 struct sk_buff *skb;
2531
2532 BT_DBG("%s", hdev->name);
2533
1da177e4
LT
2534 while ((skb = skb_dequeue(&hdev->rx_q))) {
2535 if (atomic_read(&hdev->promisc)) {
2536 /* Send copy to the sockets */
eec8d2bc 2537 hci_send_to_sock(hdev, skb, NULL);
1da177e4
LT
2538 }
2539
2540 if (test_bit(HCI_RAW, &hdev->flags)) {
2541 kfree_skb(skb);
2542 continue;
2543 }
2544
2545 if (test_bit(HCI_INIT, &hdev->flags)) {
2546 /* Don't process data packets in this states. */
0d48d939 2547 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2548 case HCI_ACLDATA_PKT:
2549 case HCI_SCODATA_PKT:
2550 kfree_skb(skb);
2551 continue;
3ff50b79 2552 }
1da177e4
LT
2553 }
2554
2555 /* Process frame */
0d48d939 2556 switch (bt_cb(skb)->pkt_type) {
1da177e4 2557 case HCI_EVENT_PKT:
b78752cc 2558 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
2559 hci_event_packet(hdev, skb);
2560 break;
2561
2562 case HCI_ACLDATA_PKT:
2563 BT_DBG("%s ACL data packet", hdev->name);
2564 hci_acldata_packet(hdev, skb);
2565 break;
2566
2567 case HCI_SCODATA_PKT:
2568 BT_DBG("%s SCO data packet", hdev->name);
2569 hci_scodata_packet(hdev, skb);
2570 break;
2571
2572 default:
2573 kfree_skb(skb);
2574 break;
2575 }
2576 }
1da177e4
LT
2577}
2578
c347b765 2579static void hci_cmd_work(struct work_struct *work)
1da177e4 2580{
c347b765 2581 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
2582 struct sk_buff *skb;
2583
2584 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2585
1da177e4 2586 /* Send queued commands */
5a08ecce
AE
2587 if (atomic_read(&hdev->cmd_cnt)) {
2588 skb = skb_dequeue(&hdev->cmd_q);
2589 if (!skb)
2590 return;
2591
7585b97a 2592 kfree_skb(hdev->sent_cmd);
1da177e4 2593
70f23020
AE
2594 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2595 if (hdev->sent_cmd) {
1da177e4
LT
2596 atomic_dec(&hdev->cmd_cnt);
2597 hci_send_frame(skb);
7bdb8a5c
SJ
2598 if (test_bit(HCI_RESET, &hdev->flags))
2599 del_timer(&hdev->cmd_timer);
2600 else
2601 mod_timer(&hdev->cmd_timer,
6bd32326 2602 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
1da177e4
LT
2603 } else {
2604 skb_queue_head(&hdev->cmd_q, skb);
c347b765 2605 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2606 }
2607 }
2608}
2519a1fc
AG
2609
2610int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2611{
2612 /* General inquiry access code (GIAC) */
2613 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2614 struct hci_cp_inquiry cp;
2615
2616 BT_DBG("%s", hdev->name);
2617
2618 if (test_bit(HCI_INQUIRY, &hdev->flags))
2619 return -EINPROGRESS;
2620
2621 memset(&cp, 0, sizeof(cp));
2622 memcpy(&cp.lap, lap, sizeof(cp.lap));
2623 cp.length = length;
2624
2625 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2626}
023d5049
AG
2627
2628int hci_cancel_inquiry(struct hci_dev *hdev)
2629{
2630 BT_DBG("%s", hdev->name);
2631
2632 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2633 return -EPERM;
2634
2635 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2636}
7784d78f
AE
2637
2638module_param(enable_hs, bool, 0644);
2639MODULE_PARM_DESC(enable_hs, "Enable High Speed");
This page took 0.934044 seconds and 5 git commands to generate.