Bluetooth: AMP: Set no FCS for incoming L2CAP chan
[deliverable/linux.git] / net / bluetooth / hci_core.c
CommitLineData
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
8c520a59 28#include <linux/export.h>
3df92b31 29#include <linux/idr.h>
1da177e4 30
8c520a59 31#include <linux/rfkill.h>
1da177e4
LT
32
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
b78752cc 36static void hci_rx_work(struct work_struct *work);
c347b765 37static void hci_cmd_work(struct work_struct *work);
3eff45ea 38static void hci_tx_work(struct work_struct *work);
1da177e4 39
1da177e4
LT
40/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
3df92b31
SL
48/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
1da177e4
LT
51/* ---- HCI notifications ---- */
52
/* ---- HCI notifications ---- */

/* Forward a device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
23bb5763 60void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
1da177e4 61{
f0e09510 62 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
23bb5763 63
a5040efa
JH
64 /* If this is the init phase check if the completed command matches
65 * the last init command, and if not just return.
66 */
75fb0e32
JH
67 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
68 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1036b890 69 u16 opcode = __le16_to_cpu(sent->opcode);
75fb0e32
JH
70 struct sk_buff *skb;
71
72 /* Some CSR based controllers generate a spontaneous
73 * reset complete event during init and any pending
74 * command will never be completed. In such a case we
75 * need to resend whatever was the last sent
76 * command.
77 */
78
1036b890 79 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
75fb0e32
JH
80 return;
81
82 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
83 if (skb) {
84 skb_queue_head(&hdev->cmd_q, skb);
85 queue_work(hdev->workqueue, &hdev->cmd_work);
86 }
87
23bb5763 88 return;
75fb0e32 89 }
1da177e4
LT
90
91 if (hdev->req_status == HCI_REQ_PEND) {
92 hdev->req_result = result;
93 hdev->req_status = HCI_REQ_DONE;
94 wake_up_interruptible(&hdev->req_wait_q);
95 }
96}
97
98static void hci_req_cancel(struct hci_dev *hdev, int err)
99{
100 BT_DBG("%s err 0x%2.2x", hdev->name, err);
101
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = err;
104 hdev->req_status = HCI_REQ_CANCELED;
105 wake_up_interruptible(&hdev->req_wait_q);
106 }
107}
108
109/* Execute request and wait for completion. */
a8c5fb1a
GP
110static int __hci_request(struct hci_dev *hdev,
111 void (*req)(struct hci_dev *hdev, unsigned long opt),
112 unsigned long opt, __u32 timeout)
1da177e4
LT
113{
114 DECLARE_WAITQUEUE(wait, current);
115 int err = 0;
116
117 BT_DBG("%s start", hdev->name);
118
119 hdev->req_status = HCI_REQ_PEND;
120
121 add_wait_queue(&hdev->req_wait_q, &wait);
122 set_current_state(TASK_INTERRUPTIBLE);
123
124 req(hdev, opt);
125 schedule_timeout(timeout);
126
127 remove_wait_queue(&hdev->req_wait_q, &wait);
128
129 if (signal_pending(current))
130 return -EINTR;
131
132 switch (hdev->req_status) {
133 case HCI_REQ_DONE:
e175072f 134 err = -bt_to_errno(hdev->req_result);
1da177e4
LT
135 break;
136
137 case HCI_REQ_CANCELED:
138 err = -hdev->req_result;
139 break;
140
141 default:
142 err = -ETIMEDOUT;
143 break;
3ff50b79 144 }
1da177e4 145
a5040efa 146 hdev->req_status = hdev->req_result = 0;
1da177e4
LT
147
148 BT_DBG("%s end: err %d", hdev->name, err);
149
150 return err;
151}
152
6039aa73
GP
153static int hci_request(struct hci_dev *hdev,
154 void (*req)(struct hci_dev *hdev, unsigned long opt),
155 unsigned long opt, __u32 timeout)
1da177e4
LT
156{
157 int ret;
158
7c6a329e
MH
159 if (!test_bit(HCI_UP, &hdev->flags))
160 return -ENETDOWN;
161
1da177e4
LT
162 /* Serialize all requests */
163 hci_req_lock(hdev);
164 ret = __hci_request(hdev, req, opt, timeout);
165 hci_req_unlock(hdev);
166
167 return ret;
168}
169
170static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
171{
172 BT_DBG("%s %ld", hdev->name, opt);
173
174 /* Reset device */
f630cf0d 175 set_bit(HCI_RESET, &hdev->flags);
a9de9248 176 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
1da177e4
LT
177}
178
e61ef499 179static void bredr_init(struct hci_dev *hdev)
1da177e4 180{
2455a3ea
AE
181 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
182
1da177e4 183 /* Read Local Supported Features */
a9de9248 184 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
1da177e4 185
1143e5a6 186 /* Read Local Version */
a9de9248 187 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
1da177e4
LT
188}
189
e61ef499
AE
190static void amp_init(struct hci_dev *hdev)
191{
2455a3ea
AE
192 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
193
e61ef499
AE
194 /* Read Local Version */
195 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
6bcbc489
AE
196
197 /* Read Local AMP Info */
198 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
e71dfaba
AE
199
200 /* Read Data Blk size */
201 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
e61ef499
AE
202}
203
204static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
205{
206 struct sk_buff *skb;
207
208 BT_DBG("%s %ld", hdev->name, opt);
209
210 /* Driver initialization */
211
212 /* Special commands */
213 while ((skb = skb_dequeue(&hdev->driver_init))) {
214 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
215 skb->dev = (void *) hdev;
216
217 skb_queue_tail(&hdev->cmd_q, skb);
218 queue_work(hdev->workqueue, &hdev->cmd_work);
219 }
220 skb_queue_purge(&hdev->driver_init);
221
11778716
AE
222 /* Reset */
223 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
224 hci_reset_req(hdev, 0);
225
e61ef499
AE
226 switch (hdev->dev_type) {
227 case HCI_BREDR:
228 bredr_init(hdev);
229 break;
230
231 case HCI_AMP:
232 amp_init(hdev);
233 break;
234
235 default:
236 BT_ERR("Unknown device type %d", hdev->dev_type);
237 break;
238 }
e61ef499
AE
239}
240
1da177e4
LT
241static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
242{
243 __u8 scan = opt;
244
245 BT_DBG("%s %x", hdev->name, scan);
246
247 /* Inquiry and Page scans */
a9de9248 248 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1da177e4
LT
249}
250
251static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
252{
253 __u8 auth = opt;
254
255 BT_DBG("%s %x", hdev->name, auth);
256
257 /* Authentication */
a9de9248 258 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
1da177e4
LT
259}
260
261static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
262{
263 __u8 encrypt = opt;
264
265 BT_DBG("%s %x", hdev->name, encrypt);
266
e4e8e37c 267 /* Encryption */
a9de9248 268 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
1da177e4
LT
269}
270
e4e8e37c
MH
271static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
272{
273 __le16 policy = cpu_to_le16(opt);
274
a418b893 275 BT_DBG("%s %x", hdev->name, policy);
e4e8e37c
MH
276
277 /* Default link policy */
278 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
279}
280
8e87d142 281/* Get HCI device by index.
1da177e4
LT
282 * Device is held on return. */
283struct hci_dev *hci_dev_get(int index)
284{
8035ded4 285 struct hci_dev *hdev = NULL, *d;
1da177e4
LT
286
287 BT_DBG("%d", index);
288
289 if (index < 0)
290 return NULL;
291
292 read_lock(&hci_dev_list_lock);
8035ded4 293 list_for_each_entry(d, &hci_dev_list, list) {
1da177e4
LT
294 if (d->id == index) {
295 hdev = hci_dev_hold(d);
296 break;
297 }
298 }
299 read_unlock(&hci_dev_list_lock);
300 return hdev;
301}
1da177e4
LT
302
303/* ---- Inquiry support ---- */
ff9ef578 304
30dc78e1
JH
305bool hci_discovery_active(struct hci_dev *hdev)
306{
307 struct discovery_state *discov = &hdev->discovery;
308
6fbe195d 309 switch (discov->state) {
343f935b 310 case DISCOVERY_FINDING:
6fbe195d 311 case DISCOVERY_RESOLVING:
30dc78e1
JH
312 return true;
313
6fbe195d
AG
314 default:
315 return false;
316 }
30dc78e1
JH
317}
318
ff9ef578
JH
319void hci_discovery_set_state(struct hci_dev *hdev, int state)
320{
321 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
322
323 if (hdev->discovery.state == state)
324 return;
325
326 switch (state) {
327 case DISCOVERY_STOPPED:
7b99b659
AG
328 if (hdev->discovery.state != DISCOVERY_STARTING)
329 mgmt_discovering(hdev, 0);
ff9ef578
JH
330 break;
331 case DISCOVERY_STARTING:
332 break;
343f935b 333 case DISCOVERY_FINDING:
ff9ef578
JH
334 mgmt_discovering(hdev, 1);
335 break;
30dc78e1
JH
336 case DISCOVERY_RESOLVING:
337 break;
ff9ef578
JH
338 case DISCOVERY_STOPPING:
339 break;
340 }
341
342 hdev->discovery.state = state;
343}
344
1da177e4
LT
345static void inquiry_cache_flush(struct hci_dev *hdev)
346{
30883512 347 struct discovery_state *cache = &hdev->discovery;
b57c1a56 348 struct inquiry_entry *p, *n;
1da177e4 349
561aafbc
JH
350 list_for_each_entry_safe(p, n, &cache->all, all) {
351 list_del(&p->all);
b57c1a56 352 kfree(p);
1da177e4 353 }
561aafbc
JH
354
355 INIT_LIST_HEAD(&cache->unknown);
356 INIT_LIST_HEAD(&cache->resolve);
1da177e4
LT
357}
358
a8c5fb1a
GP
359struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
360 bdaddr_t *bdaddr)
1da177e4 361{
30883512 362 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
363 struct inquiry_entry *e;
364
6ed93dc6 365 BT_DBG("cache %p, %pMR", cache, bdaddr);
1da177e4 366
561aafbc
JH
367 list_for_each_entry(e, &cache->all, all) {
368 if (!bacmp(&e->data.bdaddr, bdaddr))
369 return e;
370 }
371
372 return NULL;
373}
374
375struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
04124681 376 bdaddr_t *bdaddr)
561aafbc 377{
30883512 378 struct discovery_state *cache = &hdev->discovery;
561aafbc
JH
379 struct inquiry_entry *e;
380
6ed93dc6 381 BT_DBG("cache %p, %pMR", cache, bdaddr);
561aafbc
JH
382
383 list_for_each_entry(e, &cache->unknown, list) {
1da177e4 384 if (!bacmp(&e->data.bdaddr, bdaddr))
b57c1a56
JH
385 return e;
386 }
387
388 return NULL;
1da177e4
LT
389}
390
30dc78e1 391struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
04124681
GP
392 bdaddr_t *bdaddr,
393 int state)
30dc78e1
JH
394{
395 struct discovery_state *cache = &hdev->discovery;
396 struct inquiry_entry *e;
397
6ed93dc6 398 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
30dc78e1
JH
399
400 list_for_each_entry(e, &cache->resolve, list) {
401 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
402 return e;
403 if (!bacmp(&e->data.bdaddr, bdaddr))
404 return e;
405 }
406
407 return NULL;
408}
409
a3d4e20a 410void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
04124681 411 struct inquiry_entry *ie)
a3d4e20a
JH
412{
413 struct discovery_state *cache = &hdev->discovery;
414 struct list_head *pos = &cache->resolve;
415 struct inquiry_entry *p;
416
417 list_del(&ie->list);
418
419 list_for_each_entry(p, &cache->resolve, list) {
420 if (p->name_state != NAME_PENDING &&
a8c5fb1a 421 abs(p->data.rssi) >= abs(ie->data.rssi))
a3d4e20a
JH
422 break;
423 pos = &p->list;
424 }
425
426 list_add(&ie->list, pos);
427}
428
3175405b 429bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
04124681 430 bool name_known, bool *ssp)
1da177e4 431{
30883512 432 struct discovery_state *cache = &hdev->discovery;
70f23020 433 struct inquiry_entry *ie;
1da177e4 434
6ed93dc6 435 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
1da177e4 436
388fc8fa
JH
437 if (ssp)
438 *ssp = data->ssp_mode;
439
70f23020 440 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
a3d4e20a 441 if (ie) {
388fc8fa
JH
442 if (ie->data.ssp_mode && ssp)
443 *ssp = true;
444
a3d4e20a 445 if (ie->name_state == NAME_NEEDED &&
a8c5fb1a 446 data->rssi != ie->data.rssi) {
a3d4e20a
JH
447 ie->data.rssi = data->rssi;
448 hci_inquiry_cache_update_resolve(hdev, ie);
449 }
450
561aafbc 451 goto update;
a3d4e20a 452 }
561aafbc
JH
453
454 /* Entry not in the cache. Add new one. */
455 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
456 if (!ie)
3175405b 457 return false;
561aafbc
JH
458
459 list_add(&ie->all, &cache->all);
460
461 if (name_known) {
462 ie->name_state = NAME_KNOWN;
463 } else {
464 ie->name_state = NAME_NOT_KNOWN;
465 list_add(&ie->list, &cache->unknown);
466 }
70f23020 467
561aafbc
JH
468update:
469 if (name_known && ie->name_state != NAME_KNOWN &&
a8c5fb1a 470 ie->name_state != NAME_PENDING) {
561aafbc
JH
471 ie->name_state = NAME_KNOWN;
472 list_del(&ie->list);
1da177e4
LT
473 }
474
70f23020
AE
475 memcpy(&ie->data, data, sizeof(*data));
476 ie->timestamp = jiffies;
1da177e4 477 cache->timestamp = jiffies;
3175405b
JH
478
479 if (ie->name_state == NAME_NOT_KNOWN)
480 return false;
481
482 return true;
1da177e4
LT
483}
484
485static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
486{
30883512 487 struct discovery_state *cache = &hdev->discovery;
1da177e4
LT
488 struct inquiry_info *info = (struct inquiry_info *) buf;
489 struct inquiry_entry *e;
490 int copied = 0;
491
561aafbc 492 list_for_each_entry(e, &cache->all, all) {
1da177e4 493 struct inquiry_data *data = &e->data;
b57c1a56
JH
494
495 if (copied >= num)
496 break;
497
1da177e4
LT
498 bacpy(&info->bdaddr, &data->bdaddr);
499 info->pscan_rep_mode = data->pscan_rep_mode;
500 info->pscan_period_mode = data->pscan_period_mode;
501 info->pscan_mode = data->pscan_mode;
502 memcpy(info->dev_class, data->dev_class, 3);
503 info->clock_offset = data->clock_offset;
b57c1a56 504
1da177e4 505 info++;
b57c1a56 506 copied++;
1da177e4
LT
507 }
508
509 BT_DBG("cache %p, copied %d", cache, copied);
510 return copied;
511}
512
513static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
514{
515 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
516 struct hci_cp_inquiry cp;
517
518 BT_DBG("%s", hdev->name);
519
520 if (test_bit(HCI_INQUIRY, &hdev->flags))
521 return;
522
523 /* Start Inquiry */
524 memcpy(&cp.lap, &ir->lap, 3);
525 cp.length = ir->length;
526 cp.num_rsp = ir->num_rsp;
a9de9248 527 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
1da177e4
LT
528}
529
530int hci_inquiry(void __user *arg)
531{
532 __u8 __user *ptr = arg;
533 struct hci_inquiry_req ir;
534 struct hci_dev *hdev;
535 int err = 0, do_inquiry = 0, max_rsp;
536 long timeo;
537 __u8 *buf;
538
539 if (copy_from_user(&ir, ptr, sizeof(ir)))
540 return -EFAULT;
541
5a08ecce
AE
542 hdev = hci_dev_get(ir.dev_id);
543 if (!hdev)
1da177e4
LT
544 return -ENODEV;
545
09fd0de5 546 hci_dev_lock(hdev);
8e87d142 547 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
a8c5fb1a 548 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
1da177e4
LT
549 inquiry_cache_flush(hdev);
550 do_inquiry = 1;
551 }
09fd0de5 552 hci_dev_unlock(hdev);
1da177e4 553
04837f64 554 timeo = ir.length * msecs_to_jiffies(2000);
70f23020
AE
555
556 if (do_inquiry) {
557 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
558 if (err < 0)
559 goto done;
560 }
1da177e4 561
8fc9ced3
GP
562 /* for unlimited number of responses we will use buffer with
563 * 255 entries
564 */
1da177e4
LT
565 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
566
567 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
568 * copy it to the user space.
569 */
01df8c31 570 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
70f23020 571 if (!buf) {
1da177e4
LT
572 err = -ENOMEM;
573 goto done;
574 }
575
09fd0de5 576 hci_dev_lock(hdev);
1da177e4 577 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
09fd0de5 578 hci_dev_unlock(hdev);
1da177e4
LT
579
580 BT_DBG("num_rsp %d", ir.num_rsp);
581
582 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
583 ptr += sizeof(ir);
584 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
a8c5fb1a 585 ir.num_rsp))
1da177e4 586 err = -EFAULT;
8e87d142 587 } else
1da177e4
LT
588 err = -EFAULT;
589
590 kfree(buf);
591
592done:
593 hci_dev_put(hdev);
594 return err;
595}
596
3f0f524b
JH
597static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
598{
599 u8 ad_len = 0, flags = 0;
600 size_t name_len;
601
602 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
603 flags |= LE_AD_GENERAL;
604
605 if (!lmp_bredr_capable(hdev))
606 flags |= LE_AD_NO_BREDR;
607
608 if (lmp_le_br_capable(hdev))
609 flags |= LE_AD_SIM_LE_BREDR_CTRL;
610
611 if (lmp_host_le_br_capable(hdev))
612 flags |= LE_AD_SIM_LE_BREDR_HOST;
613
614 if (flags) {
615 BT_DBG("adv flags 0x%02x", flags);
616
617 ptr[0] = 2;
618 ptr[1] = EIR_FLAGS;
619 ptr[2] = flags;
620
621 ad_len += 3;
622 ptr += 3;
623 }
624
625 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
626 ptr[0] = 2;
627 ptr[1] = EIR_TX_POWER;
628 ptr[2] = (u8) hdev->adv_tx_power;
629
630 ad_len += 3;
631 ptr += 3;
632 }
633
634 name_len = strlen(hdev->dev_name);
635 if (name_len > 0) {
636 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
637
638 if (name_len > max_len) {
639 name_len = max_len;
640 ptr[1] = EIR_NAME_SHORT;
641 } else
642 ptr[1] = EIR_NAME_COMPLETE;
643
644 ptr[0] = name_len + 1;
645
646 memcpy(ptr + 2, hdev->dev_name, name_len);
647
648 ad_len += (name_len + 2);
649 ptr += (name_len + 2);
650 }
651
652 return ad_len;
653}
654
655int hci_update_ad(struct hci_dev *hdev)
656{
657 struct hci_cp_le_set_adv_data cp;
658 u8 len;
659 int err;
660
661 hci_dev_lock(hdev);
662
663 if (!lmp_le_capable(hdev)) {
664 err = -EINVAL;
665 goto unlock;
666 }
667
668 memset(&cp, 0, sizeof(cp));
669
670 len = create_ad(hdev, cp.data);
671
672 if (hdev->adv_data_len == len &&
673 memcmp(cp.data, hdev->adv_data, len) == 0) {
674 err = 0;
675 goto unlock;
676 }
677
678 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
679 hdev->adv_data_len = len;
680
681 cp.length = len;
682 err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
683
684unlock:
685 hci_dev_unlock(hdev);
686
687 return err;
688}
689
1da177e4
LT
690/* ---- HCI ioctl helpers ---- */
691
692int hci_dev_open(__u16 dev)
693{
694 struct hci_dev *hdev;
695 int ret = 0;
696
5a08ecce
AE
697 hdev = hci_dev_get(dev);
698 if (!hdev)
1da177e4
LT
699 return -ENODEV;
700
701 BT_DBG("%s %p", hdev->name, hdev);
702
703 hci_req_lock(hdev);
704
94324962
JH
705 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
706 ret = -ENODEV;
707 goto done;
708 }
709
611b30f7
MH
710 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
711 ret = -ERFKILL;
712 goto done;
713 }
714
1da177e4
LT
715 if (test_bit(HCI_UP, &hdev->flags)) {
716 ret = -EALREADY;
717 goto done;
718 }
719
720 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
721 set_bit(HCI_RAW, &hdev->flags);
722
07e3b94a
AE
723 /* Treat all non BR/EDR controllers as raw devices if
724 enable_hs is not set */
725 if (hdev->dev_type != HCI_BREDR && !enable_hs)
943da25d
MH
726 set_bit(HCI_RAW, &hdev->flags);
727
1da177e4
LT
728 if (hdev->open(hdev)) {
729 ret = -EIO;
730 goto done;
731 }
732
733 if (!test_bit(HCI_RAW, &hdev->flags)) {
734 atomic_set(&hdev->cmd_cnt, 1);
735 set_bit(HCI_INIT, &hdev->flags);
a5040efa 736 hdev->init_last_cmd = 0;
1da177e4 737
5f246e89 738 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
739
740 clear_bit(HCI_INIT, &hdev->flags);
741 }
742
743 if (!ret) {
744 hci_dev_hold(hdev);
745 set_bit(HCI_UP, &hdev->flags);
746 hci_notify(hdev, HCI_DEV_UP);
3f0f524b 747 hci_update_ad(hdev);
bb4b2a9a
AE
748 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
749 mgmt_valid_hdev(hdev)) {
09fd0de5 750 hci_dev_lock(hdev);
744cf19e 751 mgmt_powered(hdev, 1);
09fd0de5 752 hci_dev_unlock(hdev);
56e5cb86 753 }
8e87d142 754 } else {
1da177e4 755 /* Init failed, cleanup */
3eff45ea 756 flush_work(&hdev->tx_work);
c347b765 757 flush_work(&hdev->cmd_work);
b78752cc 758 flush_work(&hdev->rx_work);
1da177e4
LT
759
760 skb_queue_purge(&hdev->cmd_q);
761 skb_queue_purge(&hdev->rx_q);
762
763 if (hdev->flush)
764 hdev->flush(hdev);
765
766 if (hdev->sent_cmd) {
767 kfree_skb(hdev->sent_cmd);
768 hdev->sent_cmd = NULL;
769 }
770
771 hdev->close(hdev);
772 hdev->flags = 0;
773 }
774
775done:
776 hci_req_unlock(hdev);
777 hci_dev_put(hdev);
778 return ret;
779}
780
781static int hci_dev_do_close(struct hci_dev *hdev)
782{
783 BT_DBG("%s %p", hdev->name, hdev);
784
28b75a89
AG
785 cancel_work_sync(&hdev->le_scan);
786
78c04c0b
VCG
787 cancel_delayed_work(&hdev->power_off);
788
1da177e4
LT
789 hci_req_cancel(hdev, ENODEV);
790 hci_req_lock(hdev);
791
792 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
b79f44c1 793 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
794 hci_req_unlock(hdev);
795 return 0;
796 }
797
3eff45ea
GP
798 /* Flush RX and TX works */
799 flush_work(&hdev->tx_work);
b78752cc 800 flush_work(&hdev->rx_work);
1da177e4 801
16ab91ab 802 if (hdev->discov_timeout > 0) {
e0f9309f 803 cancel_delayed_work(&hdev->discov_off);
16ab91ab 804 hdev->discov_timeout = 0;
5e5282bb 805 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
16ab91ab
JH
806 }
807
a8b2d5c2 808 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
7d78525d
JH
809 cancel_delayed_work(&hdev->service_cache);
810
7ba8b4be
AG
811 cancel_delayed_work_sync(&hdev->le_scan_disable);
812
09fd0de5 813 hci_dev_lock(hdev);
1da177e4
LT
814 inquiry_cache_flush(hdev);
815 hci_conn_hash_flush(hdev);
09fd0de5 816 hci_dev_unlock(hdev);
1da177e4
LT
817
818 hci_notify(hdev, HCI_DEV_DOWN);
819
820 if (hdev->flush)
821 hdev->flush(hdev);
822
823 /* Reset device */
824 skb_queue_purge(&hdev->cmd_q);
825 atomic_set(&hdev->cmd_cnt, 1);
8af59467 826 if (!test_bit(HCI_RAW, &hdev->flags) &&
a6c511c6 827 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
1da177e4 828 set_bit(HCI_INIT, &hdev->flags);
5f246e89 829 __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
1da177e4
LT
830 clear_bit(HCI_INIT, &hdev->flags);
831 }
832
c347b765
GP
833 /* flush cmd work */
834 flush_work(&hdev->cmd_work);
1da177e4
LT
835
836 /* Drop queues */
837 skb_queue_purge(&hdev->rx_q);
838 skb_queue_purge(&hdev->cmd_q);
839 skb_queue_purge(&hdev->raw_q);
840
841 /* Drop last sent command */
842 if (hdev->sent_cmd) {
b79f44c1 843 del_timer_sync(&hdev->cmd_timer);
1da177e4
LT
844 kfree_skb(hdev->sent_cmd);
845 hdev->sent_cmd = NULL;
846 }
847
848 /* After this point our queues are empty
849 * and no tasks are scheduled. */
850 hdev->close(hdev);
851
bb4b2a9a
AE
852 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
853 mgmt_valid_hdev(hdev)) {
8ee56540
MH
854 hci_dev_lock(hdev);
855 mgmt_powered(hdev, 0);
856 hci_dev_unlock(hdev);
857 }
5add6af8 858
1da177e4
LT
859 /* Clear flags */
860 hdev->flags = 0;
861
e59fda8d 862 memset(hdev->eir, 0, sizeof(hdev->eir));
09b3c3fb 863 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
e59fda8d 864
1da177e4
LT
865 hci_req_unlock(hdev);
866
867 hci_dev_put(hdev);
868 return 0;
869}
870
871int hci_dev_close(__u16 dev)
872{
873 struct hci_dev *hdev;
874 int err;
875
70f23020
AE
876 hdev = hci_dev_get(dev);
877 if (!hdev)
1da177e4 878 return -ENODEV;
8ee56540
MH
879
880 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
881 cancel_delayed_work(&hdev->power_off);
882
1da177e4 883 err = hci_dev_do_close(hdev);
8ee56540 884
1da177e4
LT
885 hci_dev_put(hdev);
886 return err;
887}
888
889int hci_dev_reset(__u16 dev)
890{
891 struct hci_dev *hdev;
892 int ret = 0;
893
70f23020
AE
894 hdev = hci_dev_get(dev);
895 if (!hdev)
1da177e4
LT
896 return -ENODEV;
897
898 hci_req_lock(hdev);
1da177e4
LT
899
900 if (!test_bit(HCI_UP, &hdev->flags))
901 goto done;
902
903 /* Drop queues */
904 skb_queue_purge(&hdev->rx_q);
905 skb_queue_purge(&hdev->cmd_q);
906
09fd0de5 907 hci_dev_lock(hdev);
1da177e4
LT
908 inquiry_cache_flush(hdev);
909 hci_conn_hash_flush(hdev);
09fd0de5 910 hci_dev_unlock(hdev);
1da177e4
LT
911
912 if (hdev->flush)
913 hdev->flush(hdev);
914
8e87d142 915 atomic_set(&hdev->cmd_cnt, 1);
6ed58ec5 916 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
1da177e4
LT
917
918 if (!test_bit(HCI_RAW, &hdev->flags))
5f246e89 919 ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
1da177e4
LT
920
921done:
1da177e4
LT
922 hci_req_unlock(hdev);
923 hci_dev_put(hdev);
924 return ret;
925}
926
927int hci_dev_reset_stat(__u16 dev)
928{
929 struct hci_dev *hdev;
930 int ret = 0;
931
70f23020
AE
932 hdev = hci_dev_get(dev);
933 if (!hdev)
1da177e4
LT
934 return -ENODEV;
935
936 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
937
938 hci_dev_put(hdev);
939
940 return ret;
941}
942
943int hci_dev_cmd(unsigned int cmd, void __user *arg)
944{
945 struct hci_dev *hdev;
946 struct hci_dev_req dr;
947 int err = 0;
948
949 if (copy_from_user(&dr, arg, sizeof(dr)))
950 return -EFAULT;
951
70f23020
AE
952 hdev = hci_dev_get(dr.dev_id);
953 if (!hdev)
1da177e4
LT
954 return -ENODEV;
955
956 switch (cmd) {
957 case HCISETAUTH:
04837f64 958 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
5f246e89 959 HCI_INIT_TIMEOUT);
1da177e4
LT
960 break;
961
962 case HCISETENCRYPT:
963 if (!lmp_encrypt_capable(hdev)) {
964 err = -EOPNOTSUPP;
965 break;
966 }
967
968 if (!test_bit(HCI_AUTH, &hdev->flags)) {
969 /* Auth must be enabled first */
04837f64 970 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
5f246e89 971 HCI_INIT_TIMEOUT);
1da177e4
LT
972 if (err)
973 break;
974 }
975
04837f64 976 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
5f246e89 977 HCI_INIT_TIMEOUT);
1da177e4
LT
978 break;
979
980 case HCISETSCAN:
04837f64 981 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
5f246e89 982 HCI_INIT_TIMEOUT);
1da177e4
LT
983 break;
984
1da177e4 985 case HCISETLINKPOL:
e4e8e37c 986 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
5f246e89 987 HCI_INIT_TIMEOUT);
1da177e4
LT
988 break;
989
990 case HCISETLINKMODE:
e4e8e37c
MH
991 hdev->link_mode = ((__u16) dr.dev_opt) &
992 (HCI_LM_MASTER | HCI_LM_ACCEPT);
993 break;
994
995 case HCISETPTYPE:
996 hdev->pkt_type = (__u16) dr.dev_opt;
1da177e4
LT
997 break;
998
999 case HCISETACLMTU:
e4e8e37c
MH
1000 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1001 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1002 break;
1003
1004 case HCISETSCOMTU:
e4e8e37c
MH
1005 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1006 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
1da177e4
LT
1007 break;
1008
1009 default:
1010 err = -EINVAL;
1011 break;
1012 }
e4e8e37c 1013
1da177e4
LT
1014 hci_dev_put(hdev);
1015 return err;
1016}
1017
1018int hci_get_dev_list(void __user *arg)
1019{
8035ded4 1020 struct hci_dev *hdev;
1da177e4
LT
1021 struct hci_dev_list_req *dl;
1022 struct hci_dev_req *dr;
1da177e4
LT
1023 int n = 0, size, err;
1024 __u16 dev_num;
1025
1026 if (get_user(dev_num, (__u16 __user *) arg))
1027 return -EFAULT;
1028
1029 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1030 return -EINVAL;
1031
1032 size = sizeof(*dl) + dev_num * sizeof(*dr);
1033
70f23020
AE
1034 dl = kzalloc(size, GFP_KERNEL);
1035 if (!dl)
1da177e4
LT
1036 return -ENOMEM;
1037
1038 dr = dl->dev_req;
1039
f20d09d5 1040 read_lock(&hci_dev_list_lock);
8035ded4 1041 list_for_each_entry(hdev, &hci_dev_list, list) {
a8b2d5c2 1042 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
e0f9309f 1043 cancel_delayed_work(&hdev->power_off);
c542a06c 1044
a8b2d5c2
JH
1045 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1046 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1047
1da177e4
LT
1048 (dr + n)->dev_id = hdev->id;
1049 (dr + n)->dev_opt = hdev->flags;
c542a06c 1050
1da177e4
LT
1051 if (++n >= dev_num)
1052 break;
1053 }
f20d09d5 1054 read_unlock(&hci_dev_list_lock);
1da177e4
LT
1055
1056 dl->dev_num = n;
1057 size = sizeof(*dl) + n * sizeof(*dr);
1058
1059 err = copy_to_user(arg, dl, size);
1060 kfree(dl);
1061
1062 return err ? -EFAULT : 0;
1063}
1064
1065int hci_get_dev_info(void __user *arg)
1066{
1067 struct hci_dev *hdev;
1068 struct hci_dev_info di;
1069 int err = 0;
1070
1071 if (copy_from_user(&di, arg, sizeof(di)))
1072 return -EFAULT;
1073
70f23020
AE
1074 hdev = hci_dev_get(di.dev_id);
1075 if (!hdev)
1da177e4
LT
1076 return -ENODEV;
1077
a8b2d5c2 1078 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
3243553f 1079 cancel_delayed_work_sync(&hdev->power_off);
ab81cbf9 1080
a8b2d5c2
JH
1081 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1082 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
c542a06c 1083
1da177e4
LT
1084 strcpy(di.name, hdev->name);
1085 di.bdaddr = hdev->bdaddr;
943da25d 1086 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1da177e4
LT
1087 di.flags = hdev->flags;
1088 di.pkt_type = hdev->pkt_type;
572c7f84
JH
1089 if (lmp_bredr_capable(hdev)) {
1090 di.acl_mtu = hdev->acl_mtu;
1091 di.acl_pkts = hdev->acl_pkts;
1092 di.sco_mtu = hdev->sco_mtu;
1093 di.sco_pkts = hdev->sco_pkts;
1094 } else {
1095 di.acl_mtu = hdev->le_mtu;
1096 di.acl_pkts = hdev->le_pkts;
1097 di.sco_mtu = 0;
1098 di.sco_pkts = 0;
1099 }
1da177e4
LT
1100 di.link_policy = hdev->link_policy;
1101 di.link_mode = hdev->link_mode;
1102
1103 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1104 memcpy(&di.features, &hdev->features, sizeof(di.features));
1105
1106 if (copy_to_user(arg, &di, sizeof(di)))
1107 err = -EFAULT;
1108
1109 hci_dev_put(hdev);
1110
1111 return err;
1112}
1113
1114/* ---- Interface to HCI drivers ---- */
1115
611b30f7
MH
1116static int hci_rfkill_set_block(void *data, bool blocked)
1117{
1118 struct hci_dev *hdev = data;
1119
1120 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1121
1122 if (!blocked)
1123 return 0;
1124
1125 hci_dev_do_close(hdev);
1126
1127 return 0;
1128}
1129
1130static const struct rfkill_ops hci_rfkill_ops = {
1131 .set_block = hci_rfkill_set_block,
1132};
1133
ab81cbf9
JH
1134static void hci_power_on(struct work_struct *work)
1135{
1136 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1137
1138 BT_DBG("%s", hdev->name);
1139
1140 if (hci_dev_open(hdev->id) < 0)
1141 return;
1142
a8b2d5c2 1143 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
9345d40c 1144 schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);
ab81cbf9 1145
a8b2d5c2 1146 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
744cf19e 1147 mgmt_index_added(hdev);
ab81cbf9
JH
1148}
1149
1150static void hci_power_off(struct work_struct *work)
1151{
3243553f 1152 struct hci_dev *hdev = container_of(work, struct hci_dev,
a8c5fb1a 1153 power_off.work);
ab81cbf9
JH
1154
1155 BT_DBG("%s", hdev->name);
1156
8ee56540 1157 hci_dev_do_close(hdev);
ab81cbf9
JH
1158}
1159
16ab91ab
JH
/* Delayed work ending the discoverable period: inquiry scan is turned
 * off again (SCAN_PAGE keeps page scan enabled so the device stays
 * connectable) and the stored discoverable timeout is reset.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1177
2aeb9a1a
JH
1178int hci_uuids_clear(struct hci_dev *hdev)
1179{
1180 struct list_head *p, *n;
1181
1182 list_for_each_safe(p, n, &hdev->uuids) {
1183 struct bt_uuid *uuid;
1184
1185 uuid = list_entry(p, struct bt_uuid, list);
1186
1187 list_del(p);
1188 kfree(uuid);
1189 }
1190
1191 return 0;
1192}
1193
55ed8ca1
JH
1194int hci_link_keys_clear(struct hci_dev *hdev)
1195{
1196 struct list_head *p, *n;
1197
1198 list_for_each_safe(p, n, &hdev->link_keys) {
1199 struct link_key *key;
1200
1201 key = list_entry(p, struct link_key, list);
1202
1203 list_del(p);
1204 kfree(key);
1205 }
1206
1207 return 0;
1208}
1209
b899efaf
VCG
/* Free all SMP long term keys stored for @hdev. Always returns 0.
 * Called under hdev->lock (see hci_unregister_dev).
 */
int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
1221
55ed8ca1
JH
1222struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1223{
8035ded4 1224 struct link_key *k;
55ed8ca1 1225
8035ded4 1226 list_for_each_entry(k, &hdev->link_keys, list)
55ed8ca1
JH
1227 if (bacmp(bdaddr, &k->bdaddr) == 0)
1228 return k;
55ed8ca1
JH
1229
1230 return NULL;
1231}
1232
745c0ce3 1233static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
a8c5fb1a 1234 u8 key_type, u8 old_key_type)
d25e28ab
JH
1235{
1236 /* Legacy key */
1237 if (key_type < 0x03)
745c0ce3 1238 return true;
d25e28ab
JH
1239
1240 /* Debug keys are insecure so don't store them persistently */
1241 if (key_type == HCI_LK_DEBUG_COMBINATION)
745c0ce3 1242 return false;
d25e28ab
JH
1243
1244 /* Changed combination key and there's no previous one */
1245 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
745c0ce3 1246 return false;
d25e28ab
JH
1247
1248 /* Security mode 3 case */
1249 if (!conn)
745c0ce3 1250 return true;
d25e28ab
JH
1251
1252 /* Neither local nor remote side had no-bonding as requirement */
1253 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
745c0ce3 1254 return true;
d25e28ab
JH
1255
1256 /* Local side had dedicated bonding as requirement */
1257 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
745c0ce3 1258 return true;
d25e28ab
JH
1259
1260 /* Remote side had dedicated bonding as requirement */
1261 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
745c0ce3 1262 return true;
d25e28ab
JH
1263
1264 /* If none of the above criteria match, then don't store the key
1265 * persistently */
745c0ce3 1266 return false;
d25e28ab
JH
1267}
1268
c9839a11 1269struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
75d262c2 1270{
c9839a11 1271 struct smp_ltk *k;
75d262c2 1272
c9839a11
VCG
1273 list_for_each_entry(k, &hdev->long_term_keys, list) {
1274 if (k->ediv != ediv ||
a8c5fb1a 1275 memcmp(rand, k->rand, sizeof(k->rand)))
75d262c2
VCG
1276 continue;
1277
c9839a11 1278 return k;
75d262c2
VCG
1279 }
1280
1281 return NULL;
1282}
75d262c2 1283
c9839a11 1284struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
04124681 1285 u8 addr_type)
75d262c2 1286{
c9839a11 1287 struct smp_ltk *k;
75d262c2 1288
c9839a11
VCG
1289 list_for_each_entry(k, &hdev->long_term_keys, list)
1290 if (addr_type == k->bdaddr_type &&
a8c5fb1a 1291 bacmp(bdaddr, &k->bdaddr) == 0)
75d262c2
VCG
1292 return k;
1293
1294 return NULL;
1295}
75d262c2 1296
d25e28ab 1297int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
04124681 1298 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
55ed8ca1
JH
1299{
1300 struct link_key *key, *old_key;
745c0ce3
VA
1301 u8 old_key_type;
1302 bool persistent;
55ed8ca1
JH
1303
1304 old_key = hci_find_link_key(hdev, bdaddr);
1305 if (old_key) {
1306 old_key_type = old_key->type;
1307 key = old_key;
1308 } else {
12adcf3a 1309 old_key_type = conn ? conn->key_type : 0xff;
55ed8ca1
JH
1310 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1311 if (!key)
1312 return -ENOMEM;
1313 list_add(&key->list, &hdev->link_keys);
1314 }
1315
6ed93dc6 1316 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
55ed8ca1 1317
d25e28ab
JH
1318 /* Some buggy controller combinations generate a changed
1319 * combination key for legacy pairing even when there's no
1320 * previous key */
1321 if (type == HCI_LK_CHANGED_COMBINATION &&
a8c5fb1a 1322 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
d25e28ab 1323 type = HCI_LK_COMBINATION;
655fe6ec
JH
1324 if (conn)
1325 conn->key_type = type;
1326 }
d25e28ab 1327
55ed8ca1 1328 bacpy(&key->bdaddr, bdaddr);
9b3b4460 1329 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
55ed8ca1
JH
1330 key->pin_len = pin_len;
1331
b6020ba0 1332 if (type == HCI_LK_CHANGED_COMBINATION)
55ed8ca1 1333 key->type = old_key_type;
4748fed2
JH
1334 else
1335 key->type = type;
1336
4df378a1
JH
1337 if (!new_key)
1338 return 0;
1339
1340 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1341
744cf19e 1342 mgmt_new_link_key(hdev, key, persistent);
4df378a1 1343
6ec5bcad
VA
1344 if (conn)
1345 conn->flush_key = !persistent;
55ed8ca1
JH
1346
1347 return 0;
1348}
1349
c9839a11 1350int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
9a006657 1351 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
04124681 1352 ediv, u8 rand[8])
75d262c2 1353{
c9839a11 1354 struct smp_ltk *key, *old_key;
75d262c2 1355
c9839a11
VCG
1356 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1357 return 0;
75d262c2 1358
c9839a11
VCG
1359 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1360 if (old_key)
75d262c2 1361 key = old_key;
c9839a11
VCG
1362 else {
1363 key = kzalloc(sizeof(*key), GFP_ATOMIC);
75d262c2
VCG
1364 if (!key)
1365 return -ENOMEM;
c9839a11 1366 list_add(&key->list, &hdev->long_term_keys);
75d262c2
VCG
1367 }
1368
75d262c2 1369 bacpy(&key->bdaddr, bdaddr);
c9839a11
VCG
1370 key->bdaddr_type = addr_type;
1371 memcpy(key->val, tk, sizeof(key->val));
1372 key->authenticated = authenticated;
1373 key->ediv = ediv;
1374 key->enc_size = enc_size;
1375 key->type = type;
1376 memcpy(key->rand, rand, sizeof(key->rand));
75d262c2 1377
c9839a11
VCG
1378 if (!new_key)
1379 return 0;
75d262c2 1380
261cc5aa
VCG
1381 if (type & HCI_SMP_LTK)
1382 mgmt_new_ltk(hdev, key, 1);
1383
75d262c2
VCG
1384 return 0;
1385}
1386
55ed8ca1
JH
1387int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1388{
1389 struct link_key *key;
1390
1391 key = hci_find_link_key(hdev, bdaddr);
1392 if (!key)
1393 return -ENOENT;
1394
6ed93dc6 1395 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
55ed8ca1
JH
1396
1397 list_del(&key->list);
1398 kfree(key);
1399
1400 return 0;
1401}
1402
b899efaf
VCG
1403int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1404{
1405 struct smp_ltk *k, *tmp;
1406
1407 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1408 if (bacmp(bdaddr, &k->bdaddr))
1409 continue;
1410
6ed93dc6 1411 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
b899efaf
VCG
1412
1413 list_del(&k->list);
1414 kfree(k);
1415 }
1416
1417 return 0;
1418}
1419
6bd32326 1420/* HCI command timer function */
bda4f23a 1421static void hci_cmd_timeout(unsigned long arg)
6bd32326
VT
1422{
1423 struct hci_dev *hdev = (void *) arg;
1424
bda4f23a
AE
1425 if (hdev->sent_cmd) {
1426 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1427 u16 opcode = __le16_to_cpu(sent->opcode);
1428
1429 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1430 } else {
1431 BT_ERR("%s command tx timeout", hdev->name);
1432 }
1433
6bd32326 1434 atomic_set(&hdev->cmd_cnt, 1);
c347b765 1435 queue_work(hdev->workqueue, &hdev->cmd_work);
6bd32326
VT
1436}
1437
2763eda6 1438struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
04124681 1439 bdaddr_t *bdaddr)
2763eda6
SJ
1440{
1441 struct oob_data *data;
1442
1443 list_for_each_entry(data, &hdev->remote_oob_data, list)
1444 if (bacmp(bdaddr, &data->bdaddr) == 0)
1445 return data;
1446
1447 return NULL;
1448}
1449
1450int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1451{
1452 struct oob_data *data;
1453
1454 data = hci_find_remote_oob_data(hdev, bdaddr);
1455 if (!data)
1456 return -ENOENT;
1457
6ed93dc6 1458 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
2763eda6
SJ
1459
1460 list_del(&data->list);
1461 kfree(data);
1462
1463 return 0;
1464}
1465
1466int hci_remote_oob_data_clear(struct hci_dev *hdev)
1467{
1468 struct oob_data *data, *n;
1469
1470 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1471 list_del(&data->list);
1472 kfree(data);
1473 }
1474
1475 return 0;
1476}
1477
1478int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
04124681 1479 u8 *randomizer)
2763eda6
SJ
1480{
1481 struct oob_data *data;
1482
1483 data = hci_find_remote_oob_data(hdev, bdaddr);
1484
1485 if (!data) {
1486 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1487 if (!data)
1488 return -ENOMEM;
1489
1490 bacpy(&data->bdaddr, bdaddr);
1491 list_add(&data->list, &hdev->remote_oob_data);
1492 }
1493
1494 memcpy(data->hash, hash, sizeof(data->hash));
1495 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1496
6ed93dc6 1497 BT_DBG("%s for %pMR", hdev->name, bdaddr);
2763eda6
SJ
1498
1499 return 0;
1500}
1501
04124681 1502struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
b2a66aad 1503{
8035ded4 1504 struct bdaddr_list *b;
b2a66aad 1505
8035ded4 1506 list_for_each_entry(b, &hdev->blacklist, list)
b2a66aad
AJ
1507 if (bacmp(bdaddr, &b->bdaddr) == 0)
1508 return b;
b2a66aad
AJ
1509
1510 return NULL;
1511}
1512
1513int hci_blacklist_clear(struct hci_dev *hdev)
1514{
1515 struct list_head *p, *n;
1516
1517 list_for_each_safe(p, n, &hdev->blacklist) {
1518 struct bdaddr_list *b;
1519
1520 b = list_entry(p, struct bdaddr_list, list);
1521
1522 list_del(p);
1523 kfree(b);
1524 }
1525
1526 return 0;
1527}
1528
88c1fe4b 1529int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1530{
1531 struct bdaddr_list *entry;
b2a66aad
AJ
1532
1533 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1534 return -EBADF;
1535
5e762444
AJ
1536 if (hci_blacklist_lookup(hdev, bdaddr))
1537 return -EEXIST;
b2a66aad
AJ
1538
1539 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
5e762444
AJ
1540 if (!entry)
1541 return -ENOMEM;
b2a66aad
AJ
1542
1543 bacpy(&entry->bdaddr, bdaddr);
1544
1545 list_add(&entry->list, &hdev->blacklist);
1546
88c1fe4b 1547 return mgmt_device_blocked(hdev, bdaddr, type);
b2a66aad
AJ
1548}
1549
88c1fe4b 1550int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
b2a66aad
AJ
1551{
1552 struct bdaddr_list *entry;
b2a66aad 1553
1ec918ce 1554 if (bacmp(bdaddr, BDADDR_ANY) == 0)
5e762444 1555 return hci_blacklist_clear(hdev);
b2a66aad
AJ
1556
1557 entry = hci_blacklist_lookup(hdev, bdaddr);
1ec918ce 1558 if (!entry)
5e762444 1559 return -ENOENT;
b2a66aad
AJ
1560
1561 list_del(&entry->list);
1562 kfree(entry);
1563
88c1fe4b 1564 return mgmt_device_unblocked(hdev, bdaddr, type);
b2a66aad
AJ
1565}
1566
7ba8b4be
AG
/* __hci_request callback: program the LE scan parameters carried in
 * @opt (a struct le_scan_params pointer).
 */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1579
/* __hci_request callback: enable LE scanning with duplicate filtering
 * turned on.
 */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;		/* start scanning */
	cp.filter_dup = 1;	/* controller filters duplicate reports */

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1590
/* Synchronously start an LE scan: set parameters, enable scanning and
 * arm the delayed work that stops the scan after @timeout ms.
 * Returns -EINPROGRESS when a scan is already running, or the
 * __hci_request() error. Sleeps; must not be called in atomic context.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);	/* per-request HCI timeout */
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	/* Serialize against other synchronous HCI requests */
	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Auto-stop the scan once the requested duration elapses */
	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1624
7dbfac1d
AG
/* Abort a running LE scan. Returns -EALREADY when no scan is active.
 * The disable command is only sent when the pending auto-disable work
 * could still be cancelled (otherwise that work disables the scan).
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1642
7ba8b4be
AG
/* Delayed work that stops an LE scan: the all-zero command parameters
 * mean enable = 0.
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1655
28b75a89
AG
/* Work item running the (sleeping) LE scan sequence with the
 * parameters stashed in hdev->le_scan_params by hci_le_scan().
 */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}
1666
/* Kick off an LE scan asynchronously: stash the parameters on hdev and
 * queue le_scan_work on the long-running system workqueue. Returns
 * -ENOTSUPP in LE peripheral mode and -EINPROGRESS when a scan work
 * item is already queued or running.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	/* system_long_wq: the scan request sleeps on HCI completions */
	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1689
9be0dab7
DH
/* Alloc HCI device: allocate a zeroed hci_dev and initialize all the
 * locks, lists, work items, queues and default controller parameters.
 * Returns NULL on allocation failure. Freed via hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline packet types / modes until the controller reports its
	 * real capabilities */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Watchdog for commands the controller never acknowledges */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1745
/* Free HCI device: drop the last reference; the actual kfree happens
 * in the device release callback.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1755
1da177e4
LT
/* Register HCI device: allocate an index, hook the device into the
 * global list, create its workqueue, sysfs entries and rfkill switch,
 * then schedule the initial power-on. Returns the new index or a
 * negative errno; on failure the device is fully unwound.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high-priority queue for this device's work */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is best-effort: registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1834
/* Unregister HCI device: unhook from the global list, close the
 * device, tear down mgmt/sysfs/rfkill/workqueue state, free all stored
 * keys and data, then drop the registration reference and release the
 * index. The teardown order mirrors hci_register_dev().
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Prevents concurrent re-opening while we tear things down */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Save the index: hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for devices that completed setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1890
/* Suspend HCI device: just broadcast the suspend notification. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1898
/* Resume HCI device: just broadcast the resume notification. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1906
76bca880
MH
/* Receive frame from HCI drivers: tag and timestamp the skb, then hand
 * it to the RX work. Consumes the skb in all cases; returns -ENXIO
 * when the device is neither up nor initializing.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1929
33e882a5 1930static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
a8c5fb1a 1931 int count, __u8 index)
33e882a5
SS
1932{
1933 int len = 0;
1934 int hlen = 0;
1935 int remain = count;
1936 struct sk_buff *skb;
1937 struct bt_skb_cb *scb;
1938
1939 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
a8c5fb1a 1940 index >= NUM_REASSEMBLY)
33e882a5
SS
1941 return -EILSEQ;
1942
1943 skb = hdev->reassembly[index];
1944
1945 if (!skb) {
1946 switch (type) {
1947 case HCI_ACLDATA_PKT:
1948 len = HCI_MAX_FRAME_SIZE;
1949 hlen = HCI_ACL_HDR_SIZE;
1950 break;
1951 case HCI_EVENT_PKT:
1952 len = HCI_MAX_EVENT_SIZE;
1953 hlen = HCI_EVENT_HDR_SIZE;
1954 break;
1955 case HCI_SCODATA_PKT:
1956 len = HCI_MAX_SCO_SIZE;
1957 hlen = HCI_SCO_HDR_SIZE;
1958 break;
1959 }
1960
1e429f38 1961 skb = bt_skb_alloc(len, GFP_ATOMIC);
33e882a5
SS
1962 if (!skb)
1963 return -ENOMEM;
1964
1965 scb = (void *) skb->cb;
1966 scb->expect = hlen;
1967 scb->pkt_type = type;
1968
1969 skb->dev = (void *) hdev;
1970 hdev->reassembly[index] = skb;
1971 }
1972
1973 while (count) {
1974 scb = (void *) skb->cb;
89bb46d0 1975 len = min_t(uint, scb->expect, count);
33e882a5
SS
1976
1977 memcpy(skb_put(skb, len), data, len);
1978
1979 count -= len;
1980 data += len;
1981 scb->expect -= len;
1982 remain = count;
1983
1984 switch (type) {
1985 case HCI_EVENT_PKT:
1986 if (skb->len == HCI_EVENT_HDR_SIZE) {
1987 struct hci_event_hdr *h = hci_event_hdr(skb);
1988 scb->expect = h->plen;
1989
1990 if (skb_tailroom(skb) < scb->expect) {
1991 kfree_skb(skb);
1992 hdev->reassembly[index] = NULL;
1993 return -ENOMEM;
1994 }
1995 }
1996 break;
1997
1998 case HCI_ACLDATA_PKT:
1999 if (skb->len == HCI_ACL_HDR_SIZE) {
2000 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2001 scb->expect = __le16_to_cpu(h->dlen);
2002
2003 if (skb_tailroom(skb) < scb->expect) {
2004 kfree_skb(skb);
2005 hdev->reassembly[index] = NULL;
2006 return -ENOMEM;
2007 }
2008 }
2009 break;
2010
2011 case HCI_SCODATA_PKT:
2012 if (skb->len == HCI_SCO_HDR_SIZE) {
2013 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2014 scb->expect = h->dlen;
2015
2016 if (skb_tailroom(skb) < scb->expect) {
2017 kfree_skb(skb);
2018 hdev->reassembly[index] = NULL;
2019 return -ENOMEM;
2020 }
2021 }
2022 break;
2023 }
2024
2025 if (scb->expect == 0) {
2026 /* Complete frame */
2027
2028 bt_cb(skb)->pkt_type = type;
2029 hci_recv_frame(skb);
2030
2031 hdev->reassembly[index] = NULL;
2032 return remain;
2033 }
2034 }
2035
2036 return remain;
2037}
2038
ef222013
MH
/* Feed @count bytes of a known packet @type into the per-type
 * reassembly slot (slot index is type - 1), looping until all input is
 * consumed. Returns 0 when done or a negative errno from
 * hci_reassembly().
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Skip the bytes consumed by the completed packet */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
2058
99811510
SS
/* Reassembly slot used for byte-stream transports (e.g. UART), where
 * the packet type indicator is in-band as the first byte.
 */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream into reassembly: at each packet boundary the
 * first byte selects the packet type, the rest is handed to
 * hci_reassembly(). Returns 0 when done or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2093
1da177e4
LT
2094/* ---- Interface to upper protocols ---- */
2095
1da177e4
LT
/* Register an upper-protocol callback set on the global cb list.
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2107
/* Remove a previously registered callback set from the global cb list.
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2119
/* Hand a fully built frame to the driver: timestamp it, mirror it to
 * the monitor (and, in promiscuous mode, raw sockets), then call the
 * driver's send hook. The driver consumes the skb.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2147
/* Send HCI command: build a command packet for @opcode with @plen
 * bytes of @param and queue it on cmd_q for the command work to send.
 * Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command sent during controller init */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1da177e4
LT
2183
2184/* Get data from the previously sent command */
a9de9248 2185void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1da177e4
LT
2186{
2187 struct hci_command_hdr *hdr;
2188
2189 if (!hdev->sent_cmd)
2190 return NULL;
2191
2192 hdr = (void *) hdev->sent_cmd->data;
2193
a9de9248 2194 if (hdr->opcode != cpu_to_le16(opcode))
1da177e4
LT
2195 return NULL;
2196
f0e09510 2197 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
1da177e4
LT
2198
2199 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2200}
2201
/* Send ACL data */
/* Prepend an ACL header (handle + packet-boundary/broadcast @flags and
 * the current payload length) to @skb.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2214
ee22be7e 2215static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
a8c5fb1a 2216 struct sk_buff *skb, __u16 flags)
1da177e4 2217{
ee22be7e 2218 struct hci_conn *conn = chan->conn;
1da177e4
LT
2219 struct hci_dev *hdev = conn->hdev;
2220 struct sk_buff *list;
2221
087bfd99
GP
2222 skb->len = skb_headlen(skb);
2223 skb->data_len = 0;
2224
2225 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
204a6e54
AE
2226
2227 switch (hdev->dev_type) {
2228 case HCI_BREDR:
2229 hci_add_acl_hdr(skb, conn->handle, flags);
2230 break;
2231 case HCI_AMP:
2232 hci_add_acl_hdr(skb, chan->handle, flags);
2233 break;
2234 default:
2235 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2236 return;
2237 }
087bfd99 2238
70f23020
AE
2239 list = skb_shinfo(skb)->frag_list;
2240 if (!list) {
1da177e4
LT
2241 /* Non fragmented */
2242 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2243
73d80deb 2244 skb_queue_tail(queue, skb);
1da177e4
LT
2245 } else {
2246 /* Fragmented */
2247 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2248
2249 skb_shinfo(skb)->frag_list = NULL;
2250
2251 /* Queue all fragments atomically */
af3e6359 2252 spin_lock(&queue->lock);
1da177e4 2253
73d80deb 2254 __skb_queue_tail(queue, skb);
e702112f
AE
2255
2256 flags &= ~ACL_START;
2257 flags |= ACL_CONT;
1da177e4
LT
2258 do {
2259 skb = list; list = list->next;
8e87d142 2260
1da177e4 2261 skb->dev = (void *) hdev;
0d48d939 2262 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
e702112f 2263 hci_add_acl_hdr(skb, conn->handle, flags);
1da177e4
LT
2264
2265 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2266
73d80deb 2267 __skb_queue_tail(queue, skb);
1da177e4
LT
2268 } while (list);
2269
af3e6359 2270 spin_unlock(&queue->lock);
1da177e4 2271 }
73d80deb
LAD
2272}
2273
/* Queue an ACL frame on @chan's data queue and kick the TX work. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2286
/* Send SCO data */
/* Prepend the SCO header, queue the frame on the connection's data
 * queue and kick the TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
1da177e4
LT
2308
2309/* ---- HCI TX task (outgoing data) ---- */
2310
2311/* HCI Connection scheduler */
6039aa73
GP
/* Pick the connection of @type with queued data and the lowest 'sent'
 * count (i.e. the least-recently-served one), and compute its send
 * quota in *@quote as an even share of the controller's free buffer
 * credits for that link type.
 *
 * Returns the chosen connection, or NULL (with *quote = 0) when no
 * eligible connection has pending data.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Only fully established or configuring links may send */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen - stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the credit pool matching the link type; LE may
		 * share the ACL pool when the controller reports no
		 * dedicated LE buffers (le_mtu == 0). */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share, but always allow at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2371
6039aa73 2372static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1da177e4
LT
2373{
2374 struct hci_conn_hash *h = &hdev->conn_hash;
8035ded4 2375 struct hci_conn *c;
1da177e4 2376
bae1f5d9 2377 BT_ERR("%s link tx timeout", hdev->name);
1da177e4 2378
bf4c6325
GP
2379 rcu_read_lock();
2380
1da177e4 2381 /* Kill stalled connections */
bf4c6325 2382 list_for_each_entry_rcu(c, &h->list, list) {
bae1f5d9 2383 if (c->type == type && c->sent) {
6ed93dc6
AE
2384 BT_ERR("%s killing stalled connection %pMR",
2385 hdev->name, &c->dst);
7490c6c2 2386 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
1da177e4
LT
2387 }
2388 }
bf4c6325
GP
2389
2390 rcu_read_unlock();
1da177e4
LT
2391}
2392
6039aa73
GP
/* Channel-level scheduler: among all channels of connections of @type,
 * pick one whose head packet has the highest priority; ties are broken
 * in favour of the connection with the lowest 'sent' count.  *@quote
 * receives that connection's fair share of the free buffer credits.
 *
 * Returns the chosen channel or NULL when nothing is queued.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head packet's priority matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Found a strictly higher priority: restart the
			 * fairness bookkeeping at this new level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Least-served connection wins within a level */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Select the credit pool for the winning channel's link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share of credits, minimum one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2474
02b20f0b
LAD
/* Anti-starvation pass, run after a scheduling round that consumed
 * credits: channels that transmitted this round have their 'sent'
 * counter reset; channels that did NOT get to send have their head
 * packet promoted to HCI_PRIO_MAX - 1 so they win the next round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was served - just reset its counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Starved channel: boost its head packet unless
			 * it is already at the promotion priority. */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
2524
b71d385a
AE
/* Number of controller data blocks consumed by @skb under block-based
 * flow control; the ACL header itself does not count against blocks.
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
2530
6039aa73 2531static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
73d80deb 2532{
1da177e4
LT
2533 if (!test_bit(HCI_RAW, &hdev->flags)) {
2534 /* ACL tx timeout must be longer than maximum
2535 * link supervision timeout (40.9 seconds) */
63d2bc1b 2536 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
5f246e89 2537 HCI_ACL_TX_TIMEOUT))
bae1f5d9 2538 hci_link_tx_to(hdev, ACL_LINK);
1da177e4 2539 }
63d2bc1b 2540}
1da177e4 2541
6039aa73 2542static void hci_sched_acl_pkt(struct hci_dev *hdev)
63d2bc1b
AE
2543{
2544 unsigned int cnt = hdev->acl_cnt;
2545 struct hci_chan *chan;
2546 struct sk_buff *skb;
2547 int quote;
2548
2549 __check_timeout(hdev, cnt);
04837f64 2550
73d80deb 2551 while (hdev->acl_cnt &&
a8c5fb1a 2552 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
ec1cce24
LAD
2553 u32 priority = (skb_peek(&chan->data_q))->priority;
2554 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 2555 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2556 skb->len, skb->priority);
73d80deb 2557
ec1cce24
LAD
2558 /* Stop if priority has changed */
2559 if (skb->priority < priority)
2560 break;
2561
2562 skb = skb_dequeue(&chan->data_q);
2563
73d80deb 2564 hci_conn_enter_active_mode(chan->conn,
04124681 2565 bt_cb(skb)->force_active);
04837f64 2566
1da177e4
LT
2567 hci_send_frame(skb);
2568 hdev->acl_last_tx = jiffies;
2569
2570 hdev->acl_cnt--;
73d80deb
LAD
2571 chan->sent++;
2572 chan->conn->sent++;
1da177e4
LT
2573 }
2574 }
02b20f0b
LAD
2575
2576 if (cnt != hdev->acl_cnt)
2577 hci_prio_recalculate(hdev, ACL_LINK);
1da177e4
LT
2578}
2579
6039aa73 2580static void hci_sched_acl_blk(struct hci_dev *hdev)
b71d385a 2581{
63d2bc1b 2582 unsigned int cnt = hdev->block_cnt;
b71d385a
AE
2583 struct hci_chan *chan;
2584 struct sk_buff *skb;
2585 int quote;
bd1eb66b 2586 u8 type;
b71d385a 2587
63d2bc1b 2588 __check_timeout(hdev, cnt);
b71d385a 2589
bd1eb66b
AE
2590 BT_DBG("%s", hdev->name);
2591
2592 if (hdev->dev_type == HCI_AMP)
2593 type = AMP_LINK;
2594 else
2595 type = ACL_LINK;
2596
b71d385a 2597 while (hdev->block_cnt > 0 &&
bd1eb66b 2598 (chan = hci_chan_sent(hdev, type, &quote))) {
b71d385a
AE
2599 u32 priority = (skb_peek(&chan->data_q))->priority;
2600 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2601 int blocks;
2602
2603 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2604 skb->len, skb->priority);
b71d385a
AE
2605
2606 /* Stop if priority has changed */
2607 if (skb->priority < priority)
2608 break;
2609
2610 skb = skb_dequeue(&chan->data_q);
2611
2612 blocks = __get_blocks(hdev, skb);
2613 if (blocks > hdev->block_cnt)
2614 return;
2615
2616 hci_conn_enter_active_mode(chan->conn,
a8c5fb1a 2617 bt_cb(skb)->force_active);
b71d385a
AE
2618
2619 hci_send_frame(skb);
2620 hdev->acl_last_tx = jiffies;
2621
2622 hdev->block_cnt -= blocks;
2623 quote -= blocks;
2624
2625 chan->sent += blocks;
2626 chan->conn->sent += blocks;
2627 }
2628 }
2629
2630 if (cnt != hdev->block_cnt)
bd1eb66b 2631 hci_prio_recalculate(hdev, type);
b71d385a
AE
2632}
2633
6039aa73 2634static void hci_sched_acl(struct hci_dev *hdev)
b71d385a
AE
2635{
2636 BT_DBG("%s", hdev->name);
2637
bd1eb66b
AE
2638 /* No ACL link over BR/EDR controller */
2639 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2640 return;
2641
2642 /* No AMP link over AMP controller */
2643 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
b71d385a
AE
2644 return;
2645
2646 switch (hdev->flow_ctl_mode) {
2647 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2648 hci_sched_acl_pkt(hdev);
2649 break;
2650
2651 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2652 hci_sched_acl_blk(hdev);
2653 break;
2654 }
2655}
2656
1da177e4 2657/* Schedule SCO */
6039aa73 2658static void hci_sched_sco(struct hci_dev *hdev)
1da177e4
LT
2659{
2660 struct hci_conn *conn;
2661 struct sk_buff *skb;
2662 int quote;
2663
2664 BT_DBG("%s", hdev->name);
2665
52087a79
LAD
2666 if (!hci_conn_num(hdev, SCO_LINK))
2667 return;
2668
1da177e4
LT
2669 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2670 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2671 BT_DBG("skb %p len %d", skb, skb->len);
2672 hci_send_frame(skb);
2673
2674 conn->sent++;
2675 if (conn->sent == ~0)
2676 conn->sent = 0;
2677 }
2678 }
2679}
2680
6039aa73 2681static void hci_sched_esco(struct hci_dev *hdev)
b6a0dc82
MH
2682{
2683 struct hci_conn *conn;
2684 struct sk_buff *skb;
2685 int quote;
2686
2687 BT_DBG("%s", hdev->name);
2688
52087a79
LAD
2689 if (!hci_conn_num(hdev, ESCO_LINK))
2690 return;
2691
8fc9ced3
GP
2692 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2693 &quote))) {
b6a0dc82
MH
2694 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2695 BT_DBG("skb %p len %d", skb, skb->len);
2696 hci_send_frame(skb);
2697
2698 conn->sent++;
2699 if (conn->sent == ~0)
2700 conn->sent = 0;
2701 }
2702 }
2703}
2704
6039aa73 2705static void hci_sched_le(struct hci_dev *hdev)
6ed58ec5 2706{
73d80deb 2707 struct hci_chan *chan;
6ed58ec5 2708 struct sk_buff *skb;
02b20f0b 2709 int quote, cnt, tmp;
6ed58ec5
VT
2710
2711 BT_DBG("%s", hdev->name);
2712
52087a79
LAD
2713 if (!hci_conn_num(hdev, LE_LINK))
2714 return;
2715
6ed58ec5
VT
2716 if (!test_bit(HCI_RAW, &hdev->flags)) {
2717 /* LE tx timeout must be longer than maximum
2718 * link supervision timeout (40.9 seconds) */
bae1f5d9 2719 if (!hdev->le_cnt && hdev->le_pkts &&
a8c5fb1a 2720 time_after(jiffies, hdev->le_last_tx + HZ * 45))
bae1f5d9 2721 hci_link_tx_to(hdev, LE_LINK);
6ed58ec5
VT
2722 }
2723
2724 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
02b20f0b 2725 tmp = cnt;
73d80deb 2726 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
ec1cce24
LAD
2727 u32 priority = (skb_peek(&chan->data_q))->priority;
2728 while (quote-- && (skb = skb_peek(&chan->data_q))) {
73d80deb 2729 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
a8c5fb1a 2730 skb->len, skb->priority);
6ed58ec5 2731
ec1cce24
LAD
2732 /* Stop if priority has changed */
2733 if (skb->priority < priority)
2734 break;
2735
2736 skb = skb_dequeue(&chan->data_q);
2737
6ed58ec5
VT
2738 hci_send_frame(skb);
2739 hdev->le_last_tx = jiffies;
2740
2741 cnt--;
73d80deb
LAD
2742 chan->sent++;
2743 chan->conn->sent++;
6ed58ec5
VT
2744 }
2745 }
73d80deb 2746
6ed58ec5
VT
2747 if (hdev->le_pkts)
2748 hdev->le_cnt = cnt;
2749 else
2750 hdev->acl_cnt = cnt;
02b20f0b
LAD
2751
2752 if (cnt != tmp)
2753 hci_prio_recalculate(hdev, LE_LINK);
6ed58ec5
VT
2754}
2755
3eff45ea 2756static void hci_tx_work(struct work_struct *work)
1da177e4 2757{
3eff45ea 2758 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
1da177e4
LT
2759 struct sk_buff *skb;
2760
6ed58ec5 2761 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
a8c5fb1a 2762 hdev->sco_cnt, hdev->le_cnt);
1da177e4
LT
2763
2764 /* Schedule queues and send stuff to HCI driver */
2765
2766 hci_sched_acl(hdev);
2767
2768 hci_sched_sco(hdev);
2769
b6a0dc82
MH
2770 hci_sched_esco(hdev);
2771
6ed58ec5
VT
2772 hci_sched_le(hdev);
2773
1da177e4
LT
2774 /* Send next queued raw (unknown type) packet */
2775 while ((skb = skb_dequeue(&hdev->raw_q)))
2776 hci_send_frame(skb);
1da177e4
LT
2777}
2778
25985edc 2779/* ----- HCI RX task (incoming data processing) ----- */
1da177e4
LT
2780
2781/* ACL data packet */
6039aa73 2782static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
2783{
2784 struct hci_acl_hdr *hdr = (void *) skb->data;
2785 struct hci_conn *conn;
2786 __u16 handle, flags;
2787
2788 skb_pull(skb, HCI_ACL_HDR_SIZE);
2789
2790 handle = __le16_to_cpu(hdr->handle);
2791 flags = hci_flags(handle);
2792 handle = hci_handle(handle);
2793
f0e09510 2794 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
a8c5fb1a 2795 handle, flags);
1da177e4
LT
2796
2797 hdev->stat.acl_rx++;
2798
2799 hci_dev_lock(hdev);
2800 conn = hci_conn_hash_lookup_handle(hdev, handle);
2801 hci_dev_unlock(hdev);
8e87d142 2802
1da177e4 2803 if (conn) {
65983fc7 2804 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
04837f64 2805
671267bf
JH
2806 hci_dev_lock(hdev);
2807 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2808 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2809 mgmt_device_connected(hdev, &conn->dst, conn->type,
2810 conn->dst_type, 0, NULL, 0,
2811 conn->dev_class);
2812 hci_dev_unlock(hdev);
2813
1da177e4 2814 /* Send to upper protocol */
686ebf28
UF
2815 l2cap_recv_acldata(conn, skb, flags);
2816 return;
1da177e4 2817 } else {
8e87d142 2818 BT_ERR("%s ACL packet for unknown connection handle %d",
a8c5fb1a 2819 hdev->name, handle);
1da177e4
LT
2820 }
2821
2822 kfree_skb(skb);
2823}
2824
2825/* SCO data packet */
6039aa73 2826static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1da177e4
LT
2827{
2828 struct hci_sco_hdr *hdr = (void *) skb->data;
2829 struct hci_conn *conn;
2830 __u16 handle;
2831
2832 skb_pull(skb, HCI_SCO_HDR_SIZE);
2833
2834 handle = __le16_to_cpu(hdr->handle);
2835
f0e09510 2836 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
1da177e4
LT
2837
2838 hdev->stat.sco_rx++;
2839
2840 hci_dev_lock(hdev);
2841 conn = hci_conn_hash_lookup_handle(hdev, handle);
2842 hci_dev_unlock(hdev);
2843
2844 if (conn) {
1da177e4 2845 /* Send to upper protocol */
686ebf28
UF
2846 sco_recv_scodata(conn, skb);
2847 return;
1da177e4 2848 } else {
8e87d142 2849 BT_ERR("%s SCO packet for unknown connection handle %d",
a8c5fb1a 2850 hdev->name, handle);
1da177e4
LT
2851 }
2852
2853 kfree_skb(skb);
2854}
2855
b78752cc 2856static void hci_rx_work(struct work_struct *work)
1da177e4 2857{
b78752cc 2858 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
1da177e4
LT
2859 struct sk_buff *skb;
2860
2861 BT_DBG("%s", hdev->name);
2862
1da177e4 2863 while ((skb = skb_dequeue(&hdev->rx_q))) {
cd82e61c
MH
2864 /* Send copy to monitor */
2865 hci_send_to_monitor(hdev, skb);
2866
1da177e4
LT
2867 if (atomic_read(&hdev->promisc)) {
2868 /* Send copy to the sockets */
470fe1b5 2869 hci_send_to_sock(hdev, skb);
1da177e4
LT
2870 }
2871
2872 if (test_bit(HCI_RAW, &hdev->flags)) {
2873 kfree_skb(skb);
2874 continue;
2875 }
2876
2877 if (test_bit(HCI_INIT, &hdev->flags)) {
2878 /* Don't process data packets in this states. */
0d48d939 2879 switch (bt_cb(skb)->pkt_type) {
1da177e4
LT
2880 case HCI_ACLDATA_PKT:
2881 case HCI_SCODATA_PKT:
2882 kfree_skb(skb);
2883 continue;
3ff50b79 2884 }
1da177e4
LT
2885 }
2886
2887 /* Process frame */
0d48d939 2888 switch (bt_cb(skb)->pkt_type) {
1da177e4 2889 case HCI_EVENT_PKT:
b78752cc 2890 BT_DBG("%s Event packet", hdev->name);
1da177e4
LT
2891 hci_event_packet(hdev, skb);
2892 break;
2893
2894 case HCI_ACLDATA_PKT:
2895 BT_DBG("%s ACL data packet", hdev->name);
2896 hci_acldata_packet(hdev, skb);
2897 break;
2898
2899 case HCI_SCODATA_PKT:
2900 BT_DBG("%s SCO data packet", hdev->name);
2901 hci_scodata_packet(hdev, skb);
2902 break;
2903
2904 default:
2905 kfree_skb(skb);
2906 break;
2907 }
2908 }
1da177e4
LT
2909}
2910
c347b765 2911static void hci_cmd_work(struct work_struct *work)
1da177e4 2912{
c347b765 2913 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
1da177e4
LT
2914 struct sk_buff *skb;
2915
2104786b
AE
2916 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2917 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
1da177e4 2918
1da177e4 2919 /* Send queued commands */
5a08ecce
AE
2920 if (atomic_read(&hdev->cmd_cnt)) {
2921 skb = skb_dequeue(&hdev->cmd_q);
2922 if (!skb)
2923 return;
2924
7585b97a 2925 kfree_skb(hdev->sent_cmd);
1da177e4 2926
70f23020
AE
2927 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2928 if (hdev->sent_cmd) {
1da177e4
LT
2929 atomic_dec(&hdev->cmd_cnt);
2930 hci_send_frame(skb);
7bdb8a5c
SJ
2931 if (test_bit(HCI_RESET, &hdev->flags))
2932 del_timer(&hdev->cmd_timer);
2933 else
2934 mod_timer(&hdev->cmd_timer,
5f246e89 2935 jiffies + HCI_CMD_TIMEOUT);
1da177e4
LT
2936 } else {
2937 skb_queue_head(&hdev->cmd_q, skb);
c347b765 2938 queue_work(hdev->workqueue, &hdev->cmd_work);
1da177e4
LT
2939 }
2940 }
2941}
2519a1fc
AG
2942
/* Start a general inquiry of @length (units per the HCI Inquiry
 * command).  Flushes the inquiry cache first.
 *
 * Returns -EINPROGRESS if an inquiry is already running, otherwise
 * the result of sending the HCI Inquiry command.
 */
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	/* Start with an empty inquiry cache */
	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
023d5049
AG
2962
2963int hci_cancel_inquiry(struct hci_dev *hdev)
2964{
2965 BT_DBG("%s", hdev->name);
2966
2967 if (!test_bit(HCI_INQUIRY, &hdev->flags))
7537e5c3 2968 return -EALREADY;
023d5049
AG
2969
2970 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2971}
31f7956c
AG
2972
2973u8 bdaddr_to_le(u8 bdaddr_type)
2974{
2975 switch (bdaddr_type) {
2976 case BDADDR_LE_PUBLIC:
2977 return ADDR_LE_DEV_PUBLIC;
2978
2979 default:
2980 /* Fallback to LE Random address type */
2981 return ADDR_LE_DEV_RANDOM;
2982 }
2983}
This page took 1.159672 seconds and 5 git commands to generate.