Bluetooth: Implement a more complete adapter initialization sequence
net/bluetooth/hci_event.c
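For context, most of the Command Complete handlers below correspond to commands issued from the adapter initialization request in hci_core.c. A minimal, hypothetical sketch of how such a request might queue those commands is shown here (the helper name, the exact command set and the timeout value are assumptions, not the actual hci_core.c code):

/* Hypothetical init-request sketch; the real sequence lives in hci_core.c
 * and may differ.  Only commands whose Command Complete handlers appear
 * in this file are shown. */
static void example_init_req(struct hci_dev *hdev)
{
	__le16 ca_timeout = cpu_to_le16(0x7d00);	/* example value, ~20s */

	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, sizeof(ca_timeout),
							&ca_timeout);

	/* hci_cc_read_local_version() below then calls hci_setup(), which
	 * continues with Set Event Mask, Read Local Supported Commands,
	 * Write Simple Pairing Mode, Write Inquiry Mode and Read Inquiry
	 * Response TX Power. */
}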
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <linux/notifier.h>
39 #include <net/sock.h>
40
41 #include <asm/system.h>
42 #include <linux/uaccess.h>
43 #include <asm/unaligned.h>
44
45 #include <net/bluetooth/bluetooth.h>
46 #include <net/bluetooth/hci_core.h>
47
48 /* Handle HCI Event packets */
49
50 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
51 {
52 __u8 status = *((__u8 *) skb->data);
53
54 BT_DBG("%s status 0x%x", hdev->name, status);
55
56 if (status)
57 return;
58
59 clear_bit(HCI_INQUIRY, &hdev->flags);
60
61 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
62
63 hci_conn_check_pending(hdev);
64 }
65
66 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
67 {
68 __u8 status = *((__u8 *) skb->data);
69
70 BT_DBG("%s status 0x%x", hdev->name, status);
71
72 if (status)
73 return;
74
75 clear_bit(HCI_INQUIRY, &hdev->flags);
76
77 hci_conn_check_pending(hdev);
78 }
79
80 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
81 {
82 BT_DBG("%s", hdev->name);
83 }
84
85 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
86 {
87 struct hci_rp_role_discovery *rp = (void *) skb->data;
88 struct hci_conn *conn;
89
90 BT_DBG("%s status 0x%x", hdev->name, rp->status);
91
92 if (rp->status)
93 return;
94
95 hci_dev_lock(hdev);
96
97 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
98 if (conn) {
99 if (rp->role)
100 conn->link_mode &= ~HCI_LM_MASTER;
101 else
102 conn->link_mode |= HCI_LM_MASTER;
103 }
104
105 hci_dev_unlock(hdev);
106 }
107
108 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
109 {
110 struct hci_rp_read_link_policy *rp = (void *) skb->data;
111 struct hci_conn *conn;
112
113 BT_DBG("%s status 0x%x", hdev->name, rp->status);
114
115 if (rp->status)
116 return;
117
118 hci_dev_lock(hdev);
119
120 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
121 if (conn)
122 conn->link_policy = __le16_to_cpu(rp->policy);
123
124 hci_dev_unlock(hdev);
125 }
126
127 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
128 {
129 struct hci_rp_write_link_policy *rp = (void *) skb->data;
130 struct hci_conn *conn;
131 void *sent;
132
133 BT_DBG("%s status 0x%x", hdev->name, rp->status);
134
135 if (rp->status)
136 return;
137
138 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
139 if (!sent)
140 return;
141
142 hci_dev_lock(hdev);
143
144 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
145 if (conn)
146 conn->link_policy = get_unaligned_le16(sent + 2);
147
148 hci_dev_unlock(hdev);
149 }
150
151 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
152 {
153 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
154
155 BT_DBG("%s status 0x%x", hdev->name, rp->status);
156
157 if (rp->status)
158 return;
159
160 hdev->link_policy = __le16_to_cpu(rp->policy);
161 }
162
163 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
164 {
165 __u8 status = *((__u8 *) skb->data);
166 void *sent;
167
168 BT_DBG("%s status 0x%x", hdev->name, status);
169
170 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
171 if (!sent)
172 return;
173
174 if (!status)
175 hdev->link_policy = get_unaligned_le16(sent);
176
177 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
178 }
179
180 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
181 {
182 __u8 status = *((__u8 *) skb->data);
183
184 BT_DBG("%s status 0x%x", hdev->name, status);
185
186 hci_req_complete(hdev, HCI_OP_RESET, status);
187 }
188
189 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
190 {
191 __u8 status = *((__u8 *) skb->data);
192 void *sent;
193
194 BT_DBG("%s status 0x%x", hdev->name, status);
195
196 if (status)
197 return;
198
199 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
200 if (!sent)
201 return;
202
203 memcpy(hdev->dev_name, sent, 248);
204 }
205
206 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
207 {
208 struct hci_rp_read_local_name *rp = (void *) skb->data;
209
210 BT_DBG("%s status 0x%x", hdev->name, rp->status);
211
212 if (rp->status)
213 return;
214
215 memcpy(hdev->dev_name, rp->name, 248);
216 }
217
218 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
219 {
220 __u8 status = *((__u8 *) skb->data);
221 void *sent;
222
223 BT_DBG("%s status 0x%x", hdev->name, status);
224
225 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
226 if (!sent)
227 return;
228
229 if (!status) {
230 __u8 param = *((__u8 *) sent);
231
232 if (param == AUTH_ENABLED)
233 set_bit(HCI_AUTH, &hdev->flags);
234 else
235 clear_bit(HCI_AUTH, &hdev->flags);
236 }
237
238 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
239 }
240
241 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
242 {
243 __u8 status = *((__u8 *) skb->data);
244 void *sent;
245
246 BT_DBG("%s status 0x%x", hdev->name, status);
247
248 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
249 if (!sent)
250 return;
251
252 if (!status) {
253 __u8 param = *((__u8 *) sent);
254
255 if (param)
256 set_bit(HCI_ENCRYPT, &hdev->flags);
257 else
258 clear_bit(HCI_ENCRYPT, &hdev->flags);
259 }
260
261 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
262 }
263
264 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
265 {
266 __u8 status = *((__u8 *) skb->data);
267 void *sent;
268
269 BT_DBG("%s status 0x%x", hdev->name, status);
270
271 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
272 if (!sent)
273 return;
274
275 if (!status) {
276 __u8 param = *((__u8 *) sent);
277 int old_pscan, old_iscan;
278
279 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
280 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
281
282 if (param & SCAN_INQUIRY) {
283 set_bit(HCI_ISCAN, &hdev->flags);
284 if (!old_iscan)
285 mgmt_discoverable(hdev->id, 1);
286 } else if (old_iscan)
287 mgmt_discoverable(hdev->id, 0);
288
289 if (param & SCAN_PAGE) {
290 set_bit(HCI_PSCAN, &hdev->flags);
291 if (!old_pscan)
292 mgmt_connectable(hdev->id, 1);
293 } else if (old_pscan)
294 mgmt_connectable(hdev->id, 0);
295 }
296
297 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
298 }
299
300 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
301 {
302 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
303
304 BT_DBG("%s status 0x%x", hdev->name, rp->status);
305
306 if (rp->status)
307 return;
308
309 memcpy(hdev->dev_class, rp->dev_class, 3);
310
311 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
312 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
313 }
314
315 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
316 {
317 __u8 status = *((__u8 *) skb->data);
318 void *sent;
319
320 BT_DBG("%s status 0x%x", hdev->name, status);
321
322 if (status)
323 return;
324
325 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
326 if (!sent)
327 return;
328
329 memcpy(hdev->dev_class, sent, 3);
330 }
331
332 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
333 {
334 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
335 __u16 setting;
336
337 BT_DBG("%s status 0x%x", hdev->name, rp->status);
338
339 if (rp->status)
340 return;
341
342 setting = __le16_to_cpu(rp->voice_setting);
343
344 if (hdev->voice_setting == setting)
345 return;
346
347 hdev->voice_setting = setting;
348
349 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
350
351 if (hdev->notify) {
352 tasklet_disable(&hdev->tx_task);
353 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
354 tasklet_enable(&hdev->tx_task);
355 }
356 }
357
358 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
359 {
360 __u8 status = *((__u8 *) skb->data);
361 __u16 setting;
362 void *sent;
363
364 BT_DBG("%s status 0x%x", hdev->name, status);
365
366 if (status)
367 return;
368
369 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
370 if (!sent)
371 return;
372
373 setting = get_unaligned_le16(sent);
374
375 if (hdev->voice_setting == setting)
376 return;
377
378 hdev->voice_setting = setting;
379
380 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
381
382 if (hdev->notify) {
383 tasklet_disable(&hdev->tx_task);
384 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
385 tasklet_enable(&hdev->tx_task);
386 }
387 }
388
389 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
390 {
391 __u8 status = *((__u8 *) skb->data);
392
393 BT_DBG("%s status 0x%x", hdev->name, status);
394
395 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
396 }
397
398 static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
399 {
400 struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
401
402 BT_DBG("%s status 0x%x", hdev->name, rp->status);
403
404 if (rp->status)
405 return;
406
407 hdev->ssp_mode = rp->mode;
408 }
409
410 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
411 {
412 __u8 status = *((__u8 *) skb->data);
413 void *sent;
414
415 BT_DBG("%s status 0x%x", hdev->name, status);
416
417 if (status)
418 return;
419
420 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
421 if (!sent)
422 return;
423
424 hdev->ssp_mode = *((__u8 *) sent);
425 }
426
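/* Pick the parameter for Write Inquiry Mode: 0x02 selects extended
 * inquiry results, 0x01 inquiry results with RSSI and 0x00 the standard
 * format.  The manufacturer/revision checks below cover controllers
 * that report RSSI correctly without advertising it in their LMP
 * features. */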
427 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
428 {
429 if (hdev->features[6] & LMP_EXT_INQ)
430 return 2;
431
432 if (hdev->features[3] & LMP_RSSI_INQ)
433 return 1;
434
435 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
436 hdev->lmp_subver == 0x0757)
437 return 1;
438
439 if (hdev->manufacturer == 15) {
440 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
441 return 1;
442 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
443 return 1;
444 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
445 return 1;
446 }
447
448 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
449 hdev->lmp_subver == 0x1805)
450 return 1;
451
452 return 0;
453 }
454
455 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
456 {
457 u8 mode;
458
459 mode = hci_get_inquiry_mode(hdev);
460
461 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
462 }
463
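/* Build the Set Event Mask parameter: start from the default BR/EDR
 * events and unmask additional events only if the controller's LMP
 * features indicate it can actually generate them. */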
464 static void hci_setup_event_mask(struct hci_dev *hdev)
465 {
466 /* The second byte is 0xff instead of 0x9f (two reserved bits
467 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
468 * command otherwise */
469 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
470
471 /* Events for 1.2 and newer controllers */
472 if (hdev->lmp_ver > 1) {
473 events[4] |= 0x01; /* Flow Specification Complete */
474 events[4] |= 0x02; /* Inquiry Result with RSSI */
475 events[4] |= 0x04; /* Read Remote Extended Features Complete */
476 events[5] |= 0x08; /* Synchronous Connection Complete */
477 events[5] |= 0x10; /* Synchronous Connection Changed */
478 }
479
480 if (hdev->features[3] & LMP_RSSI_INQ)
481 events[4] |= 0x04; /* Inquiry Result with RSSI */
482
483 if (hdev->features[5] & LMP_SNIFF_SUBR)
484 events[5] |= 0x20; /* Sniff Subrating */
485
486 if (hdev->features[5] & LMP_PAUSE_ENC)
487 events[5] |= 0x80; /* Encryption Key Refresh Complete */
488
489 if (hdev->features[6] & LMP_EXT_INQ)
490 events[5] |= 0x40; /* Extended Inquiry Result */
491
492 if (hdev->features[6] & LMP_NO_FLUSH)
493 events[7] |= 0x01; /* Enhanced Flush Complete */
494
495 if (hdev->features[7] & LMP_LSTO)
496 events[6] |= 0x80; /* Link Supervision Timeout Changed */
497
498 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
499 events[6] |= 0x01; /* IO Capability Request */
500 events[6] |= 0x02; /* IO Capability Response */
501 events[6] |= 0x04; /* User Confirmation Request */
502 events[6] |= 0x08; /* User Passkey Request */
503 events[6] |= 0x10; /* Remote OOB Data Request */
504 events[6] |= 0x20; /* Simple Pairing Complete */
505 events[7] |= 0x04; /* User Passkey Notification */
506 events[7] |= 0x08; /* Keypress Notification */
507 events[7] |= 0x10; /* Remote Host Supported
508 * Features Notification */
509 }
510
511 if (hdev->features[4] & LMP_LE)
512 events[7] |= 0x20; /* LE Meta-Event */
513
514 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
515 }
516
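/* Continue the adapter initialization once the local version is known
 * (called from hci_cc_read_local_version() while HCI_INIT is set):
 * program the event mask, read the supported-commands bitmap on 1.2+
 * controllers, enable Simple Pairing when available, select the inquiry
 * mode and read the inquiry response TX power. */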
517 static void hci_setup(struct hci_dev *hdev)
518 {
519 hci_setup_event_mask(hdev);
520
521 if (hdev->lmp_ver > 1)
522 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
523
524 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
525 u8 mode = 0x01;
526 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
527 }
528
529 if (hdev->features[3] & LMP_RSSI_INQ)
530 hci_setup_inquiry_mode(hdev);
531
532 if (hdev->features[7] & LMP_INQ_TX_PWR)
533 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
534 }
535
536 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
537 {
538 struct hci_rp_read_local_version *rp = (void *) skb->data;
539
540 BT_DBG("%s status 0x%x", hdev->name, rp->status);
541
542 if (rp->status)
543 return;
544
545 hdev->hci_ver = rp->hci_ver;
546 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
547 hdev->lmp_ver = rp->lmp_ver;
548 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
549 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
550
551 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
552 hdev->manufacturer,
553 hdev->hci_ver, hdev->hci_rev);
554
555 if (test_bit(HCI_INIT, &hdev->flags))
556 hci_setup(hdev);
557 }
558
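/* Write a default link policy that enables every policy mode the local
 * LMP features advertise (role switch, hold, sniff, park).  Invoked from
 * hci_cc_read_local_commands() when the controller supports the Write
 * Default Link Policy Settings command. */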
559 static void hci_setup_link_policy(struct hci_dev *hdev)
560 {
561 u16 link_policy = 0;
562
563 if (hdev->features[0] & LMP_RSWITCH)
564 link_policy |= HCI_LP_RSWITCH;
565 if (hdev->features[0] & LMP_HOLD)
566 link_policy |= HCI_LP_HOLD;
567 if (hdev->features[0] & LMP_SNIFF)
568 link_policy |= HCI_LP_SNIFF;
569 if (hdev->features[1] & LMP_PARK)
570 link_policy |= HCI_LP_PARK;
571
572 link_policy = cpu_to_le16(link_policy);
573 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
574 sizeof(link_policy), &link_policy);
575 }
576
577 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
578 {
579 struct hci_rp_read_local_commands *rp = (void *) skb->data;
580
581 BT_DBG("%s status 0x%x", hdev->name, rp->status);
582
583 if (rp->status)
584 goto done;
585
586 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
587
588 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
589 hci_setup_link_policy(hdev);
590
591 done:
592 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
593 }
594
595 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
596 {
597 struct hci_rp_read_local_features *rp = (void *) skb->data;
598
599 BT_DBG("%s status 0x%x", hdev->name, rp->status);
600
601 if (rp->status)
602 return;
603
604 memcpy(hdev->features, rp->features, 8);
605
606 /* Adjust default settings according to features
607 * supported by device. */
608
609 if (hdev->features[0] & LMP_3SLOT)
610 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
611
612 if (hdev->features[0] & LMP_5SLOT)
613 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
614
615 if (hdev->features[1] & LMP_HV2) {
616 hdev->pkt_type |= (HCI_HV2);
617 hdev->esco_type |= (ESCO_HV2);
618 }
619
620 if (hdev->features[1] & LMP_HV3) {
621 hdev->pkt_type |= (HCI_HV3);
622 hdev->esco_type |= (ESCO_HV3);
623 }
624
625 if (hdev->features[3] & LMP_ESCO)
626 hdev->esco_type |= (ESCO_EV3);
627
628 if (hdev->features[4] & LMP_EV4)
629 hdev->esco_type |= (ESCO_EV4);
630
631 if (hdev->features[4] & LMP_EV5)
632 hdev->esco_type |= (ESCO_EV5);
633
634 if (hdev->features[5] & LMP_EDR_ESCO_2M)
635 hdev->esco_type |= (ESCO_2EV3);
636
637 if (hdev->features[5] & LMP_EDR_ESCO_3M)
638 hdev->esco_type |= (ESCO_3EV3);
639
640 if (hdev->features[5] & LMP_EDR_3S_ESCO)
641 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
642
643 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
644 hdev->features[0], hdev->features[1],
645 hdev->features[2], hdev->features[3],
646 hdev->features[4], hdev->features[5],
647 hdev->features[6], hdev->features[7]);
648 }
649
650 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
651 {
652 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
653
654 BT_DBG("%s status 0x%x", hdev->name, rp->status);
655
656 if (rp->status)
657 return;
658
659 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
660 hdev->sco_mtu = rp->sco_mtu;
661 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
662 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
663
664 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
665 hdev->sco_mtu = 64;
666 hdev->sco_pkts = 8;
667 }
668
669 hdev->acl_cnt = hdev->acl_pkts;
670 hdev->sco_cnt = hdev->sco_pkts;
671
672 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
673 hdev->acl_mtu, hdev->acl_pkts,
674 hdev->sco_mtu, hdev->sco_pkts);
675 }
676
677 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
678 {
679 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
680
681 BT_DBG("%s status 0x%x", hdev->name, rp->status);
682
683 if (!rp->status)
684 bacpy(&hdev->bdaddr, &rp->bdaddr);
685
686 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
687 }
688
689 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
690 {
691 __u8 status = *((__u8 *) skb->data);
692
693 BT_DBG("%s status 0x%x", hdev->name, status);
694
695 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
696 }
697
698 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
699 struct sk_buff *skb)
700 {
701 __u8 status = *((__u8 *) skb->data);
702
703 BT_DBG("%s status 0x%x", hdev->name, status);
704
705 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
706 }
707
708 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
709 {
710 __u8 status = *((__u8 *) skb->data);
711
712 BT_DBG("%s status 0x%x", hdev->name, status);
713
714 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
715 }
716
717 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
718 struct sk_buff *skb)
719 {
720 __u8 status = *((__u8 *) skb->data);
721
722 BT_DBG("%s status 0x%x", hdev->name, status);
723
724 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
725 }
726
727 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
728 struct sk_buff *skb)
729 {
730 __u8 status = *((__u8 *) skb->data);
731
732 BT_DBG("%s status 0x%x", hdev->name, status);
733
734 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
735 }
736
737 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
738 {
739 __u8 status = *((__u8 *) skb->data);
740
741 BT_DBG("%s status 0x%x", hdev->name, status);
742
743 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
744 }
745
746 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
747 {
748 BT_DBG("%s status 0x%x", hdev->name, status);
749
750 if (status) {
751 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
752
753 hci_conn_check_pending(hdev);
754 } else
755 set_bit(HCI_INQUIRY, &hdev->flags);
756 }
757
758 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
759 {
760 struct hci_cp_create_conn *cp;
761 struct hci_conn *conn;
762
763 BT_DBG("%s status 0x%x", hdev->name, status);
764
765 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
766 if (!cp)
767 return;
768
769 hci_dev_lock(hdev);
770
771 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
772
773 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
774
775 if (status) {
776 if (conn && conn->state == BT_CONNECT) {
777 if (status != 0x0c || conn->attempt > 2) {
778 conn->state = BT_CLOSED;
779 hci_proto_connect_cfm(conn, status);
780 hci_conn_del(conn);
781 } else
782 conn->state = BT_CONNECT2;
783 }
784 } else {
785 if (!conn) {
786 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
787 if (conn) {
788 conn->out = 1;
789 conn->link_mode |= HCI_LM_MASTER;
790 } else
791 BT_ERR("No memory for new connection");
792 }
793 }
794
795 hci_dev_unlock(hdev);
796 }
797
798 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
799 {
800 struct hci_cp_add_sco *cp;
801 struct hci_conn *acl, *sco;
802 __u16 handle;
803
804 BT_DBG("%s status 0x%x", hdev->name, status);
805
806 if (!status)
807 return;
808
809 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
810 if (!cp)
811 return;
812
813 handle = __le16_to_cpu(cp->handle);
814
815 BT_DBG("%s handle %d", hdev->name, handle);
816
817 hci_dev_lock(hdev);
818
819 acl = hci_conn_hash_lookup_handle(hdev, handle);
820 if (acl && (sco = acl->link)) {
821 sco->state = BT_CLOSED;
822
823 hci_proto_connect_cfm(sco, status);
824 hci_conn_del(sco);
825 }
826
827 hci_dev_unlock(hdev);
828 }
829
830 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
831 {
832 struct hci_cp_auth_requested *cp;
833 struct hci_conn *conn;
834
835 BT_DBG("%s status 0x%x", hdev->name, status);
836
837 if (!status)
838 return;
839
840 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
841 if (!cp)
842 return;
843
844 hci_dev_lock(hdev);
845
846 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
847 if (conn) {
848 if (conn->state == BT_CONFIG) {
849 hci_proto_connect_cfm(conn, status);
850 hci_conn_put(conn);
851 }
852 }
853
854 hci_dev_unlock(hdev);
855 }
856
857 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
858 {
859 struct hci_cp_set_conn_encrypt *cp;
860 struct hci_conn *conn;
861
862 BT_DBG("%s status 0x%x", hdev->name, status);
863
864 if (!status)
865 return;
866
867 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
868 if (!cp)
869 return;
870
871 hci_dev_lock(hdev);
872
873 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
874 if (conn) {
875 if (conn->state == BT_CONFIG) {
876 hci_proto_connect_cfm(conn, status);
877 hci_conn_put(conn);
878 }
879 }
880
881 hci_dev_unlock(hdev);
882 }
883
884 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
885 struct hci_conn *conn)
886 {
887 if (conn->state != BT_CONFIG || !conn->out)
888 return 0;
889
890 if (conn->pending_sec_level == BT_SECURITY_SDP)
891 return 0;
892
893 /* Only request authentication for SSP connections or non-SSP
894 * devices with sec_level HIGH */
895 if (!(hdev->ssp_mode > 0 && conn->ssp_mode > 0) &&
896 conn->pending_sec_level != BT_SECURITY_HIGH)
897 return 0;
898
899 return 1;
900 }
901
902 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
903 {
904 struct hci_cp_remote_name_req *cp;
905 struct hci_conn *conn;
906
907 BT_DBG("%s status 0x%x", hdev->name, status);
908
909 	/* If successful, wait for the name req complete event before
910 	 * checking for the need to do authentication */
911 if (!status)
912 return;
913
914 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
915 if (!cp)
916 return;
917
918 hci_dev_lock(hdev);
919
920 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
921 if (conn && hci_outgoing_auth_needed(hdev, conn)) {
922 struct hci_cp_auth_requested cp;
923 cp.handle = __cpu_to_le16(conn->handle);
924 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
925 }
926
927 hci_dev_unlock(hdev);
928 }
929
930 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
931 {
932 struct hci_cp_read_remote_features *cp;
933 struct hci_conn *conn;
934
935 BT_DBG("%s status 0x%x", hdev->name, status);
936
937 if (!status)
938 return;
939
940 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
941 if (!cp)
942 return;
943
944 hci_dev_lock(hdev);
945
946 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
947 if (conn) {
948 if (conn->state == BT_CONFIG) {
949 hci_proto_connect_cfm(conn, status);
950 hci_conn_put(conn);
951 }
952 }
953
954 hci_dev_unlock(hdev);
955 }
956
957 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
958 {
959 struct hci_cp_read_remote_ext_features *cp;
960 struct hci_conn *conn;
961
962 BT_DBG("%s status 0x%x", hdev->name, status);
963
964 if (!status)
965 return;
966
967 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
968 if (!cp)
969 return;
970
971 hci_dev_lock(hdev);
972
973 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
974 if (conn) {
975 if (conn->state == BT_CONFIG) {
976 hci_proto_connect_cfm(conn, status);
977 hci_conn_put(conn);
978 }
979 }
980
981 hci_dev_unlock(hdev);
982 }
983
984 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
985 {
986 struct hci_cp_setup_sync_conn *cp;
987 struct hci_conn *acl, *sco;
988 __u16 handle;
989
990 BT_DBG("%s status 0x%x", hdev->name, status);
991
992 if (!status)
993 return;
994
995 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
996 if (!cp)
997 return;
998
999 handle = __le16_to_cpu(cp->handle);
1000
1001 BT_DBG("%s handle %d", hdev->name, handle);
1002
1003 hci_dev_lock(hdev);
1004
1005 acl = hci_conn_hash_lookup_handle(hdev, handle);
1006 if (acl && (sco = acl->link)) {
1007 sco->state = BT_CLOSED;
1008
1009 hci_proto_connect_cfm(sco, status);
1010 hci_conn_del(sco);
1011 }
1012
1013 hci_dev_unlock(hdev);
1014 }
1015
1016 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1017 {
1018 struct hci_cp_sniff_mode *cp;
1019 struct hci_conn *conn;
1020
1021 BT_DBG("%s status 0x%x", hdev->name, status);
1022
1023 if (!status)
1024 return;
1025
1026 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1027 if (!cp)
1028 return;
1029
1030 hci_dev_lock(hdev);
1031
1032 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1033 if (conn) {
1034 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1035
1036 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1037 hci_sco_setup(conn, status);
1038 }
1039
1040 hci_dev_unlock(hdev);
1041 }
1042
1043 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1044 {
1045 struct hci_cp_exit_sniff_mode *cp;
1046 struct hci_conn *conn;
1047
1048 BT_DBG("%s status 0x%x", hdev->name, status);
1049
1050 if (!status)
1051 return;
1052
1053 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1054 if (!cp)
1055 return;
1056
1057 hci_dev_lock(hdev);
1058
1059 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1060 if (conn) {
1061 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend);
1062
1063 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1064 hci_sco_setup(conn, status);
1065 }
1066
1067 hci_dev_unlock(hdev);
1068 }
1069
1070 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1071 {
1072 __u8 status = *((__u8 *) skb->data);
1073
1074 BT_DBG("%s status %d", hdev->name, status);
1075
1076 clear_bit(HCI_INQUIRY, &hdev->flags);
1077
1078 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1079
1080 hci_conn_check_pending(hdev);
1081 }
1082
1083 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1084 {
1085 struct inquiry_data data;
1086 struct inquiry_info *info = (void *) (skb->data + 1);
1087 int num_rsp = *((__u8 *) skb->data);
1088
1089 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1090
1091 if (!num_rsp)
1092 return;
1093
1094 hci_dev_lock(hdev);
1095
1096 for (; num_rsp; num_rsp--) {
1097 bacpy(&data.bdaddr, &info->bdaddr);
1098 data.pscan_rep_mode = info->pscan_rep_mode;
1099 data.pscan_period_mode = info->pscan_period_mode;
1100 data.pscan_mode = info->pscan_mode;
1101 memcpy(data.dev_class, info->dev_class, 3);
1102 data.clock_offset = info->clock_offset;
1103 data.rssi = 0x00;
1104 data.ssp_mode = 0x00;
1105 info++;
1106 hci_inquiry_cache_update(hdev, &data);
1107 }
1108
1109 hci_dev_unlock(hdev);
1110 }
1111
1112 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1113 {
1114 struct hci_ev_conn_complete *ev = (void *) skb->data;
1115 struct hci_conn *conn;
1116
1117 BT_DBG("%s", hdev->name);
1118
1119 hci_dev_lock(hdev);
1120
1121 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1122 if (!conn) {
1123 if (ev->link_type != SCO_LINK)
1124 goto unlock;
1125
1126 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1127 if (!conn)
1128 goto unlock;
1129
1130 conn->type = SCO_LINK;
1131 }
1132
1133 if (!ev->status) {
1134 conn->handle = __le16_to_cpu(ev->handle);
1135
1136 if (conn->type == ACL_LINK) {
1137 conn->state = BT_CONFIG;
1138 hci_conn_hold(conn);
1139 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1140 } else
1141 conn->state = BT_CONNECTED;
1142
1143 hci_conn_hold_device(conn);
1144 hci_conn_add_sysfs(conn);
1145
1146 if (test_bit(HCI_AUTH, &hdev->flags))
1147 conn->link_mode |= HCI_LM_AUTH;
1148
1149 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1150 conn->link_mode |= HCI_LM_ENCRYPT;
1151
1152 /* Get remote features */
1153 if (conn->type == ACL_LINK) {
1154 struct hci_cp_read_remote_features cp;
1155 cp.handle = ev->handle;
1156 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1157 sizeof(cp), &cp);
1158 }
1159
1160 /* Set packet type for incoming connection */
1161 if (!conn->out && hdev->hci_ver < 3) {
1162 struct hci_cp_change_conn_ptype cp;
1163 cp.handle = ev->handle;
1164 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1165 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
1166 sizeof(cp), &cp);
1167 }
1168 } else
1169 conn->state = BT_CLOSED;
1170
1171 if (conn->type == ACL_LINK)
1172 hci_sco_setup(conn, ev->status);
1173
1174 if (ev->status) {
1175 hci_proto_connect_cfm(conn, ev->status);
1176 hci_conn_del(conn);
1177 } else if (ev->link_type != ACL_LINK)
1178 hci_proto_connect_cfm(conn, ev->status);
1179
1180 unlock:
1181 hci_dev_unlock(hdev);
1182
1183 hci_conn_check_pending(hdev);
1184 }
1185
1186 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1187 {
1188 struct hci_ev_conn_request *ev = (void *) skb->data;
1189 int mask = hdev->link_mode;
1190
1191 BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1192 batostr(&ev->bdaddr), ev->link_type);
1193
1194 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1195
1196 if ((mask & HCI_LM_ACCEPT) && !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1197 /* Connection accepted */
1198 struct inquiry_entry *ie;
1199 struct hci_conn *conn;
1200
1201 hci_dev_lock(hdev);
1202
1203 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1204 if (ie)
1205 memcpy(ie->data.dev_class, ev->dev_class, 3);
1206
1207 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1208 if (!conn) {
1209 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1210 if (!conn) {
1211 BT_ERR("No memory for new connection");
1212 hci_dev_unlock(hdev);
1213 return;
1214 }
1215 }
1216
1217 memcpy(conn->dev_class, ev->dev_class, 3);
1218 conn->state = BT_CONNECT;
1219
1220 hci_dev_unlock(hdev);
1221
1222 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1223 struct hci_cp_accept_conn_req cp;
1224
1225 bacpy(&cp.bdaddr, &ev->bdaddr);
1226
1227 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1228 cp.role = 0x00; /* Become master */
1229 else
1230 cp.role = 0x01; /* Remain slave */
1231
1232 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
1233 sizeof(cp), &cp);
1234 } else {
1235 struct hci_cp_accept_sync_conn_req cp;
1236
1237 bacpy(&cp.bdaddr, &ev->bdaddr);
1238 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1239
1240 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
1241 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
1242 cp.max_latency = cpu_to_le16(0xffff);
1243 cp.content_format = cpu_to_le16(hdev->voice_setting);
1244 cp.retrans_effort = 0xff;
1245
1246 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1247 sizeof(cp), &cp);
1248 }
1249 } else {
1250 /* Connection rejected */
1251 struct hci_cp_reject_conn_req cp;
1252
1253 bacpy(&cp.bdaddr, &ev->bdaddr);
1254 cp.reason = 0x0f;
1255 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1256 }
1257 }
1258
1259 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1260 {
1261 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1262 struct hci_conn *conn;
1263
1264 BT_DBG("%s status %d", hdev->name, ev->status);
1265
1266 if (ev->status)
1267 return;
1268
1269 hci_dev_lock(hdev);
1270
1271 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1272 if (conn) {
1273 conn->state = BT_CLOSED;
1274
1275 hci_proto_disconn_cfm(conn, ev->reason);
1276 hci_conn_del(conn);
1277 }
1278
1279 hci_dev_unlock(hdev);
1280 }
1281
1282 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1283 {
1284 struct hci_ev_auth_complete *ev = (void *) skb->data;
1285 struct hci_conn *conn;
1286
1287 BT_DBG("%s status %d", hdev->name, ev->status);
1288
1289 hci_dev_lock(hdev);
1290
1291 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1292 if (conn) {
1293 if (!ev->status) {
1294 conn->link_mode |= HCI_LM_AUTH;
1295 conn->sec_level = conn->pending_sec_level;
1296 } else
1297 conn->sec_level = BT_SECURITY_LOW;
1298
1299 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1300
1301 if (conn->state == BT_CONFIG) {
1302 if (!ev->status && hdev->ssp_mode > 0 &&
1303 conn->ssp_mode > 0) {
1304 struct hci_cp_set_conn_encrypt cp;
1305 cp.handle = ev->handle;
1306 cp.encrypt = 0x01;
1307 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
1308 sizeof(cp), &cp);
1309 } else {
1310 conn->state = BT_CONNECTED;
1311 hci_proto_connect_cfm(conn, ev->status);
1312 hci_conn_put(conn);
1313 }
1314 } else {
1315 hci_auth_cfm(conn, ev->status);
1316
1317 hci_conn_hold(conn);
1318 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1319 hci_conn_put(conn);
1320 }
1321
1322 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend)) {
1323 if (!ev->status) {
1324 struct hci_cp_set_conn_encrypt cp;
1325 cp.handle = ev->handle;
1326 cp.encrypt = 0x01;
1327 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT,
1328 sizeof(cp), &cp);
1329 } else {
1330 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1331 hci_encrypt_cfm(conn, ev->status, 0x00);
1332 }
1333 }
1334 }
1335
1336 hci_dev_unlock(hdev);
1337 }
1338
1339 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1340 {
1341 struct hci_ev_remote_name *ev = (void *) skb->data;
1342 struct hci_conn *conn;
1343
1344 BT_DBG("%s", hdev->name);
1345
1346 hci_conn_check_pending(hdev);
1347
1348 hci_dev_lock(hdev);
1349
1350 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1351 if (conn && hci_outgoing_auth_needed(hdev, conn)) {
1352 struct hci_cp_auth_requested cp;
1353 cp.handle = __cpu_to_le16(conn->handle);
1354 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1355 }
1356
1357 hci_dev_unlock(hdev);
1358 }
1359
1360 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1361 {
1362 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1363 struct hci_conn *conn;
1364
1365 BT_DBG("%s status %d", hdev->name, ev->status);
1366
1367 hci_dev_lock(hdev);
1368
1369 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1370 if (conn) {
1371 if (!ev->status) {
1372 if (ev->encrypt) {
1373 /* Encryption implies authentication */
1374 conn->link_mode |= HCI_LM_AUTH;
1375 conn->link_mode |= HCI_LM_ENCRYPT;
1376 } else
1377 conn->link_mode &= ~HCI_LM_ENCRYPT;
1378 }
1379
1380 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend);
1381
1382 if (conn->state == BT_CONFIG) {
1383 if (!ev->status)
1384 conn->state = BT_CONNECTED;
1385
1386 hci_proto_connect_cfm(conn, ev->status);
1387 hci_conn_put(conn);
1388 } else
1389 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1390 }
1391
1392 hci_dev_unlock(hdev);
1393 }
1394
1395 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1396 {
1397 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1398 struct hci_conn *conn;
1399
1400 BT_DBG("%s status %d", hdev->name, ev->status);
1401
1402 hci_dev_lock(hdev);
1403
1404 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1405 if (conn) {
1406 if (!ev->status)
1407 conn->link_mode |= HCI_LM_SECURE;
1408
1409 clear_bit(HCI_CONN_AUTH_PEND, &conn->pend);
1410
1411 hci_key_change_cfm(conn, ev->status);
1412 }
1413
1414 hci_dev_unlock(hdev);
1415 }
1416
1417 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1418 {
1419 struct hci_ev_remote_features *ev = (void *) skb->data;
1420 struct hci_conn *conn;
1421
1422 BT_DBG("%s status %d", hdev->name, ev->status);
1423
1424 hci_dev_lock(hdev);
1425
1426 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1427 if (!conn)
1428 goto unlock;
1429
1430 if (!ev->status)
1431 memcpy(conn->features, ev->features, 8);
1432
1433 if (conn->state != BT_CONFIG)
1434 goto unlock;
1435
1436 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
1437 struct hci_cp_read_remote_ext_features cp;
1438 cp.handle = ev->handle;
1439 cp.page = 0x01;
1440 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
1441 sizeof(cp), &cp);
1442 goto unlock;
1443 }
1444
1445 if (!ev->status) {
1446 struct hci_cp_remote_name_req cp;
1447 memset(&cp, 0, sizeof(cp));
1448 bacpy(&cp.bdaddr, &conn->dst);
1449 cp.pscan_rep_mode = 0x02;
1450 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1451 }
1452
1453 if (!hci_outgoing_auth_needed(hdev, conn)) {
1454 conn->state = BT_CONNECTED;
1455 hci_proto_connect_cfm(conn, ev->status);
1456 hci_conn_put(conn);
1457 }
1458
1459 unlock:
1460 hci_dev_unlock(hdev);
1461 }
1462
1463 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
1464 {
1465 BT_DBG("%s", hdev->name);
1466 }
1467
1468 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1469 {
1470 BT_DBG("%s", hdev->name);
1471 }
1472
1473 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1474 {
1475 struct hci_ev_cmd_complete *ev = (void *) skb->data;
1476 __u16 opcode;
1477
1478 skb_pull(skb, sizeof(*ev));
1479
1480 opcode = __le16_to_cpu(ev->opcode);
1481
1482 switch (opcode) {
1483 case HCI_OP_INQUIRY_CANCEL:
1484 hci_cc_inquiry_cancel(hdev, skb);
1485 break;
1486
1487 case HCI_OP_EXIT_PERIODIC_INQ:
1488 hci_cc_exit_periodic_inq(hdev, skb);
1489 break;
1490
1491 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
1492 hci_cc_remote_name_req_cancel(hdev, skb);
1493 break;
1494
1495 case HCI_OP_ROLE_DISCOVERY:
1496 hci_cc_role_discovery(hdev, skb);
1497 break;
1498
1499 case HCI_OP_READ_LINK_POLICY:
1500 hci_cc_read_link_policy(hdev, skb);
1501 break;
1502
1503 case HCI_OP_WRITE_LINK_POLICY:
1504 hci_cc_write_link_policy(hdev, skb);
1505 break;
1506
1507 case HCI_OP_READ_DEF_LINK_POLICY:
1508 hci_cc_read_def_link_policy(hdev, skb);
1509 break;
1510
1511 case HCI_OP_WRITE_DEF_LINK_POLICY:
1512 hci_cc_write_def_link_policy(hdev, skb);
1513 break;
1514
1515 case HCI_OP_RESET:
1516 hci_cc_reset(hdev, skb);
1517 break;
1518
1519 case HCI_OP_WRITE_LOCAL_NAME:
1520 hci_cc_write_local_name(hdev, skb);
1521 break;
1522
1523 case HCI_OP_READ_LOCAL_NAME:
1524 hci_cc_read_local_name(hdev, skb);
1525 break;
1526
1527 case HCI_OP_WRITE_AUTH_ENABLE:
1528 hci_cc_write_auth_enable(hdev, skb);
1529 break;
1530
1531 case HCI_OP_WRITE_ENCRYPT_MODE:
1532 hci_cc_write_encrypt_mode(hdev, skb);
1533 break;
1534
1535 case HCI_OP_WRITE_SCAN_ENABLE:
1536 hci_cc_write_scan_enable(hdev, skb);
1537 break;
1538
1539 case HCI_OP_READ_CLASS_OF_DEV:
1540 hci_cc_read_class_of_dev(hdev, skb);
1541 break;
1542
1543 case HCI_OP_WRITE_CLASS_OF_DEV:
1544 hci_cc_write_class_of_dev(hdev, skb);
1545 break;
1546
1547 case HCI_OP_READ_VOICE_SETTING:
1548 hci_cc_read_voice_setting(hdev, skb);
1549 break;
1550
1551 case HCI_OP_WRITE_VOICE_SETTING:
1552 hci_cc_write_voice_setting(hdev, skb);
1553 break;
1554
1555 case HCI_OP_HOST_BUFFER_SIZE:
1556 hci_cc_host_buffer_size(hdev, skb);
1557 break;
1558
1559 case HCI_OP_READ_SSP_MODE:
1560 hci_cc_read_ssp_mode(hdev, skb);
1561 break;
1562
1563 case HCI_OP_WRITE_SSP_MODE:
1564 hci_cc_write_ssp_mode(hdev, skb);
1565 break;
1566
1567 case HCI_OP_READ_LOCAL_VERSION:
1568 hci_cc_read_local_version(hdev, skb);
1569 break;
1570
1571 case HCI_OP_READ_LOCAL_COMMANDS:
1572 hci_cc_read_local_commands(hdev, skb);
1573 break;
1574
1575 case HCI_OP_READ_LOCAL_FEATURES:
1576 hci_cc_read_local_features(hdev, skb);
1577 break;
1578
1579 case HCI_OP_READ_BUFFER_SIZE:
1580 hci_cc_read_buffer_size(hdev, skb);
1581 break;
1582
1583 case HCI_OP_READ_BD_ADDR:
1584 hci_cc_read_bd_addr(hdev, skb);
1585 break;
1586
1587 case HCI_OP_WRITE_CA_TIMEOUT:
1588 hci_cc_write_ca_timeout(hdev, skb);
1589 break;
1590
1591 case HCI_OP_DELETE_STORED_LINK_KEY:
1592 hci_cc_delete_stored_link_key(hdev, skb);
1593 break;
1594
1595 case HCI_OP_SET_EVENT_MASK:
1596 hci_cc_set_event_mask(hdev, skb);
1597 break;
1598
1599 case HCI_OP_WRITE_INQUIRY_MODE:
1600 hci_cc_write_inquiry_mode(hdev, skb);
1601 break;
1602
1603 case HCI_OP_READ_INQ_RSP_TX_POWER:
1604 hci_cc_read_inq_rsp_tx_power(hdev, skb);
1605 break;
1606
1607 case HCI_OP_SET_EVENT_FLT:
1608 hci_cc_set_event_flt(hdev, skb);
1609 break;
1610
1611 default:
1612 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1613 break;
1614 }
1615
1616 if (ev->ncmd) {
1617 atomic_set(&hdev->cmd_cnt, 1);
1618 if (!skb_queue_empty(&hdev->cmd_q))
1619 tasklet_schedule(&hdev->cmd_task);
1620 }
1621 }
1622
1623 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
1624 {
1625 struct hci_ev_cmd_status *ev = (void *) skb->data;
1626 __u16 opcode;
1627
1628 skb_pull(skb, sizeof(*ev));
1629
1630 opcode = __le16_to_cpu(ev->opcode);
1631
1632 switch (opcode) {
1633 case HCI_OP_INQUIRY:
1634 hci_cs_inquiry(hdev, ev->status);
1635 break;
1636
1637 case HCI_OP_CREATE_CONN:
1638 hci_cs_create_conn(hdev, ev->status);
1639 break;
1640
1641 case HCI_OP_ADD_SCO:
1642 hci_cs_add_sco(hdev, ev->status);
1643 break;
1644
1645 case HCI_OP_AUTH_REQUESTED:
1646 hci_cs_auth_requested(hdev, ev->status);
1647 break;
1648
1649 case HCI_OP_SET_CONN_ENCRYPT:
1650 hci_cs_set_conn_encrypt(hdev, ev->status);
1651 break;
1652
1653 case HCI_OP_REMOTE_NAME_REQ:
1654 hci_cs_remote_name_req(hdev, ev->status);
1655 break;
1656
1657 case HCI_OP_READ_REMOTE_FEATURES:
1658 hci_cs_read_remote_features(hdev, ev->status);
1659 break;
1660
1661 case HCI_OP_READ_REMOTE_EXT_FEATURES:
1662 hci_cs_read_remote_ext_features(hdev, ev->status);
1663 break;
1664
1665 case HCI_OP_SETUP_SYNC_CONN:
1666 hci_cs_setup_sync_conn(hdev, ev->status);
1667 break;
1668
1669 case HCI_OP_SNIFF_MODE:
1670 hci_cs_sniff_mode(hdev, ev->status);
1671 break;
1672
1673 case HCI_OP_EXIT_SNIFF_MODE:
1674 hci_cs_exit_sniff_mode(hdev, ev->status);
1675 break;
1676
1677 default:
1678 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1679 break;
1680 }
1681
1682 if (ev->ncmd) {
1683 atomic_set(&hdev->cmd_cnt, 1);
1684 if (!skb_queue_empty(&hdev->cmd_q))
1685 tasklet_schedule(&hdev->cmd_task);
1686 }
1687 }
1688
1689 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1690 {
1691 struct hci_ev_role_change *ev = (void *) skb->data;
1692 struct hci_conn *conn;
1693
1694 BT_DBG("%s status %d", hdev->name, ev->status);
1695
1696 hci_dev_lock(hdev);
1697
1698 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1699 if (conn) {
1700 if (!ev->status) {
1701 if (ev->role)
1702 conn->link_mode &= ~HCI_LM_MASTER;
1703 else
1704 conn->link_mode |= HCI_LM_MASTER;
1705 }
1706
1707 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->pend);
1708
1709 hci_role_switch_cfm(conn, ev->status, ev->role);
1710 }
1711
1712 hci_dev_unlock(hdev);
1713 }
1714
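/* Number of Completed Packets: the controller is handing back ACL/SCO
 * buffer credits, so decrease the per-connection sent counters, refill
 * hdev->acl_cnt/sco_cnt and reschedule the TX tasklet. */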
1715 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
1716 {
1717 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
1718 __le16 *ptr;
1719 int i;
1720
1721 skb_pull(skb, sizeof(*ev));
1722
1723 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
1724
1725 if (skb->len < ev->num_hndl * 4) {
1726 BT_DBG("%s bad parameters", hdev->name);
1727 return;
1728 }
1729
1730 tasklet_disable(&hdev->tx_task);
1731
1732 for (i = 0, ptr = (__le16 *) skb->data; i < ev->num_hndl; i++) {
1733 struct hci_conn *conn;
1734 __u16 handle, count;
1735
1736 handle = get_unaligned_le16(ptr++);
1737 count = get_unaligned_le16(ptr++);
1738
1739 conn = hci_conn_hash_lookup_handle(hdev, handle);
1740 if (conn) {
1741 conn->sent -= count;
1742
1743 if (conn->type == ACL_LINK) {
1744 hdev->acl_cnt += count;
1745 if (hdev->acl_cnt > hdev->acl_pkts)
1746 hdev->acl_cnt = hdev->acl_pkts;
1747 } else {
1748 hdev->sco_cnt += count;
1749 if (hdev->sco_cnt > hdev->sco_pkts)
1750 hdev->sco_cnt = hdev->sco_pkts;
1751 }
1752 }
1753 }
1754
1755 tasklet_schedule(&hdev->tx_task);
1756
1757 tasklet_enable(&hdev->tx_task);
1758 }
1759
1760 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1761 {
1762 struct hci_ev_mode_change *ev = (void *) skb->data;
1763 struct hci_conn *conn;
1764
1765 BT_DBG("%s status %d", hdev->name, ev->status);
1766
1767 hci_dev_lock(hdev);
1768
1769 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1770 if (conn) {
1771 conn->mode = ev->mode;
1772 conn->interval = __le16_to_cpu(ev->interval);
1773
1774 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
1775 if (conn->mode == HCI_CM_ACTIVE)
1776 conn->power_save = 1;
1777 else
1778 conn->power_save = 0;
1779 }
1780
1781 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->pend))
1782 hci_sco_setup(conn, ev->status);
1783 }
1784
1785 hci_dev_unlock(hdev);
1786 }
1787
1788 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1789 {
1790 struct hci_ev_pin_code_req *ev = (void *) skb->data;
1791 struct hci_conn *conn;
1792
1793 BT_DBG("%s", hdev->name);
1794
1795 hci_dev_lock(hdev);
1796
1797 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1798 if (conn && conn->state == BT_CONNECTED) {
1799 hci_conn_hold(conn);
1800 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1801 hci_conn_put(conn);
1802 }
1803
1804 if (!test_bit(HCI_PAIRABLE, &hdev->flags))
1805 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
1806 sizeof(ev->bdaddr), &ev->bdaddr);
1807
1808 hci_dev_unlock(hdev);
1809 }
1810
1811 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1812 {
1813 BT_DBG("%s", hdev->name);
1814 }
1815
1816 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
1817 {
1818 struct hci_ev_link_key_notify *ev = (void *) skb->data;
1819 struct hci_conn *conn;
1820
1821 BT_DBG("%s", hdev->name);
1822
1823 hci_dev_lock(hdev);
1824
1825 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1826 if (conn) {
1827 hci_conn_hold(conn);
1828 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1829 hci_conn_put(conn);
1830 }
1831
1832 hci_dev_unlock(hdev);
1833 }
1834
1835 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
1836 {
1837 struct hci_ev_clock_offset *ev = (void *) skb->data;
1838 struct hci_conn *conn;
1839
1840 BT_DBG("%s status %d", hdev->name, ev->status);
1841
1842 hci_dev_lock(hdev);
1843
1844 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1845 if (conn && !ev->status) {
1846 struct inquiry_entry *ie;
1847
1848 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
1849 if (ie) {
1850 ie->data.clock_offset = ev->clock_offset;
1851 ie->timestamp = jiffies;
1852 }
1853 }
1854
1855 hci_dev_unlock(hdev);
1856 }
1857
1858 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1859 {
1860 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
1861 struct hci_conn *conn;
1862
1863 BT_DBG("%s status %d", hdev->name, ev->status);
1864
1865 hci_dev_lock(hdev);
1866
1867 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1868 if (conn && !ev->status)
1869 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
1870
1871 hci_dev_unlock(hdev);
1872 }
1873
1874 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
1875 {
1876 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
1877 struct inquiry_entry *ie;
1878
1879 BT_DBG("%s", hdev->name);
1880
1881 hci_dev_lock(hdev);
1882
1883 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1884 if (ie) {
1885 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
1886 ie->timestamp = jiffies;
1887 }
1888
1889 hci_dev_unlock(hdev);
1890 }
1891
1892 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
1893 {
1894 struct inquiry_data data;
1895 int num_rsp = *((__u8 *) skb->data);
1896
1897 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1898
1899 if (!num_rsp)
1900 return;
1901
1902 hci_dev_lock(hdev);
1903
1904 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
1905 struct inquiry_info_with_rssi_and_pscan_mode *info = (void *) (skb->data + 1);
1906
1907 for (; num_rsp; num_rsp--) {
1908 bacpy(&data.bdaddr, &info->bdaddr);
1909 data.pscan_rep_mode = info->pscan_rep_mode;
1910 data.pscan_period_mode = info->pscan_period_mode;
1911 data.pscan_mode = info->pscan_mode;
1912 memcpy(data.dev_class, info->dev_class, 3);
1913 data.clock_offset = info->clock_offset;
1914 data.rssi = info->rssi;
1915 data.ssp_mode = 0x00;
1916 info++;
1917 hci_inquiry_cache_update(hdev, &data);
1918 }
1919 } else {
1920 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
1921
1922 for (; num_rsp; num_rsp--) {
1923 bacpy(&data.bdaddr, &info->bdaddr);
1924 data.pscan_rep_mode = info->pscan_rep_mode;
1925 data.pscan_period_mode = info->pscan_period_mode;
1926 data.pscan_mode = 0x00;
1927 memcpy(data.dev_class, info->dev_class, 3);
1928 data.clock_offset = info->clock_offset;
1929 data.rssi = info->rssi;
1930 data.ssp_mode = 0x00;
1931 info++;
1932 hci_inquiry_cache_update(hdev, &data);
1933 }
1934 }
1935
1936 hci_dev_unlock(hdev);
1937 }
1938
1939 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
1940 {
1941 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
1942 struct hci_conn *conn;
1943
1944 BT_DBG("%s", hdev->name);
1945
1946 hci_dev_lock(hdev);
1947
1948 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1949 if (!conn)
1950 goto unlock;
1951
1952 if (!ev->status && ev->page == 0x01) {
1953 struct inquiry_entry *ie;
1954
1955 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
1956 if (ie)
1957 ie->data.ssp_mode = (ev->features[0] & 0x01);
1958
1959 conn->ssp_mode = (ev->features[0] & 0x01);
1960 }
1961
1962 if (conn->state != BT_CONFIG)
1963 goto unlock;
1964
1965 if (!ev->status) {
1966 struct hci_cp_remote_name_req cp;
1967 memset(&cp, 0, sizeof(cp));
1968 bacpy(&cp.bdaddr, &conn->dst);
1969 cp.pscan_rep_mode = 0x02;
1970 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1971 }
1972
1973 if (!hci_outgoing_auth_needed(hdev, conn)) {
1974 conn->state = BT_CONNECTED;
1975 hci_proto_connect_cfm(conn, ev->status);
1976 hci_conn_put(conn);
1977 }
1978
1979 unlock:
1980 hci_dev_unlock(hdev);
1981 }
1982
1983 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1984 {
1985 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
1986 struct hci_conn *conn;
1987
1988 BT_DBG("%s status %d", hdev->name, ev->status);
1989
1990 hci_dev_lock(hdev);
1991
1992 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1993 if (!conn) {
1994 if (ev->link_type == ESCO_LINK)
1995 goto unlock;
1996
1997 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1998 if (!conn)
1999 goto unlock;
2000
2001 conn->type = SCO_LINK;
2002 }
2003
2004 switch (ev->status) {
2005 case 0x00:
2006 conn->handle = __le16_to_cpu(ev->handle);
2007 conn->state = BT_CONNECTED;
2008
2009 hci_conn_hold_device(conn);
2010 hci_conn_add_sysfs(conn);
2011 break;
2012
2013 case 0x11: /* Unsupported Feature or Parameter Value */
2014 case 0x1c: /* SCO interval rejected */
2015 case 0x1a: /* Unsupported Remote Feature */
2016 case 0x1f: /* Unspecified error */
2017 if (conn->out && conn->attempt < 2) {
2018 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2019 (hdev->esco_type & EDR_ESCO_MASK);
2020 hci_setup_sync(conn, conn->link->handle);
2021 goto unlock;
2022 }
2023 /* fall through */
2024
2025 default:
2026 conn->state = BT_CLOSED;
2027 break;
2028 }
2029
2030 hci_proto_connect_cfm(conn, ev->status);
2031 if (ev->status)
2032 hci_conn_del(conn);
2033
2034 unlock:
2035 hci_dev_unlock(hdev);
2036 }
2037
2038 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2039 {
2040 BT_DBG("%s", hdev->name);
2041 }
2042
2043 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2044 {
2045 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2046 struct hci_conn *conn;
2047
2048 BT_DBG("%s status %d", hdev->name, ev->status);
2049
2050 hci_dev_lock(hdev);
2051
2052 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2053 if (conn) {
2054 }
2055
2056 hci_dev_unlock(hdev);
2057 }
2058
2059 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2060 {
2061 struct inquiry_data data;
2062 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2063 int num_rsp = *((__u8 *) skb->data);
2064
2065 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2066
2067 if (!num_rsp)
2068 return;
2069
2070 hci_dev_lock(hdev);
2071
2072 for (; num_rsp; num_rsp--) {
2073 bacpy(&data.bdaddr, &info->bdaddr);
2074 data.pscan_rep_mode = info->pscan_rep_mode;
2075 data.pscan_period_mode = info->pscan_period_mode;
2076 data.pscan_mode = 0x00;
2077 memcpy(data.dev_class, info->dev_class, 3);
2078 data.clock_offset = info->clock_offset;
2079 data.rssi = info->rssi;
2080 data.ssp_mode = 0x01;
2081 info++;
2082 hci_inquiry_cache_update(hdev, &data);
2083 }
2084
2085 hci_dev_unlock(hdev);
2086 }
2087
2088 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2089 {
2090 struct hci_ev_io_capa_request *ev = (void *) skb->data;
2091 struct hci_conn *conn;
2092
2093 BT_DBG("%s", hdev->name);
2094
2095 hci_dev_lock(hdev);
2096
2097 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2098 if (!conn)
2099 goto unlock;
2100
2101 hci_conn_hold(conn);
2102
2103 if (!test_bit(HCI_MGMT, &hdev->flags))
2104 goto unlock;
2105
2106 if (test_bit(HCI_PAIRABLE, &hdev->flags) ||
2107 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
2108 /* FIXME: Do IO capa response based on information
2109 * provided through the management interface */
2110 } else {
2111 struct hci_cp_io_capability_neg_reply cp;
2112
2113 bacpy(&cp.bdaddr, &ev->bdaddr);
2114 cp.reason = 0x16; /* Pairing not allowed */
2115
2116 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
2117 sizeof(cp), &cp);
2118 }
2119
2120 unlock:
2121 hci_dev_unlock(hdev);
2122 }
2123
2124 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
2125 {
2126 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
2127 struct hci_conn *conn;
2128
2129 BT_DBG("%s", hdev->name);
2130
2131 hci_dev_lock(hdev);
2132
2133 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2134 if (!conn)
2135 goto unlock;
2136
2137 hci_conn_hold(conn);
2138
2139 conn->remote_cap = ev->capability;
2140 conn->remote_oob = ev->oob_data;
2141 conn->remote_auth = ev->authentication;
2142
2143 unlock:
2144 hci_dev_unlock(hdev);
2145 }
2146
2147 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2148 {
2149 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
2150 struct hci_conn *conn;
2151
2152 BT_DBG("%s", hdev->name);
2153
2154 hci_dev_lock(hdev);
2155
2156 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2157 if (conn)
2158 hci_conn_put(conn);
2159
2160 hci_dev_unlock(hdev);
2161 }
2162
2163 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2164 {
2165 struct hci_ev_remote_host_features *ev = (void *) skb->data;
2166 struct inquiry_entry *ie;
2167
2168 BT_DBG("%s", hdev->name);
2169
2170 hci_dev_lock(hdev);
2171
2172 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2173 if (ie)
2174 ie->data.ssp_mode = (ev->features[0] & 0x01);
2175
2176 hci_dev_unlock(hdev);
2177 }
2178
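/* Main HCI event demultiplexer: called for every received HCI event
 * packet, it strips the event header and dispatches to the handlers
 * above; unknown events are only logged. */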
2179 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
2180 {
2181 struct hci_event_hdr *hdr = (void *) skb->data;
2182 __u8 event = hdr->evt;
2183
2184 skb_pull(skb, HCI_EVENT_HDR_SIZE);
2185
2186 switch (event) {
2187 case HCI_EV_INQUIRY_COMPLETE:
2188 hci_inquiry_complete_evt(hdev, skb);
2189 break;
2190
2191 case HCI_EV_INQUIRY_RESULT:
2192 hci_inquiry_result_evt(hdev, skb);
2193 break;
2194
2195 case HCI_EV_CONN_COMPLETE:
2196 hci_conn_complete_evt(hdev, skb);
2197 break;
2198
2199 case HCI_EV_CONN_REQUEST:
2200 hci_conn_request_evt(hdev, skb);
2201 break;
2202
2203 case HCI_EV_DISCONN_COMPLETE:
2204 hci_disconn_complete_evt(hdev, skb);
2205 break;
2206
2207 case HCI_EV_AUTH_COMPLETE:
2208 hci_auth_complete_evt(hdev, skb);
2209 break;
2210
2211 case HCI_EV_REMOTE_NAME:
2212 hci_remote_name_evt(hdev, skb);
2213 break;
2214
2215 case HCI_EV_ENCRYPT_CHANGE:
2216 hci_encrypt_change_evt(hdev, skb);
2217 break;
2218
2219 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
2220 hci_change_link_key_complete_evt(hdev, skb);
2221 break;
2222
2223 case HCI_EV_REMOTE_FEATURES:
2224 hci_remote_features_evt(hdev, skb);
2225 break;
2226
2227 case HCI_EV_REMOTE_VERSION:
2228 hci_remote_version_evt(hdev, skb);
2229 break;
2230
2231 case HCI_EV_QOS_SETUP_COMPLETE:
2232 hci_qos_setup_complete_evt(hdev, skb);
2233 break;
2234
2235 case HCI_EV_CMD_COMPLETE:
2236 hci_cmd_complete_evt(hdev, skb);
2237 break;
2238
2239 case HCI_EV_CMD_STATUS:
2240 hci_cmd_status_evt(hdev, skb);
2241 break;
2242
2243 case HCI_EV_ROLE_CHANGE:
2244 hci_role_change_evt(hdev, skb);
2245 break;
2246
2247 case HCI_EV_NUM_COMP_PKTS:
2248 hci_num_comp_pkts_evt(hdev, skb);
2249 break;
2250
2251 case HCI_EV_MODE_CHANGE:
2252 hci_mode_change_evt(hdev, skb);
2253 break;
2254
2255 case HCI_EV_PIN_CODE_REQ:
2256 hci_pin_code_request_evt(hdev, skb);
2257 break;
2258
2259 case HCI_EV_LINK_KEY_REQ:
2260 hci_link_key_request_evt(hdev, skb);
2261 break;
2262
2263 case HCI_EV_LINK_KEY_NOTIFY:
2264 hci_link_key_notify_evt(hdev, skb);
2265 break;
2266
2267 case HCI_EV_CLOCK_OFFSET:
2268 hci_clock_offset_evt(hdev, skb);
2269 break;
2270
2271 case HCI_EV_PKT_TYPE_CHANGE:
2272 hci_pkt_type_change_evt(hdev, skb);
2273 break;
2274
2275 case HCI_EV_PSCAN_REP_MODE:
2276 hci_pscan_rep_mode_evt(hdev, skb);
2277 break;
2278
2279 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
2280 hci_inquiry_result_with_rssi_evt(hdev, skb);
2281 break;
2282
2283 case HCI_EV_REMOTE_EXT_FEATURES:
2284 hci_remote_ext_features_evt(hdev, skb);
2285 break;
2286
2287 case HCI_EV_SYNC_CONN_COMPLETE:
2288 hci_sync_conn_complete_evt(hdev, skb);
2289 break;
2290
2291 case HCI_EV_SYNC_CONN_CHANGED:
2292 hci_sync_conn_changed_evt(hdev, skb);
2293 break;
2294
2295 case HCI_EV_SNIFF_SUBRATE:
2296 hci_sniff_subrate_evt(hdev, skb);
2297 break;
2298
2299 case HCI_EV_EXTENDED_INQUIRY_RESULT:
2300 hci_extended_inquiry_result_evt(hdev, skb);
2301 break;
2302
2303 case HCI_EV_IO_CAPA_REQUEST:
2304 hci_io_capa_request_evt(hdev, skb);
2305 break;
2306
2307 case HCI_EV_IO_CAPA_REPLY:
2308 hci_io_capa_reply_evt(hdev, skb);
2309 break;
2310
2311 case HCI_EV_SIMPLE_PAIR_COMPLETE:
2312 hci_simple_pair_complete_evt(hdev, skb);
2313 break;
2314
2315 case HCI_EV_REMOTE_HOST_FEATURES:
2316 hci_remote_host_features_evt(hdev, skb);
2317 break;
2318
2319 default:
2320 BT_DBG("%s event 0x%x", hdev->name, event);
2321 break;
2322 }
2323
2324 kfree_skb(skb);
2325 hdev->stat.evt_rx++;
2326 }
2327
2328 /* Generate internal stack event */
2329 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
2330 {
2331 struct hci_event_hdr *hdr;
2332 struct hci_ev_stack_internal *ev;
2333 struct sk_buff *skb;
2334
2335 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
2336 if (!skb)
2337 return;
2338
2339 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
2340 hdr->evt = HCI_EV_STACK_INTERNAL;
2341 hdr->plen = sizeof(*ev) + dlen;
2342
2343 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
2344 ev->type = type;
2345 memcpy(ev->data, data, dlen);
2346
2347 bt_cb(skb)->incoming = 1;
2348 __net_timestamp(skb);
2349
2350 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
2351 skb->dev = (void *) hdev;
2352 hci_send_to_sock(hdev, skb, NULL);
2353 kfree_skb(skb);
2354 }