97152d9d7116b4f38c8874708cf3012cec853e36
[deliverable/linux.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <linux/notifier.h>
39 #include <net/sock.h>
40
41 #include <asm/system.h>
42 #include <linux/uaccess.h>
43 #include <asm/unaligned.h>
44
45 #include <net/bluetooth/bluetooth.h>
46 #include <net/bluetooth/hci_core.h>
47
48 static bool enable_le;
49
50 /* Handle HCI Event packets */
51
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * On controller error the failure is reported to the management
 * interface; on success the inquiry flag is cleared and discovery is
 * marked stopped. Pending connection attempts that were blocked by the
 * inquiry are re-checked last.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status) {
		/* mgmt callbacks must run under the device lock */
		hci_dev_lock(hdev);
		mgmt_stop_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	clear_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);

	/* An inquiry can block outgoing connection attempts */
	hci_conn_check_pending(hdev);
}
75
76 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
77 {
78 __u8 status = *((__u8 *) skb->data);
79
80 BT_DBG("%s status 0x%x", hdev->name, status);
81
82 if (status)
83 return;
84
85 hci_conn_check_pending(hdev);
86 }
87
/* Command Complete handler for Remote Name Request Cancel.
 * Intentionally a no-op beyond tracing: no state needs updating here. */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
92
/* Command Complete handler for HCI_OP_ROLE_DISCOVERY.
 * Updates the connection's master/slave link mode bit from the reported
 * role (role 0x00 means master per the HCI specification's encoding as
 * used here). */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		if (rp->role)
			conn->link_mode &= ~HCI_LM_MASTER;
		else
			conn->link_mode |= HCI_LM_MASTER;
	}

	hci_dev_unlock(hdev);
}
115
/* Command Complete handler for HCI_OP_READ_LINK_POLICY.
 * Caches the reported per-connection link policy on the matching
 * connection object. */
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}
134
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY.
 * The response only carries the handle, so the policy value that was
 * written is recovered from the sent command payload. */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* policy field sits after the 2-byte handle in the command */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
158
159 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
160 {
161 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162
163 BT_DBG("%s status 0x%x", hdev->name, rp->status);
164
165 if (rp->status)
166 return;
167
168 hdev->link_policy = __le16_to_cpu(rp->policy);
169 }
170
/* Command Complete handler for HCI_OP_WRITE_DEF_LINK_POLICY.
 * On success the value from the sent command is cached; the pending
 * request is completed in either case. */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	if (!status)
		hdev->link_policy = get_unaligned_le16(sent);

	hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
}
187
/* Command Complete handler for HCI_OP_RESET.
 * Clears the in-progress reset flag, completes the request, and drops
 * all device flags except the ones that must survive a controller
 * reset (mgmt/setup/auto-off and key-related flags). */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	hci_req_complete(hdev, HCI_OP_RESET, status);

	/* Reset all flags, except persistent ones */
	hdev->dev_flags &= BIT(HCI_MGMT) | BIT(HCI_SETUP) | BIT(HCI_AUTO_OFF) |
			   BIT(HCI_LINK_KEYS) | BIT(HCI_DEBUG_KEYS);
}
202
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 * Notifies mgmt of the outcome (even on failure) and, on success,
 * caches the name that was sent as the device name. */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);

	if (status == 0)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
224
225 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
226 {
227 struct hci_rp_read_local_name *rp = (void *) skb->data;
228
229 BT_DBG("%s status 0x%x", hdev->name, rp->status);
230
231 if (rp->status)
232 return;
233
234 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
235 }
236
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE.
 * Mirrors the parameter that was sent into the HCI_AUTH device flag on
 * success, then completes the pending request. */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
}
259
/* Command Complete handler for HCI_OP_WRITE_ENCRYPT_MODE.
 * Mirrors the sent parameter into the HCI_ENCRYPT device flag on
 * success, then completes the pending request. */
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param)
			set_bit(HCI_ENCRYPT, &hdev->flags);
		else
			clear_bit(HCI_ENCRYPT, &hdev->flags);
	}

	hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
}
282
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * Reconciles the HCI_ISCAN/HCI_PSCAN flags with the scan mode that was
 * requested and emits mgmt discoverable/connectable events only on
 * actual transitions. On failure mgmt is informed and the discoverable
 * timeout is reset. */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 param, status = *((__u8 *) skb->data);
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	/* the requested scan mode comes from the sent command payload */
	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status != 0) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* capture previous state so only real transitions are reported */
	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
		/* arm the timer that turns discoverable mode back off */
		if (hdev->discov_timeout > 0) {
			int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
			queue_delayed_work(hdev->workqueue, &hdev->discov_off,
					   to);
		}
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
	hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
}
331
332 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
333 {
334 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
335
336 BT_DBG("%s status 0x%x", hdev->name, rp->status);
337
338 if (rp->status)
339 return;
340
341 memcpy(hdev->dev_class, rp->dev_class, 3);
342
343 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
344 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
345 }
346
/* Command Complete handler for HCI_OP_WRITE_CLASS_OF_DEV.
 * On success the class of device that was sent is cached locally. */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	memcpy(hdev->dev_class, sent, 3);
}
363
/* Command Complete handler for HCI_OP_READ_VOICE_SETTING.
 * Caches the voice setting and notifies the driver only when the value
 * actually changed. */
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	/* avoid redundant driver notifications */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
386
/* Command Complete handler for HCI_OP_WRITE_VOICE_SETTING.
 * The new value is recovered from the sent command; the driver is
 * notified only when the cached value actually changed. */
static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	/* avoid redundant driver notifications */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%04x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
414
415 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
416 {
417 __u8 status = *((__u8 *) skb->data);
418
419 BT_DBG("%s status 0x%x", hdev->name, status);
420
421 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
422 }
423
424 static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
425 {
426 struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
427
428 BT_DBG("%s status 0x%x", hdev->name, rp->status);
429
430 if (rp->status)
431 return;
432
433 if (rp->mode)
434 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
435 else
436 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
437 }
438
/* Command Complete handler for HCI_OP_WRITE_SSP_MODE.
 * On success the mode that was sent is mirrored into the
 * HCI_SSP_ENABLED device flag. */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (*((u8 *) sent))
		set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	else
		clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
}
458
459 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
460 {
461 if (hdev->features[6] & LMP_EXT_INQ)
462 return 2;
463
464 if (hdev->features[3] & LMP_RSSI_INQ)
465 return 1;
466
467 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
468 hdev->lmp_subver == 0x0757)
469 return 1;
470
471 if (hdev->manufacturer == 15) {
472 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
473 return 1;
474 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
475 return 1;
476 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
477 return 1;
478 }
479
480 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
481 hdev->lmp_subver == 0x1805)
482 return 1;
483
484 return 0;
485 }
486
487 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
488 {
489 u8 mode;
490
491 mode = hci_get_inquiry_mode(hdev);
492
493 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
494 }
495
/* Build and send the HCI event mask, enabling optional events only for
 * features the controller actually supports. */
static void hci_setup_event_mask(struct hci_dev *hdev)
{
	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	events[4] |= 0x01; /* Flow Specification Complete */
	events[4] |= 0x02; /* Inquiry Result with RSSI */
	events[4] |= 0x04; /* Read Remote Extended Features Complete */
	events[5] |= 0x08; /* Synchronous Connection Complete */
	events[5] |= 0x10; /* Synchronous Connection Changed */

	if (hdev->features[3] & LMP_RSSI_INQ)
		events[4] |= 0x04; /* Inquiry Result with RSSI */

	if (hdev->features[5] & LMP_SNIFF_SUBR)
		events[5] |= 0x20; /* Sniff Subrating */

	if (hdev->features[5] & LMP_PAUSE_ENC)
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (hdev->features[6] & LMP_EXT_INQ)
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (hdev->features[6] & LMP_NO_FLUSH)
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (hdev->features[7] & LMP_LSTO)
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification */
	}

	if (hdev->features[4] & LMP_LE)
		events[7] |= 0x20; /* LE Meta-Event */

	hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
}
550
/* Configure LE host support on the controller. When the module
 * parameter enable_le is unset, LE support is explicitly written as
 * disabled (the command is sent either way). */
static void hci_set_le_support(struct hci_dev *hdev)
{
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (enable_le) {
		cp.le = 1;
		/* simultaneous LE and BR/EDR only if the LMP feature bit
		 * says the controller can do it */
		cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
	}

	hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
}
564
/* Second-stage BR/EDR controller initialization, run once the local
 * version information is known. Sends the feature-dependent setup
 * commands (event mask, SSP, inquiry mode, TX power, extended features,
 * LE host support). AMP-only controllers are skipped. */
static void hci_setup(struct hci_dev *hdev)
{
	if (hdev->dev_type != HCI_BREDR)
		return;

	hci_setup_event_mask(hdev);

	/* pre-1.2 controllers do not support Read Local Commands */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (hdev->features[6] & LMP_SIMPLE_PAIR) {
		u8 mode = 0x01;
		hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
	}

	if (hdev->features[3] & LMP_RSSI_INQ)
		hci_setup_inquiry_mode(hdev);

	if (hdev->features[7] & LMP_INQ_TX_PWR)
		hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (hdev->features[7] & LMP_EXTFEATURES) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
			     sizeof(cp), &cp);
	}

	if (hdev->features[4] & LMP_LE)
		hci_set_le_support(hdev);
}
597
/* Command Complete handler for HCI_OP_READ_LOCAL_VERSION.
 * Caches HCI/LMP version and manufacturer data, and kicks off the
 * version-dependent setup sequence during initial device bring-up. */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->hci_ver = rp->hci_ver;
	hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
	hdev->lmp_ver = rp->lmp_ver;
	hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
	hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);

	BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
	       hdev->manufacturer,
	       hdev->hci_ver, hdev->hci_rev);

	/* only continue the setup sequence while still initializing */
	if (test_bit(HCI_INIT, &hdev->flags))
		hci_setup(hdev);
}
620
621 static void hci_setup_link_policy(struct hci_dev *hdev)
622 {
623 u16 link_policy = 0;
624
625 if (hdev->features[0] & LMP_RSWITCH)
626 link_policy |= HCI_LP_RSWITCH;
627 if (hdev->features[0] & LMP_HOLD)
628 link_policy |= HCI_LP_HOLD;
629 if (hdev->features[0] & LMP_SNIFF)
630 link_policy |= HCI_LP_SNIFF;
631 if (hdev->features[1] & LMP_PARK)
632 link_policy |= HCI_LP_PARK;
633
634 link_policy = cpu_to_le16(link_policy);
635 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
636 sizeof(link_policy), &link_policy);
637 }
638
/* Command Complete handler for HCI_OP_READ_LOCAL_COMMANDS.
 * Caches the supported-commands bitmap and, during init, writes the
 * default link policy if the controller supports that command
 * (octet 5 bit 4 of the supported commands bitmap). */
static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		goto done;

	memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));

	if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
		hci_setup_link_policy(hdev);

done:
	hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
}
656
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES.
 * Caches the LMP feature page 0 and derives the supported ACL packet
 * types and (e)SCO link types from the feature bits. */
static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (hdev->features[3] & LMP_ESCO)
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

	BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
	       hdev->features[0], hdev->features[1],
	       hdev->features[2], hdev->features[3],
	       hdev->features[4], hdev->features[5],
	       hdev->features[6], hdev->features[7]);
}
711
/* Command Complete handler for HCI_OP_READ_LOCAL_EXT_FEATURES.
 * Page 0 refreshes the LMP features; page 1 carries the host features.
 * Other pages are ignored. */
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	switch (rp->page) {
	case 0:
		memcpy(hdev->features, rp->features, 8);
		break;
	case 1:
		memcpy(hdev->host_features, rp->features, 8);
		break;
	}

	hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
}
733
734 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
735 struct sk_buff *skb)
736 {
737 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
738
739 BT_DBG("%s status 0x%x", hdev->name, rp->status);
740
741 if (rp->status)
742 return;
743
744 hdev->flow_ctl_mode = rp->mode;
745
746 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
747 }
748
/* Command Complete handler for HCI_OP_READ_BUFFER_SIZE.
 * Caches the ACL/SCO MTUs and packet counts and initializes the free
 * packet counters. A quirk overrides bogus SCO values reported by some
 * controllers. */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	/* all controller buffers start out free */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
	       hdev->acl_mtu, hdev->acl_pkts,
	       hdev->sco_mtu, hdev->sco_pkts);
}
775
776 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
777 {
778 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
779
780 BT_DBG("%s status 0x%x", hdev->name, rp->status);
781
782 if (!rp->status)
783 bacpy(&hdev->bdaddr, &rp->bdaddr);
784
785 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
786 }
787
/* Command Complete handler for HCI_OP_READ_DATA_BLOCK_SIZE.
 * Caches the block-based flow control parameters and initializes the
 * free block counter. */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	/* all data blocks start out free */
	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);

	hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
}
809
810 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
811 {
812 __u8 status = *((__u8 *) skb->data);
813
814 BT_DBG("%s status 0x%x", hdev->name, status);
815
816 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
817 }
818
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO.
 * Caches all AMP controller capability fields from the response. */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

	hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
}
842
843 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
844 struct sk_buff *skb)
845 {
846 __u8 status = *((__u8 *) skb->data);
847
848 BT_DBG("%s status 0x%x", hdev->name, status);
849
850 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
851 }
852
853 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
854 {
855 __u8 status = *((__u8 *) skb->data);
856
857 BT_DBG("%s status 0x%x", hdev->name, status);
858
859 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
860 }
861
862 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
863 struct sk_buff *skb)
864 {
865 __u8 status = *((__u8 *) skb->data);
866
867 BT_DBG("%s status 0x%x", hdev->name, status);
868
869 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
870 }
871
872 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
873 struct sk_buff *skb)
874 {
875 __u8 status = *((__u8 *) skb->data);
876
877 BT_DBG("%s status 0x%x", hdev->name, status);
878
879 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
880 }
881
882 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
883 {
884 __u8 status = *((__u8 *) skb->data);
885
886 BT_DBG("%s status 0x%x", hdev->name, status);
887
888 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
889 }
890
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY.
 * Reports the outcome to mgmt and, on success, records the PIN length
 * from the sent command on the matching ACL connection. */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status != 0)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
918
/* Command Complete handler for HCI_OP_PIN_CODE_NEG_REPLY.
 * Only forwards the outcome to the management interface. */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
933
/* Command Complete handler for HCI_OP_LE_READ_BUFFER_SIZE.
 * Caches the LE ACL MTU and packet count and initializes the free LE
 * packet counter. */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	/* all LE buffers start out free */
	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);

	hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
}
953
/* Command Complete handler for User Confirmation Request Reply.
 * Only forwards the outcome to the management interface. */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
968
/* Command Complete handler for User Confirmation Request Negative
 * Reply. Only forwards the outcome to the management interface. */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     rp->status);

	hci_dev_unlock(hdev);
}
984
/* Command Complete handler for User Passkey Request Reply.
 * Only forwards the outcome to the management interface. The response
 * layout matches hci_rp_user_confirm_reply, so that type is reused. */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
999
/* Command Complete handler for User Passkey Request Negative Reply.
 * Only forwards the outcome to the management interface. The response
 * layout matches hci_rp_user_confirm_reply, so that type is reused. */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     rp->status);

	hci_dev_unlock(hdev);
}
1015
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_DATA.
 * Forwards the OOB hash and randomizer (and status) to mgmt
 * unconditionally; mgmt decides what to do on failure. */
static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
						rp->randomizer, rp->status);
	hci_dev_unlock(hdev);
}
1028
1029 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1030 {
1031 __u8 status = *((__u8 *) skb->data);
1032
1033 BT_DBG("%s status 0x%x", hdev->name, status);
1034
1035 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1036 }
1037
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * The direction (enable/disable) comes from the sent command. Enabling
 * clears stale advertising entries and moves discovery into the LE
 * scan state; disabling stops discovery and schedules delayed cleanup
 * of the advertising cache. */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCANNING_ENABLED:
		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);

		if (status)
			return;

		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* keep the advertising cache while actively scanning */
		cancel_delayed_work_sync(&hdev->adv_work);

		hci_dev_lock(hdev);
		hci_adv_entries_clear(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_LE_SCAN);
		hci_dev_unlock(hdev);
		break;

	case LE_SCANNING_DISABLED:
		if (status)
			return;

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);

		/* expire cached advertising entries after a grace period */
		schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1085
1086 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1087 {
1088 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1089
1090 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1091
1092 if (rp->status)
1093 return;
1094
1095 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1096 }
1097
1098 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1099 {
1100 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1101
1102 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1103
1104 if (rp->status)
1105 return;
1106
1107 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1108 }
1109
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 * On success, re-read extended features page 1 so the cached host
 * features reflect the new LE host setting. */
static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
						  struct sk_buff *skb)
{
	struct hci_cp_read_local_ext_features cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status)
		return;

	cp.page = 0x01;
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
}
1124
/* Command Status handler for HCI_OP_INQUIRY.
 * On failure the request is completed, pending connections are
 * re-checked and mgmt is informed. On success the inquiry flag is set
 * and discovery enters the inquiry state. */
static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);

	if (status) {
		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
		hci_conn_check_pending(hdev);
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_INQUIRY);
	hci_dev_unlock(hdev);
}
1145
/* Command Status handler for HCI_OP_CREATE_CONN.
 *
 * On failure: status 0x0c (command disallowed) is retried up to two
 * attempts by parking the connection in BT_CONNECT2; any other error
 * (or exhausted attempts) tears the connection down. On success an
 * outgoing connection object is created if one does not exist yet. */
static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1185
/* Command-status handler for HCI_OP_ADD_SCO. */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* On success the SCO Connection Complete event finishes setup;
	 * only failures need handling here. */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	/* The handle in the command is the parent ACL's handle. */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle %d", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		/* Tear down the SCO connection linked to the ACL and
		 * notify the upper protocol of the failure. */
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1220
1221 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1222 {
1223 struct hci_cp_auth_requested *cp;
1224 struct hci_conn *conn;
1225
1226 BT_DBG("%s status 0x%x", hdev->name, status);
1227
1228 if (!status)
1229 return;
1230
1231 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1232 if (!cp)
1233 return;
1234
1235 hci_dev_lock(hdev);
1236
1237 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1238 if (conn) {
1239 if (conn->state == BT_CONFIG) {
1240 hci_proto_connect_cfm(conn, status);
1241 hci_conn_put(conn);
1242 }
1243 }
1244
1245 hci_dev_unlock(hdev);
1246 }
1247
1248 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1249 {
1250 struct hci_cp_set_conn_encrypt *cp;
1251 struct hci_conn *conn;
1252
1253 BT_DBG("%s status 0x%x", hdev->name, status);
1254
1255 if (!status)
1256 return;
1257
1258 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1259 if (!cp)
1260 return;
1261
1262 hci_dev_lock(hdev);
1263
1264 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1265 if (conn) {
1266 if (conn->state == BT_CONFIG) {
1267 hci_proto_connect_cfm(conn, status);
1268 hci_conn_put(conn);
1269 }
1270 }
1271
1272 hci_dev_unlock(hdev);
1273 }
1274
1275 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1276 struct hci_conn *conn)
1277 {
1278 if (conn->state != BT_CONFIG || !conn->out)
1279 return 0;
1280
1281 if (conn->pending_sec_level == BT_SECURITY_SDP)
1282 return 0;
1283
1284 /* Only request authentication for SSP connections or non-SSP
1285 * devices with sec_level HIGH or if MITM protection is requested */
1286 if (!hci_conn_ssp_enabled(conn) &&
1287 conn->pending_sec_level != BT_SECURITY_HIGH &&
1288 !(conn->auth_type & 0x01))
1289 return 0;
1290
1291 return 1;
1292 }
1293
1294 static inline int hci_resolve_name(struct hci_dev *hdev, struct inquiry_entry *e)
1295 {
1296 struct hci_cp_remote_name_req cp;
1297
1298 memset(&cp, 0, sizeof(cp));
1299
1300 bacpy(&cp.bdaddr, &e->data.bdaddr);
1301 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1302 cp.pscan_mode = e->data.pscan_mode;
1303 cp.clock_offset = e->data.clock_offset;
1304
1305 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1306 }
1307
1308 static bool hci_resolve_next_name(struct hci_dev *hdev)
1309 {
1310 struct discovery_state *discov = &hdev->discovery;
1311 struct inquiry_entry *e;
1312
1313 if (list_empty(&discov->resolve))
1314 return false;
1315
1316 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1317 if (hci_resolve_name(hdev, e) == 0) {
1318 e->name_state = NAME_PENDING;
1319 return true;
1320 }
1321
1322 return false;
1323 }
1324
/* Process the outcome of a name request (or its failure) against the
 * discovery state machine: report the connection/name to mgmt, mark the
 * entry resolved, and either continue with the next pending name or
 * finish discovery.  name may be NULL when resolution failed. */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
						bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* First connected-notification for this link carries the name. */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00,
					name, name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* A stop was requested while resolving: finish immediately. */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	/* Mark the matching cache entry as resolved and report the
	 * name to mgmt when we actually got one. */
	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	if (e) {
		e->name_state = NAME_KNOWN;
		list_del(&e->list);
		if (name)
			mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
					e->data.rssi, name, name_len);
	}

	/* More names pending: keep the RESOLVING state alive. */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1359
/* Command-status handler for HCI_OP_REMOTE_NAME_REQ. */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Let the discovery state machine know this name failed. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Even though the name request failed, the connection may still
	 * require authentication; issue it if not already pending. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1398
1399 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1400 {
1401 struct hci_cp_read_remote_features *cp;
1402 struct hci_conn *conn;
1403
1404 BT_DBG("%s status 0x%x", hdev->name, status);
1405
1406 if (!status)
1407 return;
1408
1409 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1410 if (!cp)
1411 return;
1412
1413 hci_dev_lock(hdev);
1414
1415 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1416 if (conn) {
1417 if (conn->state == BT_CONFIG) {
1418 hci_proto_connect_cfm(conn, status);
1419 hci_conn_put(conn);
1420 }
1421 }
1422
1423 hci_dev_unlock(hdev);
1424 }
1425
1426 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1427 {
1428 struct hci_cp_read_remote_ext_features *cp;
1429 struct hci_conn *conn;
1430
1431 BT_DBG("%s status 0x%x", hdev->name, status);
1432
1433 if (!status)
1434 return;
1435
1436 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1437 if (!cp)
1438 return;
1439
1440 hci_dev_lock(hdev);
1441
1442 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1443 if (conn) {
1444 if (conn->state == BT_CONFIG) {
1445 hci_proto_connect_cfm(conn, status);
1446 hci_conn_put(conn);
1447 }
1448 }
1449
1450 hci_dev_unlock(hdev);
1451 }
1452
/* Command-status handler for HCI_OP_SETUP_SYNC_CONN (eSCO). */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%x", hdev->name, status);

	/* On success the Synchronous Connection Complete event finishes
	 * setup; only failures need handling here. */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	/* The handle in the command is the parent ACL's handle. */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle %d", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		/* Tear down the SCO/eSCO connection linked to the ACL
		 * and notify the upper protocol of the failure. */
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1487
1488 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1489 {
1490 struct hci_cp_sniff_mode *cp;
1491 struct hci_conn *conn;
1492
1493 BT_DBG("%s status 0x%x", hdev->name, status);
1494
1495 if (!status)
1496 return;
1497
1498 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1499 if (!cp)
1500 return;
1501
1502 hci_dev_lock(hdev);
1503
1504 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1505 if (conn) {
1506 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1507
1508 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1509 hci_sco_setup(conn, status);
1510 }
1511
1512 hci_dev_unlock(hdev);
1513 }
1514
1515 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1516 {
1517 struct hci_cp_exit_sniff_mode *cp;
1518 struct hci_conn *conn;
1519
1520 BT_DBG("%s status 0x%x", hdev->name, status);
1521
1522 if (!status)
1523 return;
1524
1525 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1526 if (!cp)
1527 return;
1528
1529 hci_dev_lock(hdev);
1530
1531 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1532 if (conn) {
1533 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1534
1535 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1536 hci_sco_setup(conn, status);
1537 }
1538
1539 hci_dev_unlock(hdev);
1540 }
1541
/* Command-status handler for HCI_OP_LE_CREATE_CONN. */
static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);

	BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
		conn);

	if (status) {
		/* Connect attempt failed; tear down the pending
		 * connection object and notify the protocol layer. */
		if (conn && conn->state == BT_CONNECT) {
			conn->state = BT_CLOSED;
			hci_proto_connect_cfm(conn, status);
			hci_conn_del(conn);
		}
	} else {
		/* Command accepted; make sure a connection object exists
		 * so the LE Connection Complete event finds it. */
		if (!conn) {
			conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
			if (conn) {
				conn->dst_type = cp->peer_addr_type;
				conn->out = true;
			} else {
				BT_ERR("No memory for new connection");
			}
		}
	}

	hci_dev_unlock(hdev);
}
1580
/* Command-status handler for HCI_OP_LE_START_ENC.  The actual result
 * arrives via the Encryption Change event, so only log here. */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%x", hdev->name, status);
}
1585
/* Handle the Inquiry Complete event: finish the inquiry request and,
 * when mgmt drives discovery, either start name resolution for cached
 * entries or declare discovery finished. */
static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status %d", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_INQUIRY, status);

	/* Connection attempts deferred during inquiry can run now. */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Discovery state below only matters for mgmt-driven scans. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_INQUIRY)
		goto unlock;

	/* Nothing to resolve: discovery is done. */
	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Kick off name resolution for the first unresolved entry;
	 * if none can be started, stop discovery instead. */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1625
/* Handle a (non-RSSI) Inquiry Result event: the packet carries a count
 * byte followed by that many inquiry_info records.  Each record updates
 * the inquiry cache and is reported to mgmt. */
static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		/* This event format carries no RSSI or SSP data. */
		data.rssi = 0x00;
		data.ssp_mode = 0x00;

		/* name_known decides whether mgmt should request name
		 * resolution for this device later on. */
		name_known = hci_inquiry_cache_update(hdev, &data, false);
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
						info->dev_class, 0, !name_known,
						NULL, 0);
	}

	hci_dev_unlock(hdev);
}
1659
/* Handle the Connection Complete event for ACL and SCO links.  The
 * ordering of state updates, follow-up commands and cfm callbacks is
 * significant and must be preserved. */
static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may correspond to an eSCO attempt
		 * that the remote downgraded; retry the lookup as ESCO
		 * and retag the connection as plain SCO. */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		/* ACL links stay in BT_CONFIG until remote features and
		 * possible authentication are sorted out. */
		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);
			conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
							sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
							sizeof(cp), &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
						conn->dst_type, ev->status);
	}

	/* Trigger any SCO setup queued behind this ACL. */
	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* Successful ACL links defer the cfm until BT_CONFIG
		 * completes; SCO links can confirm right away. */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
1737
/* Handle an incoming Connection Request event: accept (as ACL, SCO or
 * eSCO) unless the peer is blacklisted or no protocol wants the link,
 * in which case the request is rejected. */
static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;

	BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
					batostr(&ev->bdaddr), ev->link_type);

	/* Let the protocol layers veto or adjust the link policy. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);

	if ((mask & HCI_LM_ACCEPT) &&
			!hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached device class for this peer. */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);
		conn->state = BT_CONNECT;

		hci_dev_unlock(hdev);

		/* eSCO acceptance needs the extended command; fall back
		 * to the plain accept for ACL or non-eSCO controllers. */
		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
			struct hci_cp_accept_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
							sizeof(cp), &cp);
		} else {
			struct hci_cp_accept_sync_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			/* Default synchronous link parameters: 8 kB/s in
			 * each direction, no latency limit, host voice
			 * setting, any retransmission effort. */
			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.max_latency    = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
							sizeof(cp), &cp);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
1811
/* Handle the Disconnection Complete event: update state, notify mgmt
 * and drop the connection object on success. */
static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status == 0)
		conn->state = BT_CLOSED;

	/* Only links previously reported connected to mgmt get a
	 * disconnect (or disconnect-failed) notification. */
	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
			(conn->type == ACL_LINK || conn->type == LE_LINK)) {
		if (ev->status != 0)
			mgmt_disconnect_failed(hdev, &conn->dst, ev->status);
		else
			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
							conn->dst_type);
	}

	if (ev->status == 0) {
		/* Pass the HCI reason code on to the protocol layer
		 * before destroying the connection. */
		hci_proto_disconn_cfm(conn, ev->reason);
		hci_conn_del(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
1845
/* Handle the Authentication Complete event.  Interacts with the
 * AUTH/REAUTH/ENCRYPT pending flags and may chain a Set Connection
 * Encrypt command; the flag-clearing order is significant. */
static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (non-SSP) devices cannot be re-authenticated;
		 * ignore the result instead of upgrading the link. */
		if (!hci_conn_ssp_enabled(conn) &&
				test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* During setup, successful SSP authentication is
		 * followed by enabling encryption; otherwise setup is
		 * finished here (possibly with a failure status). */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
									&cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Balance the reference taken when authentication was
		 * requested and restart the disconnect timer. */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

	/* An encryption request may have been queued behind this
	 * authentication; issue or fail it now. */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
									&cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
1910
/* Handle the Remote Name Request Complete event: feed the result to
 * the discovery state machine and, for outgoing connections still in
 * setup, continue with authentication if required. */
static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	/* On failure, report a NULL name so discovery can move on. */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1949
/* Handle the Encryption Change event: update link mode bits and either
 * finish connection setup (BT_CONFIG) or notify waiters. */
static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		if (conn->state == BT_CONFIG) {
			/* Encryption was the final setup step: the link
			 * is now fully connected (or failed). */
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

	hci_dev_unlock(hdev);
}
1985
1986 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1987 {
1988 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1989 struct hci_conn *conn;
1990
1991 BT_DBG("%s status %d", hdev->name, ev->status);
1992
1993 hci_dev_lock(hdev);
1994
1995 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1996 if (conn) {
1997 if (!ev->status)
1998 conn->link_mode |= HCI_LM_SECURE;
1999
2000 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2001
2002 hci_key_change_cfm(conn, ev->status);
2003 }
2004
2005 hci_dev_unlock(hdev);
2006 }
2007
/* Handle the Remote Features event: store the feature bits and drive
 * the next step of connection setup (extended features, name request,
 * authentication, or completion). */
static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	/* The remaining steps only apply while the link is in setup. */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides support SSP, read extended features page 1
	 * before continuing; the response handler resumes setup. */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
							sizeof(cp), &cp);
		goto unlock;
	}

	/* Otherwise fetch the remote name, or (on failure) let mgmt
	 * know the device connected without one. */
	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
						conn->dst_type, NULL, 0,
						conn->dev_class);

	/* No authentication needed: setup is complete. */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2056
/* Remote Version Information event: currently ignored beyond logging. */
static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2061
/* QoS Setup Complete event: currently ignored beyond logging. */
static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2066
2067 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2068 {
2069 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2070 __u16 opcode;
2071
2072 skb_pull(skb, sizeof(*ev));
2073
2074 opcode = __le16_to_cpu(ev->opcode);
2075
2076 switch (opcode) {
2077 case HCI_OP_INQUIRY_CANCEL:
2078 hci_cc_inquiry_cancel(hdev, skb);
2079 break;
2080
2081 case HCI_OP_EXIT_PERIODIC_INQ:
2082 hci_cc_exit_periodic_inq(hdev, skb);
2083 break;
2084
2085 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2086 hci_cc_remote_name_req_cancel(hdev, skb);
2087 break;
2088
2089 case HCI_OP_ROLE_DISCOVERY:
2090 hci_cc_role_discovery(hdev, skb);
2091 break;
2092
2093 case HCI_OP_READ_LINK_POLICY:
2094 hci_cc_read_link_policy(hdev, skb);
2095 break;
2096
2097 case HCI_OP_WRITE_LINK_POLICY:
2098 hci_cc_write_link_policy(hdev, skb);
2099 break;
2100
2101 case HCI_OP_READ_DEF_LINK_POLICY:
2102 hci_cc_read_def_link_policy(hdev, skb);
2103 break;
2104
2105 case HCI_OP_WRITE_DEF_LINK_POLICY:
2106 hci_cc_write_def_link_policy(hdev, skb);
2107 break;
2108
2109 case HCI_OP_RESET:
2110 hci_cc_reset(hdev, skb);
2111 break;
2112
2113 case HCI_OP_WRITE_LOCAL_NAME:
2114 hci_cc_write_local_name(hdev, skb);
2115 break;
2116
2117 case HCI_OP_READ_LOCAL_NAME:
2118 hci_cc_read_local_name(hdev, skb);
2119 break;
2120
2121 case HCI_OP_WRITE_AUTH_ENABLE:
2122 hci_cc_write_auth_enable(hdev, skb);
2123 break;
2124
2125 case HCI_OP_WRITE_ENCRYPT_MODE:
2126 hci_cc_write_encrypt_mode(hdev, skb);
2127 break;
2128
2129 case HCI_OP_WRITE_SCAN_ENABLE:
2130 hci_cc_write_scan_enable(hdev, skb);
2131 break;
2132
2133 case HCI_OP_READ_CLASS_OF_DEV:
2134 hci_cc_read_class_of_dev(hdev, skb);
2135 break;
2136
2137 case HCI_OP_WRITE_CLASS_OF_DEV:
2138 hci_cc_write_class_of_dev(hdev, skb);
2139 break;
2140
2141 case HCI_OP_READ_VOICE_SETTING:
2142 hci_cc_read_voice_setting(hdev, skb);
2143 break;
2144
2145 case HCI_OP_WRITE_VOICE_SETTING:
2146 hci_cc_write_voice_setting(hdev, skb);
2147 break;
2148
2149 case HCI_OP_HOST_BUFFER_SIZE:
2150 hci_cc_host_buffer_size(hdev, skb);
2151 break;
2152
2153 case HCI_OP_READ_SSP_MODE:
2154 hci_cc_read_ssp_mode(hdev, skb);
2155 break;
2156
2157 case HCI_OP_WRITE_SSP_MODE:
2158 hci_cc_write_ssp_mode(hdev, skb);
2159 break;
2160
2161 case HCI_OP_READ_LOCAL_VERSION:
2162 hci_cc_read_local_version(hdev, skb);
2163 break;
2164
2165 case HCI_OP_READ_LOCAL_COMMANDS:
2166 hci_cc_read_local_commands(hdev, skb);
2167 break;
2168
2169 case HCI_OP_READ_LOCAL_FEATURES:
2170 hci_cc_read_local_features(hdev, skb);
2171 break;
2172
2173 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2174 hci_cc_read_local_ext_features(hdev, skb);
2175 break;
2176
2177 case HCI_OP_READ_BUFFER_SIZE:
2178 hci_cc_read_buffer_size(hdev, skb);
2179 break;
2180
2181 case HCI_OP_READ_BD_ADDR:
2182 hci_cc_read_bd_addr(hdev, skb);
2183 break;
2184
2185 case HCI_OP_READ_DATA_BLOCK_SIZE:
2186 hci_cc_read_data_block_size(hdev, skb);
2187 break;
2188
2189 case HCI_OP_WRITE_CA_TIMEOUT:
2190 hci_cc_write_ca_timeout(hdev, skb);
2191 break;
2192
2193 case HCI_OP_READ_FLOW_CONTROL_MODE:
2194 hci_cc_read_flow_control_mode(hdev, skb);
2195 break;
2196
2197 case HCI_OP_READ_LOCAL_AMP_INFO:
2198 hci_cc_read_local_amp_info(hdev, skb);
2199 break;
2200
2201 case HCI_OP_DELETE_STORED_LINK_KEY:
2202 hci_cc_delete_stored_link_key(hdev, skb);
2203 break;
2204
2205 case HCI_OP_SET_EVENT_MASK:
2206 hci_cc_set_event_mask(hdev, skb);
2207 break;
2208
2209 case HCI_OP_WRITE_INQUIRY_MODE:
2210 hci_cc_write_inquiry_mode(hdev, skb);
2211 break;
2212
2213 case HCI_OP_READ_INQ_RSP_TX_POWER:
2214 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2215 break;
2216
2217 case HCI_OP_SET_EVENT_FLT:
2218 hci_cc_set_event_flt(hdev, skb);
2219 break;
2220
2221 case HCI_OP_PIN_CODE_REPLY:
2222 hci_cc_pin_code_reply(hdev, skb);
2223 break;
2224
2225 case HCI_OP_PIN_CODE_NEG_REPLY:
2226 hci_cc_pin_code_neg_reply(hdev, skb);
2227 break;
2228
2229 case HCI_OP_READ_LOCAL_OOB_DATA:
2230 hci_cc_read_local_oob_data_reply(hdev, skb);
2231 break;
2232
2233 case HCI_OP_LE_READ_BUFFER_SIZE:
2234 hci_cc_le_read_buffer_size(hdev, skb);
2235 break;
2236
2237 case HCI_OP_USER_CONFIRM_REPLY:
2238 hci_cc_user_confirm_reply(hdev, skb);
2239 break;
2240
2241 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2242 hci_cc_user_confirm_neg_reply(hdev, skb);
2243 break;
2244
2245 case HCI_OP_USER_PASSKEY_REPLY:
2246 hci_cc_user_passkey_reply(hdev, skb);
2247 break;
2248
2249 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2250 hci_cc_user_passkey_neg_reply(hdev, skb);
2251
2252 case HCI_OP_LE_SET_SCAN_PARAM:
2253 hci_cc_le_set_scan_param(hdev, skb);
2254 break;
2255
2256 case HCI_OP_LE_SET_SCAN_ENABLE:
2257 hci_cc_le_set_scan_enable(hdev, skb);
2258 break;
2259
2260 case HCI_OP_LE_LTK_REPLY:
2261 hci_cc_le_ltk_reply(hdev, skb);
2262 break;
2263
2264 case HCI_OP_LE_LTK_NEG_REPLY:
2265 hci_cc_le_ltk_neg_reply(hdev, skb);
2266 break;
2267
2268 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2269 hci_cc_write_le_host_supported(hdev, skb);
2270 break;
2271
2272 default:
2273 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2274 break;
2275 }
2276
2277 if (ev->opcode != HCI_OP_NOP)
2278 del_timer(&hdev->cmd_timer);
2279
2280 if (ev->ncmd) {
2281 atomic_set(&hdev->cmd_cnt, 1);
2282 if (!skb_queue_empty(&hdev->cmd_q))
2283 queue_work(hdev->workqueue, &hdev->cmd_work);
2284 }
2285 }
2286
/* Dispatch a Command Status event to the per-opcode handler, then
 * re-arm command-queue processing (unless a reset is in flight). */
static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	/* Strip the event header so handlers see only the payload. */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		/* Only a failed disconnect needs mgmt notification; the
		 * success path is handled by Disconnection Complete. */
		if (ev->status != 0)
			mgmt_disconnect_failed(hdev, NULL, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%x", hdev->name, opcode);
		break;
	}

	/* Any real command status cancels the command timeout. */
	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Re-arm the command queue unless a reset is in progress. */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2368
2369 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2370 {
2371 struct hci_ev_role_change *ev = (void *) skb->data;
2372 struct hci_conn *conn;
2373
2374 BT_DBG("%s status %d", hdev->name, ev->status);
2375
2376 hci_dev_lock(hdev);
2377
2378 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2379 if (conn) {
2380 if (!ev->status) {
2381 if (ev->role)
2382 conn->link_mode &= ~HCI_LM_MASTER;
2383 else
2384 conn->link_mode |= HCI_LM_MASTER;
2385 }
2386
2387 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2388
2389 hci_role_switch_cfm(conn, ev->status, ev->role);
2390 }
2391
2392 hci_dev_unlock(hdev);
2393 }
2394
/* Handle the Number of Completed Packets event (packet-based flow
 * control): credit the per-link-type transmit counters and kick the
 * TX work so queued data can flow again. */
static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only valid for packet-based flow control;
	 * block-based controllers use Number of Completed Blocks. */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the buffer holds the header plus num_hndl
	 * handle/count records before touching them. */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
			ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return the credits to the matching pool, clamped to
		 * the controller's advertised buffer count. */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without dedicated LE buffers share
			 * the ACL pool (le_pkts == 0). */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2460
/* Number of Completed Data Blocks event: block-based flow control
 * counterpart of Number of Completed Packets. Returns freed buffer
 * blocks to the shared pool and kicks the TX work. */
static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* Only valid when the controller uses block-based flow control */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the skb really carries num_hndl entries */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
			ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
								ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
			/* Return blocks to the pool, clamped to the total */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2510
2511 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2512 {
2513 struct hci_ev_mode_change *ev = (void *) skb->data;
2514 struct hci_conn *conn;
2515
2516 BT_DBG("%s status %d", hdev->name, ev->status);
2517
2518 hci_dev_lock(hdev);
2519
2520 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2521 if (conn) {
2522 conn->mode = ev->mode;
2523 conn->interval = __le16_to_cpu(ev->interval);
2524
2525 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2526 if (conn->mode == HCI_CM_ACTIVE)
2527 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2528 else
2529 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2530 }
2531
2532 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2533 hci_sco_setup(conn, ev->status);
2534 }
2535
2536 hci_dev_unlock(hdev);
2537 }
2538
/* PIN Code Request event: legacy pairing needs a PIN. Reject
 * automatically when the adapter is not pairable; otherwise forward the
 * request to user space via mgmt. */
static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* Extend the disconnect timeout while pairing is ongoing */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_put(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* Tell mgmt whether a secure PIN is required for the
		 * pending security level */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
2575
2576 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2577 {
2578 struct hci_ev_link_key_req *ev = (void *) skb->data;
2579 struct hci_cp_link_key_reply cp;
2580 struct hci_conn *conn;
2581 struct link_key *key;
2582
2583 BT_DBG("%s", hdev->name);
2584
2585 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2586 return;
2587
2588 hci_dev_lock(hdev);
2589
2590 key = hci_find_link_key(hdev, &ev->bdaddr);
2591 if (!key) {
2592 BT_DBG("%s link key not found for %s", hdev->name,
2593 batostr(&ev->bdaddr));
2594 goto not_found;
2595 }
2596
2597 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2598 batostr(&ev->bdaddr));
2599
2600 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2601 key->type == HCI_LK_DEBUG_COMBINATION) {
2602 BT_DBG("%s ignoring debug key", hdev->name);
2603 goto not_found;
2604 }
2605
2606 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2607 if (conn) {
2608 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2609 conn->auth_type != 0xff &&
2610 (conn->auth_type & 0x01)) {
2611 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2612 goto not_found;
2613 }
2614
2615 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2616 conn->pending_sec_level == BT_SECURITY_HIGH) {
2617 BT_DBG("%s ignoring key unauthenticated for high \
2618 security", hdev->name);
2619 goto not_found;
2620 }
2621
2622 conn->key_type = key->type;
2623 conn->pin_length = key->pin_len;
2624 }
2625
2626 bacpy(&cp.bdaddr, &ev->bdaddr);
2627 memcpy(cp.link_key, key->val, 16);
2628
2629 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2630
2631 hci_dev_unlock(hdev);
2632
2633 return;
2634
2635 not_found:
2636 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2637 hci_dev_unlock(hdev);
2638 }
2639
/* Link Key Notification event: pairing produced a new link key.
 * Persist it (when key storage is enabled) and record the key type on
 * the connection. */
static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed-combination key keeps the original key type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_put(conn);
	}

	/* hci_add_link_key() tolerates a NULL conn */
	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
							ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
2668
2669 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2670 {
2671 struct hci_ev_clock_offset *ev = (void *) skb->data;
2672 struct hci_conn *conn;
2673
2674 BT_DBG("%s status %d", hdev->name, ev->status);
2675
2676 hci_dev_lock(hdev);
2677
2678 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2679 if (conn && !ev->status) {
2680 struct inquiry_entry *ie;
2681
2682 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2683 if (ie) {
2684 ie->data.clock_offset = ev->clock_offset;
2685 ie->timestamp = jiffies;
2686 }
2687 }
2688
2689 hci_dev_unlock(hdev);
2690 }
2691
2692 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2693 {
2694 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2695 struct hci_conn *conn;
2696
2697 BT_DBG("%s status %d", hdev->name, ev->status);
2698
2699 hci_dev_lock(hdev);
2700
2701 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2702 if (conn && !ev->status)
2703 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2704
2705 hci_dev_unlock(hdev);
2706 }
2707
2708 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2709 {
2710 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2711 struct inquiry_entry *ie;
2712
2713 BT_DBG("%s", hdev->name);
2714
2715 hci_dev_lock(hdev);
2716
2717 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2718 if (ie) {
2719 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2720 ie->timestamp = jiffies;
2721 }
2722
2723 hci_dev_unlock(hdev);
2724 }
2725
/* Inquiry Result with RSSI event. Two wire formats exist: one with and
 * one without a pscan_mode field; the per-element size check below
 * selects the right layout. Each response is added to the inquiry
 * cache and reported via mgmt. */
static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	/* Element size mismatch means the variant that also carries
	 * pscan_mode */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
								false);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
						info->dev_class, info->rssi,
						!name_known, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
								false);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
						info->dev_class, info->rssi,
						!name_known, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
2781
/* Remote Extended Features event. Page 0x01 bit 0 indicates SSP
 * support; cache it. While the connection is still in BT_CONFIG,
 * continue setup with a remote name request (or report the connection
 * to mgmt on failure) and finish if no authentication is needed. */
static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		/* Mirror the SSP bit into the inquiry cache entry */
		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & 0x01);

		if (ev->features[0] & 0x01)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
						conn->dst_type, NULL, 0,
						conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2829
/* Synchronous (SCO/eSCO) Connection Complete event. An eSCO attempt
 * that fails with a parameter-related error is retried once with a
 * reduced packet type before the connection is torn down. */
static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* The controller may have downgraded an eSCO request to
		 * SCO; look the connection up under ESCO and retype it */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		/* Retry once with a less demanding packet type */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
2884
/* Synchronous Connection Changed event: only logged; no host-side
 * state is kept for changed SCO/eSCO parameters. */
static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2889
/* Sniff Subrating event: only the status is logged; the subrating
 * parameters are not tracked by the host. */
static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sniff_subrate *ev = (void *) skb->data;

	BT_DBG("%s status %d", hdev->name, ev->status);
}
2896
/* Extended Inquiry Result event: like the RSSI variant but each
 * response also carries EIR data, which may already include the remote
 * name (so no separate name request is needed). */
static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;

		/* The EIR data may already contain the complete name */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
							sizeof(info->data),
							EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known);
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					info->dev_class, info->rssi,
					!name_known, info->data,
					sizeof(info->data));
	}

	hci_dev_unlock(hdev);
}
2938
2939 static inline u8 hci_get_auth_req(struct hci_conn *conn)
2940 {
2941 /* If remote requests dedicated bonding follow that lead */
2942 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2943 /* If both remote and local IO capabilities allow MITM
2944 * protection then require it, otherwise don't */
2945 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2946 return 0x02;
2947 else
2948 return 0x03;
2949 }
2950
2951 /* If remote requests no-bonding follow that lead */
2952 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2953 return conn->remote_auth | (conn->auth_type & 0x01);
2954
2955 return conn->auth_type;
2956 }
2957
/* IO Capability Request event: the controller needs our IO capability
 * for SSP pairing. Reply with our capability and derived auth
 * requirements when pairing is allowed, otherwise send a negative
 * reply. */
static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Accept when pairable, or when the remote only asks for
	 * no-bonding */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
			(conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
						0x01 : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
				hci_find_remote_oob_data(hdev, &conn->dst))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
							sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
							sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3009
3010 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3011 {
3012 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3013 struct hci_conn *conn;
3014
3015 BT_DBG("%s", hdev->name);
3016
3017 hci_dev_lock(hdev);
3018
3019 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3020 if (!conn)
3021 goto unlock;
3022
3023 conn->remote_cap = ev->capability;
3024 conn->remote_auth = ev->authentication;
3025 if (ev->oob_data)
3026 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3027
3028 unlock:
3029 hci_dev_unlock(hdev);
3030 }
3031
/* User Confirmation Request event (SSP numeric comparison). Rejects
 * when MITM is required but the remote cannot provide it; auto-accepts
 * (optionally after a configurable delay) when neither side needs
 * MITM; otherwise forwards the request to user space via mgmt. */
static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
					sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
				(!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
						hdev->auto_accept_delay);

		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
						sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ev->passkey,
							confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3099
3100 static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3101 struct sk_buff *skb)
3102 {
3103 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3104
3105 BT_DBG("%s", hdev->name);
3106
3107 hci_dev_lock(hdev);
3108
3109 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3110 mgmt_user_passkey_request(hdev, &ev->bdaddr);
3111
3112 hci_dev_unlock(hdev);
3113 }
3114
/* Simple Pairing Complete event: report pairing failures to mgmt,
 * avoiding duplicates with the Auth Complete path. */
static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
		mgmt_auth_failed(hdev, &conn->dst, ev->status);

	/* Drop the reference taken when pairing started */
	hci_conn_put(conn);

unlock:
	hci_dev_unlock(hdev);
}
3141
3142 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3143 {
3144 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3145 struct inquiry_entry *ie;
3146
3147 BT_DBG("%s", hdev->name);
3148
3149 hci_dev_lock(hdev);
3150
3151 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3152 if (ie)
3153 ie->data.ssp_mode = (ev->features[0] & 0x01);
3154
3155 hci_dev_unlock(hdev);
3156 }
3157
/* Remote OOB Data Request event: the controller asks for the hash and
 * randomizer obtained out-of-band for the peer. Reply with the stored
 * values if present, otherwise send a negative reply. */
static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
							struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash, sizeof(cp.hash));
		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
									&cp);
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
									&cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3192
/* LE Connection Complete event: finish setting up an LE connection,
 * creating the hci_conn on the fly for incoming connections, and
 * report success or failure via mgmt. */
static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status %d", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
	if (!conn) {
		/* No pending outgoing connection: this is an incoming
		 * one, so allocate a conn object for it */
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}

		conn->dst_type = ev->bdaddr_type;
	}

	if (ev->status) {
		mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
						conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
					conn->dst_type, NULL, 0, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3239
3240 static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3241 struct sk_buff *skb)
3242 {
3243 u8 num_reports = skb->data[0];
3244 void *ptr = &skb->data[1];
3245 s8 rssi;
3246
3247 hci_dev_lock(hdev);
3248
3249 while (num_reports--) {
3250 struct hci_ev_le_advertising_info *ev = ptr;
3251
3252 hci_add_adv_entry(hdev, ev);
3253
3254 rssi = ev->data[ev->length];
3255 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3256 NULL, rssi, 0, ev->data, ev->length);
3257
3258 ptr += sizeof(*ev) + ev->length + 1;
3259 }
3260
3261 hci_dev_unlock(hdev);
3262 }
3263
3264 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3265 struct sk_buff *skb)
3266 {
3267 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3268 struct hci_cp_le_ltk_reply cp;
3269 struct hci_cp_le_ltk_neg_reply neg;
3270 struct hci_conn *conn;
3271 struct smp_ltk *ltk;
3272
3273 BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
3274
3275 hci_dev_lock(hdev);
3276
3277 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3278 if (conn == NULL)
3279 goto not_found;
3280
3281 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3282 if (ltk == NULL)
3283 goto not_found;
3284
3285 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3286 cp.handle = cpu_to_le16(conn->handle);
3287
3288 if (ltk->authenticated)
3289 conn->sec_level = BT_SECURITY_HIGH;
3290
3291 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3292
3293 if (ltk->type & HCI_SMP_STK) {
3294 list_del(&ltk->list);
3295 kfree(ltk);
3296 }
3297
3298 hci_dev_unlock(hdev);
3299
3300 return;
3301
3302 not_found:
3303 neg.handle = ev->handle;
3304 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3305 hci_dev_unlock(hdev);
3306 }
3307
3308 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3309 {
3310 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3311
3312 skb_pull(skb, sizeof(*le_ev));
3313
3314 switch (le_ev->subevent) {
3315 case HCI_EV_LE_CONN_COMPLETE:
3316 hci_le_conn_complete_evt(hdev, skb);
3317 break;
3318
3319 case HCI_EV_LE_ADVERTISING_REPORT:
3320 hci_le_adv_report_evt(hdev, skb);
3321 break;
3322
3323 case HCI_EV_LE_LTK_REQ:
3324 hci_le_ltk_request_evt(hdev, skb);
3325 break;
3326
3327 default:
3328 break;
3329 }
3330 }
3331
/* Entry point for every incoming HCI event packet. Strips the event
 * header and dispatches to the matching per-event handler; unknown
 * events are only logged. The skb is always consumed here. */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_VERSION:
		hci_remote_version_evt(hdev, skb);
		break;

	case HCI_EV_QOS_SETUP_COMPLETE:
		hci_qos_setup_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_CHANGED:
		hci_sync_conn_changed_evt(hdev, skb);
		break;

	case HCI_EV_SNIFF_SUBRATE:
		hci_sniff_subrate_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
3500
3501 /* Generate internal stack event */
3502 void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
3503 {
3504 struct hci_event_hdr *hdr;
3505 struct hci_ev_stack_internal *ev;
3506 struct sk_buff *skb;
3507
3508 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
3509 if (!skb)
3510 return;
3511
3512 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
3513 hdr->evt = HCI_EV_STACK_INTERNAL;
3514 hdr->plen = sizeof(*ev) + dlen;
3515
3516 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
3517 ev->type = type;
3518 memcpy(ev->data, data, dlen);
3519
3520 bt_cb(skb)->incoming = 1;
3521 __net_timestamp(skb);
3522
3523 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3524 skb->dev = (void *) hdev;
3525 hci_send_to_sock(hdev, skb, NULL);
3526 kfree_skb(skb);
3527 }
3528
/* Allow toggling LE support at module load time (or via sysfs, 0644) */
module_param(enable_le, bool, 0644);
MODULE_PARM_DESC(enable_le, "Enable LE support");
This page took 0.098767 seconds and 4 git commands to generate.