Bluetooth: Track the current configured random address
net/bluetooth/hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "a2mp.h"
34 #include "amp.h"
35
36 /* Handle HCI Event packets */
37
38 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
39 {
40 __u8 status = *((__u8 *) skb->data);
41
42 BT_DBG("%s status 0x%2.2x", hdev->name, status);
43
44 if (status)
45 return;
46
47 clear_bit(HCI_INQUIRY, &hdev->flags);
48 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
49 wake_up_bit(&hdev->flags, HCI_INQUIRY);
50
51 hci_conn_check_pending(hdev);
52 }
53
54 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
55 {
56 __u8 status = *((__u8 *) skb->data);
57
58 BT_DBG("%s status 0x%2.2x", hdev->name, status);
59
60 if (status)
61 return;
62
63 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
64 }
65
66 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
67 {
68 __u8 status = *((__u8 *) skb->data);
69
70 BT_DBG("%s status 0x%2.2x", hdev->name, status);
71
72 if (status)
73 return;
74
75 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
76
77 hci_conn_check_pending(hdev);
78 }
79
80 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
81 struct sk_buff *skb)
82 {
83 BT_DBG("%s", hdev->name);
84 }
85
86 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
87 {
88 struct hci_rp_role_discovery *rp = (void *) skb->data;
89 struct hci_conn *conn;
90
91 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
92
93 if (rp->status)
94 return;
95
96 hci_dev_lock(hdev);
97
98 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
99 if (conn) {
100 if (rp->role)
101 conn->link_mode &= ~HCI_LM_MASTER;
102 else
103 conn->link_mode |= HCI_LM_MASTER;
104 }
105
106 hci_dev_unlock(hdev);
107 }
108
109 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
110 {
111 struct hci_rp_read_link_policy *rp = (void *) skb->data;
112 struct hci_conn *conn;
113
114 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
115
116 if (rp->status)
117 return;
118
119 hci_dev_lock(hdev);
120
121 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
122 if (conn)
123 conn->link_policy = __le16_to_cpu(rp->policy);
124
125 hci_dev_unlock(hdev);
126 }
127
128 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
129 {
130 struct hci_rp_write_link_policy *rp = (void *) skb->data;
131 struct hci_conn *conn;
132 void *sent;
133
134 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
135
136 if (rp->status)
137 return;
138
139 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
140 if (!sent)
141 return;
142
143 hci_dev_lock(hdev);
144
145 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
146 if (conn)
147 conn->link_policy = get_unaligned_le16(sent + 2);
148
149 hci_dev_unlock(hdev);
150 }
151
152 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
153 struct sk_buff *skb)
154 {
155 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
156
157 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
158
159 if (rp->status)
160 return;
161
162 hdev->link_policy = __le16_to_cpu(rp->policy);
163 }
164
165 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
166 struct sk_buff *skb)
167 {
168 __u8 status = *((__u8 *) skb->data);
169 void *sent;
170
171 BT_DBG("%s status 0x%2.2x", hdev->name, status);
172
173 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
174 if (!sent)
175 return;
176
177 if (!status)
178 hdev->link_policy = get_unaligned_le16(sent);
179 }
180
181 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
182 {
183 __u8 status = *((__u8 *) skb->data);
184
185 BT_DBG("%s status 0x%2.2x", hdev->name, status);
186
187 clear_bit(HCI_RESET, &hdev->flags);
188
189 /* Reset all non-persistent flags */
190 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
191
192 hdev->discovery.state = DISCOVERY_STOPPED;
193 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
194 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
195
196 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
197 hdev->adv_data_len = 0;
198
199 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
200 hdev->scan_rsp_data_len = 0;
201
202 hdev->ssp_debug_mode = 0;
203 }
204
205 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
206 {
207 __u8 status = *((__u8 *) skb->data);
208 void *sent;
209
210 BT_DBG("%s status 0x%2.2x", hdev->name, status);
211
212 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
213 if (!sent)
214 return;
215
216 hci_dev_lock(hdev);
217
218 if (test_bit(HCI_MGMT, &hdev->dev_flags))
219 mgmt_set_local_name_complete(hdev, sent, status);
220 else if (!status)
221 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
222
223 hci_dev_unlock(hdev);
224 }
225
226 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
227 {
228 struct hci_rp_read_local_name *rp = (void *) skb->data;
229
230 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
231
232 if (rp->status)
233 return;
234
235 if (test_bit(HCI_SETUP, &hdev->dev_flags))
236 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
237 }
238
239 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
240 {
241 __u8 status = *((__u8 *) skb->data);
242 void *sent;
243
244 BT_DBG("%s status 0x%2.2x", hdev->name, status);
245
246 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
247 if (!sent)
248 return;
249
250 if (!status) {
251 __u8 param = *((__u8 *) sent);
252
253 if (param == AUTH_ENABLED)
254 set_bit(HCI_AUTH, &hdev->flags);
255 else
256 clear_bit(HCI_AUTH, &hdev->flags);
257 }
258
259 if (test_bit(HCI_MGMT, &hdev->dev_flags))
260 mgmt_auth_enable_complete(hdev, status);
261 }
262
263 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
264 {
265 __u8 status = *((__u8 *) skb->data);
266 void *sent;
267
268 BT_DBG("%s status 0x%2.2x", hdev->name, status);
269
270 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
271 if (!sent)
272 return;
273
274 if (!status) {
275 __u8 param = *((__u8 *) sent);
276
277 if (param)
278 set_bit(HCI_ENCRYPT, &hdev->flags);
279 else
280 clear_bit(HCI_ENCRYPT, &hdev->flags);
281 }
282 }
283
284 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
285 {
286 __u8 param, status = *((__u8 *) skb->data);
287 int old_pscan, old_iscan;
288 void *sent;
289
290 BT_DBG("%s status 0x%2.2x", hdev->name, status);
291
292 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
293 if (!sent)
294 return;
295
296 param = *((__u8 *) sent);
297
298 hci_dev_lock(hdev);
299
300 if (status) {
301 mgmt_write_scan_failed(hdev, param, status);
302 hdev->discov_timeout = 0;
303 goto done;
304 }
305
306 /* We need to ensure that we set this back on if someone changed
307 * the scan mode through a raw HCI socket.
308 */
309 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
310
311 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
312 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
313
314 if (param & SCAN_INQUIRY) {
315 set_bit(HCI_ISCAN, &hdev->flags);
316 if (!old_iscan)
317 mgmt_discoverable(hdev, 1);
318 } else if (old_iscan)
319 mgmt_discoverable(hdev, 0);
320
321 if (param & SCAN_PAGE) {
322 set_bit(HCI_PSCAN, &hdev->flags);
323 if (!old_pscan)
324 mgmt_connectable(hdev, 1);
325 } else if (old_pscan)
326 mgmt_connectable(hdev, 0);
327
328 done:
329 hci_dev_unlock(hdev);
330 }
331
332 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
333 {
334 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
335
336 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
337
338 if (rp->status)
339 return;
340
341 memcpy(hdev->dev_class, rp->dev_class, 3);
342
343 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
344 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
345 }
346
347 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
348 {
349 __u8 status = *((__u8 *) skb->data);
350 void *sent;
351
352 BT_DBG("%s status 0x%2.2x", hdev->name, status);
353
354 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
355 if (!sent)
356 return;
357
358 hci_dev_lock(hdev);
359
360 if (status == 0)
361 memcpy(hdev->dev_class, sent, 3);
362
363 if (test_bit(HCI_MGMT, &hdev->dev_flags))
364 mgmt_set_class_of_dev_complete(hdev, sent, status);
365
366 hci_dev_unlock(hdev);
367 }
368
369 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
370 {
371 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
372 __u16 setting;
373
374 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
375
376 if (rp->status)
377 return;
378
379 setting = __le16_to_cpu(rp->voice_setting);
380
381 if (hdev->voice_setting == setting)
382 return;
383
384 hdev->voice_setting = setting;
385
386 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
387
388 if (hdev->notify)
389 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
390 }
391
392 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
393 struct sk_buff *skb)
394 {
395 __u8 status = *((__u8 *) skb->data);
396 __u16 setting;
397 void *sent;
398
399 BT_DBG("%s status 0x%2.2x", hdev->name, status);
400
401 if (status)
402 return;
403
404 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
405 if (!sent)
406 return;
407
408 setting = get_unaligned_le16(sent);
409
410 if (hdev->voice_setting == setting)
411 return;
412
413 hdev->voice_setting = setting;
414
415 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
416
417 if (hdev->notify)
418 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
419 }
420
421 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
422 struct sk_buff *skb)
423 {
424 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
425
426 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
427
428 if (rp->status)
429 return;
430
431 hdev->num_iac = rp->num_iac;
432
433 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
434 }
435
436 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
437 {
438 __u8 status = *((__u8 *) skb->data);
439 struct hci_cp_write_ssp_mode *sent;
440
441 BT_DBG("%s status 0x%2.2x", hdev->name, status);
442
443 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
444 if (!sent)
445 return;
446
447 if (!status) {
448 if (sent->mode)
449 hdev->features[1][0] |= LMP_HOST_SSP;
450 else
451 hdev->features[1][0] &= ~LMP_HOST_SSP;
452 }
453
454 if (test_bit(HCI_MGMT, &hdev->dev_flags))
455 mgmt_ssp_enable_complete(hdev, sent->mode, status);
456 else if (!status) {
457 if (sent->mode)
458 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
459 else
460 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
461 }
462 }
463
464 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
465 {
466 u8 status = *((u8 *) skb->data);
467 struct hci_cp_write_sc_support *sent;
468
469 BT_DBG("%s status 0x%2.2x", hdev->name, status);
470
471 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
472 if (!sent)
473 return;
474
475 if (!status) {
476 if (sent->support)
477 hdev->features[1][0] |= LMP_HOST_SC;
478 else
479 hdev->features[1][0] &= ~LMP_HOST_SC;
480 }
481
482 if (test_bit(HCI_MGMT, &hdev->dev_flags))
483 mgmt_sc_enable_complete(hdev, sent->support, status);
484 else if (!status) {
485 if (sent->support)
486 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
487 else
488 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
489 }
490 }
491
492 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
493 {
494 struct hci_rp_read_local_version *rp = (void *) skb->data;
495
496 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
497
498 if (rp->status)
499 return;
500
501 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
502 hdev->hci_ver = rp->hci_ver;
503 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
504 hdev->lmp_ver = rp->lmp_ver;
505 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
506 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
507 }
508 }
509
510 static void hci_cc_read_local_commands(struct hci_dev *hdev,
511 struct sk_buff *skb)
512 {
513 struct hci_rp_read_local_commands *rp = (void *) skb->data;
514
515 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
516
517 if (rp->status)
518 return;
519
520 if (test_bit(HCI_SETUP, &hdev->dev_flags))
521 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
522 }
523
524 static void hci_cc_read_local_features(struct hci_dev *hdev,
525 struct sk_buff *skb)
526 {
527 struct hci_rp_read_local_features *rp = (void *) skb->data;
528
529 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
530
531 if (rp->status)
532 return;
533
534 memcpy(hdev->features, rp->features, 8);
535
536 	/* Adjust default settings according to the features
537 	 * supported by the device. */
538
539 if (hdev->features[0][0] & LMP_3SLOT)
540 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
541
542 if (hdev->features[0][0] & LMP_5SLOT)
543 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
544
545 if (hdev->features[0][1] & LMP_HV2) {
546 hdev->pkt_type |= (HCI_HV2);
547 hdev->esco_type |= (ESCO_HV2);
548 }
549
550 if (hdev->features[0][1] & LMP_HV3) {
551 hdev->pkt_type |= (HCI_HV3);
552 hdev->esco_type |= (ESCO_HV3);
553 }
554
555 if (lmp_esco_capable(hdev))
556 hdev->esco_type |= (ESCO_EV3);
557
558 if (hdev->features[0][4] & LMP_EV4)
559 hdev->esco_type |= (ESCO_EV4);
560
561 if (hdev->features[0][4] & LMP_EV5)
562 hdev->esco_type |= (ESCO_EV5);
563
564 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
565 hdev->esco_type |= (ESCO_2EV3);
566
567 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
568 hdev->esco_type |= (ESCO_3EV3);
569
570 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
571 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
572 }
573
574 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
575 struct sk_buff *skb)
576 {
577 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
578
579 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
580
581 if (rp->status)
582 return;
583
584 if (hdev->max_page < rp->max_page)
585 hdev->max_page = rp->max_page;
586
587 if (rp->page < HCI_MAX_PAGES)
588 memcpy(hdev->features[rp->page], rp->features, 8);
589 }
590
591 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
592 struct sk_buff *skb)
593 {
594 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
595
596 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
597
598 if (!rp->status)
599 hdev->flow_ctl_mode = rp->mode;
600 }
601
602 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
603 {
604 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
605
606 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
607
608 if (rp->status)
609 return;
610
611 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
612 hdev->sco_mtu = rp->sco_mtu;
613 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
614 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
615
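	/* With the buffer size quirk set, override the reported SCO MTU and
	 * packet count with fixed defaults rather than trusting the values
	 * returned by the controller.
	 */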
616 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
617 hdev->sco_mtu = 64;
618 hdev->sco_pkts = 8;
619 }
620
621 hdev->acl_cnt = hdev->acl_pkts;
622 hdev->sco_cnt = hdev->sco_pkts;
623
624 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
625 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
626 }
627
628 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
629 {
630 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
631
632 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
633
634 if (!rp->status)
635 bacpy(&hdev->bdaddr, &rp->bdaddr);
636 }
637
638 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
639 struct sk_buff *skb)
640 {
641 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
642
643 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
644
645 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
646 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
647 hdev->page_scan_window = __le16_to_cpu(rp->window);
648 }
649 }
650
651 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
652 struct sk_buff *skb)
653 {
654 u8 status = *((u8 *) skb->data);
655 struct hci_cp_write_page_scan_activity *sent;
656
657 BT_DBG("%s status 0x%2.2x", hdev->name, status);
658
659 if (status)
660 return;
661
662 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
663 if (!sent)
664 return;
665
666 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
667 hdev->page_scan_window = __le16_to_cpu(sent->window);
668 }
669
670 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
671 struct sk_buff *skb)
672 {
673 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
674
675 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
676
677 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
678 hdev->page_scan_type = rp->type;
679 }
680
681 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
682 struct sk_buff *skb)
683 {
684 u8 status = *((u8 *) skb->data);
685 u8 *type;
686
687 BT_DBG("%s status 0x%2.2x", hdev->name, status);
688
689 if (status)
690 return;
691
692 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
693 if (type)
694 hdev->page_scan_type = *type;
695 }
696
697 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
698 struct sk_buff *skb)
699 {
700 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
701
702 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
703
704 if (rp->status)
705 return;
706
707 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
708 hdev->block_len = __le16_to_cpu(rp->block_len);
709 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
710
711 hdev->block_cnt = hdev->num_blocks;
712
713 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
714 hdev->block_cnt, hdev->block_len);
715 }
716
717 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
718 struct sk_buff *skb)
719 {
720 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
721
722 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
723
724 if (rp->status)
725 goto a2mp_rsp;
726
727 hdev->amp_status = rp->amp_status;
728 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
729 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
730 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
731 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
732 hdev->amp_type = rp->amp_type;
733 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
734 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
735 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
736 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
737
738 a2mp_rsp:
739 a2mp_send_getinfo_rsp(hdev);
740 }
741
742 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
743 struct sk_buff *skb)
744 {
745 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
746 struct amp_assoc *assoc = &hdev->loc_assoc;
747 size_t rem_len, frag_len;
748
749 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
750
751 if (rp->status)
752 goto a2mp_rsp;
753
754 frag_len = skb->len - sizeof(*rp);
755 rem_len = __le16_to_cpu(rp->rem_len);
756
757 if (rem_len > frag_len) {
758 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
759
760 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
761 assoc->offset += frag_len;
762
763 /* Read other fragments */
764 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
765
766 return;
767 }
768
769 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
770 assoc->len = assoc->offset + rem_len;
771 assoc->offset = 0;
772
773 a2mp_rsp:
774 /* Send A2MP Rsp when all fragments are received */
775 a2mp_send_getampassoc_rsp(hdev, rp->status);
776 a2mp_send_create_phy_link_req(hdev, rp->status);
777 }
778
779 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
780 struct sk_buff *skb)
781 {
782 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
783
784 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
785
786 if (!rp->status)
787 hdev->inq_tx_power = rp->tx_power;
788 }
789
790 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
791 {
792 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
793 struct hci_cp_pin_code_reply *cp;
794 struct hci_conn *conn;
795
796 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
797
798 hci_dev_lock(hdev);
799
800 if (test_bit(HCI_MGMT, &hdev->dev_flags))
801 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
802
803 if (rp->status)
804 goto unlock;
805
806 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
807 if (!cp)
808 goto unlock;
809
810 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
811 if (conn)
812 conn->pin_length = cp->pin_len;
813
814 unlock:
815 hci_dev_unlock(hdev);
816 }
817
818 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
819 {
820 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
821
822 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
823
824 hci_dev_lock(hdev);
825
826 if (test_bit(HCI_MGMT, &hdev->dev_flags))
827 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
828 rp->status);
829
830 hci_dev_unlock(hdev);
831 }
832
833 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
834 struct sk_buff *skb)
835 {
836 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
837
838 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
839
840 if (rp->status)
841 return;
842
843 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
844 hdev->le_pkts = rp->le_max_pkt;
845
846 hdev->le_cnt = hdev->le_pkts;
847
848 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
849 }
850
851 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
852 struct sk_buff *skb)
853 {
854 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
855
856 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
857
858 if (!rp->status)
859 memcpy(hdev->le_features, rp->features, 8);
860 }
861
862 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
863 struct sk_buff *skb)
864 {
865 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
866
867 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
868
869 if (!rp->status)
870 hdev->adv_tx_power = rp->tx_power;
871 }
872
873 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
874 {
875 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
876
877 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
878
879 hci_dev_lock(hdev);
880
881 if (test_bit(HCI_MGMT, &hdev->dev_flags))
882 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
883 rp->status);
884
885 hci_dev_unlock(hdev);
886 }
887
888 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
889 struct sk_buff *skb)
890 {
891 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
892
893 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
894
895 hci_dev_lock(hdev);
896
897 if (test_bit(HCI_MGMT, &hdev->dev_flags))
898 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
899 ACL_LINK, 0, rp->status);
900
901 hci_dev_unlock(hdev);
902 }
903
904 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
905 {
906 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
907
908 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
909
910 hci_dev_lock(hdev);
911
912 if (test_bit(HCI_MGMT, &hdev->dev_flags))
913 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
914 0, rp->status);
915
916 hci_dev_unlock(hdev);
917 }
918
919 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
920 struct sk_buff *skb)
921 {
922 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
923
924 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
925
926 hci_dev_lock(hdev);
927
928 if (test_bit(HCI_MGMT, &hdev->dev_flags))
929 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
930 ACL_LINK, 0, rp->status);
931
932 hci_dev_unlock(hdev);
933 }
934
935 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
936 struct sk_buff *skb)
937 {
938 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
939
940 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
941
942 hci_dev_lock(hdev);
943 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
944 NULL, NULL, rp->status);
945 hci_dev_unlock(hdev);
946 }
947
948 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
949 struct sk_buff *skb)
950 {
951 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
952
953 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
954
955 hci_dev_lock(hdev);
956 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
957 rp->hash256, rp->randomizer256,
958 rp->status);
959 hci_dev_unlock(hdev);
960 }
961
962
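/* Cache the address passed to HCI_OP_LE_SET_RANDOM_ADDR so that
 * hdev->random_addr tracks the currently configured random address.
 */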
963 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
964 {
965 __u8 status = *((__u8 *) skb->data);
966 bdaddr_t *sent;
967
968 BT_DBG("%s status 0x%2.2x", hdev->name, status);
969
970 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
971 if (!sent)
972 return;
973
974 hci_dev_lock(hdev);
975
976 if (!status)
977 bacpy(&hdev->random_addr, sent);
978
979 hci_dev_unlock(hdev);
980 }
981
982 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
983 {
984 __u8 *sent, status = *((__u8 *) skb->data);
985
986 BT_DBG("%s status 0x%2.2x", hdev->name, status);
987
988 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
989 if (!sent)
990 return;
991
992 hci_dev_lock(hdev);
993
994 if (!status) {
995 if (*sent)
996 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
997 else
998 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
999 }
1000
1001 hci_dev_unlock(hdev);
1002 }
1003
1004 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1005 struct sk_buff *skb)
1006 {
1007 struct hci_cp_le_set_scan_enable *cp;
1008 __u8 status = *((__u8 *) skb->data);
1009
1010 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1011
1012 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1013 if (!cp)
1014 return;
1015
1016 if (status)
1017 return;
1018
1019 switch (cp->enable) {
1020 case LE_SCAN_ENABLE:
1021 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1022 break;
1023
1024 case LE_SCAN_DISABLE:
1025 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1026 break;
1027
1028 default:
1029 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1030 break;
1031 }
1032 }
1033
1034 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1035 struct sk_buff *skb)
1036 {
1037 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1038
1039 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1040
1041 if (!rp->status)
1042 hdev->le_white_list_size = rp->size;
1043 }
1044
1045 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1046 struct sk_buff *skb)
1047 {
1048 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1049
1050 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1051
1052 if (!rp->status)
1053 memcpy(hdev->le_states, rp->le_states, 8);
1054 }
1055
1056 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1057 struct sk_buff *skb)
1058 {
1059 struct hci_cp_write_le_host_supported *sent;
1060 __u8 status = *((__u8 *) skb->data);
1061
1062 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1063
1064 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1065 if (!sent)
1066 return;
1067
1068 if (!status) {
1069 if (sent->le) {
1070 hdev->features[1][0] |= LMP_HOST_LE;
1071 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1072 } else {
1073 hdev->features[1][0] &= ~LMP_HOST_LE;
1074 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1075 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1076 }
1077
1078 if (sent->simul)
1079 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1080 else
1081 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1082 }
1083 }
1084
1085 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1086 struct sk_buff *skb)
1087 {
1088 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1089
1090 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1091 hdev->name, rp->status, rp->phy_handle);
1092
1093 if (rp->status)
1094 return;
1095
1096 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1097 }
1098
1099 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1100 {
1101 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1102
1103 if (status) {
1104 hci_conn_check_pending(hdev);
1105 return;
1106 }
1107
1108 set_bit(HCI_INQUIRY, &hdev->flags);
1109 }
1110
1111 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1112 {
1113 struct hci_cp_create_conn *cp;
1114 struct hci_conn *conn;
1115
1116 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1117
1118 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1119 if (!cp)
1120 return;
1121
1122 hci_dev_lock(hdev);
1123
1124 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1125
1126 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1127
1128 if (status) {
1129 if (conn && conn->state == BT_CONNECT) {
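			/* 0x0c is "Command Disallowed"; in that case retry
			 * the connection unless too many attempts have
			 * already been made.
			 */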
1130 if (status != 0x0c || conn->attempt > 2) {
1131 conn->state = BT_CLOSED;
1132 hci_proto_connect_cfm(conn, status);
1133 hci_conn_del(conn);
1134 } else
1135 conn->state = BT_CONNECT2;
1136 }
1137 } else {
1138 if (!conn) {
1139 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1140 if (conn) {
1141 conn->out = true;
1142 conn->link_mode |= HCI_LM_MASTER;
1143 } else
1144 BT_ERR("No memory for new connection");
1145 }
1146 }
1147
1148 hci_dev_unlock(hdev);
1149 }
1150
1151 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1152 {
1153 struct hci_cp_add_sco *cp;
1154 struct hci_conn *acl, *sco;
1155 __u16 handle;
1156
1157 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1158
1159 if (!status)
1160 return;
1161
1162 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1163 if (!cp)
1164 return;
1165
1166 handle = __le16_to_cpu(cp->handle);
1167
1168 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1169
1170 hci_dev_lock(hdev);
1171
1172 acl = hci_conn_hash_lookup_handle(hdev, handle);
1173 if (acl) {
1174 sco = acl->link;
1175 if (sco) {
1176 sco->state = BT_CLOSED;
1177
1178 hci_proto_connect_cfm(sco, status);
1179 hci_conn_del(sco);
1180 }
1181 }
1182
1183 hci_dev_unlock(hdev);
1184 }
1185
1186 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1187 {
1188 struct hci_cp_auth_requested *cp;
1189 struct hci_conn *conn;
1190
1191 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1192
1193 if (!status)
1194 return;
1195
1196 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1197 if (!cp)
1198 return;
1199
1200 hci_dev_lock(hdev);
1201
1202 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1203 if (conn) {
1204 if (conn->state == BT_CONFIG) {
1205 hci_proto_connect_cfm(conn, status);
1206 hci_conn_drop(conn);
1207 }
1208 }
1209
1210 hci_dev_unlock(hdev);
1211 }
1212
1213 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1214 {
1215 struct hci_cp_set_conn_encrypt *cp;
1216 struct hci_conn *conn;
1217
1218 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1219
1220 if (!status)
1221 return;
1222
1223 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1224 if (!cp)
1225 return;
1226
1227 hci_dev_lock(hdev);
1228
1229 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1230 if (conn) {
1231 if (conn->state == BT_CONFIG) {
1232 hci_proto_connect_cfm(conn, status);
1233 hci_conn_drop(conn);
1234 }
1235 }
1236
1237 hci_dev_unlock(hdev);
1238 }
1239
1240 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1241 struct hci_conn *conn)
1242 {
1243 if (conn->state != BT_CONFIG || !conn->out)
1244 return 0;
1245
1246 if (conn->pending_sec_level == BT_SECURITY_SDP)
1247 return 0;
1248
1249 /* Only request authentication for SSP connections or non-SSP
1250 * devices with sec_level MEDIUM or HIGH or if MITM protection
1251 * is requested.
1252 */
1253 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1254 conn->pending_sec_level != BT_SECURITY_HIGH &&
1255 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1256 return 0;
1257
1258 return 1;
1259 }
1260
1261 static int hci_resolve_name(struct hci_dev *hdev,
1262 struct inquiry_entry *e)
1263 {
1264 struct hci_cp_remote_name_req cp;
1265
1266 memset(&cp, 0, sizeof(cp));
1267
1268 bacpy(&cp.bdaddr, &e->data.bdaddr);
1269 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1270 cp.pscan_mode = e->data.pscan_mode;
1271 cp.clock_offset = e->data.clock_offset;
1272
1273 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1274 }
1275
1276 static bool hci_resolve_next_name(struct hci_dev *hdev)
1277 {
1278 struct discovery_state *discov = &hdev->discovery;
1279 struct inquiry_entry *e;
1280
1281 if (list_empty(&discov->resolve))
1282 return false;
1283
1284 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1285 if (!e)
1286 return false;
1287
1288 if (hci_resolve_name(hdev, e) == 0) {
1289 e->name_state = NAME_PENDING;
1290 return true;
1291 }
1292
1293 return false;
1294 }
1295
1296 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1297 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1298 {
1299 struct discovery_state *discov = &hdev->discovery;
1300 struct inquiry_entry *e;
1301
1302 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1303 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1304 name_len, conn->dev_class);
1305
1306 if (discov->state == DISCOVERY_STOPPED)
1307 return;
1308
1309 if (discov->state == DISCOVERY_STOPPING)
1310 goto discov_complete;
1311
1312 if (discov->state != DISCOVERY_RESOLVING)
1313 return;
1314
1315 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1316 	/* If the device was not found in the list of devices whose names
1317 	 * are pending, there is no need to continue resolving the next name,
1318 	 * as it will be done upon receiving another Remote Name Request
1319 	 * Complete event. */
1320 if (!e)
1321 return;
1322
1323 list_del(&e->list);
1324 if (name) {
1325 e->name_state = NAME_KNOWN;
1326 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1327 e->data.rssi, name, name_len);
1328 } else {
1329 e->name_state = NAME_NOT_KNOWN;
1330 }
1331
1332 if (hci_resolve_next_name(hdev))
1333 return;
1334
1335 discov_complete:
1336 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1337 }
1338
1339 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1340 {
1341 struct hci_cp_remote_name_req *cp;
1342 struct hci_conn *conn;
1343
1344 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1345
1346 	/* If successful, wait for the Remote Name Request Complete event
1347 	 * before checking whether authentication is needed. */
1348 if (!status)
1349 return;
1350
1351 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1352 if (!cp)
1353 return;
1354
1355 hci_dev_lock(hdev);
1356
1357 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1358
1359 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1360 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1361
1362 if (!conn)
1363 goto unlock;
1364
1365 if (!hci_outgoing_auth_needed(hdev, conn))
1366 goto unlock;
1367
1368 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1369 struct hci_cp_auth_requested auth_cp;
1370
1371 auth_cp.handle = __cpu_to_le16(conn->handle);
1372 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1373 sizeof(auth_cp), &auth_cp);
1374 }
1375
1376 unlock:
1377 hci_dev_unlock(hdev);
1378 }
1379
1380 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1381 {
1382 struct hci_cp_read_remote_features *cp;
1383 struct hci_conn *conn;
1384
1385 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1386
1387 if (!status)
1388 return;
1389
1390 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1391 if (!cp)
1392 return;
1393
1394 hci_dev_lock(hdev);
1395
1396 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1397 if (conn) {
1398 if (conn->state == BT_CONFIG) {
1399 hci_proto_connect_cfm(conn, status);
1400 hci_conn_drop(conn);
1401 }
1402 }
1403
1404 hci_dev_unlock(hdev);
1405 }
1406
1407 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1408 {
1409 struct hci_cp_read_remote_ext_features *cp;
1410 struct hci_conn *conn;
1411
1412 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1413
1414 if (!status)
1415 return;
1416
1417 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1418 if (!cp)
1419 return;
1420
1421 hci_dev_lock(hdev);
1422
1423 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1424 if (conn) {
1425 if (conn->state == BT_CONFIG) {
1426 hci_proto_connect_cfm(conn, status);
1427 hci_conn_drop(conn);
1428 }
1429 }
1430
1431 hci_dev_unlock(hdev);
1432 }
1433
1434 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1435 {
1436 struct hci_cp_setup_sync_conn *cp;
1437 struct hci_conn *acl, *sco;
1438 __u16 handle;
1439
1440 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1441
1442 if (!status)
1443 return;
1444
1445 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1446 if (!cp)
1447 return;
1448
1449 handle = __le16_to_cpu(cp->handle);
1450
1451 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1452
1453 hci_dev_lock(hdev);
1454
1455 acl = hci_conn_hash_lookup_handle(hdev, handle);
1456 if (acl) {
1457 sco = acl->link;
1458 if (sco) {
1459 sco->state = BT_CLOSED;
1460
1461 hci_proto_connect_cfm(sco, status);
1462 hci_conn_del(sco);
1463 }
1464 }
1465
1466 hci_dev_unlock(hdev);
1467 }
1468
1469 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1470 {
1471 struct hci_cp_sniff_mode *cp;
1472 struct hci_conn *conn;
1473
1474 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1475
1476 if (!status)
1477 return;
1478
1479 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1480 if (!cp)
1481 return;
1482
1483 hci_dev_lock(hdev);
1484
1485 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1486 if (conn) {
1487 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1488
1489 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1490 hci_sco_setup(conn, status);
1491 }
1492
1493 hci_dev_unlock(hdev);
1494 }
1495
1496 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1497 {
1498 struct hci_cp_exit_sniff_mode *cp;
1499 struct hci_conn *conn;
1500
1501 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1502
1503 if (!status)
1504 return;
1505
1506 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1507 if (!cp)
1508 return;
1509
1510 hci_dev_lock(hdev);
1511
1512 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1513 if (conn) {
1514 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1515
1516 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1517 hci_sco_setup(conn, status);
1518 }
1519
1520 hci_dev_unlock(hdev);
1521 }
1522
1523 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1524 {
1525 struct hci_cp_disconnect *cp;
1526 struct hci_conn *conn;
1527
1528 if (!status)
1529 return;
1530
1531 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1532 if (!cp)
1533 return;
1534
1535 hci_dev_lock(hdev);
1536
1537 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1538 if (conn)
1539 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1540 conn->dst_type, status);
1541
1542 hci_dev_unlock(hdev);
1543 }
1544
1545 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1546 {
1547 struct hci_cp_create_phy_link *cp;
1548
1549 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1550
1551 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1552 if (!cp)
1553 return;
1554
1555 hci_dev_lock(hdev);
1556
1557 if (status) {
1558 struct hci_conn *hcon;
1559
1560 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1561 if (hcon)
1562 hci_conn_del(hcon);
1563 } else {
1564 amp_write_remote_assoc(hdev, cp->phy_handle);
1565 }
1566
1567 hci_dev_unlock(hdev);
1568 }
1569
1570 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1571 {
1572 struct hci_cp_accept_phy_link *cp;
1573
1574 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1575
1576 if (status)
1577 return;
1578
1579 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1580 if (!cp)
1581 return;
1582
1583 amp_write_remote_assoc(hdev, cp->phy_handle);
1584 }
1585
1586 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1587 {
1588 __u8 status = *((__u8 *) skb->data);
1589 struct discovery_state *discov = &hdev->discovery;
1590 struct inquiry_entry *e;
1591
1592 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1593
1594 hci_conn_check_pending(hdev);
1595
1596 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1597 return;
1598
1599 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
1600 wake_up_bit(&hdev->flags, HCI_INQUIRY);
1601
1602 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1603 return;
1604
1605 hci_dev_lock(hdev);
1606
1607 if (discov->state != DISCOVERY_FINDING)
1608 goto unlock;
1609
1610 if (list_empty(&discov->resolve)) {
1611 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1612 goto unlock;
1613 }
1614
1615 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1616 if (e && hci_resolve_name(hdev, e) == 0) {
1617 e->name_state = NAME_PENDING;
1618 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1619 } else {
1620 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1621 }
1622
1623 unlock:
1624 hci_dev_unlock(hdev);
1625 }
1626
1627 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1628 {
1629 struct inquiry_data data;
1630 struct inquiry_info *info = (void *) (skb->data + 1);
1631 int num_rsp = *((__u8 *) skb->data);
1632
1633 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1634
1635 if (!num_rsp)
1636 return;
1637
1638 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1639 return;
1640
1641 hci_dev_lock(hdev);
1642
1643 for (; num_rsp; num_rsp--, info++) {
1644 bool name_known, ssp;
1645
1646 bacpy(&data.bdaddr, &info->bdaddr);
1647 data.pscan_rep_mode = info->pscan_rep_mode;
1648 data.pscan_period_mode = info->pscan_period_mode;
1649 data.pscan_mode = info->pscan_mode;
1650 memcpy(data.dev_class, info->dev_class, 3);
1651 data.clock_offset = info->clock_offset;
1652 data.rssi = 0x00;
1653 data.ssp_mode = 0x00;
1654
1655 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1656 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1657 info->dev_class, 0, !name_known, ssp, NULL,
1658 0);
1659 }
1660
1661 hci_dev_unlock(hdev);
1662 }
1663
1664 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1665 {
1666 struct hci_ev_conn_complete *ev = (void *) skb->data;
1667 struct hci_conn *conn;
1668
1669 BT_DBG("%s", hdev->name);
1670
1671 hci_dev_lock(hdev);
1672
1673 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1674 if (!conn) {
1675 if (ev->link_type != SCO_LINK)
1676 goto unlock;
1677
1678 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1679 if (!conn)
1680 goto unlock;
1681
1682 conn->type = SCO_LINK;
1683 }
1684
1685 if (!ev->status) {
1686 conn->handle = __le16_to_cpu(ev->handle);
1687
1688 if (conn->type == ACL_LINK) {
1689 conn->state = BT_CONFIG;
1690 hci_conn_hold(conn);
1691
1692 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1693 !hci_find_link_key(hdev, &ev->bdaddr))
1694 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1695 else
1696 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1697 } else
1698 conn->state = BT_CONNECTED;
1699
1700 hci_conn_add_sysfs(conn);
1701
1702 if (test_bit(HCI_AUTH, &hdev->flags))
1703 conn->link_mode |= HCI_LM_AUTH;
1704
1705 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1706 conn->link_mode |= HCI_LM_ENCRYPT;
1707
1708 /* Get remote features */
1709 if (conn->type == ACL_LINK) {
1710 struct hci_cp_read_remote_features cp;
1711 cp.handle = ev->handle;
1712 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1713 sizeof(cp), &cp);
1714 }
1715
1716 /* Set packet type for incoming connection */
1717 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1718 struct hci_cp_change_conn_ptype cp;
1719 cp.handle = ev->handle;
1720 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1721 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1722 &cp);
1723 }
1724 } else {
1725 conn->state = BT_CLOSED;
1726 if (conn->type == ACL_LINK)
1727 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1728 conn->dst_type, ev->status);
1729 }
1730
1731 if (conn->type == ACL_LINK)
1732 hci_sco_setup(conn, ev->status);
1733
1734 if (ev->status) {
1735 hci_proto_connect_cfm(conn, ev->status);
1736 hci_conn_del(conn);
1737 } else if (ev->link_type != ACL_LINK)
1738 hci_proto_connect_cfm(conn, ev->status);
1739
1740 unlock:
1741 hci_dev_unlock(hdev);
1742
1743 hci_conn_check_pending(hdev);
1744 }
1745
1746 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1747 {
1748 struct hci_ev_conn_request *ev = (void *) skb->data;
1749 int mask = hdev->link_mode;
1750 __u8 flags = 0;
1751
1752 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
1753 ev->link_type);
1754
1755 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
1756 &flags);
1757
1758 if ((mask & HCI_LM_ACCEPT) &&
1759 !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
1760 /* Connection accepted */
1761 struct inquiry_entry *ie;
1762 struct hci_conn *conn;
1763
1764 hci_dev_lock(hdev);
1765
1766 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1767 if (ie)
1768 memcpy(ie->data.dev_class, ev->dev_class, 3);
1769
1770 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1771 &ev->bdaddr);
1772 if (!conn) {
1773 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1774 if (!conn) {
1775 BT_ERR("No memory for new connection");
1776 hci_dev_unlock(hdev);
1777 return;
1778 }
1779 }
1780
1781 memcpy(conn->dev_class, ev->dev_class, 3);
1782
1783 hci_dev_unlock(hdev);
1784
1785 if (ev->link_type == ACL_LINK ||
1786 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
1787 struct hci_cp_accept_conn_req cp;
1788 conn->state = BT_CONNECT;
1789
1790 bacpy(&cp.bdaddr, &ev->bdaddr);
1791
1792 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1793 cp.role = 0x00; /* Become master */
1794 else
1795 cp.role = 0x01; /* Remain slave */
1796
1797 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1798 &cp);
1799 } else if (!(flags & HCI_PROTO_DEFER)) {
1800 struct hci_cp_accept_sync_conn_req cp;
1801 conn->state = BT_CONNECT;
1802
1803 bacpy(&cp.bdaddr, &ev->bdaddr);
1804 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1805
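			/* 0x1f40 = 8000 bytes/s, i.e. 64 kbit/s in each
			 * direction, the usual bandwidth for voice links.
			 */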
1806 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1807 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1808 cp.max_latency = __constant_cpu_to_le16(0xffff);
1809 cp.content_format = cpu_to_le16(hdev->voice_setting);
1810 cp.retrans_effort = 0xff;
1811
1812 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1813 sizeof(cp), &cp);
1814 } else {
1815 conn->state = BT_CONNECT2;
1816 hci_proto_connect_cfm(conn, 0);
1817 }
1818 } else {
1819 /* Connection rejected */
1820 struct hci_cp_reject_conn_req cp;
1821
1822 bacpy(&cp.bdaddr, &ev->bdaddr);
1823 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1824 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1825 }
1826 }
1827
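/* Map HCI disconnection reasons onto the reason codes used by the
 * Management interface.
 */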
1828 static u8 hci_to_mgmt_reason(u8 err)
1829 {
1830 switch (err) {
1831 case HCI_ERROR_CONNECTION_TIMEOUT:
1832 return MGMT_DEV_DISCONN_TIMEOUT;
1833 case HCI_ERROR_REMOTE_USER_TERM:
1834 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1835 case HCI_ERROR_REMOTE_POWER_OFF:
1836 return MGMT_DEV_DISCONN_REMOTE;
1837 case HCI_ERROR_LOCAL_HOST_TERM:
1838 return MGMT_DEV_DISCONN_LOCAL_HOST;
1839 default:
1840 return MGMT_DEV_DISCONN_UNKNOWN;
1841 }
1842 }
1843
1844 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1845 {
1846 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1847 u8 reason = hci_to_mgmt_reason(ev->reason);
1848 struct hci_conn *conn;
1849 u8 type;
1850
1851 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1852
1853 hci_dev_lock(hdev);
1854
1855 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1856 if (!conn)
1857 goto unlock;
1858
1859 if (ev->status) {
1860 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1861 conn->dst_type, ev->status);
1862 goto unlock;
1863 }
1864
1865 conn->state = BT_CLOSED;
1866
1867 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1868 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1869 conn->dst_type, reason);
1870
1871 if (conn->type == ACL_LINK && conn->flush_key)
1872 hci_remove_link_key(hdev, &conn->dst);
1873
1874 type = conn->type;
1875
1876 hci_proto_disconn_cfm(conn, ev->reason);
1877 hci_conn_del(conn);
1878
1879 /* Re-enable advertising if necessary, since it might
1880 * have been disabled by the connection. From the
1881 * HCI_LE_Set_Advertise_Enable command description in
1882 * the core specification (v4.0):
1883 * "The Controller shall continue advertising until the Host
1884 * issues an LE_Set_Advertise_Enable command with
1885 * Advertising_Enable set to 0x00 (Advertising is disabled)
1886 * or until a connection is created or until the Advertising
1887 * is timed out due to Directed Advertising."
1888 */
1889 if (type == LE_LINK)
1890 mgmt_reenable_advertising(hdev);
1891
1892 unlock:
1893 hci_dev_unlock(hdev);
1894 }
1895
1896 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1897 {
1898 struct hci_ev_auth_complete *ev = (void *) skb->data;
1899 struct hci_conn *conn;
1900
1901 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1902
1903 hci_dev_lock(hdev);
1904
1905 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1906 if (!conn)
1907 goto unlock;
1908
1909 if (!ev->status) {
1910 if (!hci_conn_ssp_enabled(conn) &&
1911 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1912 BT_INFO("re-auth of legacy device is not possible.");
1913 } else {
1914 conn->link_mode |= HCI_LM_AUTH;
1915 conn->sec_level = conn->pending_sec_level;
1916 }
1917 } else {
1918 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1919 ev->status);
1920 }
1921
1922 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1923 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1924
1925 if (conn->state == BT_CONFIG) {
1926 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1927 struct hci_cp_set_conn_encrypt cp;
1928 cp.handle = ev->handle;
1929 cp.encrypt = 0x01;
1930 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1931 &cp);
1932 } else {
1933 conn->state = BT_CONNECTED;
1934 hci_proto_connect_cfm(conn, ev->status);
1935 hci_conn_drop(conn);
1936 }
1937 } else {
1938 hci_auth_cfm(conn, ev->status);
1939
1940 hci_conn_hold(conn);
1941 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1942 hci_conn_drop(conn);
1943 }
1944
1945 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1946 if (!ev->status) {
1947 struct hci_cp_set_conn_encrypt cp;
1948 cp.handle = ev->handle;
1949 cp.encrypt = 0x01;
1950 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1951 &cp);
1952 } else {
1953 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1954 hci_encrypt_cfm(conn, ev->status, 0x00);
1955 }
1956 }
1957
1958 unlock:
1959 hci_dev_unlock(hdev);
1960 }
1961
1962 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1963 {
1964 struct hci_ev_remote_name *ev = (void *) skb->data;
1965 struct hci_conn *conn;
1966
1967 BT_DBG("%s", hdev->name);
1968
1969 hci_conn_check_pending(hdev);
1970
1971 hci_dev_lock(hdev);
1972
1973 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1974
1975 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1976 goto check_auth;
1977
1978 if (ev->status == 0)
1979 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1980 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1981 else
1982 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
1983
1984 check_auth:
1985 if (!conn)
1986 goto unlock;
1987
1988 if (!hci_outgoing_auth_needed(hdev, conn))
1989 goto unlock;
1990
1991 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1992 struct hci_cp_auth_requested cp;
1993 cp.handle = __cpu_to_le16(conn->handle);
1994 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1995 }
1996
1997 unlock:
1998 hci_dev_unlock(hdev);
1999 }
2000
2001 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2002 {
2003 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2004 struct hci_conn *conn;
2005
2006 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2007
2008 hci_dev_lock(hdev);
2009
2010 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2011 if (!conn)
2012 goto unlock;
2013
2014 if (!ev->status) {
2015 if (ev->encrypt) {
2016 /* Encryption implies authentication */
2017 conn->link_mode |= HCI_LM_AUTH;
2018 conn->link_mode |= HCI_LM_ENCRYPT;
2019 conn->sec_level = conn->pending_sec_level;
2020
2021 /* P-256 authentication key implies FIPS */
2022 if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
2023 conn->link_mode |= HCI_LM_FIPS;
2024
2025 if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
2026 conn->type == LE_LINK)
2027 set_bit(HCI_CONN_AES_CCM, &conn->flags);
2028 } else {
2029 conn->link_mode &= ~HCI_LM_ENCRYPT;
2030 clear_bit(HCI_CONN_AES_CCM, &conn->flags);
2031 }
2032 }
2033
2034 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2035
2036 if (ev->status && conn->state == BT_CONNECTED) {
2037 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2038 hci_conn_drop(conn);
2039 goto unlock;
2040 }
2041
2042 if (conn->state == BT_CONFIG) {
2043 if (!ev->status)
2044 conn->state = BT_CONNECTED;
2045
2046 hci_proto_connect_cfm(conn, ev->status);
2047 hci_conn_drop(conn);
2048 } else
2049 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2050
2051 unlock:
2052 hci_dev_unlock(hdev);
2053 }
2054
2055 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2056 struct sk_buff *skb)
2057 {
2058 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2059 struct hci_conn *conn;
2060
2061 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2062
2063 hci_dev_lock(hdev);
2064
2065 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2066 if (conn) {
2067 if (!ev->status)
2068 conn->link_mode |= HCI_LM_SECURE;
2069
2070 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2071
2072 hci_key_change_cfm(conn, ev->status);
2073 }
2074
2075 hci_dev_unlock(hdev);
2076 }
2077
2078 static void hci_remote_features_evt(struct hci_dev *hdev,
2079 struct sk_buff *skb)
2080 {
2081 struct hci_ev_remote_features *ev = (void *) skb->data;
2082 struct hci_conn *conn;
2083
2084 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2085
2086 hci_dev_lock(hdev);
2087
2088 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2089 if (!conn)
2090 goto unlock;
2091
2092 if (!ev->status)
2093 memcpy(conn->features[0], ev->features, 8);
2094
2095 if (conn->state != BT_CONFIG)
2096 goto unlock;
2097
2098 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2099 struct hci_cp_read_remote_ext_features cp;
2100 cp.handle = ev->handle;
2101 cp.page = 0x01;
2102 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2103 sizeof(cp), &cp);
2104 goto unlock;
2105 }
2106
2107 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2108 struct hci_cp_remote_name_req cp;
2109 memset(&cp, 0, sizeof(cp));
2110 bacpy(&cp.bdaddr, &conn->dst);
2111 cp.pscan_rep_mode = 0x02;
2112 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2113 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2114 mgmt_device_connected(hdev, &conn->dst, conn->type,
2115 conn->dst_type, 0, NULL, 0,
2116 conn->dev_class);
2117
2118 if (!hci_outgoing_auth_needed(hdev, conn)) {
2119 conn->state = BT_CONNECTED;
2120 hci_proto_connect_cfm(conn, ev->status);
2121 hci_conn_drop(conn);
2122 }
2123
2124 unlock:
2125 hci_dev_unlock(hdev);
2126 }
2127
2128 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2129 {
2130 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2131 u8 status = skb->data[sizeof(*ev)];
2132 __u16 opcode;
2133
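/* The status byte was read above, before the event header is pulled,
 * so the opcode-specific handlers below only see the command's return
 * parameters. */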
2134 skb_pull(skb, sizeof(*ev));
2135
2136 opcode = __le16_to_cpu(ev->opcode);
2137
2138 switch (opcode) {
2139 case HCI_OP_INQUIRY_CANCEL:
2140 hci_cc_inquiry_cancel(hdev, skb);
2141 break;
2142
2143 case HCI_OP_PERIODIC_INQ:
2144 hci_cc_periodic_inq(hdev, skb);
2145 break;
2146
2147 case HCI_OP_EXIT_PERIODIC_INQ:
2148 hci_cc_exit_periodic_inq(hdev, skb);
2149 break;
2150
2151 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2152 hci_cc_remote_name_req_cancel(hdev, skb);
2153 break;
2154
2155 case HCI_OP_ROLE_DISCOVERY:
2156 hci_cc_role_discovery(hdev, skb);
2157 break;
2158
2159 case HCI_OP_READ_LINK_POLICY:
2160 hci_cc_read_link_policy(hdev, skb);
2161 break;
2162
2163 case HCI_OP_WRITE_LINK_POLICY:
2164 hci_cc_write_link_policy(hdev, skb);
2165 break;
2166
2167 case HCI_OP_READ_DEF_LINK_POLICY:
2168 hci_cc_read_def_link_policy(hdev, skb);
2169 break;
2170
2171 case HCI_OP_WRITE_DEF_LINK_POLICY:
2172 hci_cc_write_def_link_policy(hdev, skb);
2173 break;
2174
2175 case HCI_OP_RESET:
2176 hci_cc_reset(hdev, skb);
2177 break;
2178
2179 case HCI_OP_WRITE_LOCAL_NAME:
2180 hci_cc_write_local_name(hdev, skb);
2181 break;
2182
2183 case HCI_OP_READ_LOCAL_NAME:
2184 hci_cc_read_local_name(hdev, skb);
2185 break;
2186
2187 case HCI_OP_WRITE_AUTH_ENABLE:
2188 hci_cc_write_auth_enable(hdev, skb);
2189 break;
2190
2191 case HCI_OP_WRITE_ENCRYPT_MODE:
2192 hci_cc_write_encrypt_mode(hdev, skb);
2193 break;
2194
2195 case HCI_OP_WRITE_SCAN_ENABLE:
2196 hci_cc_write_scan_enable(hdev, skb);
2197 break;
2198
2199 case HCI_OP_READ_CLASS_OF_DEV:
2200 hci_cc_read_class_of_dev(hdev, skb);
2201 break;
2202
2203 case HCI_OP_WRITE_CLASS_OF_DEV:
2204 hci_cc_write_class_of_dev(hdev, skb);
2205 break;
2206
2207 case HCI_OP_READ_VOICE_SETTING:
2208 hci_cc_read_voice_setting(hdev, skb);
2209 break;
2210
2211 case HCI_OP_WRITE_VOICE_SETTING:
2212 hci_cc_write_voice_setting(hdev, skb);
2213 break;
2214
2215 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2216 hci_cc_read_num_supported_iac(hdev, skb);
2217 break;
2218
2219 case HCI_OP_WRITE_SSP_MODE:
2220 hci_cc_write_ssp_mode(hdev, skb);
2221 break;
2222
2223 case HCI_OP_WRITE_SC_SUPPORT:
2224 hci_cc_write_sc_support(hdev, skb);
2225 break;
2226
2227 case HCI_OP_READ_LOCAL_VERSION:
2228 hci_cc_read_local_version(hdev, skb);
2229 break;
2230
2231 case HCI_OP_READ_LOCAL_COMMANDS:
2232 hci_cc_read_local_commands(hdev, skb);
2233 break;
2234
2235 case HCI_OP_READ_LOCAL_FEATURES:
2236 hci_cc_read_local_features(hdev, skb);
2237 break;
2238
2239 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2240 hci_cc_read_local_ext_features(hdev, skb);
2241 break;
2242
2243 case HCI_OP_READ_BUFFER_SIZE:
2244 hci_cc_read_buffer_size(hdev, skb);
2245 break;
2246
2247 case HCI_OP_READ_BD_ADDR:
2248 hci_cc_read_bd_addr(hdev, skb);
2249 break;
2250
2251 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2252 hci_cc_read_page_scan_activity(hdev, skb);
2253 break;
2254
2255 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2256 hci_cc_write_page_scan_activity(hdev, skb);
2257 break;
2258
2259 case HCI_OP_READ_PAGE_SCAN_TYPE:
2260 hci_cc_read_page_scan_type(hdev, skb);
2261 break;
2262
2263 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2264 hci_cc_write_page_scan_type(hdev, skb);
2265 break;
2266
2267 case HCI_OP_READ_DATA_BLOCK_SIZE:
2268 hci_cc_read_data_block_size(hdev, skb);
2269 break;
2270
2271 case HCI_OP_READ_FLOW_CONTROL_MODE:
2272 hci_cc_read_flow_control_mode(hdev, skb);
2273 break;
2274
2275 case HCI_OP_READ_LOCAL_AMP_INFO:
2276 hci_cc_read_local_amp_info(hdev, skb);
2277 break;
2278
2279 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2280 hci_cc_read_local_amp_assoc(hdev, skb);
2281 break;
2282
2283 case HCI_OP_READ_INQ_RSP_TX_POWER:
2284 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2285 break;
2286
2287 case HCI_OP_PIN_CODE_REPLY:
2288 hci_cc_pin_code_reply(hdev, skb);
2289 break;
2290
2291 case HCI_OP_PIN_CODE_NEG_REPLY:
2292 hci_cc_pin_code_neg_reply(hdev, skb);
2293 break;
2294
2295 case HCI_OP_READ_LOCAL_OOB_DATA:
2296 hci_cc_read_local_oob_data(hdev, skb);
2297 break;
2298
2299 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2300 hci_cc_read_local_oob_ext_data(hdev, skb);
2301 break;
2302
2303 case HCI_OP_LE_READ_BUFFER_SIZE:
2304 hci_cc_le_read_buffer_size(hdev, skb);
2305 break;
2306
2307 case HCI_OP_LE_READ_LOCAL_FEATURES:
2308 hci_cc_le_read_local_features(hdev, skb);
2309 break;
2310
2311 case HCI_OP_LE_READ_ADV_TX_POWER:
2312 hci_cc_le_read_adv_tx_power(hdev, skb);
2313 break;
2314
2315 case HCI_OP_USER_CONFIRM_REPLY:
2316 hci_cc_user_confirm_reply(hdev, skb);
2317 break;
2318
2319 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2320 hci_cc_user_confirm_neg_reply(hdev, skb);
2321 break;
2322
2323 case HCI_OP_USER_PASSKEY_REPLY:
2324 hci_cc_user_passkey_reply(hdev, skb);
2325 break;
2326
2327 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2328 hci_cc_user_passkey_neg_reply(hdev, skb);
2329 break;
2330
2331 case HCI_OP_LE_SET_RANDOM_ADDR:
2332 hci_cc_le_set_random_addr(hdev, skb);
2333 break;
2334
2335 case HCI_OP_LE_SET_ADV_ENABLE:
2336 hci_cc_le_set_adv_enable(hdev, skb);
2337 break;
2338
2339 case HCI_OP_LE_SET_SCAN_ENABLE:
2340 hci_cc_le_set_scan_enable(hdev, skb);
2341 break;
2342
2343 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2344 hci_cc_le_read_white_list_size(hdev, skb);
2345 break;
2346
2347 case HCI_OP_LE_READ_SUPPORTED_STATES:
2348 hci_cc_le_read_supported_states(hdev, skb);
2349 break;
2350
2351 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2352 hci_cc_write_le_host_supported(hdev, skb);
2353 break;
2354
2355 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2356 hci_cc_write_remote_amp_assoc(hdev, skb);
2357 break;
2358
2359 default:
2360 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2361 break;
2362 }
2363
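/* A completion for a real command cancels the command timeout; the NOP
 * opcode (0x0000) does not correspond to any sent command, so the timer
 * is left running. */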
2364 if (opcode != HCI_OP_NOP)
2365 del_timer(&hdev->cmd_timer);
2366
2367 hci_req_cmd_complete(hdev, opcode, status);
2368
2369 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2370 atomic_set(&hdev->cmd_cnt, 1);
2371 if (!skb_queue_empty(&hdev->cmd_q))
2372 queue_work(hdev->workqueue, &hdev->cmd_work);
2373 }
2374 }
2375
2376 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2377 {
2378 struct hci_ev_cmd_status *ev = (void *) skb->data;
2379 __u16 opcode;
2380
2381 skb_pull(skb, sizeof(*ev));
2382
2383 opcode = __le16_to_cpu(ev->opcode);
2384
2385 switch (opcode) {
2386 case HCI_OP_INQUIRY:
2387 hci_cs_inquiry(hdev, ev->status);
2388 break;
2389
2390 case HCI_OP_CREATE_CONN:
2391 hci_cs_create_conn(hdev, ev->status);
2392 break;
2393
2394 case HCI_OP_ADD_SCO:
2395 hci_cs_add_sco(hdev, ev->status);
2396 break;
2397
2398 case HCI_OP_AUTH_REQUESTED:
2399 hci_cs_auth_requested(hdev, ev->status);
2400 break;
2401
2402 case HCI_OP_SET_CONN_ENCRYPT:
2403 hci_cs_set_conn_encrypt(hdev, ev->status);
2404 break;
2405
2406 case HCI_OP_REMOTE_NAME_REQ:
2407 hci_cs_remote_name_req(hdev, ev->status);
2408 break;
2409
2410 case HCI_OP_READ_REMOTE_FEATURES:
2411 hci_cs_read_remote_features(hdev, ev->status);
2412 break;
2413
2414 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2415 hci_cs_read_remote_ext_features(hdev, ev->status);
2416 break;
2417
2418 case HCI_OP_SETUP_SYNC_CONN:
2419 hci_cs_setup_sync_conn(hdev, ev->status);
2420 break;
2421
2422 case HCI_OP_SNIFF_MODE:
2423 hci_cs_sniff_mode(hdev, ev->status);
2424 break;
2425
2426 case HCI_OP_EXIT_SNIFF_MODE:
2427 hci_cs_exit_sniff_mode(hdev, ev->status);
2428 break;
2429
2430 case HCI_OP_DISCONNECT:
2431 hci_cs_disconnect(hdev, ev->status);
2432 break;
2433
2434 case HCI_OP_CREATE_PHY_LINK:
2435 hci_cs_create_phylink(hdev, ev->status);
2436 break;
2437
2438 case HCI_OP_ACCEPT_PHY_LINK:
2439 hci_cs_accept_phylink(hdev, ev->status);
2440 break;
2441
2442 default:
2443 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2444 break;
2445 }
2446
2447 if (opcode != HCI_OP_NOP)
2448 del_timer(&hdev->cmd_timer);
2449
2450 if (ev->status ||
2451 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
2452 hci_req_cmd_complete(hdev, opcode, ev->status);
2453
2454 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2455 atomic_set(&hdev->cmd_cnt, 1);
2456 if (!skb_queue_empty(&hdev->cmd_q))
2457 queue_work(hdev->workqueue, &hdev->cmd_work);
2458 }
2459 }
2460
2461 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2462 {
2463 struct hci_ev_role_change *ev = (void *) skb->data;
2464 struct hci_conn *conn;
2465
2466 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2467
2468 hci_dev_lock(hdev);
2469
2470 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2471 if (conn) {
2472 if (!ev->status) {
2473 if (ev->role)
2474 conn->link_mode &= ~HCI_LM_MASTER;
2475 else
2476 conn->link_mode |= HCI_LM_MASTER;
2477 }
2478
2479 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2480
2481 hci_role_switch_cfm(conn, ev->status, ev->role);
2482 }
2483
2484 hci_dev_unlock(hdev);
2485 }
2486
2487 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2488 {
2489 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2490 int i;
2491
2492 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2493 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2494 return;
2495 }
2496
2497 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2498 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2499 BT_DBG("%s bad parameters", hdev->name);
2500 return;
2501 }
2502
2503 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2504
2505 for (i = 0; i < ev->num_hndl; i++) {
2506 struct hci_comp_pkts_info *info = &ev->handles[i];
2507 struct hci_conn *conn;
2508 __u16 handle, count;
2509
2510 handle = __le16_to_cpu(info->handle);
2511 count = __le16_to_cpu(info->count);
2512
2513 conn = hci_conn_hash_lookup_handle(hdev, handle);
2514 if (!conn)
2515 continue;
2516
2517 conn->sent -= count;
2518
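/* Return the credits to the buffer pool the packets were taken from;
 * LE links share the ACL pool when the controller reports no dedicated
 * LE buffers (le_pkts == 0). */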
2519 switch (conn->type) {
2520 case ACL_LINK:
2521 hdev->acl_cnt += count;
2522 if (hdev->acl_cnt > hdev->acl_pkts)
2523 hdev->acl_cnt = hdev->acl_pkts;
2524 break;
2525
2526 case LE_LINK:
2527 if (hdev->le_pkts) {
2528 hdev->le_cnt += count;
2529 if (hdev->le_cnt > hdev->le_pkts)
2530 hdev->le_cnt = hdev->le_pkts;
2531 } else {
2532 hdev->acl_cnt += count;
2533 if (hdev->acl_cnt > hdev->acl_pkts)
2534 hdev->acl_cnt = hdev->acl_pkts;
2535 }
2536 break;
2537
2538 case SCO_LINK:
2539 hdev->sco_cnt += count;
2540 if (hdev->sco_cnt > hdev->sco_pkts)
2541 hdev->sco_cnt = hdev->sco_pkts;
2542 break;
2543
2544 default:
2545 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2546 break;
2547 }
2548 }
2549
2550 queue_work(hdev->workqueue, &hdev->tx_work);
2551 }
2552
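/* Number Of Completed Blocks events carry a logical link (hci_chan)
 * handle on AMP controllers but a connection handle on BR/EDR
 * controllers, so resolve the handle accordingly. */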
2553 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2554 __u16 handle)
2555 {
2556 struct hci_chan *chan;
2557
2558 switch (hdev->dev_type) {
2559 case HCI_BREDR:
2560 return hci_conn_hash_lookup_handle(hdev, handle);
2561 case HCI_AMP:
2562 chan = hci_chan_lookup_handle(hdev, handle);
2563 if (chan)
2564 return chan->conn;
2565 break;
2566 default:
2567 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2568 break;
2569 }
2570
2571 return NULL;
2572 }
2573
2574 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2575 {
2576 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2577 int i;
2578
2579 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2580 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2581 return;
2582 }
2583
2584 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2585 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2586 BT_DBG("%s bad parameters", hdev->name);
2587 return;
2588 }
2589
2590 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2591 ev->num_hndl);
2592
2593 for (i = 0; i < ev->num_hndl; i++) {
2594 struct hci_comp_blocks_info *info = &ev->handles[i];
2595 struct hci_conn *conn = NULL;
2596 __u16 handle, block_count;
2597
2598 handle = __le16_to_cpu(info->handle);
2599 block_count = __le16_to_cpu(info->blocks);
2600
2601 conn = __hci_conn_lookup_handle(hdev, handle);
2602 if (!conn)
2603 continue;
2604
2605 conn->sent -= block_count;
2606
2607 switch (conn->type) {
2608 case ACL_LINK:
2609 case AMP_LINK:
2610 hdev->block_cnt += block_count;
2611 if (hdev->block_cnt > hdev->num_blocks)
2612 hdev->block_cnt = hdev->num_blocks;
2613 break;
2614
2615 default:
2616 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2617 break;
2618 }
2619 }
2620
2621 queue_work(hdev->workqueue, &hdev->tx_work);
2622 }
2623
2624 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2625 {
2626 struct hci_ev_mode_change *ev = (void *) skb->data;
2627 struct hci_conn *conn;
2628
2629 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2630
2631 hci_dev_lock(hdev);
2632
2633 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2634 if (conn) {
2635 conn->mode = ev->mode;
2636
2637 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2638 &conn->flags)) {
2639 if (conn->mode == HCI_CM_ACTIVE)
2640 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2641 else
2642 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2643 }
2644
2645 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2646 hci_sco_setup(conn, ev->status);
2647 }
2648
2649 hci_dev_unlock(hdev);
2650 }
2651
2652 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2653 {
2654 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2655 struct hci_conn *conn;
2656
2657 BT_DBG("%s", hdev->name);
2658
2659 hci_dev_lock(hdev);
2660
2661 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2662 if (!conn)
2663 goto unlock;
2664
2665 if (conn->state == BT_CONNECTED) {
2666 hci_conn_hold(conn);
2667 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2668 hci_conn_drop(conn);
2669 }
2670
2671 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2672 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2673 sizeof(ev->bdaddr), &ev->bdaddr);
2674 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2675 u8 secure;
2676
2677 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2678 secure = 1;
2679 else
2680 secure = 0;
2681
2682 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2683 }
2684
2685 unlock:
2686 hci_dev_unlock(hdev);
2687 }
2688
2689 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2690 {
2691 struct hci_ev_link_key_req *ev = (void *) skb->data;
2692 struct hci_cp_link_key_reply cp;
2693 struct hci_conn *conn;
2694 struct link_key *key;
2695
2696 BT_DBG("%s", hdev->name);
2697
2698 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2699 return;
2700
2701 hci_dev_lock(hdev);
2702
2703 key = hci_find_link_key(hdev, &ev->bdaddr);
2704 if (!key) {
2705 BT_DBG("%s link key not found for %pMR", hdev->name,
2706 &ev->bdaddr);
2707 goto not_found;
2708 }
2709
2710 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
2711 &ev->bdaddr);
2712
2713 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2714 key->type == HCI_LK_DEBUG_COMBINATION) {
2715 BT_DBG("%s ignoring debug key", hdev->name);
2716 goto not_found;
2717 }
2718
2719 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2720 if (conn) {
2721 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
2722 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
2723 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2724 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2725 goto not_found;
2726 }
2727
2728 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2729 conn->pending_sec_level == BT_SECURITY_HIGH) {
2730 BT_DBG("%s ignoring key unauthenticated for high security",
2731 hdev->name);
2732 goto not_found;
2733 }
2734
2735 conn->key_type = key->type;
2736 conn->pin_length = key->pin_len;
2737 }
2738
2739 bacpy(&cp.bdaddr, &ev->bdaddr);
2740 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2741
2742 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2743
2744 hci_dev_unlock(hdev);
2745
2746 return;
2747
2748 not_found:
2749 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2750 hci_dev_unlock(hdev);
2751 }
2752
2753 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2754 {
2755 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2756 struct hci_conn *conn;
2757 u8 pin_len = 0;
2758
2759 BT_DBG("%s", hdev->name);
2760
2761 hci_dev_lock(hdev);
2762
2763 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2764 if (conn) {
2765 hci_conn_hold(conn);
2766 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2767 pin_len = conn->pin_length;
2768
2769 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2770 conn->key_type = ev->key_type;
2771
2772 hci_conn_drop(conn);
2773 }
2774
2775 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2776 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2777 ev->key_type, pin_len);
2778
2779 hci_dev_unlock(hdev);
2780 }
2781
2782 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2783 {
2784 struct hci_ev_clock_offset *ev = (void *) skb->data;
2785 struct hci_conn *conn;
2786
2787 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2788
2789 hci_dev_lock(hdev);
2790
2791 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2792 if (conn && !ev->status) {
2793 struct inquiry_entry *ie;
2794
2795 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2796 if (ie) {
2797 ie->data.clock_offset = ev->clock_offset;
2798 ie->timestamp = jiffies;
2799 }
2800 }
2801
2802 hci_dev_unlock(hdev);
2803 }
2804
2805 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2806 {
2807 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2808 struct hci_conn *conn;
2809
2810 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2811
2812 hci_dev_lock(hdev);
2813
2814 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2815 if (conn && !ev->status)
2816 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2817
2818 hci_dev_unlock(hdev);
2819 }
2820
2821 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2822 {
2823 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2824 struct inquiry_entry *ie;
2825
2826 BT_DBG("%s", hdev->name);
2827
2828 hci_dev_lock(hdev);
2829
2830 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2831 if (ie) {
2832 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2833 ie->timestamp = jiffies;
2834 }
2835
2836 hci_dev_unlock(hdev);
2837 }
2838
2839 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2840 struct sk_buff *skb)
2841 {
2842 struct inquiry_data data;
2843 int num_rsp = *((__u8 *) skb->data);
2844 bool name_known, ssp;
2845
2846 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2847
2848 if (!num_rsp)
2849 return;
2850
2851 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2852 return;
2853
2854 hci_dev_lock(hdev);
2855
2856 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2857 struct inquiry_info_with_rssi_and_pscan_mode *info;
2858 info = (void *) (skb->data + 1);
2859
2860 for (; num_rsp; num_rsp--, info++) {
2861 bacpy(&data.bdaddr, &info->bdaddr);
2862 data.pscan_rep_mode = info->pscan_rep_mode;
2863 data.pscan_period_mode = info->pscan_period_mode;
2864 data.pscan_mode = info->pscan_mode;
2865 memcpy(data.dev_class, info->dev_class, 3);
2866 data.clock_offset = info->clock_offset;
2867 data.rssi = info->rssi;
2868 data.ssp_mode = 0x00;
2869
2870 name_known = hci_inquiry_cache_update(hdev, &data,
2871 false, &ssp);
2872 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2873 info->dev_class, info->rssi,
2874 !name_known, ssp, NULL, 0);
2875 }
2876 } else {
2877 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2878
2879 for (; num_rsp; num_rsp--, info++) {
2880 bacpy(&data.bdaddr, &info->bdaddr);
2881 data.pscan_rep_mode = info->pscan_rep_mode;
2882 data.pscan_period_mode = info->pscan_period_mode;
2883 data.pscan_mode = 0x00;
2884 memcpy(data.dev_class, info->dev_class, 3);
2885 data.clock_offset = info->clock_offset;
2886 data.rssi = info->rssi;
2887 data.ssp_mode = 0x00;
2888 name_known = hci_inquiry_cache_update(hdev, &data,
2889 false, &ssp);
2890 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2891 info->dev_class, info->rssi,
2892 !name_known, ssp, NULL, 0);
2893 }
2894 }
2895
2896 hci_dev_unlock(hdev);
2897 }
2898
2899 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2900 struct sk_buff *skb)
2901 {
2902 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2903 struct hci_conn *conn;
2904
2905 BT_DBG("%s", hdev->name);
2906
2907 hci_dev_lock(hdev);
2908
2909 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2910 if (!conn)
2911 goto unlock;
2912
2913 if (ev->page < HCI_MAX_PAGES)
2914 memcpy(conn->features[ev->page], ev->features, 8);
2915
2916 if (!ev->status && ev->page == 0x01) {
2917 struct inquiry_entry *ie;
2918
2919 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2920 if (ie)
2921 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2922
2923 if (ev->features[0] & LMP_HOST_SSP) {
2924 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2925 } else {
2926 /* The Bluetooth specification mandates that Extended
2927 * Inquiry Results are only used when Secure Simple
2928 * Pairing is enabled, but some devices violate
2929 * this.
2930 *
2931 * To make these devices work, the internal SSP
2932 * enabled flag needs to be cleared if the remote host
2933 * features do not indicate SSP support. */
2934 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2935 }
2936
2937 if (ev->features[0] & LMP_HOST_SC)
2938 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
2939 }
2940
2941 if (conn->state != BT_CONFIG)
2942 goto unlock;
2943
2944 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2945 struct hci_cp_remote_name_req cp;
2946 memset(&cp, 0, sizeof(cp));
2947 bacpy(&cp.bdaddr, &conn->dst);
2948 cp.pscan_rep_mode = 0x02;
2949 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2950 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2951 mgmt_device_connected(hdev, &conn->dst, conn->type,
2952 conn->dst_type, 0, NULL, 0,
2953 conn->dev_class);
2954
2955 if (!hci_outgoing_auth_needed(hdev, conn)) {
2956 conn->state = BT_CONNECTED;
2957 hci_proto_connect_cfm(conn, ev->status);
2958 hci_conn_drop(conn);
2959 }
2960
2961 unlock:
2962 hci_dev_unlock(hdev);
2963 }
2964
2965 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2966 struct sk_buff *skb)
2967 {
2968 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2969 struct hci_conn *conn;
2970
2971 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2972
2973 hci_dev_lock(hdev);
2974
2975 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2976 if (!conn) {
2977 if (ev->link_type == ESCO_LINK)
2978 goto unlock;
2979
2980 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2981 if (!conn)
2982 goto unlock;
2983
2984 conn->type = SCO_LINK;
2985 }
2986
2987 switch (ev->status) {
2988 case 0x00:
2989 conn->handle = __le16_to_cpu(ev->handle);
2990 conn->state = BT_CONNECTED;
2991
2992 hci_conn_add_sysfs(conn);
2993 break;
2994
2995 case 0x0d: /* Connection Rejected due to Limited Resources */
2996 case 0x11: /* Unsupported Feature or Parameter Value */
2997 case 0x1c: /* SCO interval rejected */
2998 case 0x1a: /* Unsupported Remote Feature */
2999 case 0x1f: /* Unspecified error */
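/* For outgoing requests, reset the packet type from the controller's
 * supported eSCO types and retry the setup before giving up on the
 * connection. */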
3000 if (conn->out) {
3001 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
3002 (hdev->esco_type & EDR_ESCO_MASK);
3003 if (hci_setup_sync(conn, conn->link->handle))
3004 goto unlock;
3005 }
3006 /* fall through */
3007
3008 default:
3009 conn->state = BT_CLOSED;
3010 break;
3011 }
3012
3013 hci_proto_connect_cfm(conn, ev->status);
3014 if (ev->status)
3015 hci_conn_del(conn);
3016
3017 unlock:
3018 hci_dev_unlock(hdev);
3019 }
3020
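/* Walk the EIR TLV structure and return the number of significant
 * bytes; a zero-length field terminates the significant part. */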
3021 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3022 {
3023 size_t parsed = 0;
3024
3025 while (parsed < eir_len) {
3026 u8 field_len = eir[0];
3027
3028 if (field_len == 0)
3029 return parsed;
3030
3031 parsed += field_len + 1;
3032 eir += field_len + 1;
3033 }
3034
3035 return eir_len;
3036 }
3037
3038 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3039 struct sk_buff *skb)
3040 {
3041 struct inquiry_data data;
3042 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3043 int num_rsp = *((__u8 *) skb->data);
3044 size_t eir_len;
3045
3046 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3047
3048 if (!num_rsp)
3049 return;
3050
3051 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3052 return;
3053
3054 hci_dev_lock(hdev);
3055
3056 for (; num_rsp; num_rsp--, info++) {
3057 bool name_known, ssp;
3058
3059 bacpy(&data.bdaddr, &info->bdaddr);
3060 data.pscan_rep_mode = info->pscan_rep_mode;
3061 data.pscan_period_mode = info->pscan_period_mode;
3062 data.pscan_mode = 0x00;
3063 memcpy(data.dev_class, info->dev_class, 3);
3064 data.clock_offset = info->clock_offset;
3065 data.rssi = info->rssi;
3066 data.ssp_mode = 0x01;
3067
3068 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3069 name_known = eir_has_data_type(info->data,
3070 sizeof(info->data),
3071 EIR_NAME_COMPLETE);
3072 else
3073 name_known = true;
3074
3075 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3076 &ssp);
3077 eir_len = eir_get_length(info->data, sizeof(info->data));
3078 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3079 info->dev_class, info->rssi, !name_known,
3080 ssp, info->data, eir_len);
3081 }
3082
3083 hci_dev_unlock(hdev);
3084 }
3085
3086 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3087 struct sk_buff *skb)
3088 {
3089 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3090 struct hci_conn *conn;
3091
3092 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3093 __le16_to_cpu(ev->handle));
3094
3095 hci_dev_lock(hdev);
3096
3097 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3098 if (!conn)
3099 goto unlock;
3100
3101 if (!ev->status)
3102 conn->sec_level = conn->pending_sec_level;
3103
3104 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3105
3106 if (ev->status && conn->state == BT_CONNECTED) {
3107 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3108 hci_conn_drop(conn);
3109 goto unlock;
3110 }
3111
3112 if (conn->state == BT_CONFIG) {
3113 if (!ev->status)
3114 conn->state = BT_CONNECTED;
3115
3116 hci_proto_connect_cfm(conn, ev->status);
3117 hci_conn_drop(conn);
3118 } else {
3119 hci_auth_cfm(conn, ev->status);
3120
3121 hci_conn_hold(conn);
3122 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3123 hci_conn_drop(conn);
3124 }
3125
3126 unlock:
3127 hci_dev_unlock(hdev);
3128 }
3129
3130 static u8 hci_get_auth_req(struct hci_conn *conn)
3131 {
3132 /* If remote requests dedicated bonding, follow that lead */
3133 if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
3134 conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
3135 /* If both remote and local IO capabilities allow MITM
3136 * protection, require it; otherwise don't */
3137 if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
3138 conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
3139 return HCI_AT_DEDICATED_BONDING;
3140 else
3141 return HCI_AT_DEDICATED_BONDING_MITM;
3142 }
3143
3144 /* If remote requests no-bonding, follow that lead */
3145 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3146 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3147 return conn->remote_auth | (conn->auth_type & 0x01);
3148
3149 return conn->auth_type;
3150 }
3151
3152 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3153 {
3154 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3155 struct hci_conn *conn;
3156
3157 BT_DBG("%s", hdev->name);
3158
3159 hci_dev_lock(hdev);
3160
3161 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3162 if (!conn)
3163 goto unlock;
3164
3165 hci_conn_hold(conn);
3166
3167 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3168 goto unlock;
3169
3170 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3171 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3172 struct hci_cp_io_capability_reply cp;
3173
3174 bacpy(&cp.bdaddr, &ev->bdaddr);
3175 /* Change the IO capability from KeyboardDisplay
3176 * to DisplayYesNo, as KeyboardDisplay is not supported by the BT spec. */
3177 cp.capability = (conn->io_capability == 0x04) ?
3178 HCI_IO_DISPLAY_YESNO : conn->io_capability;
3179 conn->auth_type = hci_get_auth_req(conn);
3180 cp.authentication = conn->auth_type;
3181
3182 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3183 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3184 cp.oob_data = 0x01;
3185 else
3186 cp.oob_data = 0x00;
3187
3188 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3189 sizeof(cp), &cp);
3190 } else {
3191 struct hci_cp_io_capability_neg_reply cp;
3192
3193 bacpy(&cp.bdaddr, &ev->bdaddr);
3194 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3195
3196 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3197 sizeof(cp), &cp);
3198 }
3199
3200 unlock:
3201 hci_dev_unlock(hdev);
3202 }
3203
3204 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3205 {
3206 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3207 struct hci_conn *conn;
3208
3209 BT_DBG("%s", hdev->name);
3210
3211 hci_dev_lock(hdev);
3212
3213 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3214 if (!conn)
3215 goto unlock;
3216
3217 conn->remote_cap = ev->capability;
3218 conn->remote_auth = ev->authentication;
3219 if (ev->oob_data)
3220 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3221
3222 unlock:
3223 hci_dev_unlock(hdev);
3224 }
3225
3226 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3227 struct sk_buff *skb)
3228 {
3229 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3230 int loc_mitm, rem_mitm, confirm_hint = 0;
3231 struct hci_conn *conn;
3232
3233 BT_DBG("%s", hdev->name);
3234
3235 hci_dev_lock(hdev);
3236
3237 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3238 goto unlock;
3239
3240 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3241 if (!conn)
3242 goto unlock;
3243
3244 loc_mitm = (conn->auth_type & 0x01);
3245 rem_mitm = (conn->remote_auth & 0x01);
3246
3247 /* If we require MITM but the remote device can't provide that
3248 * (it has NoInputNoOutput) then reject the confirmation
3249 * request. The only exception is when we're dedicated bonding
3250 * initiators (connect_cfm_cb set) since then we always have the MITM
3251 * bit set. */
3252 if (!conn->connect_cfm_cb && loc_mitm &&
3253 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3254 BT_DBG("Rejecting request: remote device can't provide MITM");
3255 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3256 sizeof(ev->bdaddr), &ev->bdaddr);
3257 goto unlock;
3258 }
3259
3260 /* If no side requires MITM protection, auto-accept */
3261 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
3262 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
3263
3264 /* If we're not the initiator, request authorization to
3265 * proceed from user space (mgmt_user_confirm with
3266 * confirm_hint set to 1). */
3267 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3268 BT_DBG("Confirming auto-accept as acceptor");
3269 confirm_hint = 1;
3270 goto confirm;
3271 }
3272
3273 BT_DBG("Auto-accept of user confirmation with %ums delay",
3274 hdev->auto_accept_delay);
3275
3276 if (hdev->auto_accept_delay > 0) {
3277 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3278 queue_delayed_work(conn->hdev->workqueue,
3279 &conn->auto_accept_work, delay);
3280 goto unlock;
3281 }
3282
3283 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3284 sizeof(ev->bdaddr), &ev->bdaddr);
3285 goto unlock;
3286 }
3287
3288 confirm:
3289 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3290 confirm_hint);
3291
3292 unlock:
3293 hci_dev_unlock(hdev);
3294 }
3295
3296 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3297 struct sk_buff *skb)
3298 {
3299 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3300
3301 BT_DBG("%s", hdev->name);
3302
3303 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3304 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3305 }
3306
3307 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3308 struct sk_buff *skb)
3309 {
3310 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3311 struct hci_conn *conn;
3312
3313 BT_DBG("%s", hdev->name);
3314
3315 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3316 if (!conn)
3317 return;
3318
3319 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3320 conn->passkey_entered = 0;
3321
3322 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3323 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3324 conn->dst_type, conn->passkey_notify,
3325 conn->passkey_entered);
3326 }
3327
3328 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3329 {
3330 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3331 struct hci_conn *conn;
3332
3333 BT_DBG("%s", hdev->name);
3334
3335 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3336 if (!conn)
3337 return;
3338
3339 switch (ev->type) {
3340 case HCI_KEYPRESS_STARTED:
3341 conn->passkey_entered = 0;
3342 return;
3343
3344 case HCI_KEYPRESS_ENTERED:
3345 conn->passkey_entered++;
3346 break;
3347
3348 case HCI_KEYPRESS_ERASED:
3349 conn->passkey_entered--;
3350 break;
3351
3352 case HCI_KEYPRESS_CLEARED:
3353 conn->passkey_entered = 0;
3354 break;
3355
3356 case HCI_KEYPRESS_COMPLETED:
3357 return;
3358 }
3359
3360 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3361 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3362 conn->dst_type, conn->passkey_notify,
3363 conn->passkey_entered);
3364 }
3365
3366 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3367 struct sk_buff *skb)
3368 {
3369 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3370 struct hci_conn *conn;
3371
3372 BT_DBG("%s", hdev->name);
3373
3374 hci_dev_lock(hdev);
3375
3376 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3377 if (!conn)
3378 goto unlock;
3379
3380 /* To avoid duplicate auth_failed events to user space, we check
3381 * the HCI_CONN_AUTH_PEND flag, which will be set if we
3382 * initiated the authentication. A traditional auth_complete
3383 * event is always produced as initiator and is also mapped to
3384 * the mgmt_auth_failed event. */
3385 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3386 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3387 ev->status);
3388
3389 hci_conn_drop(conn);
3390
3391 unlock:
3392 hci_dev_unlock(hdev);
3393 }
3394
3395 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3396 struct sk_buff *skb)
3397 {
3398 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3399 struct inquiry_entry *ie;
3400 struct hci_conn *conn;
3401
3402 BT_DBG("%s", hdev->name);
3403
3404 hci_dev_lock(hdev);
3405
3406 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3407 if (conn)
3408 memcpy(conn->features[1], ev->features, 8);
3409
3410 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3411 if (ie)
3412 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3413
3414 hci_dev_unlock(hdev);
3415 }
3416
3417 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3418 struct sk_buff *skb)
3419 {
3420 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3421 struct oob_data *data;
3422
3423 BT_DBG("%s", hdev->name);
3424
3425 hci_dev_lock(hdev);
3426
3427 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3428 goto unlock;
3429
3430 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3431 if (data) {
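/* When Secure Connections is enabled, reply with both the P-192 and
 * P-256 hash/randomizer pairs; otherwise only the P-192 values are
 * sent. */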
3432 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
3433 struct hci_cp_remote_oob_ext_data_reply cp;
3434
3435 bacpy(&cp.bdaddr, &ev->bdaddr);
3436 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
3437 memcpy(cp.randomizer192, data->randomizer192,
3438 sizeof(cp.randomizer192));
3439 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
3440 memcpy(cp.randomizer256, data->randomizer256,
3441 sizeof(cp.randomizer256));
3442
3443 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
3444 sizeof(cp), &cp);
3445 } else {
3446 struct hci_cp_remote_oob_data_reply cp;
3447
3448 bacpy(&cp.bdaddr, &ev->bdaddr);
3449 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
3450 memcpy(cp.randomizer, data->randomizer192,
3451 sizeof(cp.randomizer));
3452
3453 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
3454 sizeof(cp), &cp);
3455 }
3456 } else {
3457 struct hci_cp_remote_oob_data_neg_reply cp;
3458
3459 bacpy(&cp.bdaddr, &ev->bdaddr);
3460 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
3461 sizeof(cp), &cp);
3462 }
3463
3464 unlock:
3465 hci_dev_unlock(hdev);
3466 }
3467
3468 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3469 struct sk_buff *skb)
3470 {
3471 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3472 struct hci_conn *hcon, *bredr_hcon;
3473
3474 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3475 ev->status);
3476
3477 hci_dev_lock(hdev);
3478
3479 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3480 if (!hcon) {
3481 hci_dev_unlock(hdev);
3482 return;
3483 }
3484
3485 if (ev->status) {
3486 hci_conn_del(hcon);
3487 hci_dev_unlock(hdev);
3488 return;
3489 }
3490
3491 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3492
3493 hcon->state = BT_CONNECTED;
3494 bacpy(&hcon->dst, &bredr_hcon->dst);
3495
3496 hci_conn_hold(hcon);
3497 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3498 hci_conn_drop(hcon);
3499
3500 hci_conn_add_sysfs(hcon);
3501
3502 amp_physical_cfm(bredr_hcon, hcon);
3503
3504 hci_dev_unlock(hdev);
3505 }
3506
3507 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3508 {
3509 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3510 struct hci_conn *hcon;
3511 struct hci_chan *hchan;
3512 struct amp_mgr *mgr;
3513
3514 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3515 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3516 ev->status);
3517
3518 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3519 if (!hcon)
3520 return;
3521
3522 /* Create AMP hchan */
3523 hchan = hci_chan_create(hcon);
3524 if (!hchan)
3525 return;
3526
3527 hchan->handle = le16_to_cpu(ev->handle);
3528
3529 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3530
3531 mgr = hcon->amp_mgr;
3532 if (mgr && mgr->bredr_chan) {
3533 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3534
3535 l2cap_chan_lock(bredr_chan);
3536
3537 bredr_chan->conn->mtu = hdev->block_mtu;
3538 l2cap_logical_cfm(bredr_chan, hchan, 0);
3539 hci_conn_hold(hcon);
3540
3541 l2cap_chan_unlock(bredr_chan);
3542 }
3543 }
3544
3545 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3546 struct sk_buff *skb)
3547 {
3548 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3549 struct hci_chan *hchan;
3550
3551 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3552 le16_to_cpu(ev->handle), ev->status);
3553
3554 if (ev->status)
3555 return;
3556
3557 hci_dev_lock(hdev);
3558
3559 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3560 if (!hchan)
3561 goto unlock;
3562
3563 amp_destroy_logical_link(hchan, ev->reason);
3564
3565 unlock:
3566 hci_dev_unlock(hdev);
3567 }
3568
3569 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3570 struct sk_buff *skb)
3571 {
3572 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3573 struct hci_conn *hcon;
3574
3575 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3576
3577 if (ev->status)
3578 return;
3579
3580 hci_dev_lock(hdev);
3581
3582 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3583 if (hcon) {
3584 hcon->state = BT_CLOSED;
3585 hci_conn_del(hcon);
3586 }
3587
3588 hci_dev_unlock(hdev);
3589 }
3590
3591 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3592 {
3593 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3594 struct hci_conn *conn;
3595 struct smp_irk *irk;
3596
3597 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3598
3599 hci_dev_lock(hdev);
3600
3601 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3602 if (!conn) {
3603 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3604 if (!conn) {
3605 BT_ERR("No memory for new connection");
3606 goto unlock;
3607 }
3608
3609 conn->dst_type = ev->bdaddr_type;
3610
3611 /* The advertising parameters for own address type
3612 * define which source address and source address
3613 * type this connection has.
3614 */
3615 if (bacmp(&conn->src, BDADDR_ANY)) {
3616 conn->src_type = ADDR_LE_DEV_PUBLIC;
3617 } else {
3618 bacpy(&conn->src, &hdev->static_addr);
3619 conn->src_type = ADDR_LE_DEV_RANDOM;
3620 }
3621
3622 if (ev->role == LE_CONN_ROLE_MASTER) {
3623 conn->out = true;
3624 conn->link_mode |= HCI_LM_MASTER;
3625 }
3626 }
3627
3628 /* Look up the identity address from the stored connection
3629 * address and address type.
3630 *
3631 * When establishing connections to an identity address, the
3632 * connection procedure will store the resolvable random
3633 * address first. If it can be resolved back into the
3634 * identity address, start using the identity address from
3635 * now on.
3636 */
3637 irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
3638 if (irk) {
3639 bacpy(&conn->dst, &irk->bdaddr);
3640 conn->dst_type = irk->addr_type;
3641 }
3642
3643 if (ev->status) {
3644 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3645 conn->dst_type, ev->status);
3646 hci_proto_connect_cfm(conn, ev->status);
3647 conn->state = BT_CLOSED;
3648 hci_conn_del(conn);
3649 goto unlock;
3650 }
3651
3652 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3653 mgmt_device_connected(hdev, &conn->dst, conn->type,
3654 conn->dst_type, 0, NULL, 0, NULL);
3655
3656 conn->sec_level = BT_SECURITY_LOW;
3657 conn->handle = __le16_to_cpu(ev->handle);
3658 conn->state = BT_CONNECTED;
3659
3660 if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
3661 set_bit(HCI_CONN_6LOWPAN, &conn->flags);
3662
3663 hci_conn_add_sysfs(conn);
3664
3665 hci_proto_connect_cfm(conn, ev->status);
3666
3667 unlock:
3668 hci_dev_unlock(hdev);
3669 }
3670
3671 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3672 {
3673 u8 num_reports = skb->data[0];
3674 void *ptr = &skb->data[1];
3675 s8 rssi;
3676
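/* Each advertising report is followed by a single RSSI byte, so the
 * RSSI sits right after the variable-length advertising data. */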
3677 while (num_reports--) {
3678 struct hci_ev_le_advertising_info *ev = ptr;
3679
3680 rssi = ev->data[ev->length];
3681 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3682 NULL, rssi, 0, 1, ev->data, ev->length);
3683
3684 ptr += sizeof(*ev) + ev->length + 1;
3685 }
3686 }
3687
3688 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3689 {
3690 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3691 struct hci_cp_le_ltk_reply cp;
3692 struct hci_cp_le_ltk_neg_reply neg;
3693 struct hci_conn *conn;
3694 struct smp_ltk *ltk;
3695
3696 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3697
3698 hci_dev_lock(hdev);
3699
3700 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3701 if (conn == NULL)
3702 goto not_found;
3703
3704 ltk = hci_find_ltk(hdev, ev->ediv, ev->random, conn->out);
3705 if (ltk == NULL)
3706 goto not_found;
3707
3708 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3709 cp.handle = cpu_to_le16(conn->handle);
3710
3711 if (ltk->authenticated)
3712 conn->pending_sec_level = BT_SECURITY_HIGH;
3713 else
3714 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3715
3716 conn->enc_key_size = ltk->enc_size;
3717
3718 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3719
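/* Short term keys are only meant to be used once, so drop the STK
 * after handing it to the controller. */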
3720 if (ltk->type & HCI_SMP_STK) {
3721 list_del(&ltk->list);
3722 kfree(ltk);
3723 }
3724
3725 hci_dev_unlock(hdev);
3726
3727 return;
3728
3729 not_found:
3730 neg.handle = ev->handle;
3731 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3732 hci_dev_unlock(hdev);
3733 }
3734
3735 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3736 {
3737 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3738
3739 skb_pull(skb, sizeof(*le_ev));
3740
3741 switch (le_ev->subevent) {
3742 case HCI_EV_LE_CONN_COMPLETE:
3743 hci_le_conn_complete_evt(hdev, skb);
3744 break;
3745
3746 case HCI_EV_LE_ADVERTISING_REPORT:
3747 hci_le_adv_report_evt(hdev, skb);
3748 break;
3749
3750 case HCI_EV_LE_LTK_REQ:
3751 hci_le_ltk_request_evt(hdev, skb);
3752 break;
3753
3754 default:
3755 break;
3756 }
3757 }
3758
3759 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3760 {
3761 struct hci_ev_channel_selected *ev = (void *) skb->data;
3762 struct hci_conn *hcon;
3763
3764 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3765
3766 skb_pull(skb, sizeof(*ev));
3767
3768 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3769 if (!hcon)
3770 return;
3771
3772 amp_read_loc_assoc_final_data(hdev, hcon);
3773 }
3774
3775 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3776 {
3777 struct hci_event_hdr *hdr = (void *) skb->data;
3778 __u8 event = hdr->evt;
3779
3780 hci_dev_lock(hdev);
3781
3782 /* Received events are (currently) only needed when a request is
3783 * ongoing, so avoid unnecessary memory allocation.
3784 */
3785 if (hdev->req_status == HCI_REQ_PEND) {
3786 kfree_skb(hdev->recv_evt);
3787 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
3788 }
3789
3790 hci_dev_unlock(hdev);
3791
3792 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3793
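/* Commands that are completed by a dedicated event (rather than
 * Command Complete/Status) record that event in req.event; if the
 * incoming event matches, complete the pending request here. */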
3794 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
3795 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
3796 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
3797
3798 hci_req_cmd_complete(hdev, opcode, 0);
3799 }
3800
3801 switch (event) {
3802 case HCI_EV_INQUIRY_COMPLETE:
3803 hci_inquiry_complete_evt(hdev, skb);
3804 break;
3805
3806 case HCI_EV_INQUIRY_RESULT:
3807 hci_inquiry_result_evt(hdev, skb);
3808 break;
3809
3810 case HCI_EV_CONN_COMPLETE:
3811 hci_conn_complete_evt(hdev, skb);
3812 break;
3813
3814 case HCI_EV_CONN_REQUEST:
3815 hci_conn_request_evt(hdev, skb);
3816 break;
3817
3818 case HCI_EV_DISCONN_COMPLETE:
3819 hci_disconn_complete_evt(hdev, skb);
3820 break;
3821
3822 case HCI_EV_AUTH_COMPLETE:
3823 hci_auth_complete_evt(hdev, skb);
3824 break;
3825
3826 case HCI_EV_REMOTE_NAME:
3827 hci_remote_name_evt(hdev, skb);
3828 break;
3829
3830 case HCI_EV_ENCRYPT_CHANGE:
3831 hci_encrypt_change_evt(hdev, skb);
3832 break;
3833
3834 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3835 hci_change_link_key_complete_evt(hdev, skb);
3836 break;
3837
3838 case HCI_EV_REMOTE_FEATURES:
3839 hci_remote_features_evt(hdev, skb);
3840 break;
3841
3842 case HCI_EV_CMD_COMPLETE:
3843 hci_cmd_complete_evt(hdev, skb);
3844 break;
3845
3846 case HCI_EV_CMD_STATUS:
3847 hci_cmd_status_evt(hdev, skb);
3848 break;
3849
3850 case HCI_EV_ROLE_CHANGE:
3851 hci_role_change_evt(hdev, skb);
3852 break;
3853
3854 case HCI_EV_NUM_COMP_PKTS:
3855 hci_num_comp_pkts_evt(hdev, skb);
3856 break;
3857
3858 case HCI_EV_MODE_CHANGE:
3859 hci_mode_change_evt(hdev, skb);
3860 break;
3861
3862 case HCI_EV_PIN_CODE_REQ:
3863 hci_pin_code_request_evt(hdev, skb);
3864 break;
3865
3866 case HCI_EV_LINK_KEY_REQ:
3867 hci_link_key_request_evt(hdev, skb);
3868 break;
3869
3870 case HCI_EV_LINK_KEY_NOTIFY:
3871 hci_link_key_notify_evt(hdev, skb);
3872 break;
3873
3874 case HCI_EV_CLOCK_OFFSET:
3875 hci_clock_offset_evt(hdev, skb);
3876 break;
3877
3878 case HCI_EV_PKT_TYPE_CHANGE:
3879 hci_pkt_type_change_evt(hdev, skb);
3880 break;
3881
3882 case HCI_EV_PSCAN_REP_MODE:
3883 hci_pscan_rep_mode_evt(hdev, skb);
3884 break;
3885
3886 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3887 hci_inquiry_result_with_rssi_evt(hdev, skb);
3888 break;
3889
3890 case HCI_EV_REMOTE_EXT_FEATURES:
3891 hci_remote_ext_features_evt(hdev, skb);
3892 break;
3893
3894 case HCI_EV_SYNC_CONN_COMPLETE:
3895 hci_sync_conn_complete_evt(hdev, skb);
3896 break;
3897
3898 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3899 hci_extended_inquiry_result_evt(hdev, skb);
3900 break;
3901
3902 case HCI_EV_KEY_REFRESH_COMPLETE:
3903 hci_key_refresh_complete_evt(hdev, skb);
3904 break;
3905
3906 case HCI_EV_IO_CAPA_REQUEST:
3907 hci_io_capa_request_evt(hdev, skb);
3908 break;
3909
3910 case HCI_EV_IO_CAPA_REPLY:
3911 hci_io_capa_reply_evt(hdev, skb);
3912 break;
3913
3914 case HCI_EV_USER_CONFIRM_REQUEST:
3915 hci_user_confirm_request_evt(hdev, skb);
3916 break;
3917
3918 case HCI_EV_USER_PASSKEY_REQUEST:
3919 hci_user_passkey_request_evt(hdev, skb);
3920 break;
3921
3922 case HCI_EV_USER_PASSKEY_NOTIFY:
3923 hci_user_passkey_notify_evt(hdev, skb);
3924 break;
3925
3926 case HCI_EV_KEYPRESS_NOTIFY:
3927 hci_keypress_notify_evt(hdev, skb);
3928 break;
3929
3930 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3931 hci_simple_pair_complete_evt(hdev, skb);
3932 break;
3933
3934 case HCI_EV_REMOTE_HOST_FEATURES:
3935 hci_remote_host_features_evt(hdev, skb);
3936 break;
3937
3938 case HCI_EV_LE_META:
3939 hci_le_meta_evt(hdev, skb);
3940 break;
3941
3942 case HCI_EV_CHANNEL_SELECTED:
3943 hci_chan_selected_evt(hdev, skb);
3944 break;
3945
3946 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3947 hci_remote_oob_data_request_evt(hdev, skb);
3948 break;
3949
3950 case HCI_EV_PHY_LINK_COMPLETE:
3951 hci_phy_link_complete_evt(hdev, skb);
3952 break;
3953
3954 case HCI_EV_LOGICAL_LINK_COMPLETE:
3955 hci_loglink_complete_evt(hdev, skb);
3956 break;
3957
3958 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
3959 hci_disconn_loglink_complete_evt(hdev, skb);
3960 break;
3961
3962 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
3963 hci_disconn_phylink_complete_evt(hdev, skb);
3964 break;
3965
3966 case HCI_EV_NUM_COMP_BLOCKS:
3967 hci_num_comp_blocks_evt(hdev, skb);
3968 break;
3969
3970 default:
3971 BT_DBG("%s event 0x%2.2x", hdev->name, event);
3972 break;
3973 }
3974
3975 kfree_skb(skb);
3976 hdev->stat.evt_rx++;
3977 }