net/bluetooth/hci_event.c (Linux kernel source, deliverable/linux.git)
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38
39 #define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
40 "\x00\x00\x00\x00\x00\x00\x00\x00"
41
42 /* Handle HCI Event packets */
43
/* Handle command complete for HCI_OP_INQUIRY_CANCEL: on success, clear the
 * inquiry-in-progress state, mark discovery stopped and kick any pending
 * connection requests that were queued while inquiry was running.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	/* The barrier pairs with wait_on_bit() waiters; it must sit between
	 * clear_bit() and wake_up_bit() so the cleared flag is visible before
	 * the wake-up.
	 */
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
63
64 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
65 {
66 __u8 status = *((__u8 *) skb->data);
67
68 BT_DBG("%s status 0x%2.2x", hdev->name, status);
69
70 if (status)
71 return;
72
73 hci_dev_set_flag(hdev, HCI_PERIODIC_INQ);
74 }
75
76 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
77 {
78 __u8 status = *((__u8 *) skb->data);
79
80 BT_DBG("%s status 0x%2.2x", hdev->name, status);
81
82 if (status)
83 return;
84
85 hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ);
86
87 hci_conn_check_pending(hdev);
88 }
89
/* Command complete for HCI_OP_REMOTE_NAME_REQ_CANCEL: nothing to update,
 * only logged for debugging.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
95
96 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
97 {
98 struct hci_rp_role_discovery *rp = (void *) skb->data;
99 struct hci_conn *conn;
100
101 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
102
103 if (rp->status)
104 return;
105
106 hci_dev_lock(hdev);
107
108 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
109 if (conn)
110 conn->role = rp->role;
111
112 hci_dev_unlock(hdev);
113 }
114
115 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
116 {
117 struct hci_rp_read_link_policy *rp = (void *) skb->data;
118 struct hci_conn *conn;
119
120 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
121
122 if (rp->status)
123 return;
124
125 hci_dev_lock(hdev);
126
127 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
128 if (conn)
129 conn->link_policy = __le16_to_cpu(rp->policy);
130
131 hci_dev_unlock(hdev);
132 }
133
/* Command complete for HCI_OP_WRITE_LINK_POLICY: the response only carries
 * status + handle, so the policy value is recovered from the command we sent.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2 skips the 2-byte connection handle at the start of
		 * the command parameters; the policy field follows it
		 * (presumably struct hci_cp_write_link_policy layout — verify
		 * against the header if changed).
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
157
158 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
159 struct sk_buff *skb)
160 {
161 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162
163 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
164
165 if (rp->status)
166 return;
167
168 hdev->link_policy = __le16_to_cpu(rp->policy);
169 }
170
/* Command complete for HCI_OP_WRITE_DEF_LINK_POLICY: the event carries only
 * a status, so mirror the policy value from the command parameters we sent.
 */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	/* Command parameters start with the 16-bit policy field */
	hdev->link_policy = get_unaligned_le16(sent);
}
188
/* Command complete for HCI_OP_RESET: return the host-side state to its
 * post-reset defaults so it matches what the controller now reports.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* HCI_RESET is cleared even on failure so a stuck flag cannot block
	 * further resets.
	 */
	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hci_dev_clear_volatile_flags(hdev);

	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);

	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Cached advertising and scan-response payloads are stale after a
	 * controller reset.
	 */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* The controller's LE white list is emptied by reset; drop our copy */
	hci_bdaddr_list_clear(&hdev->le_white_list);
}
220
/* Command complete for HCI_OP_READ_STORED_LINK_KEY: remember the key-storage
 * capacity, but only when the request asked for all keys (read_all == 0x01),
 * since only then do max_keys/num_keys describe the full store.
 */
static void hci_cc_read_stored_link_key(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_stored_link_key *rp = (void *)skb->data;
	struct hci_cp_read_stored_link_key *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY);
	if (!sent)
		return;

	if (!rp->status && sent->read_all == 0x01) {
		hdev->stored_max_keys = rp->max_keys;
		hdev->stored_num_keys = rp->num_keys;
	}
}
238
/* Command complete for HCI_OP_DELETE_STORED_LINK_KEY: decrement the cached
 * stored-key count, clamping at zero in case the controller reports more
 * deletions than we thought were stored.
 */
static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_delete_stored_link_key *rp = (void *)skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (rp->num_keys <= hdev->stored_num_keys)
		hdev->stored_num_keys -= rp->num_keys;
	else
		hdev->stored_num_keys = 0;
}
254
/* Command complete for HCI_OP_WRITE_LOCAL_NAME: with mgmt active, let the
 * management interface finish the pending Set Local Name operation (it gets
 * the status either way); otherwise update the cached name only on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
275
276 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
277 {
278 struct hci_rp_read_local_name *rp = (void *) skb->data;
279
280 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
281
282 if (rp->status)
283 return;
284
285 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
286 hci_dev_test_flag(hdev, HCI_CONFIG))
287 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
288 }
289
/* Command complete for HCI_OP_WRITE_AUTH_ENABLE: sync the HCI_AUTH flag with
 * the value we wrote, and notify mgmt of the outcome regardless of status.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* First byte of the command is the auth-enable parameter */
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}
317
318 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
319 {
320 __u8 status = *((__u8 *) skb->data);
321 __u8 param;
322 void *sent;
323
324 BT_DBG("%s status 0x%2.2x", hdev->name, status);
325
326 if (status)
327 return;
328
329 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
330 if (!sent)
331 return;
332
333 param = *((__u8 *) sent);
334
335 if (param)
336 set_bit(HCI_ENCRYPT, &hdev->flags);
337 else
338 clear_bit(HCI_ENCRYPT, &hdev->flags);
339 }
340
/* Command complete for HCI_OP_WRITE_SCAN_ENABLE: sync the inquiry-scan and
 * page-scan flags with the scan mode we wrote. On failure only the
 * discoverable timeout is reset.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		/* The command failed; any pending discoverable timeout no
		 * longer applies.
		 */
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
375
376 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
377 {
378 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
379
380 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
381
382 if (rp->status)
383 return;
384
385 memcpy(hdev->dev_class, rp->dev_class, 3);
386
387 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
388 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
389 }
390
/* Command complete for HCI_OP_WRITE_CLASS_OF_DEV: on success adopt the class
 * we wrote, then let mgmt finish its pending Set Class operation (mgmt is
 * notified even on failure so it can report the error).
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
412
/* Command complete for HCI_OP_READ_VOICE_SETTING: cache the voice setting
 * and notify the driver only when the value actually changed.
 */
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	/* Avoid redundant driver notifications when nothing changed */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
435
/* Command complete for HCI_OP_WRITE_VOICE_SETTING: the event carries only a
 * status, so the new value is taken from the command we sent. The driver is
 * notified only when the setting actually changed.
 */
static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	/* Command parameters start with the 16-bit voice setting */
	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
464
465 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
466 struct sk_buff *skb)
467 {
468 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
469
470 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
471
472 if (rp->status)
473 return;
474
475 hdev->num_iac = rp->num_iac;
476
477 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
478 }
479
/* Command complete for HCI_OP_WRITE_SSP_MODE: keep the host-SSP feature bit
 * and the HCI_SSP_ENABLED flag in sync with the mode we wrote. With mgmt
 * active, mgmt owns the flag update and gets notified even on failure.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* Mirror the host feature bit (page 1, byte 0) */
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			hci_dev_set_flag(hdev, HCI_SSP_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SSP_ENABLED);
	}

	hci_dev_unlock(hdev);
}
511
/* Command complete for HCI_OP_WRITE_SC_SUPPORT: keep the host Secure
 * Connections feature bit and, when mgmt is not managing the device, the
 * HCI_SC_ENABLED flag in sync with the value we wrote.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* Mirror the host feature bit (page 1, byte 0) */
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (!hci_dev_test_flag(hdev, HCI_MGMT) && !status) {
		if (sent->support)
			hci_dev_set_flag(hdev, HCI_SC_ENABLED);
		else
			hci_dev_clear_flag(hdev, HCI_SC_ENABLED);
	}

	hci_dev_unlock(hdev);
}
541
/* Command complete for HCI_OP_READ_LOCAL_VERSION: record HCI/LMP version
 * and manufacturer info, but only during setup or config — afterwards the
 * cached values are considered authoritative.
 */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}
560
561 static void hci_cc_read_local_commands(struct hci_dev *hdev,
562 struct sk_buff *skb)
563 {
564 struct hci_rp_read_local_commands *rp = (void *) skb->data;
565
566 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
567
568 if (rp->status)
569 return;
570
571 if (hci_dev_test_flag(hdev, HCI_SETUP) ||
572 hci_dev_test_flag(hdev, HCI_CONFIG))
573 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
574 }
575
/* Command complete for HCI_OP_READ_LOCAL_FEATURES: cache the LMP feature
 * page 0 and derive the usable ACL packet types and (e)SCO link types from
 * the individual feature bits.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
625
/* Command complete for HCI_OP_READ_LOCAL_EXT_FEATURES: store one extended
 * feature page and track the highest page number the controller offers.
 */
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	/* Bounds-check the page index before indexing the features array */
	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}
642
643 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
644 struct sk_buff *skb)
645 {
646 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
647
648 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
649
650 if (rp->status)
651 return;
652
653 hdev->flow_ctl_mode = rp->mode;
654 }
655
/* Command complete for HCI_OP_READ_BUFFER_SIZE: record the controller's ACL
 * and SCO buffer geometry and initialise the free-packet counters.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report bogus SCO buffer sizes; override them */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	/* All buffers start out free */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
681
/* Command complete for HCI_OP_READ_BD_ADDR: record the controller address
 * during init, and additionally snapshot it as the setup-time address while
 * the device is still in the SETUP phase.
 */
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (hci_dev_test_flag(hdev, HCI_SETUP))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}
697
/* Command complete for HCI_OP_READ_PAGE_SCAN_ACTIVITY: cache the page scan
 * interval/window, but only during controller initialisation.
 */
static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}
713
/* Command complete for HCI_OP_WRITE_PAGE_SCAN_ACTIVITY: the event has no
 * payload beyond status, so mirror the interval/window from the command we
 * sent on success.
 */
static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}
732
733 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
734 struct sk_buff *skb)
735 {
736 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
737
738 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
739
740 if (rp->status)
741 return;
742
743 if (test_bit(HCI_INIT, &hdev->flags))
744 hdev->page_scan_type = rp->type;
745 }
746
747 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
748 struct sk_buff *skb)
749 {
750 u8 status = *((u8 *) skb->data);
751 u8 *type;
752
753 BT_DBG("%s status 0x%2.2x", hdev->name, status);
754
755 if (status)
756 return;
757
758 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
759 if (type)
760 hdev->page_scan_type = *type;
761 }
762
/* Command complete for HCI_OP_READ_DATA_BLOCK_SIZE: record the block-based
 * flow control geometry and initialise the free-block counter.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	/* All blocks start out free */
	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
782
/* Command complete for HCI_OP_READ_CLOCK: store either the local clock on
 * the hdev or the piconet clock + accuracy on the matching connection,
 * depending on which clock the original command requested.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against a truncated event before touching rp fields */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 requests the local clock (per the HCI spec);
	 * otherwise it is the piconet clock of a specific connection.
	 */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
817
/* Command complete for HCI_OP_READ_LOCAL_AMP_INFO: cache all AMP controller
 * capability fields (bandwidth, latency, PDU sizes, flush timeouts).
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
}
839
840 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
841 struct sk_buff *skb)
842 {
843 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
844
845 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
846
847 if (rp->status)
848 return;
849
850 hdev->inq_tx_power = rp->tx_power;
851 }
852
/* Command complete for HCI_OP_PIN_CODE_REPLY: notify mgmt of the outcome
 * (success or failure), then on success remember the PIN length on the
 * connection for later key-type decisions.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
880
/* Command complete for HCI_OP_PIN_CODE_NEG_REPLY: just forward the outcome
 * to the management interface when it is active.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
895
/* Command complete for HCI_OP_LE_READ_BUFFER_SIZE: record the LE ACL buffer
 * geometry and initialise the LE free-packet counter.
 */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	/* All LE buffers start out free */
	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}
913
914 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
915 struct sk_buff *skb)
916 {
917 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
918
919 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
920
921 if (rp->status)
922 return;
923
924 memcpy(hdev->le_features, rp->features, 8);
925 }
926
927 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
928 struct sk_buff *skb)
929 {
930 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
931
932 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
933
934 if (rp->status)
935 return;
936
937 hdev->adv_tx_power = rp->tx_power;
938 }
939
/* Command complete for HCI_OP_USER_CONFIRM_REPLY: forward the result to
 * mgmt so the pending user-confirmation action can be completed.
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
954
/* Command complete for HCI_OP_USER_CONFIRM_NEG_REPLY: forward the result to
 * mgmt so the pending rejection can be completed.
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
970
/* Command complete for HCI_OP_USER_PASSKEY_REPLY: forward the result to
 * mgmt so the pending passkey entry can be completed.
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
985
/* Command complete for HCI_OP_USER_PASSKEY_NEG_REPLY: forward the result to
 * mgmt so the pending passkey rejection can be completed.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
1001
/* Command complete for HCI_OP_READ_LOCAL_OOB_DATA: only the status is
 * logged here; the OOB hash/randomizer in the response is consumed via the
 * generic command-complete path, not this handler.
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1009
/* Command complete for HCI_OP_READ_LOCAL_OOB_EXT_DATA: like the non-extended
 * variant, only the status is logged here.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
}
1017
/* Command complete for HCI_OP_LE_SET_RANDOM_ADDR: the event has no payload
 * beyond status, so mirror the address from the command we sent.
 */
static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}
1038
/* Command complete for HCI_OP_LE_SET_ADV_ENABLE: sync the HCI_LE_ADV flag
 * with the enable value we sent. When advertising was just enabled as part
 * of peripheral-initiated connection setup, arm the connection timeout.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		hci_dev_set_flag(hdev, HCI_LE_ADV);

		/* A pending BT_CONNECT LE link means we are advertising in
		 * order to be connected to; bound how long we wait.
		 */
		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		hci_dev_clear_flag(hdev, HCI_LE_ADV);
	}

	hci_dev_unlock(hdev);
}
1073
/* Command complete for HCI_OP_LE_SET_SCAN_PARAM: remember the scan type
 * (active/passive) we just configured, taken from the sent command.
 */
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}
1094
1095 static bool has_pending_adv_report(struct hci_dev *hdev)
1096 {
1097 struct discovery_state *d = &hdev->discovery;
1098
1099 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1100 }
1101
1102 static void clear_pending_adv_report(struct hci_dev *hdev)
1103 {
1104 struct discovery_state *d = &hdev->discovery;
1105
1106 bacpy(&d->last_adv_addr, BDADDR_ANY);
1107 d->last_adv_data_len = 0;
1108 }
1109
1110 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1111 u8 bdaddr_type, s8 rssi, u32 flags,
1112 u8 *data, u8 len)
1113 {
1114 struct discovery_state *d = &hdev->discovery;
1115
1116 bacpy(&d->last_adv_addr, bdaddr);
1117 d->last_adv_addr_type = bdaddr_type;
1118 d->last_adv_rssi = rssi;
1119 d->last_adv_flags = flags;
1120 memcpy(d->last_adv_data, data, len);
1121 d->last_adv_data_len = len;
1122 }
1123
/* Command complete for HCI_OP_LE_SET_SCAN_ENABLE: track the HCI_LE_SCAN
 * flag, flush any deferred advertising report on disable, and reconcile the
 * discovery / advertising state machines.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		hci_dev_set_flag(hdev, HCI_LE_SCAN);
		/* Active scanning merges ADV + scan-response pairs; start
		 * with no pending report.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		hci_dev_clear_flag(hdev, HCI_LE_SCAN);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1192
1193 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1194 struct sk_buff *skb)
1195 {
1196 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1197
1198 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1199
1200 if (rp->status)
1201 return;
1202
1203 hdev->le_white_list_size = rp->size;
1204 }
1205
1206 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1207 struct sk_buff *skb)
1208 {
1209 __u8 status = *((__u8 *) skb->data);
1210
1211 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1212
1213 if (status)
1214 return;
1215
1216 hci_bdaddr_list_clear(&hdev->le_white_list);
1217 }
1218
1219 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1220 struct sk_buff *skb)
1221 {
1222 struct hci_cp_le_add_to_white_list *sent;
1223 __u8 status = *((__u8 *) skb->data);
1224
1225 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1226
1227 if (status)
1228 return;
1229
1230 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1231 if (!sent)
1232 return;
1233
1234 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1235 sent->bdaddr_type);
1236 }
1237
1238 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1239 struct sk_buff *skb)
1240 {
1241 struct hci_cp_le_del_from_white_list *sent;
1242 __u8 status = *((__u8 *) skb->data);
1243
1244 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1245
1246 if (status)
1247 return;
1248
1249 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1250 if (!sent)
1251 return;
1252
1253 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1254 sent->bdaddr_type);
1255 }
1256
1257 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1258 struct sk_buff *skb)
1259 {
1260 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1261
1262 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1263
1264 if (rp->status)
1265 return;
1266
1267 memcpy(hdev->le_states, rp->le_states, 8);
1268 }
1269
1270 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1271 struct sk_buff *skb)
1272 {
1273 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1274
1275 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1276
1277 if (rp->status)
1278 return;
1279
1280 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1281 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1282 }
1283
1284 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1285 struct sk_buff *skb)
1286 {
1287 struct hci_cp_le_write_def_data_len *sent;
1288 __u8 status = *((__u8 *) skb->data);
1289
1290 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1291
1292 if (status)
1293 return;
1294
1295 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1296 if (!sent)
1297 return;
1298
1299 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1300 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1301 }
1302
1303 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1304 struct sk_buff *skb)
1305 {
1306 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1307
1308 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1309
1310 if (rp->status)
1311 return;
1312
1313 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1314 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1315 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1316 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1317 }
1318
/* Command complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 * On success, mirror the LE Host Supported / Simultaneous LE+BR/EDR bits
 * we sent into the cached host-feature page and the hdev flags.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The new setting comes from the command we sent, not the reply. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		hci_dev_set_flag(hdev, HCI_LE_ENABLED);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		hci_dev_clear_flag(hdev, HCI_LE_ENABLED);
		/* Advertising cannot stay enabled once LE support is off. */
		hci_dev_clear_flag(hdev, HCI_ADVERTISING);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
1352
1353 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1354 {
1355 struct hci_cp_le_set_adv_param *cp;
1356 u8 status = *((u8 *) skb->data);
1357
1358 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1359
1360 if (status)
1361 return;
1362
1363 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1364 if (!cp)
1365 return;
1366
1367 hci_dev_lock(hdev);
1368 hdev->adv_addr_type = cp->own_address_type;
1369 hci_dev_unlock(hdev);
1370 }
1371
1372 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1373 {
1374 struct hci_rp_read_rssi *rp = (void *) skb->data;
1375 struct hci_conn *conn;
1376
1377 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1378
1379 if (rp->status)
1380 return;
1381
1382 hci_dev_lock(hdev);
1383
1384 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1385 if (conn)
1386 conn->rssi = rp->rssi;
1387
1388 hci_dev_unlock(hdev);
1389 }
1390
1391 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1392 {
1393 struct hci_cp_read_tx_power *sent;
1394 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1395 struct hci_conn *conn;
1396
1397 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1398
1399 if (rp->status)
1400 return;
1401
1402 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1403 if (!sent)
1404 return;
1405
1406 hci_dev_lock(hdev);
1407
1408 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1409 if (!conn)
1410 goto unlock;
1411
1412 switch (sent->type) {
1413 case 0x00:
1414 conn->tx_power = rp->tx_power;
1415 break;
1416 case 0x01:
1417 conn->max_tx_power = rp->tx_power;
1418 break;
1419 }
1420
1421 unlock:
1422 hci_dev_unlock(hdev);
1423 }
1424
1425 static void hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, struct sk_buff *skb)
1426 {
1427 u8 status = *((u8 *) skb->data);
1428 u8 *mode;
1429
1430 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1431
1432 if (status)
1433 return;
1434
1435 mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE);
1436 if (mode)
1437 hdev->ssp_debug_mode = *mode;
1438 }
1439
1440 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1441 {
1442 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1443
1444 if (status) {
1445 hci_conn_check_pending(hdev);
1446 return;
1447 }
1448
1449 set_bit(HCI_INQUIRY, &hdev->flags);
1450 }
1451
/* Command status handler for HCI_OP_CREATE_CONN.
 * On failure, close (or retry) the pending outgoing ACL connection; on
 * success, make sure a conn object exists for the upcoming completion
 * event.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c is "Command Disallowed": allow up to two
			 * retries by parking the connection in BT_CONNECT2;
			 * any other error (or too many attempts) tears the
			 * connection down.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			/* No conn object yet (e.g. connection initiated by
			 * raw HCI) - create one so the completion event has
			 * something to attach to.
			 */
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1489
/* Command status handler for HCI_OP_ADD_SCO.
 * Only failures need handling here: close the SCO link attached to the
 * ACL connection the command referenced. Success is handled by the
 * subsequent connection complete event.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	/* cp->handle is the ACL handle the SCO was to be added on */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1524
1525 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1526 {
1527 struct hci_cp_auth_requested *cp;
1528 struct hci_conn *conn;
1529
1530 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1531
1532 if (!status)
1533 return;
1534
1535 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1536 if (!cp)
1537 return;
1538
1539 hci_dev_lock(hdev);
1540
1541 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1542 if (conn) {
1543 if (conn->state == BT_CONFIG) {
1544 hci_connect_cfm(conn, status);
1545 hci_conn_drop(conn);
1546 }
1547 }
1548
1549 hci_dev_unlock(hdev);
1550 }
1551
1552 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1553 {
1554 struct hci_cp_set_conn_encrypt *cp;
1555 struct hci_conn *conn;
1556
1557 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1558
1559 if (!status)
1560 return;
1561
1562 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1563 if (!cp)
1564 return;
1565
1566 hci_dev_lock(hdev);
1567
1568 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1569 if (conn) {
1570 if (conn->state == BT_CONFIG) {
1571 hci_connect_cfm(conn, status);
1572 hci_conn_drop(conn);
1573 }
1574 }
1575
1576 hci_dev_unlock(hdev);
1577 }
1578
1579 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1580 struct hci_conn *conn)
1581 {
1582 if (conn->state != BT_CONFIG || !conn->out)
1583 return 0;
1584
1585 if (conn->pending_sec_level == BT_SECURITY_SDP)
1586 return 0;
1587
1588 /* Only request authentication for SSP connections or non-SSP
1589 * devices with sec_level MEDIUM or HIGH or if MITM protection
1590 * is requested.
1591 */
1592 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1593 conn->pending_sec_level != BT_SECURITY_FIPS &&
1594 conn->pending_sec_level != BT_SECURITY_HIGH &&
1595 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1596 return 0;
1597
1598 return 1;
1599 }
1600
1601 static int hci_resolve_name(struct hci_dev *hdev,
1602 struct inquiry_entry *e)
1603 {
1604 struct hci_cp_remote_name_req cp;
1605
1606 memset(&cp, 0, sizeof(cp));
1607
1608 bacpy(&cp.bdaddr, &e->data.bdaddr);
1609 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1610 cp.pscan_mode = e->data.pscan_mode;
1611 cp.clock_offset = e->data.clock_offset;
1612
1613 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1614 }
1615
1616 static bool hci_resolve_next_name(struct hci_dev *hdev)
1617 {
1618 struct discovery_state *discov = &hdev->discovery;
1619 struct inquiry_entry *e;
1620
1621 if (list_empty(&discov->resolve))
1622 return false;
1623
1624 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1625 if (!e)
1626 return false;
1627
1628 if (hci_resolve_name(hdev, e) == 0) {
1629 e->name_state = NAME_PENDING;
1630 return true;
1631 }
1632
1633 return false;
1634 }
1635
/* Process the outcome of a name request during discovery: report the
 * (possibly connected) device and its name via mgmt, update the inquiry
 * cache entry's name state, and either resolve the next pending name or
 * finish discovery. A NULL name means resolution failed for this device.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* While stopping, skip straight to marking discovery complete */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		/* Resolution failed; remember so we don't retry this entry */
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names pending? Keep the RESOLVING state going. */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1684
/* Command status handler for HCI_OP_REMOTE_NAME_REQ.
 * Only failures are handled here: let the discovery state machine know
 * the name could not be resolved, and - since the name request doubled
 * as the trigger point for outgoing authentication - start authentication
 * now if the connection still needs it.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report the failed resolution (NULL name) to discovery/mgmt */
	if (hci_dev_test_flag(hdev, HCI_MGMT))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Only send one auth request per connection attempt */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1727
1728 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1729 {
1730 struct hci_cp_read_remote_features *cp;
1731 struct hci_conn *conn;
1732
1733 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1734
1735 if (!status)
1736 return;
1737
1738 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1739 if (!cp)
1740 return;
1741
1742 hci_dev_lock(hdev);
1743
1744 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1745 if (conn) {
1746 if (conn->state == BT_CONFIG) {
1747 hci_connect_cfm(conn, status);
1748 hci_conn_drop(conn);
1749 }
1750 }
1751
1752 hci_dev_unlock(hdev);
1753 }
1754
1755 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1756 {
1757 struct hci_cp_read_remote_ext_features *cp;
1758 struct hci_conn *conn;
1759
1760 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1761
1762 if (!status)
1763 return;
1764
1765 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1766 if (!cp)
1767 return;
1768
1769 hci_dev_lock(hdev);
1770
1771 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1772 if (conn) {
1773 if (conn->state == BT_CONFIG) {
1774 hci_connect_cfm(conn, status);
1775 hci_conn_drop(conn);
1776 }
1777 }
1778
1779 hci_dev_unlock(hdev);
1780 }
1781
/* Command status handler for HCI_OP_SETUP_SYNC_CONN.
 * Only failures need handling here: close the SCO/eSCO link attached to
 * the ACL connection the command referenced. Success is handled by the
 * subsequent synchronous connection complete event.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	/* cp->handle is the ACL handle the sync connection was set up on */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1816
1817 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1818 {
1819 struct hci_cp_sniff_mode *cp;
1820 struct hci_conn *conn;
1821
1822 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1823
1824 if (!status)
1825 return;
1826
1827 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1828 if (!cp)
1829 return;
1830
1831 hci_dev_lock(hdev);
1832
1833 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1834 if (conn) {
1835 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1836
1837 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1838 hci_sco_setup(conn, status);
1839 }
1840
1841 hci_dev_unlock(hdev);
1842 }
1843
1844 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1845 {
1846 struct hci_cp_exit_sniff_mode *cp;
1847 struct hci_conn *conn;
1848
1849 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1850
1851 if (!status)
1852 return;
1853
1854 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1855 if (!cp)
1856 return;
1857
1858 hci_dev_lock(hdev);
1859
1860 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1861 if (conn) {
1862 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1863
1864 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1865 hci_sco_setup(conn, status);
1866 }
1867
1868 hci_dev_unlock(hdev);
1869 }
1870
1871 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1872 {
1873 struct hci_cp_disconnect *cp;
1874 struct hci_conn *conn;
1875
1876 if (!status)
1877 return;
1878
1879 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1880 if (!cp)
1881 return;
1882
1883 hci_dev_lock(hdev);
1884
1885 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1886 if (conn)
1887 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1888 conn->dst_type, status);
1889
1890 hci_dev_unlock(hdev);
1891 }
1892
/* Command status handler for HCI_OP_LE_CREATE_CONN.
 * On success, record the initiator/responder address info needed by SMP
 * and arm a connection-attempt timeout for directed (non-white-list)
 * connections. Failures are handled elsewhere (see comment below).
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1943
1944 static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status)
1945 {
1946 struct hci_cp_le_read_remote_features *cp;
1947 struct hci_conn *conn;
1948
1949 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1950
1951 if (!status)
1952 return;
1953
1954 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES);
1955 if (!cp)
1956 return;
1957
1958 hci_dev_lock(hdev);
1959
1960 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1961 if (conn) {
1962 if (conn->state == BT_CONFIG) {
1963 hci_connect_cfm(conn, status);
1964 hci_conn_drop(conn);
1965 }
1966 }
1967
1968 hci_dev_unlock(hdev);
1969 }
1970
/* Command status handler for HCI_OP_LE_START_ENC.
 * Only failures need handling: if encryption could not be (re)started on
 * an established connection, disconnect it with an authentication
 * failure, since the link can no longer meet its security requirements.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* Only tear down links that are actually up; connections in other
	 * states are handled by their own failure paths. */
	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
2000
2001 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2002 {
2003 struct hci_cp_switch_role *cp;
2004 struct hci_conn *conn;
2005
2006 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2007
2008 if (!status)
2009 return;
2010
2011 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2012 if (!cp)
2013 return;
2014
2015 hci_dev_lock(hdev);
2016
2017 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2018 if (conn)
2019 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2020
2021 hci_dev_unlock(hdev);
2022 }
2023
2024 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2025 {
2026 __u8 status = *((__u8 *) skb->data);
2027 struct discovery_state *discov = &hdev->discovery;
2028 struct inquiry_entry *e;
2029
2030 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2031
2032 hci_conn_check_pending(hdev);
2033
2034 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
2035 return;
2036
2037 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
2038 wake_up_bit(&hdev->flags, HCI_INQUIRY);
2039
2040 if (!hci_dev_test_flag(hdev, HCI_MGMT))
2041 return;
2042
2043 hci_dev_lock(hdev);
2044
2045 if (discov->state != DISCOVERY_FINDING)
2046 goto unlock;
2047
2048 if (list_empty(&discov->resolve)) {
2049 /* When BR/EDR inquiry is active and no LE scanning is in
2050 * progress, then change discovery state to indicate completion.
2051 *
2052 * When running LE scanning and BR/EDR inquiry simultaneously
2053 * and the LE scan already finished, then change the discovery
2054 * state to indicate completion.
2055 */
2056 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2057 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2058 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2059 goto unlock;
2060 }
2061
2062 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
2063 if (e && hci_resolve_name(hdev, e) == 0) {
2064 e->name_state = NAME_PENDING;
2065 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
2066 } else {
2067 /* When BR/EDR inquiry is active and no LE scanning is in
2068 * progress, then change discovery state to indicate completion.
2069 *
2070 * When running LE scanning and BR/EDR inquiry simultaneously
2071 * and the LE scan already finished, then change the discovery
2072 * state to indicate completion.
2073 */
2074 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
2075 !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks))
2076 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2077 }
2078
2079 unlock:
2080 hci_dev_unlock(hdev);
2081 }
2082
2083 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2084 {
2085 struct inquiry_data data;
2086 struct inquiry_info *info = (void *) (skb->data + 1);
2087 int num_rsp = *((__u8 *) skb->data);
2088
2089 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2090
2091 if (!num_rsp)
2092 return;
2093
2094 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
2095 return;
2096
2097 hci_dev_lock(hdev);
2098
2099 for (; num_rsp; num_rsp--, info++) {
2100 u32 flags;
2101
2102 bacpy(&data.bdaddr, &info->bdaddr);
2103 data.pscan_rep_mode = info->pscan_rep_mode;
2104 data.pscan_period_mode = info->pscan_period_mode;
2105 data.pscan_mode = info->pscan_mode;
2106 memcpy(data.dev_class, info->dev_class, 3);
2107 data.clock_offset = info->clock_offset;
2108 data.rssi = HCI_RSSI_INVALID;
2109 data.ssp_mode = 0x00;
2110
2111 flags = hci_inquiry_cache_update(hdev, &data, false);
2112
2113 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2114 info->dev_class, HCI_RSSI_INVALID,
2115 flags, NULL, 0, NULL, 0);
2116 }
2117
2118 hci_dev_unlock(hdev);
2119 }
2120
/* Handler for the Connection Complete event.
 * Finalize (or fail) a pending BR/EDR ACL or SCO connection: assign the
 * handle, set auth/encrypt flags, kick off remote feature discovery, and
 * notify the upper layers.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		/* The controller may complete an eSCO setup request with a
		 * plain SCO link; retry the lookup as eSCO and re-type it. */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) connections without a
			 * stored link key get the longer pairing timeout. */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_page_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2205
2206 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2207 {
2208 struct hci_cp_reject_conn_req cp;
2209
2210 bacpy(&cp.bdaddr, bdaddr);
2211 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2212 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2213 }
2214
/* Handler for the Connection Request event.
 * Apply the acceptance policy (protocol veto, blacklist, connectable/
 * whitelist checks), then either accept the connection with appropriate
 * role/SCO parameters, defer the decision to the upper layer, or reject.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocols (L2CAP/SCO) veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (hci_dev_test_flag(hdev, HCI_MGMT) &&
	    !hci_dev_test_flag(hdev, HCI_CONNECTABLE) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class for this peer, if known */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	/* ACL links (and sync links when deferral is off and eSCO is
	 * unsupported) are accepted immediately with the basic command */
	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		/* Default eSCO parameters: 8 kB/s both ways, no latency or
		 * retransmission constraints */
		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Deferred: the upper layer decides later (BT_CONNECT2) */
		conn->state = BT_CONNECT2;
		hci_connect_cfm(conn, 0);
	}
}
2309
2310 static u8 hci_to_mgmt_reason(u8 err)
2311 {
2312 switch (err) {
2313 case HCI_ERROR_CONNECTION_TIMEOUT:
2314 return MGMT_DEV_DISCONN_TIMEOUT;
2315 case HCI_ERROR_REMOTE_USER_TERM:
2316 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2317 case HCI_ERROR_REMOTE_POWER_OFF:
2318 return MGMT_DEV_DISCONN_REMOTE;
2319 case HCI_ERROR_LOCAL_HOST_TERM:
2320 return MGMT_DEV_DISCONN_LOCAL_HOST;
2321 default:
2322 return MGMT_DEV_DISCONN_UNKNOWN;
2323 }
2324 }
2325
/* Handler for the Disconnection Complete event.
 * Tear down the connection object, notify mgmt, clean up link keys,
 * re-queue auto-connect parameters for background scanning, and re-enable
 * advertising when an LE link goes away.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only announce the disconnect to mgmt if it saw the connect */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_page_scan(hdev);
	}

	/* Re-arm auto-connection for this peer where configured */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Only reconnect automatically on link loss */
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* conn is freed by hci_conn_del(); remember the type first */
	type = conn->type;

	hci_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2403
/* Handler for the Authentication Complete event.
 * Update the connection's auth state, then continue the security flow:
 * start encryption for SSP connections still in BT_CONFIG, or confirm
 * authentication (and any pending encryption request) to upper layers.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy devices cannot be re-authenticated on a live link;
		 * keep the existing security level in that case. */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP requires encryption before the link is usable */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Balance the reference held for authentication and keep the
		 * link around for the normal disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			/* Auth failed, so the pending encryption fails too */
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2468
/* Handle HCI Remote Name Request Complete event.
 *
 * Forwards the resolved (or failed) remote name to the management
 * interface and, if needed, starts authentication on an outgoing
 * connection that was waiting for the name lookup to finish.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* A name request just finished, so queued connection attempts
	 * may now proceed.
	 */
	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		/* Remember that we initiated authentication; this
		 * influences later pairing policy decisions.
		 */
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2510
/* Completion callback for the HCI_Read_Encryption_Key_Size command.
 *
 * Records the negotiated encryption key size on the connection (falling
 * back to the maximum on error) and then delivers the pending connect
 * or encrypt confirmation that was deferred until the size was known.
 */
static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status,
				       u16 opcode, struct sk_buff *skb)
{
	const struct hci_rp_read_enc_key_size *rp;
	struct hci_conn *conn;
	u16 handle;

	BT_DBG("%s status 0x%02x", hdev->name, status);

	if (!skb || skb->len < sizeof(*rp)) {
		BT_ERR("%s invalid HCI Read Encryption Key Size response",
		       hdev->name);
		return;
	}

	rp = (void *)skb->data;
	handle = le16_to_cpu(rp->handle);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, handle);
	if (!conn)
		goto unlock;

	/* If we fail to read the encryption key size, assume maximum
	 * (which is the same we do also when this HCI command isn't
	 * supported).
	 */
	if (rp->status) {
		BT_ERR("%s failed to read key size for handle %u", hdev->name,
		       handle);
		conn->enc_key_size = HCI_LINK_KEY_SIZE;
	} else {
		conn->enc_key_size = rp->key_size;
	}

	if (conn->state == BT_CONFIG) {
		/* Connection setup was waiting on the key size */
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, 0);
		hci_conn_drop(conn);
	} else {
		/* Re-derive the encryption type for the deferred
		 * encrypt confirmation: 0x00 = off, 0x01 = E0,
		 * 0x02 = AES-CCM.
		 */
		u8 encrypt;

		if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			encrypt = 0x00;
		else if (test_bit(HCI_CONN_AES_CCM, &conn->flags))
			encrypt = 0x02;
		else
			encrypt = 0x01;

		hci_encrypt_cfm(conn, 0, encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2567
/* Handle HCI Encryption Change event.
 *
 * Updates the connection's encryption/authentication flags, enforces
 * Secure Connections Only policy, optionally queries the encryption
 * key size, and finally notifies upper layers of the new state.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE
			 * links always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed encryption attempt on an established link is treated
	 * as an authentication failure and the link is torn down.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* In Secure Connections Only mode, do not allow any connections
	 * that are not encrypted with AES-CCM using a P-256 authenticated
	 * combination key.
	 */
	if (hci_dev_test_flag(hdev, HCI_SC_ONLY) &&
	    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
	     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
		hci_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Try reading the encryption key size for encrypted ACL links */
	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {
		struct hci_cp_read_enc_key_size cp;
		struct hci_request req;

		/* Only send HCI_Read_Encryption_Key_Size if the
		 * controller really supports it. If it doesn't, assume
		 * the default size (16).
		 */
		if (!(hdev->commands[20] & 0x10)) {
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		hci_req_init(&req, hdev);

		cp.handle = cpu_to_le16(conn->handle);
		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp);

		if (hci_req_run_skb(&req, read_enc_key_size_complete)) {
			BT_ERR("Sending HCI Read Encryption Key Size failed");
			conn->enc_key_size = HCI_LINK_KEY_SIZE;
			goto notify;
		}

		/* Notification is deferred until the key size command
		 * completes (see read_enc_key_size_complete()).
		 */
		goto unlock;
	}

notify:
	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2668
2669 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2670 struct sk_buff *skb)
2671 {
2672 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2673 struct hci_conn *conn;
2674
2675 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2676
2677 hci_dev_lock(hdev);
2678
2679 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2680 if (conn) {
2681 if (!ev->status)
2682 set_bit(HCI_CONN_SECURE, &conn->flags);
2683
2684 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2685
2686 hci_key_change_cfm(conn, ev->status);
2687 }
2688
2689 hci_dev_unlock(hdev);
2690 }
2691
/* Handle HCI Read Remote Supported Features Complete event.
 *
 * Stores page 0 of the remote features and continues connection setup:
 * either reads the extended features (page 1), requests the remote
 * name, or completes the connection if nothing further is needed.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* The remaining steps only apply while the connection is still
	 * being set up.
	 */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ext_feat_capable(hdev) &&
	    lmp_ext_feat_capable(conn)) {
		/* Both sides support extended features; fetch page 1
		 * before finishing setup.
		 */
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		/* Resolve the remote name before reporting the device
		 * as connected to the management interface.
		 */
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2740
/* Handle HCI Command Complete event.
 *
 * Extracts the opcode and status, dispatches to the matching
 * command-complete handler, updates the command flow-control credit
 * (ncmd) and flags any waiting request as completed. The opcode and
 * status are returned to the caller through @opcode and @status so the
 * generic event code can finish request bookkeeping.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
				 u16 *opcode, u8 *status,
				 hci_req_complete_t *req_complete,
				 hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;

	*opcode = __le16_to_cpu(ev->opcode);
	/* The first parameter byte of every command-complete is the
	 * command status.
	 */
	*status = skb->data[sizeof(*ev)];

	/* Strip the event header so handlers see only the parameters */
	skb_pull(skb, sizeof(*ev));

	switch (*opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_READ_STORED_LINK_KEY:
		hci_cc_read_stored_link_key(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_DEBUG_MODE:
		hci_cc_write_ssp_debug_mode(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* Any real command completing means the command didn't time out */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* ncmd > 0 means the controller can accept another command, but
	 * while HCI_RESET is in progress we keep the queue throttled.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	hci_req_cmd_complete(hdev, *opcode, *status, req_complete,
			     req_complete_skb);

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3039
/* Handle HCI Command Status event.
 *
 * Dispatches the status to the matching command-status handler,
 * refreshes the command flow-control credit and, where appropriate,
 * flags the owning request as completed (commands that only generate a
 * status never produce a later command-complete event).
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb,
			       u16 *opcode, u8 *status,
			       hci_req_complete_t *req_complete,
			       hci_req_complete_skb_t *req_complete_skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;

	skb_pull(skb, sizeof(*ev));

	*opcode = __le16_to_cpu(ev->opcode);
	*status = ev->status;

	switch (*opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_READ_REMOTE_FEATURES:
		hci_cs_le_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, *opcode);
		break;
	}

	/* Any real command reporting status means it didn't time out */
	if (*opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* ncmd > 0 means the controller can accept another command, but
	 * while HCI_RESET is in progress we keep the queue throttled.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags))
		atomic_set(&hdev->cmd_cnt, 1);

	/* Indicate request completion if the command failed. Also, if
	 * we're not waiting for a special event and we get a success
	 * command status we should try to flag the request as completed
	 * (since for this kind of commands there will not be a command
	 * complete event).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete,
				     req_complete_skb);

	if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q))
		queue_work(hdev->workqueue, &hdev->cmd_work);
}
3142
3143 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3144 {
3145 struct hci_ev_hardware_error *ev = (void *) skb->data;
3146
3147 hdev->hw_error_code = ev->code;
3148
3149 queue_work(hdev->req_workqueue, &hdev->error_reset);
3150 }
3151
3152 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3153 {
3154 struct hci_ev_role_change *ev = (void *) skb->data;
3155 struct hci_conn *conn;
3156
3157 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3158
3159 hci_dev_lock(hdev);
3160
3161 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3162 if (conn) {
3163 if (!ev->status)
3164 conn->role = ev->role;
3165
3166 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3167
3168 hci_role_switch_cfm(conn, ev->status, ev->role);
3169 }
3170
3171 hci_dev_unlock(hdev);
3172 }
3173
/* Handle HCI Number of Completed Packets event.
 *
 * Returns freed controller buffer credits to the per-type pools for
 * each listed connection handle and kicks the TX work so more queued
 * data can be sent. Only valid in packet-based flow control mode.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the packet really contains num_hndl entries before
	 * iterating over them.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Credit the right pool, clamping at the controller's
		 * advertised maximum to guard against miscounting.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* LE shares the ACL pool when the controller has
			 * no dedicated LE buffers (le_pkts == 0).
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3239
3240 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3241 __u16 handle)
3242 {
3243 struct hci_chan *chan;
3244
3245 switch (hdev->dev_type) {
3246 case HCI_BREDR:
3247 return hci_conn_hash_lookup_handle(hdev, handle);
3248 case HCI_AMP:
3249 chan = hci_chan_lookup_handle(hdev, handle);
3250 if (chan)
3251 return chan->conn;
3252 break;
3253 default:
3254 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3255 break;
3256 }
3257
3258 return NULL;
3259 }
3260
/* Handle HCI Number of Completed Data Blocks event.
 *
 * Returns freed controller data-block credits for each listed handle
 * and kicks the TX work. Only valid in block-based flow control mode
 * (used by AMP controllers).
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the packet really contains num_hndl entries before
	 * iterating over them.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* May be a logical-channel handle on AMP controllers */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Clamp at the controller-reported maximum to
			 * guard against miscounting.
			 */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3310
3311 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3312 {
3313 struct hci_ev_mode_change *ev = (void *) skb->data;
3314 struct hci_conn *conn;
3315
3316 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3317
3318 hci_dev_lock(hdev);
3319
3320 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3321 if (conn) {
3322 conn->mode = ev->mode;
3323
3324 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3325 &conn->flags)) {
3326 if (conn->mode == HCI_CM_ACTIVE)
3327 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3328 else
3329 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3330 }
3331
3332 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3333 hci_sco_setup(conn, ev->status);
3334 }
3335
3336 hci_dev_unlock(hdev);
3337 }
3338
/* Handle HCI PIN Code Request event.
 *
 * Rejects the request when the device is not bondable and we did not
 * initiate authentication; otherwise forwards the request to user
 * space via the management interface.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* Extend the disconnect timeout while pairing runs */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Refuse incoming pairing attempts when not bondable */
	if (!hci_dev_test_flag(hdev, HCI_BONDABLE) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (hci_dev_test_flag(hdev, HCI_MGMT)) {
		u8 secure;

		/* A high security level requires a 16-digit PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3376
3377 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3378 {
3379 if (key_type == HCI_LK_CHANGED_COMBINATION)
3380 return;
3381
3382 conn->pin_length = pin_len;
3383 conn->key_type = key_type;
3384
3385 switch (key_type) {
3386 case HCI_LK_LOCAL_UNIT:
3387 case HCI_LK_REMOTE_UNIT:
3388 case HCI_LK_DEBUG_COMBINATION:
3389 return;
3390 case HCI_LK_COMBINATION:
3391 if (pin_len == 16)
3392 conn->pending_sec_level = BT_SECURITY_HIGH;
3393 else
3394 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3395 break;
3396 case HCI_LK_UNAUTH_COMBINATION_P192:
3397 case HCI_LK_UNAUTH_COMBINATION_P256:
3398 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3399 break;
3400 case HCI_LK_AUTH_COMBINATION_P192:
3401 conn->pending_sec_level = BT_SECURITY_HIGH;
3402 break;
3403 case HCI_LK_AUTH_COMBINATION_P256:
3404 conn->pending_sec_level = BT_SECURITY_FIPS;
3405 break;
3406 }
3407 }
3408
/* Handle HCI Link Key Request event.
 *
 * Looks up a stored link key for the peer and replies with it, but
 * only if the key's strength satisfies the connection's pending
 * security requirements; otherwise a negative reply forces fresh
 * pairing.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage is only handled when the management interface
	 * controls this device.
	 */
	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Don't reuse an unauthenticated key when MITM
		 * protection was requested (auth_type bit 0).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Legacy keys from a short PIN can't satisfy high/FIPS
		 * security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3468
/* Handle HCI Link Key Notification event.
 *
 * Stores the newly created link key, informs user space, and applies
 * the key persistence policy (debug keys and non-persistent keys are
 * flushed when the connection ends).
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection around for the disconnect timeout */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys are flushed when the link goes down */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
3528
3529 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3530 {
3531 struct hci_ev_clock_offset *ev = (void *) skb->data;
3532 struct hci_conn *conn;
3533
3534 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3535
3536 hci_dev_lock(hdev);
3537
3538 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3539 if (conn && !ev->status) {
3540 struct inquiry_entry *ie;
3541
3542 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3543 if (ie) {
3544 ie->data.clock_offset = ev->clock_offset;
3545 ie->timestamp = jiffies;
3546 }
3547 }
3548
3549 hci_dev_unlock(hdev);
3550 }
3551
3552 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3553 {
3554 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3555 struct hci_conn *conn;
3556
3557 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3558
3559 hci_dev_lock(hdev);
3560
3561 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3562 if (conn && !ev->status)
3563 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3564
3565 hci_dev_unlock(hdev);
3566 }
3567
3568 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3569 {
3570 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3571 struct inquiry_entry *ie;
3572
3573 BT_DBG("%s", hdev->name);
3574
3575 hci_dev_lock(hdev);
3576
3577 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3578 if (ie) {
3579 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3580 ie->timestamp = jiffies;
3581 }
3582
3583 hci_dev_unlock(hdev);
3584 }
3585
3586 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3587 struct sk_buff *skb)
3588 {
3589 struct inquiry_data data;
3590 int num_rsp = *((__u8 *) skb->data);
3591
3592 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3593
3594 if (!num_rsp)
3595 return;
3596
3597 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3598 return;
3599
3600 hci_dev_lock(hdev);
3601
3602 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3603 struct inquiry_info_with_rssi_and_pscan_mode *info;
3604 info = (void *) (skb->data + 1);
3605
3606 for (; num_rsp; num_rsp--, info++) {
3607 u32 flags;
3608
3609 bacpy(&data.bdaddr, &info->bdaddr);
3610 data.pscan_rep_mode = info->pscan_rep_mode;
3611 data.pscan_period_mode = info->pscan_period_mode;
3612 data.pscan_mode = info->pscan_mode;
3613 memcpy(data.dev_class, info->dev_class, 3);
3614 data.clock_offset = info->clock_offset;
3615 data.rssi = info->rssi;
3616 data.ssp_mode = 0x00;
3617
3618 flags = hci_inquiry_cache_update(hdev, &data, false);
3619
3620 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3621 info->dev_class, info->rssi,
3622 flags, NULL, 0, NULL, 0);
3623 }
3624 } else {
3625 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3626
3627 for (; num_rsp; num_rsp--, info++) {
3628 u32 flags;
3629
3630 bacpy(&data.bdaddr, &info->bdaddr);
3631 data.pscan_rep_mode = info->pscan_rep_mode;
3632 data.pscan_period_mode = info->pscan_period_mode;
3633 data.pscan_mode = 0x00;
3634 memcpy(data.dev_class, info->dev_class, 3);
3635 data.clock_offset = info->clock_offset;
3636 data.rssi = info->rssi;
3637 data.ssp_mode = 0x00;
3638
3639 flags = hci_inquiry_cache_update(hdev, &data, false);
3640
3641 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3642 info->dev_class, info->rssi,
3643 flags, NULL, 0, NULL, 0);
3644 }
3645 }
3646
3647 hci_dev_unlock(hdev);
3648 }
3649
/* Read Remote Extended Features Complete event.
 *
 * Stores the reported feature page on the connection and, for page 1
 * (the remote host features page), mirrors the host's SSP/SC support
 * into the connection flags and the inquiry cache. While the ACL is
 * still in BT_CONFIG this also drives the next setup step: a remote
 * name request, or the connect confirmation when no outgoing
 * authentication is needed.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* The page is cached regardless of status; only the page 1
	 * handling below is gated on a successful status.
	 */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* The remaining steps only apply while connection setup is in
	 * progress.
	 */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3713
/* Synchronous Connection Complete event (SCO/eSCO).
 *
 * On success the connection becomes BT_CONNECTED and gets its debugfs
 * and sysfs entries. For a set of parameter-negotiation failure codes
 * an outgoing attempt is retried with a downgraded packet type before
 * the connection is finally closed and deleted.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* Some controllers complete an eSCO request with a SCO
		 * link_type; fall back to the eSCO entry and retag it.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10: /* Connection Accept Timeout */
	case 0x0d: /* Connection Rejected due to Limited Resources */
	case 0x11: /* Unsupported Feature or Parameter Value */
	case 0x1c: /* SCO interval rejected */
	case 0x1a: /* Unsupported Remote Feature */
	case 0x1f: /* Unspecified error */
	case 0x20: /* Unsupported LMP Parameter value */
		if (conn->out) {
			/* Retry the outgoing setup with a less demanding
			 * packet type; a successful resubmit keeps the
			 * connection attempt alive.
			 */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3772
3773 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3774 {
3775 size_t parsed = 0;
3776
3777 while (parsed < eir_len) {
3778 u8 field_len = eir[0];
3779
3780 if (field_len == 0)
3781 return parsed;
3782
3783 parsed += field_len + 1;
3784 eir += field_len + 1;
3785 }
3786
3787 return eir_len;
3788 }
3789
3790 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3791 struct sk_buff *skb)
3792 {
3793 struct inquiry_data data;
3794 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3795 int num_rsp = *((__u8 *) skb->data);
3796 size_t eir_len;
3797
3798 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3799
3800 if (!num_rsp)
3801 return;
3802
3803 if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ))
3804 return;
3805
3806 hci_dev_lock(hdev);
3807
3808 for (; num_rsp; num_rsp--, info++) {
3809 u32 flags;
3810 bool name_known;
3811
3812 bacpy(&data.bdaddr, &info->bdaddr);
3813 data.pscan_rep_mode = info->pscan_rep_mode;
3814 data.pscan_period_mode = info->pscan_period_mode;
3815 data.pscan_mode = 0x00;
3816 memcpy(data.dev_class, info->dev_class, 3);
3817 data.clock_offset = info->clock_offset;
3818 data.rssi = info->rssi;
3819 data.ssp_mode = 0x01;
3820
3821 if (hci_dev_test_flag(hdev, HCI_MGMT))
3822 name_known = eir_has_data_type(info->data,
3823 sizeof(info->data),
3824 EIR_NAME_COMPLETE);
3825 else
3826 name_known = true;
3827
3828 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3829
3830 eir_len = eir_get_length(info->data, sizeof(info->data));
3831
3832 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3833 info->dev_class, info->rssi,
3834 flags, info->data, eir_len, NULL, 0);
3835 }
3836
3837 hci_dev_unlock(hdev);
3838 }
3839
/* Encryption Key Refresh Complete event.
 *
 * Only acted upon for LE links. On success the pending security level
 * is promoted; a failed refresh of an already established connection
 * triggers a disconnect with "authentication failure".
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	/* A connection still in BT_CONFIG completes its setup here;
	 * otherwise only the pending authentication is confirmed.
	 */
	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3889
3890 static u8 hci_get_auth_req(struct hci_conn *conn)
3891 {
3892 /* If remote requests no-bonding follow that lead */
3893 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3894 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3895 return conn->remote_auth | (conn->auth_type & 0x01);
3896
3897 /* If both remote and local have enough IO capabilities, require
3898 * MITM protection
3899 */
3900 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3901 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3902 return conn->remote_auth | 0x01;
3903
3904 /* No MITM protection possible so ignore remote requirement */
3905 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3906 }
3907
/* Determine the OOB data present value for an IO Capability reply.
 *
 * Returns 0x00 when no usable OOB data is stored for the peer, 0x01
 * when P-192 data is present and 0x02 when P-256 data is present. With
 * Secure Connections enabled (and not in SC Only mode) the value
 * stored alongside the OOB data is used directly.
 */
static u8 bredr_oob_data_present(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR);
	if (!data)
		return 0x00;

	if (bredr_sc_enabled(hdev)) {
		/* When Secure Connections is enabled, then just
		 * return the present value stored with the OOB
		 * data. The stored value contains the right present
		 * information. However it can only be trusted when
		 * not in Secure Connection Only mode.
		 */
		if (!hci_dev_test_flag(hdev, HCI_SC_ONLY))
			return data->present;

		/* When Secure Connections Only mode is enabled, then
		 * the P-256 values are required. If they are not
		 * available, then do not declare that OOB data is
		 * present.
		 */
		if (!memcmp(data->rand256, ZERO_KEY, 16) ||
		    !memcmp(data->hash256, ZERO_KEY, 16))
			return 0x00;

		return 0x02;
	}

	/* When Secure Connections is not enabled or actually
	 * not supported by the hardware, then check that if
	 * P-192 data values are present.
	 */
	if (!memcmp(data->rand192, ZERO_KEY, 16) ||
	    !memcmp(data->hash192, ZERO_KEY, 16))
		return 0x00;

	return 0x01;
}
3949
/* IO Capability Request event.
 *
 * Replies with our IO capability, derived authentication requirement
 * and OOB data present value when pairing is acceptable; otherwise
 * sends a negative reply with "pairing not allowed". Takes a
 * reference on the connection that is released by later pairing
 * events.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (hci_dev_test_flag(hdev, HCI_BONDABLE) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!hci_dev_test_flag(hdev, HCI_BONDABLE))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;
		cp.oob_data = bredr_oob_data_present(conn);

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4018
4019 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
4020 {
4021 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
4022 struct hci_conn *conn;
4023
4024 BT_DBG("%s", hdev->name);
4025
4026 hci_dev_lock(hdev);
4027
4028 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4029 if (!conn)
4030 goto unlock;
4031
4032 conn->remote_cap = ev->capability;
4033 conn->remote_auth = ev->authentication;
4034
4035 unlock:
4036 hci_dev_unlock(hdev);
4037 }
4038
/* User Confirmation Request event (numeric comparison pairing).
 *
 * Rejects the request when we require MITM but the remote cannot
 * provide it, auto-accepts (possibly after a configurable delay) when
 * neither side requires MITM, and otherwise forwards the request to
 * user space via mgmt (with confirm_hint set when user authorization
 * rather than value comparison is wanted).
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
4113
4114 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
4115 struct sk_buff *skb)
4116 {
4117 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
4118
4119 BT_DBG("%s", hdev->name);
4120
4121 if (hci_dev_test_flag(hdev, HCI_MGMT))
4122 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
4123 }
4124
4125 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4126 struct sk_buff *skb)
4127 {
4128 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4129 struct hci_conn *conn;
4130
4131 BT_DBG("%s", hdev->name);
4132
4133 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4134 if (!conn)
4135 return;
4136
4137 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4138 conn->passkey_entered = 0;
4139
4140 if (hci_dev_test_flag(hdev, HCI_MGMT))
4141 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4142 conn->dst_type, conn->passkey_notify,
4143 conn->passkey_entered);
4144 }
4145
/* Keypress Notification event: track the remote user's passkey entry
 * progress and report each change to user space via mgmt. STARTED and
 * COMPLETED produce no mgmt notification.
 */
static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		return;
	}

	if (hci_dev_test_flag(hdev, HCI_MGMT))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
4183
/* Simple Pairing Complete event.
 *
 * Resets the cached remote authentication requirement, reports a
 * pairing failure to mgmt when appropriate, and releases the reference
 * taken in the IO Capability Request handler.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
4214
4215 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4216 struct sk_buff *skb)
4217 {
4218 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4219 struct inquiry_entry *ie;
4220 struct hci_conn *conn;
4221
4222 BT_DBG("%s", hdev->name);
4223
4224 hci_dev_lock(hdev);
4225
4226 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4227 if (conn)
4228 memcpy(conn->features[1], ev->features, 8);
4229
4230 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4231 if (ie)
4232 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4233
4234 hci_dev_unlock(hdev);
4235 }
4236
/* Remote OOB Data Request event.
 *
 * Answers with the stored OOB data for the peer: the extended (P-192
 * plus P-256) reply when BR/EDR Secure Connections is enabled (with
 * the P-192 values zeroed in SC Only mode), the legacy P-192 reply
 * otherwise, or a negative reply when no data is stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!hci_dev_test_flag(hdev, HCI_MGMT))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (!data) {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (bredr_sc_enabled(hdev)) {
		struct hci_cp_remote_oob_ext_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* In SC Only mode the P-192 values must not be used */
		if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) {
			memset(cp.hash192, 0, sizeof(cp.hash192));
			memset(cp.rand192, 0, sizeof(cp.rand192));
		} else {
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
		}
		memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
		memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash192, sizeof(cp.hash));
		memcpy(cp.rand, data->rand192, sizeof(cp.rand));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4290
4291 #if IS_ENABLED(CONFIG_BT_HS)
4292 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4293 {
4294 struct hci_ev_channel_selected *ev = (void *)skb->data;
4295 struct hci_conn *hcon;
4296
4297 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4298
4299 skb_pull(skb, sizeof(*ev));
4300
4301 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4302 if (!hcon)
4303 return;
4304
4305 amp_read_loc_assoc_final_data(hdev, hcon);
4306 }
4307
/* AMP Physical Link Complete event.
 *
 * On success the AMP hcon inherits the peer address of the underlying
 * BR/EDR connection, is published in debugfs/sysfs and the physical
 * link confirmation is propagated to the AMP manager. On error the
 * hcon is deleted.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* The BR/EDR connection the AMP link was created for */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_debugfs_create_conn(hcon);
	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}
4347
/* AMP Logical Link Complete event.
 *
 * Creates the hci_chan for the new logical link and, when an L2CAP
 * channel is waiting on the AMP manager, confirms the logical link to
 * L2CAP with the controller's block MTU.
 *
 * NOTE(review): unlike most handlers in this file, this one does not
 * take hci_dev_lock around the conn/chan manipulation — presumably
 * safe for these lookups, but worth confirming against the conn_hash
 * locking rules.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4385
4386 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4387 struct sk_buff *skb)
4388 {
4389 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4390 struct hci_chan *hchan;
4391
4392 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4393 le16_to_cpu(ev->handle), ev->status);
4394
4395 if (ev->status)
4396 return;
4397
4398 hci_dev_lock(hdev);
4399
4400 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4401 if (!hchan)
4402 goto unlock;
4403
4404 amp_destroy_logical_link(hchan, ev->reason);
4405
4406 unlock:
4407 hci_dev_unlock(hdev);
4408 }
4409
4410 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4411 struct sk_buff *skb)
4412 {
4413 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4414 struct hci_conn *hcon;
4415
4416 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4417
4418 if (ev->status)
4419 return;
4420
4421 hci_dev_lock(hdev);
4422
4423 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4424 if (hcon) {
4425 hcon->state = BT_CLOSED;
4426 hci_conn_del(hcon);
4427 }
4428
4429 hci_dev_unlock(hdev);
4430 }
4431 #endif
4432
/* LE Connection Complete event.
 *
 * Finds or creates the hci_conn, reconstructs the initiator/responder
 * address information (which the event itself does not fully carry),
 * converts a resolvable random peer address back to its identity
 * address when an IRK is known, and on success transitions the
 * connection to BT_CONFIG, optionally starting the remote feature
 * exchange. Pending auto-connect parameters for the peer are consumed
 * at the end and the background scan is updated.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* An outgoing attempt completed; stop its timeout */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONFIG;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	if (!ev->status) {
		/* The remote features procedure is defined for master
		 * role only. So only in case of an initiated connection
		 * request the remote features.
		 *
		 * If the local controller supports slave-initiated features
		 * exchange, then requesting the remote features in slave
		 * role is possible. Otherwise just transition into the
		 * connected state without requesting the remote features.
		 */
		if (conn->out ||
		    (hdev->le_features[0] & HCI_LE_SLAVE_FEATURES)) {
			struct hci_cp_le_read_remote_features cp;

			cp.handle = __cpu_to_le16(conn->handle);

			hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_conn_hold(conn);
		} else {
			conn->state = BT_CONNECTED;
			hci_connect_cfm(conn, ev->status);
		}
	} else {
		hci_connect_cfm(conn, ev->status);
	}

	/* Consume the pending auto-connect action that triggered this
	 * connection, releasing its reference on the conn object.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4594
4595 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4596 struct sk_buff *skb)
4597 {
4598 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4599 struct hci_conn *conn;
4600
4601 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4602
4603 if (ev->status)
4604 return;
4605
4606 hci_dev_lock(hdev);
4607
4608 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4609 if (conn) {
4610 conn->le_conn_interval = le16_to_cpu(ev->interval);
4611 conn->le_conn_latency = le16_to_cpu(ev->latency);
4612 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4613 }
4614
4615 hci_dev_unlock(hdev);
4616 }
4617
/* This function requires the caller holds hdev->lock */
/* Decide whether an advertising report should trigger an outgoing LE
 * connection and, if so, initiate it.
 *
 * Returns the new hci_conn on a started attempt, or NULL when the
 * report is not connectable, the peer is blocked, a slave-role link
 * already exists, no matching pending auto-connect parameters are
 * stored, or the connect attempt fails.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return NULL;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return NULL;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connectioms from slave device are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		return NULL;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = hci_conn_get(conn);
		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
4697
/* Process a single LE advertising (or direct advertising) report.
 *
 * Resolves resolvable private addresses to identity addresses, kicks off
 * pending auto-connections, and either emits a Device Found event right
 * away or caches the report so it can later be merged with a matching
 * SCAN_RSP from the same device.
 *
 * @direct_addr is non-NULL only for LE Direct Advertising Report events;
 * in that case @data/@len are NULL/0 and the report is only accepted when
 * the direct address is an RPA generated from our own IRK.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
	if (conn && type == LE_ADV_IND) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4856
4857 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4858 {
4859 u8 num_reports = skb->data[0];
4860 void *ptr = &skb->data[1];
4861
4862 hci_dev_lock(hdev);
4863
4864 while (num_reports--) {
4865 struct hci_ev_le_advertising_info *ev = ptr;
4866 s8 rssi;
4867
4868 rssi = ev->data[ev->length];
4869 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4870 ev->bdaddr_type, NULL, 0, rssi,
4871 ev->data, ev->length);
4872
4873 ptr += sizeof(*ev) + ev->length + 1;
4874 }
4875
4876 hci_dev_unlock(hdev);
4877 }
4878
4879 static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev,
4880 struct sk_buff *skb)
4881 {
4882 struct hci_ev_le_remote_feat_complete *ev = (void *)skb->data;
4883 struct hci_conn *conn;
4884
4885 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4886
4887 hci_dev_lock(hdev);
4888
4889 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4890 if (conn) {
4891 if (!ev->status)
4892 memcpy(conn->features[0], ev->features, 8);
4893
4894 if (conn->state == BT_CONFIG) {
4895 __u8 status;
4896
4897 /* If the local controller supports slave-initiated
4898 * features exchange, but the remote controller does
4899 * not, then it is possible that the error code 0x1a
4900 * for unsupported remote feature gets returned.
4901 *
4902 * In this specific case, allow the connection to
4903 * transition into connected state and mark it as
4904 * successful.
4905 */
4906 if ((hdev->le_features[0] & HCI_LE_SLAVE_FEATURES) &&
4907 !conn->out && ev->status == 0x1a)
4908 status = 0x00;
4909 else
4910 status = ev->status;
4911
4912 conn->state = BT_CONNECTED;
4913 hci_connect_cfm(conn, status);
4914 hci_conn_drop(conn);
4915 }
4916 }
4917
4918 hci_dev_unlock(hdev);
4919 }
4920
/* Handle an LE Long Term Key Request event: look up a stored LTK for
 * the connection and, if it matches the requested EDiv/Rand pair, hand
 * it to the controller; otherwise send a negative reply.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	/* Copy the key and zero-pad it out to the full reply size in
	 * case the stored key uses a reduced encryption key size.
	 */
	memcpy(cp.ltk, ltk->val, ltk->enc_size);
	memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size);
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		/* STKs are single-use: drop the key once consumed */
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4984
4985 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4986 u8 reason)
4987 {
4988 struct hci_cp_le_conn_param_req_neg_reply cp;
4989
4990 cp.handle = cpu_to_le16(handle);
4991 cp.reason = reason;
4992
4993 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4994 &cp);
4995 }
4996
4997 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4998 struct sk_buff *skb)
4999 {
5000 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
5001 struct hci_cp_le_conn_param_req_reply cp;
5002 struct hci_conn *hcon;
5003 u16 handle, min, max, latency, timeout;
5004
5005 handle = le16_to_cpu(ev->handle);
5006 min = le16_to_cpu(ev->interval_min);
5007 max = le16_to_cpu(ev->interval_max);
5008 latency = le16_to_cpu(ev->latency);
5009 timeout = le16_to_cpu(ev->timeout);
5010
5011 hcon = hci_conn_hash_lookup_handle(hdev, handle);
5012 if (!hcon || hcon->state != BT_CONNECTED)
5013 return send_conn_param_neg_reply(hdev, handle,
5014 HCI_ERROR_UNKNOWN_CONN_ID);
5015
5016 if (hci_check_conn_params(min, max, latency, timeout))
5017 return send_conn_param_neg_reply(hdev, handle,
5018 HCI_ERROR_INVALID_LL_PARAMS);
5019
5020 if (hcon->role == HCI_ROLE_MASTER) {
5021 struct hci_conn_params *params;
5022 u8 store_hint;
5023
5024 hci_dev_lock(hdev);
5025
5026 params = hci_conn_params_lookup(hdev, &hcon->dst,
5027 hcon->dst_type);
5028 if (params) {
5029 params->conn_min_interval = min;
5030 params->conn_max_interval = max;
5031 params->conn_latency = latency;
5032 params->supervision_timeout = timeout;
5033 store_hint = 0x01;
5034 } else{
5035 store_hint = 0x00;
5036 }
5037
5038 hci_dev_unlock(hdev);
5039
5040 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
5041 store_hint, min, max, latency, timeout);
5042 }
5043
5044 cp.handle = ev->handle;
5045 cp.interval_min = ev->interval_min;
5046 cp.interval_max = ev->interval_max;
5047 cp.latency = ev->latency;
5048 cp.timeout = ev->timeout;
5049 cp.min_ce_len = 0;
5050 cp.max_ce_len = 0;
5051
5052 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
5053 }
5054
5055 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
5056 struct sk_buff *skb)
5057 {
5058 u8 num_reports = skb->data[0];
5059 void *ptr = &skb->data[1];
5060
5061 hci_dev_lock(hdev);
5062
5063 while (num_reports--) {
5064 struct hci_ev_le_direct_adv_info *ev = ptr;
5065
5066 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
5067 ev->bdaddr_type, &ev->direct_addr,
5068 ev->direct_addr_type, ev->rssi, NULL, 0);
5069
5070 ptr += sizeof(*ev);
5071 }
5072
5073 hci_dev_unlock(hdev);
5074 }
5075
5076 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
5077 {
5078 struct hci_ev_le_meta *le_ev = (void *) skb->data;
5079
5080 skb_pull(skb, sizeof(*le_ev));
5081
5082 switch (le_ev->subevent) {
5083 case HCI_EV_LE_CONN_COMPLETE:
5084 hci_le_conn_complete_evt(hdev, skb);
5085 break;
5086
5087 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
5088 hci_le_conn_update_complete_evt(hdev, skb);
5089 break;
5090
5091 case HCI_EV_LE_ADVERTISING_REPORT:
5092 hci_le_adv_report_evt(hdev, skb);
5093 break;
5094
5095 case HCI_EV_LE_REMOTE_FEAT_COMPLETE:
5096 hci_le_remote_feat_complete_evt(hdev, skb);
5097 break;
5098
5099 case HCI_EV_LE_LTK_REQ:
5100 hci_le_ltk_request_evt(hdev, skb);
5101 break;
5102
5103 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
5104 hci_le_remote_conn_param_req_evt(hdev, skb);
5105 break;
5106
5107 case HCI_EV_LE_DIRECT_ADV_REPORT:
5108 hci_le_direct_adv_report_evt(hdev, skb);
5109 break;
5110
5111 default:
5112 break;
5113 }
5114 }
5115
5116 static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
5117 u8 event, struct sk_buff *skb)
5118 {
5119 struct hci_ev_cmd_complete *ev;
5120 struct hci_event_hdr *hdr;
5121
5122 if (!skb)
5123 return false;
5124
5125 if (skb->len < sizeof(*hdr)) {
5126 BT_ERR("Too short HCI event");
5127 return false;
5128 }
5129
5130 hdr = (void *) skb->data;
5131 skb_pull(skb, HCI_EVENT_HDR_SIZE);
5132
5133 if (event) {
5134 if (hdr->evt != event)
5135 return false;
5136 return true;
5137 }
5138
5139 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
5140 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
5141 return false;
5142 }
5143
5144 if (skb->len < sizeof(*ev)) {
5145 BT_ERR("Too short cmd_complete event");
5146 return false;
5147 }
5148
5149 ev = (void *) skb->data;
5150 skb_pull(skb, sizeof(*ev));
5151
5152 if (opcode != __le16_to_cpu(ev->opcode)) {
5153 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
5154 __le16_to_cpu(ev->opcode));
5155 return false;
5156 }
5157
5158 return true;
5159 }
5160
/* Main HCI event demultiplexer: dispatch an incoming event skb to its
 * handler and complete any pending request that was waiting for it.
 * Consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	hci_req_complete_t req_complete = NULL;
	hci_req_complete_skb_t req_complete_skb = NULL;
	struct sk_buff *orig_skb = NULL;
	u8 status = 0, event = hdr->evt, req_evt = 0;
	u16 opcode = HCI_OP_NOP;

	/* If the last sent command was waiting for this specific event
	 * type, resolve its completion callbacks now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		opcode = __le16_to_cpu(cmd_hdr->opcode);
		hci_req_cmd_complete(hdev, opcode, status, &req_complete,
				     &req_complete_skb);
		req_evt = event;
	}

	/* If it looks like we might end up having to call
	 * req_complete_skb, store a pristine copy of the skb since the
	 * various handlers may modify the original one through
	 * skb_pull() calls, etc.
	 */
	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
	    event == HCI_EV_CMD_COMPLETE)
		orig_skb = skb_clone(skb, GFP_KERNEL);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	/* Command Complete/Status handlers may override opcode, status
	 * and the request completion callbacks resolved above.
	 */
	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb, &opcode, &status,
				     &req_complete, &req_complete_skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb, &opcode, &status, &req_complete,
				   &req_complete_skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	/* AMP/high-speed events are only handled when BT_HS is enabled */
#if IS_ENABLED(CONFIG_BT_HS)
	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;
#endif

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	if (req_complete) {
		req_complete(hdev, status, opcode);
	} else if (req_complete_skb) {
		/* Hand over the pristine clone only if it really is the
		 * completion the request was waiting for; otherwise
		 * drop it and pass NULL.
		 */
		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
			kfree_skb(orig_skb);
			orig_skb = NULL;
		}
		req_complete_skb(hdev, status, opcode, orig_skb);
	}

	/* kfree_skb() is a no-op on NULL, so this is safe even when the
	 * clone was never made or already freed above.
	 */
	kfree_skb(orig_skb);
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
This page took 0.209402 seconds and 5 git commands to generate.