Merge tag 'drm/tegra/for-3.19-rc1-fixes' of git://people.freedesktop.org/~tagr/linux...
[deliverable/linux.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "a2mp.h"
34 #include "amp.h"
35 #include "smp.h"
36
37 /* Handle HCI Event packets */
38
/* Command Complete handler for HCI_Inquiry_Cancel: on success, clear the
 * inquiry state, wake any waiters, mark discovery stopped and kick off
 * any connection attempts that were deferred while inquiring.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Non-zero status means the cancel failed; leave state untouched */
	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* Retry connections that were postponed during the inquiry */
	hci_conn_check_pending(hdev);
}
58
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60 {
61 __u8 status = *((__u8 *) skb->data);
62
63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
64
65 if (status)
66 return;
67
68 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69 }
70
/* Command Complete handler for HCI_Exit_Periodic_Inquiry_Mode: on success,
 * clear the periodic-inquiry flag and resume pending connection attempts.
 */
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);

	/* Connections deferred while inquiring can now proceed */
	hci_conn_check_pending(hdev);
}
84
/* Command Complete handler for HCI_Remote_Name_Request_Cancel: nothing to
 * update locally; the result is delivered via the Remote Name Request
 * Complete event. Only log for debugging.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
90
/* Command Complete handler for HCI_Role_Discovery: store the reported
 * role (master/slave) on the matching connection, if it still exists.
 */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	/* The connection may have been torn down since the command was sent */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}
109
/* Command Complete handler for HCI_Read_Link_Policy_Settings: cache the
 * per-connection link policy reported by the controller.
 */
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}
128
/* Command Complete handler for HCI_Write_Link_Policy_Settings: on success,
 * mirror the policy we sent into the matching connection object.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Recover the parameters of the command this reply belongs to */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2 skips the 16-bit handle at the start of the
		 * command parameters to reach the policy field
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
152
/* Command Complete handler for HCI_Read_Default_Link_Policy_Settings:
 * cache the controller's default link policy.
 */
static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}
165
/* Command Complete handler for HCI_Write_Default_Link_Policy_Settings:
 * on success, record the policy value we sent as the new default.
 */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}
183
/* Command Complete handler for HCI_Reset: drop the in-reset marker and,
 * on success, return all volatile controller-mirrored state (discovery,
 * TX power, advertising/scan-response data, LE white list, ...) to its
 * power-on defaults.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* The reset attempt is over either way; clear the flag even on
	 * failure so a new reset can be issued
	 */
	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* The controller's white list is emptied by reset; drop our copy */
	hci_bdaddr_list_clear(&hdev->le_white_list);
}
214
/* Command Complete handler for HCI_Write_Local_Name: notify mgmt of the
 * outcome when it is in control, otherwise cache the name we sent on
 * success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* mgmt gets told even on failure so it can complete its command */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
235
/* Command Complete handler for HCI_Read_Local_Name: during initial setup
 * only, copy the controller-reported name into hdev.
 */
static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* After setup the name is owned by userspace/mgmt; don't overwrite */
	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
248
/* Command Complete handler for HCI_Write_Authentication_Enable: update the
 * HCI_AUTH flag to match what was requested and report the result to mgmt.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	/* mgmt is informed even on failure so it can finish its command */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}
276
/* Command Complete handler for HCI_Write_Encryption_Mode: on success,
 * mirror the requested mode into the HCI_ENCRYPT flag.
 */
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}
299
/* Command Complete handler for HCI_Write_Scan_Enable: sync the ISCAN
 * (inquiry scan / discoverable) and PSCAN (page scan / connectable) flags
 * with the scan mode that was requested. On failure the discoverable
 * timeout is cancelled since the mode change did not take effect.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
334
/* Command Complete handler for HCI_Read_Class_of_Device: cache the 3-byte
 * Class of Device value reported by the controller.
 */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	/* CoD is little-endian on the wire; print MSB first */
	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
349
/* Command Complete handler for HCI_Write_Class_of_Device: on success cache
 * the class we sent, and always report the outcome to mgmt when active.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
371
/* Command Complete handler for HCI_Read_Voice_Setting: cache the voice
 * setting and notify the driver if it changed (affects SCO routing).
 */
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	/* No change, no notification */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
394
/* Command Complete handler for HCI_Write_Voice_Setting: on success, adopt
 * the setting we sent and notify the driver if it changed.
 */
static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	/* No change, no notification */
	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
423
/* Command Complete handler for HCI_Read_Number_Of_Supported_IAC: cache how
 * many Inquiry Access Codes the controller supports simultaneously.
 */
static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}
438
/* Command Complete handler for HCI_Write_Simple_Pairing_Mode: keep the
 * host-features bit and the HCI_SSP_ENABLED flag in sync with the mode we
 * requested, and hand the result to mgmt when it is in control.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* Mirror the change into the extended host features page */
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
470
/* Command Complete handler for HCI_Write_Secure_Connections_Host_Support:
 * keep the host-features bit and the HCI_SC_ENABLED flag in sync with the
 * requested support level, and hand the result to mgmt when active.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* Mirror the change into the extended host features page */
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_sc_enable_complete(hdev, sent->support, status);
	else if (!status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
502
/* Command Complete handler for HCI_Read_Local_Version_Information: during
 * initial setup only, record HCI/LMP versions and manufacturer info.
 */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* These values are static per controller; only read them once */
	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}
520
/* Command Complete handler for HCI_Read_Local_Supported_Commands: during
 * initial setup only, cache the supported-commands bitmask.
 */
static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}
534
/* Command Complete handler for HCI_Read_Local_Supported_Features: cache
 * feature page 0 and derive the default ACL packet types and the set of
 * supported (e)SCO packet types from the advertised LMP features.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* EV3 is implied by eSCO support itself, not a separate LMP bit */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	/* 3-slot EDR eSCO enables the 5-slot variants of both rates */
	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
584
/* Command Complete handler for HCI_Read_Local_Extended_Features: track the
 * highest available feature page and cache the returned page's bits.
 */
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	/* Only pages we have storage for are cached */
	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}
601
602 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
603 struct sk_buff *skb)
604 {
605 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
606
607 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
608
609 if (rp->status)
610 return;
611
612 hdev->flow_ctl_mode = rp->mode;
613 }
614
/* Command Complete handler for HCI_Read_Buffer_Size: record the ACL/SCO
 * MTUs and packet counts, apply the fixup quirk for controllers that
 * report bogus SCO buffer sizes, and initialise the flow-control counters.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report unusable SCO buffer values; override
	 * them with known-good defaults when the quirk is set
	 */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	/* All buffers start out free */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
640
/* Command Complete handler for HCI_Read_BD_ADDR: during init, record the
 * controller address; during setup, also keep a copy as the original
 * setup-time address.
 */
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}
656
/* Command Complete handler for HCI_Read_Page_Scan_Activity: during init,
 * cache the controller's page scan interval and window.
 */
static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}
672
/* Command Complete handler for HCI_Write_Page_Scan_Activity: on success,
 * adopt the interval/window values we sent.
 */
static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}
691
/* Command Complete handler for HCI_Read_Page_Scan_Type: during init, cache
 * the controller's page scan type (standard vs interlaced).
 */
static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}
705
/* Command Complete handler for HCI_Write_Page_Scan_Type: on success, adopt
 * the page scan type we sent.
 */
static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The command carries a single type byte as its only parameter */
	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}
721
/* Command Complete handler for HCI_Read_Data_Block_Size: record the
 * block-based flow control parameters and initialise the free-block count.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	/* All blocks start out free */
	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
741
/* Command Complete handler for HCI_Read_Clock: depending on which clock
 * was requested in the matching command, store either the local clock on
 * hdev or the piconet clock and accuracy on the connection.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against truncated replies before touching rp fields */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 requests the local (native) clock */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
776
/* Command Complete handler for HCI_Read_Local_AMP_Info: cache the AMP
 * controller capabilities, then answer the pending A2MP Get Info request
 * (the response is sent even on failure, carrying the error status).
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
801
802 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
803 struct sk_buff *skb)
804 {
805 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
806 struct amp_assoc *assoc = &hdev->loc_assoc;
807 size_t rem_len, frag_len;
808
809 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
810
811 if (rp->status)
812 goto a2mp_rsp;
813
814 frag_len = skb->len - sizeof(*rp);
815 rem_len = __le16_to_cpu(rp->rem_len);
816
817 if (rem_len > frag_len) {
818 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
819
820 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
821 assoc->offset += frag_len;
822
823 /* Read other fragments */
824 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
825
826 return;
827 }
828
829 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
830 assoc->len = assoc->offset + rem_len;
831 assoc->offset = 0;
832
833 a2mp_rsp:
834 /* Send A2MP Rsp when all fragments are received */
835 a2mp_send_getampassoc_rsp(hdev, rp->status);
836 a2mp_send_create_phy_link_req(hdev, rp->status);
837 }
838
839 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
840 struct sk_buff *skb)
841 {
842 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
843
844 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
845
846 if (rp->status)
847 return;
848
849 hdev->inq_tx_power = rp->tx_power;
850 }
851
/* Command Complete handler for HCI_PIN_Code_Request_Reply: report the
 * outcome to mgmt and, on success, remember the PIN length on the
 * connection (used later for link key classification).
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
879
/* Command Complete handler for HCI_PIN_Code_Request_Negative_Reply:
 * forward the result to mgmt when it is in control.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
894
/* Command Complete handler for HCI_LE_Read_Buffer_Size: record the LE ACL
 * MTU and packet count and initialise the LE flow-control counter.
 */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	/* All LE buffers start out free */
	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}
912
/* Command Complete handler for HCI_LE_Read_Local_Supported_Features:
 * cache the 8-byte LE feature mask.
 */
static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_features, rp->features, 8);
}
925
926 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
927 struct sk_buff *skb)
928 {
929 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
930
931 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
932
933 if (rp->status)
934 return;
935
936 hdev->adv_tx_power = rp->tx_power;
937 }
938
/* Command Complete handler for HCI_User_Confirmation_Request_Reply:
 * forward the result to mgmt when it is in control.
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
953
/* Command Complete handler for HCI_User_Confirmation_Request_Negative_Reply:
 * forward the result to mgmt when it is in control.
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
969
/* Command Complete handler for HCI_User_Passkey_Request_Reply: forward the
 * result to mgmt when it is in control. The reply shares the same layout
 * as the user-confirm reply, hence the reused struct.
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
984
/* Command Complete handler for HCI_User_Passkey_Request_Negative_Reply:
 * forward the result to mgmt when it is in control.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
1000
/* Command Complete handler for HCI_Read_Local_OOB_Data: hand the P-192
 * hash/randomizer to mgmt (no P-256 values with this command, hence the
 * NULL 256-bit arguments).
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
					  rp->status);
	hci_dev_unlock(hdev);
}
1013
/* Command Complete handler for HCI_Read_Local_OOB_Extended_Data: hand both
 * the P-192 and P-256 hash/randomizer pairs to mgmt.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
					  rp->hash256, rp->rand256,
					  rp->status);
	hci_dev_unlock(hdev);
}
1027
1028
/* Command Complete handler for HCI_LE_Set_Random_Address: on success,
 * record the random address we programmed into the controller.
 */
static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}
1049
/* Command Complete handler for HCI_LE_Set_Advertising_Enable: track the
 * HCI_LE_ADV flag and, when advertising was enabled for a slave-role
 * connection attempt, arm the connection timeout.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The command's single parameter byte is the enable flag */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		set_bit(HCI_LE_ADV, &hdev->dev_flags);

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		clear_bit(HCI_LE_ADV, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
1084
/* Command Complete handler for HCI_LE_Set_Scan_Parameters: on success,
 * remember the scan type (active/passive) that is now in effect.
 */
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}
1105
1106 static bool has_pending_adv_report(struct hci_dev *hdev)
1107 {
1108 struct discovery_state *d = &hdev->discovery;
1109
1110 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1111 }
1112
1113 static void clear_pending_adv_report(struct hci_dev *hdev)
1114 {
1115 struct discovery_state *d = &hdev->discovery;
1116
1117 bacpy(&d->last_adv_addr, BDADDR_ANY);
1118 d->last_adv_data_len = 0;
1119 }
1120
1121 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1122 u8 bdaddr_type, s8 rssi, u32 flags,
1123 u8 *data, u8 len)
1124 {
1125 struct discovery_state *d = &hdev->discovery;
1126
1127 bacpy(&d->last_adv_addr, bdaddr);
1128 d->last_adv_addr_type = bdaddr_type;
1129 d->last_adv_rssi = rssi;
1130 d->last_adv_flags = flags;
1131 memcpy(d->last_adv_data, data, len);
1132 d->last_adv_data_len = len;
1133 }
1134
/* Command Complete handler for HCI_LE_Set_Scan_Enable: update the
 * HCI_LE_SCAN flag and handle the discovery state machine transitions.
 * On disable, flush any buffered advertising report, cancel the scan
 * timeout, and either finish discovery or re-enable advertising depending
 * on why scanning stopped.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* Active scanning buffers reports while waiting for scan
		 * responses; start with a clean slate
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1204
1205 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1206 struct sk_buff *skb)
1207 {
1208 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1209
1210 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1211
1212 if (rp->status)
1213 return;
1214
1215 hdev->le_white_list_size = rp->size;
1216 }
1217
1218 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1219 struct sk_buff *skb)
1220 {
1221 __u8 status = *((__u8 *) skb->data);
1222
1223 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1224
1225 if (status)
1226 return;
1227
1228 hci_bdaddr_list_clear(&hdev->le_white_list);
1229 }
1230
1231 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1232 struct sk_buff *skb)
1233 {
1234 struct hci_cp_le_add_to_white_list *sent;
1235 __u8 status = *((__u8 *) skb->data);
1236
1237 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1238
1239 if (status)
1240 return;
1241
1242 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1243 if (!sent)
1244 return;
1245
1246 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1247 sent->bdaddr_type);
1248 }
1249
1250 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1251 struct sk_buff *skb)
1252 {
1253 struct hci_cp_le_del_from_white_list *sent;
1254 __u8 status = *((__u8 *) skb->data);
1255
1256 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1257
1258 if (status)
1259 return;
1260
1261 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1262 if (!sent)
1263 return;
1264
1265 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1266 sent->bdaddr_type);
1267 }
1268
1269 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1270 struct sk_buff *skb)
1271 {
1272 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1273
1274 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1275
1276 if (rp->status)
1277 return;
1278
1279 memcpy(hdev->le_states, rp->le_states, 8);
1280 }
1281
/* Command complete handler for HCI_Write_LE_Host_Supported: mirror the
 * values that were written into the cached host features and dev_flags.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the parameters of the command that just completed */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
		/* With LE disabled, advertising cannot stay on either */
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
	}

	/* Mirror the simultaneous LE + BR/EDR host setting */
	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
1315
1316 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1317 {
1318 struct hci_cp_le_set_adv_param *cp;
1319 u8 status = *((u8 *) skb->data);
1320
1321 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1322
1323 if (status)
1324 return;
1325
1326 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1327 if (!cp)
1328 return;
1329
1330 hci_dev_lock(hdev);
1331 hdev->adv_addr_type = cp->own_address_type;
1332 hci_dev_unlock(hdev);
1333 }
1334
1335 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1336 struct sk_buff *skb)
1337 {
1338 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1339
1340 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1341 hdev->name, rp->status, rp->phy_handle);
1342
1343 if (rp->status)
1344 return;
1345
1346 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1347 }
1348
1349 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1350 {
1351 struct hci_rp_read_rssi *rp = (void *) skb->data;
1352 struct hci_conn *conn;
1353
1354 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1355
1356 if (rp->status)
1357 return;
1358
1359 hci_dev_lock(hdev);
1360
1361 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1362 if (conn)
1363 conn->rssi = rp->rssi;
1364
1365 hci_dev_unlock(hdev);
1366 }
1367
1368 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1369 {
1370 struct hci_cp_read_tx_power *sent;
1371 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1372 struct hci_conn *conn;
1373
1374 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1375
1376 if (rp->status)
1377 return;
1378
1379 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1380 if (!sent)
1381 return;
1382
1383 hci_dev_lock(hdev);
1384
1385 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1386 if (!conn)
1387 goto unlock;
1388
1389 switch (sent->type) {
1390 case 0x00:
1391 conn->tx_power = rp->tx_power;
1392 break;
1393 case 0x01:
1394 conn->max_tx_power = rp->tx_power;
1395 break;
1396 }
1397
1398 unlock:
1399 hci_dev_unlock(hdev);
1400 }
1401
1402 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1403 {
1404 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1405
1406 if (status) {
1407 hci_conn_check_pending(hdev);
1408 return;
1409 }
1410
1411 set_bit(HCI_INQUIRY, &hdev->flags);
1412 }
1413
/* Command status handler for HCI_Create_Connection */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Recover the target address from the command that was sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* For status 0x0c the attempt is parked in
			 * BT_CONNECT2 and retried (up to two attempts);
			 * any other failure tears the connection down.
			 * NOTE(review): 0x0c is presumably the HCI
			 * "Command Disallowed" error code - confirm.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		/* Command accepted: make sure a connection object exists
		 * for the upcoming Connection Complete event.
		 */
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1451
1452 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1453 {
1454 struct hci_cp_add_sco *cp;
1455 struct hci_conn *acl, *sco;
1456 __u16 handle;
1457
1458 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1459
1460 if (!status)
1461 return;
1462
1463 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1464 if (!cp)
1465 return;
1466
1467 handle = __le16_to_cpu(cp->handle);
1468
1469 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1470
1471 hci_dev_lock(hdev);
1472
1473 acl = hci_conn_hash_lookup_handle(hdev, handle);
1474 if (acl) {
1475 sco = acl->link;
1476 if (sco) {
1477 sco->state = BT_CLOSED;
1478
1479 hci_proto_connect_cfm(sco, status);
1480 hci_conn_del(sco);
1481 }
1482 }
1483
1484 hci_dev_unlock(hdev);
1485 }
1486
1487 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1488 {
1489 struct hci_cp_auth_requested *cp;
1490 struct hci_conn *conn;
1491
1492 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1493
1494 if (!status)
1495 return;
1496
1497 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1498 if (!cp)
1499 return;
1500
1501 hci_dev_lock(hdev);
1502
1503 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1504 if (conn) {
1505 if (conn->state == BT_CONFIG) {
1506 hci_proto_connect_cfm(conn, status);
1507 hci_conn_drop(conn);
1508 }
1509 }
1510
1511 hci_dev_unlock(hdev);
1512 }
1513
1514 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1515 {
1516 struct hci_cp_set_conn_encrypt *cp;
1517 struct hci_conn *conn;
1518
1519 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1520
1521 if (!status)
1522 return;
1523
1524 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1525 if (!cp)
1526 return;
1527
1528 hci_dev_lock(hdev);
1529
1530 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1531 if (conn) {
1532 if (conn->state == BT_CONFIG) {
1533 hci_proto_connect_cfm(conn, status);
1534 hci_conn_drop(conn);
1535 }
1536 }
1537
1538 hci_dev_unlock(hdev);
1539 }
1540
1541 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1542 struct hci_conn *conn)
1543 {
1544 if (conn->state != BT_CONFIG || !conn->out)
1545 return 0;
1546
1547 if (conn->pending_sec_level == BT_SECURITY_SDP)
1548 return 0;
1549
1550 /* Only request authentication for SSP connections or non-SSP
1551 * devices with sec_level MEDIUM or HIGH or if MITM protection
1552 * is requested.
1553 */
1554 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1555 conn->pending_sec_level != BT_SECURITY_FIPS &&
1556 conn->pending_sec_level != BT_SECURITY_HIGH &&
1557 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1558 return 0;
1559
1560 return 1;
1561 }
1562
1563 static int hci_resolve_name(struct hci_dev *hdev,
1564 struct inquiry_entry *e)
1565 {
1566 struct hci_cp_remote_name_req cp;
1567
1568 memset(&cp, 0, sizeof(cp));
1569
1570 bacpy(&cp.bdaddr, &e->data.bdaddr);
1571 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1572 cp.pscan_mode = e->data.pscan_mode;
1573 cp.clock_offset = e->data.clock_offset;
1574
1575 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1576 }
1577
1578 static bool hci_resolve_next_name(struct hci_dev *hdev)
1579 {
1580 struct discovery_state *discov = &hdev->discovery;
1581 struct inquiry_entry *e;
1582
1583 if (list_empty(&discov->resolve))
1584 return false;
1585
1586 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1587 if (!e)
1588 return false;
1589
1590 if (hci_resolve_name(hdev, e) == 0) {
1591 e->name_state = NAME_PENDING;
1592 return true;
1593 }
1594
1595 return false;
1596 }
1597
/* Process a completed (or failed) remote-name lookup for @bdaddr and
 * advance the name-resolution phase of discovery.
 *
 * @conn may be NULL. @name is NULL when the lookup failed; otherwise it
 * points to @name_len bytes of name data.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices whose names
	 * are pending, there is no need to continue resolving a next name
	 * as it will be done upon receiving another Remote Name Request
	 * Complete Event.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* If another resolution was started, discovery is not complete */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1646
/* Command status handler for HCI_Remote_Name_Request */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report the failed lookup so discovery can move on */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Initiate authentication ourselves if none is pending yet */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1689
1690 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1691 {
1692 struct hci_cp_read_remote_features *cp;
1693 struct hci_conn *conn;
1694
1695 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1696
1697 if (!status)
1698 return;
1699
1700 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1701 if (!cp)
1702 return;
1703
1704 hci_dev_lock(hdev);
1705
1706 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1707 if (conn) {
1708 if (conn->state == BT_CONFIG) {
1709 hci_proto_connect_cfm(conn, status);
1710 hci_conn_drop(conn);
1711 }
1712 }
1713
1714 hci_dev_unlock(hdev);
1715 }
1716
1717 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1718 {
1719 struct hci_cp_read_remote_ext_features *cp;
1720 struct hci_conn *conn;
1721
1722 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1723
1724 if (!status)
1725 return;
1726
1727 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1728 if (!cp)
1729 return;
1730
1731 hci_dev_lock(hdev);
1732
1733 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1734 if (conn) {
1735 if (conn->state == BT_CONFIG) {
1736 hci_proto_connect_cfm(conn, status);
1737 hci_conn_drop(conn);
1738 }
1739 }
1740
1741 hci_dev_unlock(hdev);
1742 }
1743
1744 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1745 {
1746 struct hci_cp_setup_sync_conn *cp;
1747 struct hci_conn *acl, *sco;
1748 __u16 handle;
1749
1750 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1751
1752 if (!status)
1753 return;
1754
1755 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1756 if (!cp)
1757 return;
1758
1759 handle = __le16_to_cpu(cp->handle);
1760
1761 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1762
1763 hci_dev_lock(hdev);
1764
1765 acl = hci_conn_hash_lookup_handle(hdev, handle);
1766 if (acl) {
1767 sco = acl->link;
1768 if (sco) {
1769 sco->state = BT_CLOSED;
1770
1771 hci_proto_connect_cfm(sco, status);
1772 hci_conn_del(sco);
1773 }
1774 }
1775
1776 hci_dev_unlock(hdev);
1777 }
1778
1779 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1780 {
1781 struct hci_cp_sniff_mode *cp;
1782 struct hci_conn *conn;
1783
1784 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1785
1786 if (!status)
1787 return;
1788
1789 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1790 if (!cp)
1791 return;
1792
1793 hci_dev_lock(hdev);
1794
1795 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1796 if (conn) {
1797 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1798
1799 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1800 hci_sco_setup(conn, status);
1801 }
1802
1803 hci_dev_unlock(hdev);
1804 }
1805
1806 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1807 {
1808 struct hci_cp_exit_sniff_mode *cp;
1809 struct hci_conn *conn;
1810
1811 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1812
1813 if (!status)
1814 return;
1815
1816 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1817 if (!cp)
1818 return;
1819
1820 hci_dev_lock(hdev);
1821
1822 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1823 if (conn) {
1824 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1825
1826 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1827 hci_sco_setup(conn, status);
1828 }
1829
1830 hci_dev_unlock(hdev);
1831 }
1832
1833 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1834 {
1835 struct hci_cp_disconnect *cp;
1836 struct hci_conn *conn;
1837
1838 if (!status)
1839 return;
1840
1841 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1842 if (!cp)
1843 return;
1844
1845 hci_dev_lock(hdev);
1846
1847 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1848 if (conn)
1849 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1850 conn->dst_type, status);
1851
1852 hci_dev_unlock(hdev);
1853 }
1854
/* Command status handler for HCI_Create_Physical_Link (AMP) */
static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_create_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (status) {
		/* Creation failed: drop the connection object that was
		 * set up for the physical link.
		 */
		struct hci_conn *hcon;

		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
		if (hcon)
			hci_conn_del(hcon);
	} else {
		/* Continue AMP setup by writing the remote assoc data */
		amp_write_remote_assoc(hdev, cp->phy_handle);
	}

	hci_dev_unlock(hdev);
}
1879
1880 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1881 {
1882 struct hci_cp_accept_phy_link *cp;
1883
1884 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1885
1886 if (status)
1887 return;
1888
1889 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1890 if (!cp)
1891 return;
1892
1893 amp_write_remote_assoc(hdev, cp->phy_handle);
1894 }
1895
/* Command status handler for HCI_LE_Create_Connection */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1946
/* Command status handler for HCI_LE_Start_Encryption */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Only a failed command status needs handling here */
	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	/* Encryption could not be started: treat it as an authentication
	 * failure and disconnect the link.
	 */
	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
1976
1977 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
1978 {
1979 struct hci_cp_switch_role *cp;
1980 struct hci_conn *conn;
1981
1982 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1983
1984 if (!status)
1985 return;
1986
1987 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
1988 if (!cp)
1989 return;
1990
1991 hci_dev_lock(hdev);
1992
1993 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1994 if (conn)
1995 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
1996
1997 hci_dev_unlock(hdev);
1998 }
1999
/* Handle the Inquiry Complete event: clear the inquiry state and, when
 * mgmt-driven discovery is active, either start resolving pending names
 * or mark discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Wake anyone waiting on the HCI_INQUIRY bit */
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Kick off name resolution for the first entry that needs it */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
2040
2041 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2042 {
2043 struct inquiry_data data;
2044 struct inquiry_info *info = (void *) (skb->data + 1);
2045 int num_rsp = *((__u8 *) skb->data);
2046
2047 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2048
2049 if (!num_rsp)
2050 return;
2051
2052 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2053 return;
2054
2055 hci_dev_lock(hdev);
2056
2057 for (; num_rsp; num_rsp--, info++) {
2058 u32 flags;
2059
2060 bacpy(&data.bdaddr, &info->bdaddr);
2061 data.pscan_rep_mode = info->pscan_rep_mode;
2062 data.pscan_period_mode = info->pscan_period_mode;
2063 data.pscan_mode = info->pscan_mode;
2064 memcpy(data.dev_class, info->dev_class, 3);
2065 data.clock_offset = info->clock_offset;
2066 data.rssi = HCI_RSSI_INVALID;
2067 data.ssp_mode = 0x00;
2068
2069 flags = hci_inquiry_cache_update(hdev, &data, false);
2070
2071 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2072 info->dev_class, HCI_RSSI_INVALID,
2073 flags, NULL, 0, NULL, 0);
2074 }
2075
2076 hci_dev_unlock(hdev);
2077 }
2078
/* Handle the Connection Complete event for BR/EDR ACL and SCO links */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may correspond to a connection that
		 * was set up as eSCO; retry the lookup under ESCO_LINK
		 * and convert the object to the reported type.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Give incoming legacy (non-SSP) connections
			 * without a stored link key the longer pairing
			 * timeout before the reference is dropped.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_page_scan(hdev, NULL);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	/* A pending SCO setup rides on the outcome of the ACL link */
	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2162
2163 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2164 {
2165 struct hci_cp_reject_conn_req cp;
2166
2167 bacpy(&cp.bdaddr, bdaddr);
2168 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2169 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2170 }
2171
/* Handle the Connection Request event: apply the accept/reject policy
 * and either accept the link, defer to the protocol layer, or reject.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layer veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Blacklisted devices are always rejected */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* When not connectable, only whitelisted devices may connect */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class from the request */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* Synchronous link accepted with default parameters */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth = cpu_to_le32(0x00001f40);
		cp.max_latency = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Protocol layer asked to defer; let it decide later */
		conn->state = BT_CONNECT2;
		hci_proto_connect_cfm(conn, 0);
	}
}
2261
2262 static u8 hci_to_mgmt_reason(u8 err)
2263 {
2264 switch (err) {
2265 case HCI_ERROR_CONNECTION_TIMEOUT:
2266 return MGMT_DEV_DISCONN_TIMEOUT;
2267 case HCI_ERROR_REMOTE_USER_TERM:
2268 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2269 case HCI_ERROR_REMOTE_POWER_OFF:
2270 return MGMT_DEV_DISCONN_REMOTE;
2271 case HCI_ERROR_LOCAL_HOST_TERM:
2272 return MGMT_DEV_DISCONN_LOCAL_HOST;
2273 default:
2274 return MGMT_DEV_DISCONN_UNKNOWN;
2275 }
2276 }
2277
/* Handle the Disconnection Complete event: tear down the connection
 * object, notify mgmt, and restart background scanning/advertising as
 * needed.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report the disconnect to mgmt if the connect was reported */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_page_scan(hdev, NULL);
	}

	/* Re-queue auto-connect candidates for background scanning */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* conn is freed below; remember the link type for the check after */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2355
/* Handle the Authentication Complete event: update the connection's
 * security state and continue with encryption setup where appropriate.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (non-SSP) devices cannot be re-authenticated;
		 * keep the existing security level in that case.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* During setup, successful SSP authentication proceeds
		 * straight to enabling encryption; otherwise setup is
		 * finished (or aborted on failure).
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* An encryption request was waiting on this authentication */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2420
/* Handle the Remote Name Request Complete event: hand the name to the
 * discovery machinery and initiate authentication if it is still needed.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	/* Pass NULL for the name when the lookup failed */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Initiate authentication ourselves if none is pending yet */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2462
/* Encryption Change event.
 *
 * Updates the connection's encryption and authentication state from
 * the reported encryption mode, enforces Secure Connections Only
 * policy for links still in setup, and confirms the change to upper
 * layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 on BR/EDR marks AES-CCM
			 * encryption; LE links always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed encryption attempt on an established link is treated
	 * as fatal: tear the connection down.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2534
2535 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2536 struct sk_buff *skb)
2537 {
2538 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2539 struct hci_conn *conn;
2540
2541 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2542
2543 hci_dev_lock(hdev);
2544
2545 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2546 if (conn) {
2547 if (!ev->status)
2548 set_bit(HCI_CONN_SECURE, &conn->flags);
2549
2550 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2551
2552 hci_key_change_cfm(conn, ev->status);
2553 }
2554
2555 hci_dev_unlock(hdev);
2556 }
2557
/* Remote Supported Features event.
 *
 * Caches feature page 0 for the connection and drives the next setup
 * step: fetch extended features when both sides support SSP, otherwise
 * resolve the remote name or report the device connected, and finally
 * complete the connection if no authentication is required.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both sides SSP-capable: read extended feature page 1 before
	 * continuing with connection setup.
	 */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before announcing the connection to
	 * the management interface.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2605
/* Command Complete event.
 *
 * The per-command status byte immediately follows the event header, so
 * it is captured before the header is pulled off the skb. Dispatches
 * to the matching command-complete handler, finishes request tracking,
 * and restarts the command queue when the controller indicates it can
 * accept more commands.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command completed; stop the command timeout. */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* ev->ncmd is the number of commands the controller will now
	 * accept; non-zero means the next queued command may be sent,
	 * unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2885
/* Command Status event.
 *
 * Dispatches to the matching command-status handler, completes request
 * tracking for failed commands (or commands not waiting on a later
 * event), and restarts the command queue when the controller can
 * accept more commands.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command got its status; stop the command timeout. */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete the request now if the command failed, or if it is
	 * not waiting for a specific follow-up event (req.event unset).
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* Controller can accept more commands: kick the command queue
	 * unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2982
2983 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
2984 {
2985 struct hci_ev_hardware_error *ev = (void *) skb->data;
2986
2987 BT_ERR("%s hardware error 0x%2.2x", hdev->name, ev->code);
2988 }
2989
2990 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2991 {
2992 struct hci_ev_role_change *ev = (void *) skb->data;
2993 struct hci_conn *conn;
2994
2995 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2996
2997 hci_dev_lock(hdev);
2998
2999 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3000 if (conn) {
3001 if (!ev->status)
3002 conn->role = ev->role;
3003
3004 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3005
3006 hci_role_switch_cfm(conn, ev->status, ev->role);
3007 }
3008
3009 hci_dev_unlock(hdev);
3010 }
3011
/* Number Of Completed Packets event (packet-based flow control).
 *
 * Returns transmit credits for each reported connection handle and
 * re-schedules the TX work so queued data can be sent.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only valid in packet-based flow control mode. */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate the header first; num_hndl is only read once the
	 * fixed part is known to be present.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return credits to the per-type pool, capped at the
		 * controller-advertised maximum.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Without a dedicated LE buffer pool, LE traffic
			 * shares the ACL credits.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3077
3078 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3079 __u16 handle)
3080 {
3081 struct hci_chan *chan;
3082
3083 switch (hdev->dev_type) {
3084 case HCI_BREDR:
3085 return hci_conn_hash_lookup_handle(hdev, handle);
3086 case HCI_AMP:
3087 chan = hci_chan_lookup_handle(hdev, handle);
3088 if (chan)
3089 return chan->conn;
3090 break;
3091 default:
3092 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3093 break;
3094 }
3095
3096 return NULL;
3097 }
3098
/* Number Of Completed Data Blocks event (block-based flow control).
 *
 * Returns block credits for each reported handle and re-schedules the
 * TX work so queued data can be sent.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only valid in block-based flow control mode. */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate the header first; num_hndl is only read once the
	 * fixed part is known to be present.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Return blocks to the shared pool, capped at
			 * the controller-advertised maximum.
			 */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3148
/* Mode Change event (active/sniff/hold).
 *
 * Records the new link mode, tracks the power-save state for mode
 * changes we did not initiate ourselves, and completes deferred SCO
 * setup if one was waiting on this change.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		/* Only update the power-save flag for remote-initiated
		 * changes (no mode-change of our own was pending).
		 */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
3176
/* PIN Code Request event.
 *
 * Extends the disconnect timeout while pairing is in progress, rejects
 * the request when the device is not bondable and we did not initiate
 * authentication, otherwise forwards it to the management interface.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* hold + drop pair (re)arms the (longer) pairing timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Not bondable and the peer started this: reject the pairing. */
	if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security requires a 16-digit PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3214
3215 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3216 {
3217 if (key_type == HCI_LK_CHANGED_COMBINATION)
3218 return;
3219
3220 conn->pin_length = pin_len;
3221 conn->key_type = key_type;
3222
3223 switch (key_type) {
3224 case HCI_LK_LOCAL_UNIT:
3225 case HCI_LK_REMOTE_UNIT:
3226 case HCI_LK_DEBUG_COMBINATION:
3227 return;
3228 case HCI_LK_COMBINATION:
3229 if (pin_len == 16)
3230 conn->pending_sec_level = BT_SECURITY_HIGH;
3231 else
3232 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3233 break;
3234 case HCI_LK_UNAUTH_COMBINATION_P192:
3235 case HCI_LK_UNAUTH_COMBINATION_P256:
3236 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3237 break;
3238 case HCI_LK_AUTH_COMBINATION_P192:
3239 conn->pending_sec_level = BT_SECURITY_HIGH;
3240 break;
3241 case HCI_LK_AUTH_COMBINATION_P256:
3242 conn->pending_sec_level = BT_SECURITY_FIPS;
3243 break;
3244 }
3245 }
3246
/* Link Key Request event.
 *
 * Looks up a stored link key for the peer and replies with it, after
 * rejecting keys too weak for the connection's required security
 * level. Replies negatively when no acceptable key exists.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* An unauthenticated key will not do when MITM
		 * protection was requested (auth_type bit 0).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A short-PIN combination key cannot satisfy high or
		 * FIPS security requirements.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3306
/* Link Key Notification event.
 *
 * Stores the new link key for the peer, notifies user space, and
 * decides whether the key persists across disconnect (debug keys are
 * discarded unless explicitly kept).
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	/* NOTE(review): pin_len stays 0 and is what gets passed to
	 * hci_add_link_key() below, while conn->pin_length is used for
	 * conn_set_key() — confirm this asymmetry is intentional.
	 */
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* hold + drop pair (re)arms the disconnect timeout */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
3366
3367 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3368 {
3369 struct hci_ev_clock_offset *ev = (void *) skb->data;
3370 struct hci_conn *conn;
3371
3372 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3373
3374 hci_dev_lock(hdev);
3375
3376 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3377 if (conn && !ev->status) {
3378 struct inquiry_entry *ie;
3379
3380 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3381 if (ie) {
3382 ie->data.clock_offset = ev->clock_offset;
3383 ie->timestamp = jiffies;
3384 }
3385 }
3386
3387 hci_dev_unlock(hdev);
3388 }
3389
3390 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3391 {
3392 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3393 struct hci_conn *conn;
3394
3395 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3396
3397 hci_dev_lock(hdev);
3398
3399 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3400 if (conn && !ev->status)
3401 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3402
3403 hci_dev_unlock(hdev);
3404 }
3405
3406 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3407 {
3408 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3409 struct inquiry_entry *ie;
3410
3411 BT_DBG("%s", hdev->name);
3412
3413 hci_dev_lock(hdev);
3414
3415 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3416 if (ie) {
3417 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3418 ie->timestamp = jiffies;
3419 }
3420
3421 hci_dev_unlock(hdev);
3422 }
3423
/* Inquiry Result with RSSI event.
 *
 * Two layouts exist for this event: with and without the page scan
 * mode field. The per-response size (payload length divided by the
 * response count) tells them apart. Each response updates the inquiry
 * cache and is reported to the management interface.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not reported. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		/* Variant that also carries the page scan mode */
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3487
/* Remote Extended Features event.
 *
 * Caches the requested feature page. Page 1 carries the remote host's
 * SSP and Secure Connections support bits, which are mirrored into the
 * connection flags. For links still in setup, continues with name
 * resolution or connection completion.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name before announcing the connection to
	 * the management interface.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3551
/* Synchronous Connection Complete event (SCO/eSCO).
 *
 * Finds the pending synchronous connection (falling back from SCO to a
 * pending eSCO entry), finalises it on success, and on selected errors
 * retries an outgoing eSCO setup with a downgraded packet type before
 * giving up.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* The controller may have downgraded an eSCO attempt to
		 * plain SCO; match the pending eSCO entry and retype it.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x10: /* Connection Accept Timeout */
	case 0x0d: /* Connection Rejected due to Limited Resources */
	case 0x11: /* Unsupported Feature or Parameter Value */
	case 0x1c: /* SCO interval rejected */
	case 0x1a: /* Unsupported Remote Feature */
	case 0x1f: /* Unspecified error */
	case 0x20: /* Unsupported LMP Parameter value */
		if (conn->out) {
			/* Retry with a reduced packet type set.
			 *
			 * NOTE(review): conn->link is dereferenced with
			 * no NULL check — assumes an outgoing sync
			 * connection always has a parent ACL; confirm.
			 */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3609
3610 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3611 {
3612 size_t parsed = 0;
3613
3614 while (parsed < eir_len) {
3615 u8 field_len = eir[0];
3616
3617 if (field_len == 0)
3618 return parsed;
3619
3620 parsed += field_len + 1;
3621 eir += field_len + 1;
3622 }
3623
3624 return eir_len;
3625 }
3626
/* Extended Inquiry Result event handler.
 *
 * Each response carries the usual inquiry fields plus an EIR blob.
 * Every entry is fed into the inquiry cache and reported to the
 * management interface as a found device.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results of an ongoing periodic inquiry are not reported */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		/* Extended inquiry results imply SSP support */
		data.ssp_mode = 0x01;

		/* With mgmt active, a name resolution round is only
		 * needed if the EIR data does not already carry the
		 * complete name.
		 */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
3676
/* Encryption Key Refresh Complete event handler.
 *
 * Only LE links are processed here; BR/EDR links get the equivalent
 * treatment from the Authentication Complete handler.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link leaves the link
	 * unencrypted; disconnect it for security reasons.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3726
3727 static u8 hci_get_auth_req(struct hci_conn *conn)
3728 {
3729 /* If remote requests no-bonding follow that lead */
3730 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3731 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3732 return conn->remote_auth | (conn->auth_type & 0x01);
3733
3734 /* If both remote and local have enough IO capabilities, require
3735 * MITM protection
3736 */
3737 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3738 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3739 return conn->remote_auth | 0x01;
3740
3741 /* No MITM protection possible so ignore remote requirement */
3742 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3743 }
3744
/* IO Capability Request event handler.
 *
 * Decides whether SSP pairing may proceed and, if so, replies with our
 * IO capability, authentication requirements and OOB data presence;
 * otherwise a negative reply rejects the pairing.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of the pairing
	 * exchange; dropped again from the pairing completion paths.
	 */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;

		/* Advertise OOB data presence only if we have data stored
		 * and we either initiated the connection or the remote
		 * claimed to have OOB data too.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3818
3819 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3820 {
3821 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3822 struct hci_conn *conn;
3823
3824 BT_DBG("%s", hdev->name);
3825
3826 hci_dev_lock(hdev);
3827
3828 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3829 if (!conn)
3830 goto unlock;
3831
3832 conn->remote_cap = ev->capability;
3833 conn->remote_auth = ev->authentication;
3834 if (ev->oob_data)
3835 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3836
3837 unlock:
3838 hci_dev_unlock(hdev);
3839 }
3840
/* User Confirmation Request event handler.
 *
 * Implements the SSP numeric comparison policy: reject if the remote
 * cannot provide a required MITM protection, auto-accept (possibly
 * delayed) when no side requires MITM, and otherwise forward the
 * request to user space through mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally delay the auto-accept to give the remote a
		 * chance to cancel (configured via debugfs).
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3915
3916 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3917 struct sk_buff *skb)
3918 {
3919 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3920
3921 BT_DBG("%s", hdev->name);
3922
3923 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3924 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3925 }
3926
3927 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3928 struct sk_buff *skb)
3929 {
3930 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3931 struct hci_conn *conn;
3932
3933 BT_DBG("%s", hdev->name);
3934
3935 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3936 if (!conn)
3937 return;
3938
3939 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3940 conn->passkey_entered = 0;
3941
3942 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3943 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3944 conn->dst_type, conn->passkey_notify,
3945 conn->passkey_entered);
3946 }
3947
3948 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3949 {
3950 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3951 struct hci_conn *conn;
3952
3953 BT_DBG("%s", hdev->name);
3954
3955 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3956 if (!conn)
3957 return;
3958
3959 switch (ev->type) {
3960 case HCI_KEYPRESS_STARTED:
3961 conn->passkey_entered = 0;
3962 return;
3963
3964 case HCI_KEYPRESS_ENTERED:
3965 conn->passkey_entered++;
3966 break;
3967
3968 case HCI_KEYPRESS_ERASED:
3969 conn->passkey_entered--;
3970 break;
3971
3972 case HCI_KEYPRESS_CLEARED:
3973 conn->passkey_entered = 0;
3974 break;
3975
3976 case HCI_KEYPRESS_COMPLETED:
3977 return;
3978 }
3979
3980 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3981 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3982 conn->dst_type, conn->passkey_notify,
3983 conn->passkey_entered);
3984 }
3985
3986 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3987 struct sk_buff *skb)
3988 {
3989 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3990 struct hci_conn *conn;
3991
3992 BT_DBG("%s", hdev->name);
3993
3994 hci_dev_lock(hdev);
3995
3996 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3997 if (!conn)
3998 goto unlock;
3999
4000 /* Reset the authentication requirement to unknown */
4001 conn->remote_auth = 0xff;
4002
4003 /* To avoid duplicate auth_failed events to user space we check
4004 * the HCI_CONN_AUTH_PEND flag which will be set if we
4005 * initiated the authentication. A traditional auth_complete
4006 * event gets always produced as initiator and is also mapped to
4007 * the mgmt_auth_failed event */
4008 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4009 mgmt_auth_failed(conn, ev->status);
4010
4011 hci_conn_drop(conn);
4012
4013 unlock:
4014 hci_dev_unlock(hdev);
4015 }
4016
4017 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4018 struct sk_buff *skb)
4019 {
4020 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4021 struct inquiry_entry *ie;
4022 struct hci_conn *conn;
4023
4024 BT_DBG("%s", hdev->name);
4025
4026 hci_dev_lock(hdev);
4027
4028 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4029 if (conn)
4030 memcpy(conn->features[1], ev->features, 8);
4031
4032 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4033 if (ie)
4034 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4035
4036 hci_dev_unlock(hdev);
4037 }
4038
/* Remote OOB Data Request event handler.
 *
 * Replies with locally stored out-of-band pairing data for the remote
 * device, or with a negative reply when none is available.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (data) {
		if (bredr_sc_enabled(hdev)) {
			/* Secure Connections: provide both the P-192 and
			 * P-256 values in the extended reply.
			 */
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
			memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			/* Legacy pairing: only the P-192 values are sent */
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.rand, data->rand192, sizeof(cp.rand));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4086
4087 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4088 struct sk_buff *skb)
4089 {
4090 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4091 struct hci_conn *hcon, *bredr_hcon;
4092
4093 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4094 ev->status);
4095
4096 hci_dev_lock(hdev);
4097
4098 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4099 if (!hcon) {
4100 hci_dev_unlock(hdev);
4101 return;
4102 }
4103
4104 if (ev->status) {
4105 hci_conn_del(hcon);
4106 hci_dev_unlock(hdev);
4107 return;
4108 }
4109
4110 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4111
4112 hcon->state = BT_CONNECTED;
4113 bacpy(&hcon->dst, &bredr_hcon->dst);
4114
4115 hci_conn_hold(hcon);
4116 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4117 hci_conn_drop(hcon);
4118
4119 hci_conn_add_sysfs(hcon);
4120
4121 amp_physical_cfm(bredr_hcon, hcon);
4122
4123 hci_dev_unlock(hdev);
4124 }
4125
/* Logical Link Complete event handler (AMP).
 *
 * Creates an hci_chan for the new logical link and, when a BR/EDR
 * L2CAP channel is waiting on it, confirms the logical link so the
 * channel move can proceed.
 *
 * NOTE(review): unlike most event handlers this one does not take
 * hci_dev_lock around the connection lookup — confirm this is safe.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* The L2CAP connection now flows over the AMP link, so
		 * adopt the AMP controller's block MTU.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4163
4164 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4165 struct sk_buff *skb)
4166 {
4167 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4168 struct hci_chan *hchan;
4169
4170 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4171 le16_to_cpu(ev->handle), ev->status);
4172
4173 if (ev->status)
4174 return;
4175
4176 hci_dev_lock(hdev);
4177
4178 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4179 if (!hchan)
4180 goto unlock;
4181
4182 amp_destroy_logical_link(hchan, ev->reason);
4183
4184 unlock:
4185 hci_dev_unlock(hdev);
4186 }
4187
4188 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4189 struct sk_buff *skb)
4190 {
4191 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4192 struct hci_conn *hcon;
4193
4194 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4195
4196 if (ev->status)
4197 return;
4198
4199 hci_dev_lock(hdev);
4200
4201 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4202 if (hcon) {
4203 hcon->state = BT_CLOSED;
4204 hci_conn_del(hcon);
4205 }
4206
4207 hci_dev_unlock(hdev);
4208 }
4209
/* LE Connection Complete event handler.
 *
 * Resolves (or creates) the hci_conn for the new LE link, records the
 * initiator/responder addresses, converts resolvable random addresses
 * back to identity addresses, and completes or fails the connection
 * towards L2CAP and the management interface.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The connection attempt is no longer pending, so its
		 * timeout no longer applies.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	/* Cache the negotiated connection parameters */
	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* If this connection was triggered by a pending action entry,
	 * clear the action and drop the extra reference that the params
	 * object was holding on the connection.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4344
4345 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4346 struct sk_buff *skb)
4347 {
4348 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4349 struct hci_conn *conn;
4350
4351 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4352
4353 if (ev->status)
4354 return;
4355
4356 hci_dev_lock(hdev);
4357
4358 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4359 if (conn) {
4360 conn->le_conn_interval = le16_to_cpu(ev->interval);
4361 conn->le_conn_latency = le16_to_cpu(ev->latency);
4362 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4363 }
4364
4365 hci_dev_unlock(hdev);
4366 }
4367
/* Check whether an advertising report should trigger an automatic
 * connection attempt and, if so, start it.  Returns the new connection
 * object or NULL when no connection was initiated.
 *
 * This function requires the caller holds hdev->lock.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return NULL;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return NULL;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connections from slave device are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		return NULL;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = hci_conn_get(conn);
		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
4447
4448 static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
4449 u8 bdaddr_type, bdaddr_t *direct_addr,
4450 u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
4451 {
4452 struct discovery_state *d = &hdev->discovery;
4453 struct smp_irk *irk;
4454 struct hci_conn *conn;
4455 bool match;
4456 u32 flags;
4457
4458 /* If the direct address is present, then this report is from
4459 * a LE Direct Advertising Report event. In that case it is
4460 * important to see if the address is matching the local
4461 * controller address.
4462 */
4463 if (direct_addr) {
4464 /* Only resolvable random addresses are valid for these
4465 * kind of reports and others can be ignored.
4466 */
4467 if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
4468 return;
4469
4470 /* If the controller is not using resolvable random
4471 * addresses, then this report can be ignored.
4472 */
4473 if (!test_bit(HCI_PRIVACY, &hdev->dev_flags))
4474 return;
4475
4476 /* If the local IRK of the controller does not match
4477 * with the resolvable random address provided, then
4478 * this report can be ignored.
4479 */
4480 if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
4481 return;
4482 }
4483
4484 /* Check if we need to convert to identity address */
4485 irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
4486 if (irk) {
4487 bdaddr = &irk->bdaddr;
4488 bdaddr_type = irk->addr_type;
4489 }
4490
4491 /* Check if we have been requested to connect to this device */
4492 conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
4493 if (conn && type == LE_ADV_IND) {
4494 /* Store report for later inclusion by
4495 * mgmt_device_connected
4496 */
4497 memcpy(conn->le_adv_data, data, len);
4498 conn->le_adv_data_len = len;
4499 }
4500
4501 /* Passive scanning shouldn't trigger any device found events,
4502 * except for devices marked as CONN_REPORT for which we do send
4503 * device found events.
4504 */
4505 if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
4506 if (type == LE_ADV_DIRECT_IND)
4507 return;
4508
4509 if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
4510 bdaddr, bdaddr_type))
4511 return;
4512
4513 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
4514 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4515 else
4516 flags = 0;
4517 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4518 rssi, flags, data, len, NULL, 0);
4519 return;
4520 }
4521
4522 /* When receiving non-connectable or scannable undirected
4523 * advertising reports, this means that the remote device is
4524 * not connectable and then clearly indicate this in the
4525 * device found event.
4526 *
4527 * When receiving a scan response, then there is no way to
4528 * know if the remote device is connectable or not. However
4529 * since scan responses are merged with a previously seen
4530 * advertising report, the flags field from that report
4531 * will be used.
4532 *
4533 * In the really unlikely case that a controller get confused
4534 * and just sends a scan response event, then it is marked as
4535 * not connectable as well.
4536 */
4537 if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
4538 type == LE_ADV_SCAN_RSP)
4539 flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
4540 else
4541 flags = 0;
4542
4543 /* If there's nothing pending either store the data from this
4544 * event or send an immediate device found event if the data
4545 * should not be stored for later.
4546 */
4547 if (!has_pending_adv_report(hdev)) {
4548 /* If the report will trigger a SCAN_REQ store it for
4549 * later merging.
4550 */
4551 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4552 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4553 rssi, flags, data, len);
4554 return;
4555 }
4556
4557 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4558 rssi, flags, data, len, NULL, 0);
4559 return;
4560 }
4561
4562 /* Check if the pending report is for the same device as the new one */
4563 match = (!bacmp(bdaddr, &d->last_adv_addr) &&
4564 bdaddr_type == d->last_adv_addr_type);
4565
4566 /* If the pending data doesn't match this report or this isn't a
4567 * scan response (e.g. we got a duplicate ADV_IND) then force
4568 * sending of the pending data.
4569 */
4570 if (type != LE_ADV_SCAN_RSP || !match) {
4571 /* Send out whatever is in the cache, but skip duplicates */
4572 if (!match)
4573 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4574 d->last_adv_addr_type, NULL,
4575 d->last_adv_rssi, d->last_adv_flags,
4576 d->last_adv_data,
4577 d->last_adv_data_len, NULL, 0);
4578
4579 /* If the new report will trigger a SCAN_REQ store it for
4580 * later merging.
4581 */
4582 if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
4583 store_pending_adv_report(hdev, bdaddr, bdaddr_type,
4584 rssi, flags, data, len);
4585 return;
4586 }
4587
4588 /* The advertising reports cannot be merged, so clear
4589 * the pending report and send out a device found event.
4590 */
4591 clear_pending_adv_report(hdev);
4592 mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
4593 rssi, flags, data, len, NULL, 0);
4594 return;
4595 }
4596
4597 /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
4598 * the new event is a SCAN_RSP. We can therefore proceed with
4599 * sending a merged device found event.
4600 */
4601 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
4602 d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
4603 d->last_adv_data, d->last_adv_data_len, data, len);
4604 clear_pending_adv_report(hdev);
4605 }
4606
4607 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4608 {
4609 u8 num_reports = skb->data[0];
4610 void *ptr = &skb->data[1];
4611
4612 hci_dev_lock(hdev);
4613
4614 while (num_reports--) {
4615 struct hci_ev_le_advertising_info *ev = ptr;
4616 s8 rssi;
4617
4618 rssi = ev->data[ev->length];
4619 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4620 ev->bdaddr_type, NULL, 0, rssi,
4621 ev->data, ev->length);
4622
4623 ptr += sizeof(*ev) + ev->length + 1;
4624 }
4625
4626 hci_dev_unlock(hdev);
4627 }
4628
/* LE Long Term Key Request event handler.
 *
 * Looks up a stored LTK (or STK) matching the connection and the
 * EDiv/Rand values from the event, and replies with either the key or
 * a negative reply when no suitable key is found.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		/* The STK is single-use; remove it once consumed */
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4691
4692 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4693 u8 reason)
4694 {
4695 struct hci_cp_le_conn_param_req_neg_reply cp;
4696
4697 cp.handle = cpu_to_le16(handle);
4698 cp.reason = reason;
4699
4700 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4701 &cp);
4702 }
4703
4704 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4705 struct sk_buff *skb)
4706 {
4707 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4708 struct hci_cp_le_conn_param_req_reply cp;
4709 struct hci_conn *hcon;
4710 u16 handle, min, max, latency, timeout;
4711
4712 handle = le16_to_cpu(ev->handle);
4713 min = le16_to_cpu(ev->interval_min);
4714 max = le16_to_cpu(ev->interval_max);
4715 latency = le16_to_cpu(ev->latency);
4716 timeout = le16_to_cpu(ev->timeout);
4717
4718 hcon = hci_conn_hash_lookup_handle(hdev, handle);
4719 if (!hcon || hcon->state != BT_CONNECTED)
4720 return send_conn_param_neg_reply(hdev, handle,
4721 HCI_ERROR_UNKNOWN_CONN_ID);
4722
4723 if (hci_check_conn_params(min, max, latency, timeout))
4724 return send_conn_param_neg_reply(hdev, handle,
4725 HCI_ERROR_INVALID_LL_PARAMS);
4726
4727 if (hcon->role == HCI_ROLE_MASTER) {
4728 struct hci_conn_params *params;
4729 u8 store_hint;
4730
4731 hci_dev_lock(hdev);
4732
4733 params = hci_conn_params_lookup(hdev, &hcon->dst,
4734 hcon->dst_type);
4735 if (params) {
4736 params->conn_min_interval = min;
4737 params->conn_max_interval = max;
4738 params->conn_latency = latency;
4739 params->supervision_timeout = timeout;
4740 store_hint = 0x01;
4741 } else{
4742 store_hint = 0x00;
4743 }
4744
4745 hci_dev_unlock(hdev);
4746
4747 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4748 store_hint, min, max, latency, timeout);
4749 }
4750
4751 cp.handle = ev->handle;
4752 cp.interval_min = ev->interval_min;
4753 cp.interval_max = ev->interval_max;
4754 cp.latency = ev->latency;
4755 cp.timeout = ev->timeout;
4756 cp.min_ce_len = 0;
4757 cp.max_ce_len = 0;
4758
4759 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4760 }
4761
4762 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
4763 struct sk_buff *skb)
4764 {
4765 u8 num_reports = skb->data[0];
4766 void *ptr = &skb->data[1];
4767
4768 hci_dev_lock(hdev);
4769
4770 while (num_reports--) {
4771 struct hci_ev_le_direct_adv_info *ev = ptr;
4772
4773 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4774 ev->bdaddr_type, &ev->direct_addr,
4775 ev->direct_addr_type, ev->rssi, NULL, 0);
4776
4777 ptr += sizeof(*ev);
4778 }
4779
4780 hci_dev_unlock(hdev);
4781 }
4782
/* Demultiplex an HCI LE Meta event to its subevent handler.
 *
 * The LE Meta header is pulled off the skb first so each handler sees
 * its subevent-specific parameters at skb->data.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	default:
		/* Unknown subevents are silently ignored */
		break;
	}
}
4818
4819 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4820 {
4821 struct hci_ev_channel_selected *ev = (void *) skb->data;
4822 struct hci_conn *hcon;
4823
4824 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4825
4826 skb_pull(skb, sizeof(*ev));
4827
4828 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4829 if (!hcon)
4830 return;
4831
4832 amp_read_loc_assoc_final_data(hdev, hcon);
4833 }
4834
/* Main entry point for incoming HCI event packets.
 *
 * Dispatches the event to the matching handler based on the event code
 * in the HCI event header, then frees the skb and bumps the RX event
 * statistics counter.  Consumes @skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hci_req_pending(hdev)) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	/* Strip the event header so handlers see their parameters */
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the pending command was waiting for this specific event
	 * (rather than Command Complete/Status), complete the request
	 * now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		/* Unhandled event codes are only logged */
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
This page took 0.126263 seconds and 6 git commands to generate.