Bluetooth: Rename hci_find_ltk_by_addr to hci_find_ltk
[deliverable/linux.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "a2mp.h"
34 #include "amp.h"
35 #include "smp.h"
36
37 /* Handle HCI Event packets */
38
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * On success: clear the inquiry-in-progress flag, wake any task waiting
 * on that bit, move discovery to DISCOVERY_STOPPED and retry connection
 * attempts that were deferred while inquiry was running.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	/* Barrier must sit between clear_bit() and wake_up_bit() so the
	 * waiter observes the cleared bit before being woken.
	 */
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
58
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60 {
61 __u8 status = *((__u8 *) skb->data);
62
63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
64
65 if (status)
66 return;
67
68 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69 }
70
71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 {
73 __u8 status = *((__u8 *) skb->data);
74
75 BT_DBG("%s status 0x%2.2x", hdev->name, status);
76
77 if (status)
78 return;
79
80 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81
82 hci_conn_check_pending(hdev);
83 }
84
/* Command Complete handler for HCI_OP_REMOTE_NAME_REQ_CANCEL.
 * Nothing to update; the cancellation outcome is handled via the
 * Remote Name Request Complete event. Only trace the call.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
90
/* Command Complete handler for HCI_OP_ROLE_DISCOVERY.
 *
 * On success, cache the reported role (master/slave) on the connection
 * identified by the returned handle. Lookup and update happen under the
 * device lock to keep the conn hash stable.
 */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}
109
/* Command Complete handler for HCI_OP_READ_LINK_POLICY.
 *
 * On success, store the controller-reported link policy on the matching
 * connection, looked up by handle under the device lock.
 */
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}
128
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY.
 *
 * The response only echoes status + handle; the policy value that was
 * written must be recovered from the sent command payload, where it sits
 * 2 bytes in (after the 16-bit connection handle).
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
152
153 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
154 struct sk_buff *skb)
155 {
156 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
157
158 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
159
160 if (rp->status)
161 return;
162
163 hdev->link_policy = __le16_to_cpu(rp->policy);
164 }
165
166 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
167 struct sk_buff *skb)
168 {
169 __u8 status = *((__u8 *) skb->data);
170 void *sent;
171
172 BT_DBG("%s status 0x%2.2x", hdev->name, status);
173
174 if (status)
175 return;
176
177 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
178 if (!sent)
179 return;
180
181 hdev->link_policy = get_unaligned_le16(sent);
182 }
183
/* Command Complete handler for HCI_OP_RESET.
 *
 * HCI_RESET is cleared unconditionally (even on failure) so further
 * resets aren't blocked. On success, all non-persistent device state is
 * returned to its post-reset defaults: discovery, TX power caches,
 * advertising/scan-response data, scan type, SSP debug mode and the LE
 * white list mirror.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* The controller forgot its white list; drop the host's copy too */
	hci_bdaddr_list_clear(&hdev->le_white_list);
}
214
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 *
 * When mgmt is in control, always report completion (including failures)
 * so the pending mgmt command can be resolved; otherwise only cache the
 * name locally on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
235
236 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
237 {
238 struct hci_rp_read_local_name *rp = (void *) skb->data;
239
240 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
241
242 if (rp->status)
243 return;
244
245 if (test_bit(HCI_SETUP, &hdev->dev_flags))
246 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
247 }
248
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE.
 *
 * On success, mirror the written authentication setting into HCI_AUTH.
 * Regardless of status, notify mgmt (if active) so a pending
 * mgmt command can complete with the real outcome.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);
}
272
273 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
274 {
275 __u8 status = *((__u8 *) skb->data);
276 __u8 param;
277 void *sent;
278
279 BT_DBG("%s status 0x%2.2x", hdev->name, status);
280
281 if (status)
282 return;
283
284 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
285 if (!sent)
286 return;
287
288 param = *((__u8 *) sent);
289
290 if (param)
291 set_bit(HCI_ENCRYPT, &hdev->flags);
292 else
293 clear_bit(HCI_ENCRYPT, &hdev->flags);
294 }
295
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * On failure the discoverable timeout is cleared (the requested scan
 * state never took effect). On success, HCI_ISCAN/HCI_PSCAN are updated
 * to match the inquiry-scan and page-scan bits of the sent parameter.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
330
331 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
332 {
333 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
334
335 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
336
337 if (rp->status)
338 return;
339
340 memcpy(hdev->dev_class, rp->dev_class, 3);
341
342 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
343 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
344 }
345
/* Command Complete handler for HCI_OP_WRITE_CLASS_OF_DEV.
 *
 * On success, mirror the written class locally; in either case, let mgmt
 * (when active) complete its pending set-class command with the status.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
367
368 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
369 {
370 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
371 __u16 setting;
372
373 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
374
375 if (rp->status)
376 return;
377
378 setting = __le16_to_cpu(rp->voice_setting);
379
380 if (hdev->voice_setting == setting)
381 return;
382
383 hdev->voice_setting = setting;
384
385 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
386
387 if (hdev->notify)
388 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
389 }
390
391 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
392 struct sk_buff *skb)
393 {
394 __u8 status = *((__u8 *) skb->data);
395 __u16 setting;
396 void *sent;
397
398 BT_DBG("%s status 0x%2.2x", hdev->name, status);
399
400 if (status)
401 return;
402
403 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
404 if (!sent)
405 return;
406
407 setting = get_unaligned_le16(sent);
408
409 if (hdev->voice_setting == setting)
410 return;
411
412 hdev->voice_setting = setting;
413
414 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
415
416 if (hdev->notify)
417 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
418 }
419
420 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
421 struct sk_buff *skb)
422 {
423 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
424
425 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
426
427 if (rp->status)
428 return;
429
430 hdev->num_iac = rp->num_iac;
431
432 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
433 }
434
/* Command Complete handler for HCI_OP_WRITE_SSP_MODE.
 *
 * On success, mirror the written SSP mode into the host-features page.
 * With mgmt active, always report the outcome; without mgmt, only toggle
 * HCI_SSP_ENABLED on success.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
462
/* Command Complete handler for HCI_OP_WRITE_SC_SUPPORT.
 *
 * Mirrors hci_cc_write_ssp_mode() but for Secure Connections: on
 * success, update the host-features page; with mgmt active report the
 * outcome, otherwise toggle HCI_SC_ENABLED on success only.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_sc_enable_complete(hdev, sent->support, status);
	else if (!status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}
}
490
491 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
492 {
493 struct hci_rp_read_local_version *rp = (void *) skb->data;
494
495 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
496
497 if (rp->status)
498 return;
499
500 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
501 hdev->hci_ver = rp->hci_ver;
502 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
503 hdev->lmp_ver = rp->lmp_ver;
504 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
505 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
506 }
507 }
508
509 static void hci_cc_read_local_commands(struct hci_dev *hdev,
510 struct sk_buff *skb)
511 {
512 struct hci_rp_read_local_commands *rp = (void *) skb->data;
513
514 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
515
516 if (rp->status)
517 return;
518
519 if (test_bit(HCI_SETUP, &hdev->dev_flags))
520 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
521 }
522
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES.
 *
 * Caches LMP feature page 0 and derives the default ACL/SCO/eSCO packet
 * type masks from the individual feature bits, so only packet types the
 * controller actually supports are attempted.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
572
573 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
574 struct sk_buff *skb)
575 {
576 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
577
578 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
579
580 if (rp->status)
581 return;
582
583 if (hdev->max_page < rp->max_page)
584 hdev->max_page = rp->max_page;
585
586 if (rp->page < HCI_MAX_PAGES)
587 memcpy(hdev->features[rp->page], rp->features, 8);
588 }
589
590 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
591 struct sk_buff *skb)
592 {
593 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
594
595 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
596
597 if (rp->status)
598 return;
599
600 hdev->flow_ctl_mode = rp->mode;
601 }
602
603 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
604 {
605 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
606
607 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
608
609 if (rp->status)
610 return;
611
612 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
613 hdev->sco_mtu = rp->sco_mtu;
614 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
615 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
616
617 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
618 hdev->sco_mtu = 64;
619 hdev->sco_pkts = 8;
620 }
621
622 hdev->acl_cnt = hdev->acl_pkts;
623 hdev->sco_cnt = hdev->sco_pkts;
624
625 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
626 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
627 }
628
629 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
630 {
631 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
632
633 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
634
635 if (rp->status)
636 return;
637
638 if (test_bit(HCI_INIT, &hdev->flags))
639 bacpy(&hdev->bdaddr, &rp->bdaddr);
640
641 if (test_bit(HCI_SETUP, &hdev->dev_flags))
642 bacpy(&hdev->setup_addr, &rp->bdaddr);
643 }
644
645 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
646 struct sk_buff *skb)
647 {
648 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
649
650 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
651
652 if (rp->status)
653 return;
654
655 if (test_bit(HCI_INIT, &hdev->flags)) {
656 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
657 hdev->page_scan_window = __le16_to_cpu(rp->window);
658 }
659 }
660
661 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
662 struct sk_buff *skb)
663 {
664 u8 status = *((u8 *) skb->data);
665 struct hci_cp_write_page_scan_activity *sent;
666
667 BT_DBG("%s status 0x%2.2x", hdev->name, status);
668
669 if (status)
670 return;
671
672 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
673 if (!sent)
674 return;
675
676 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
677 hdev->page_scan_window = __le16_to_cpu(sent->window);
678 }
679
680 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
681 struct sk_buff *skb)
682 {
683 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
684
685 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
686
687 if (rp->status)
688 return;
689
690 if (test_bit(HCI_INIT, &hdev->flags))
691 hdev->page_scan_type = rp->type;
692 }
693
694 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
695 struct sk_buff *skb)
696 {
697 u8 status = *((u8 *) skb->data);
698 u8 *type;
699
700 BT_DBG("%s status 0x%2.2x", hdev->name, status);
701
702 if (status)
703 return;
704
705 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
706 if (type)
707 hdev->page_scan_type = *type;
708 }
709
710 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
711 struct sk_buff *skb)
712 {
713 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
714
715 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
716
717 if (rp->status)
718 return;
719
720 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
721 hdev->block_len = __le16_to_cpu(rp->block_len);
722 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
723
724 hdev->block_cnt = hdev->num_blocks;
725
726 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
727 hdev->block_cnt, hdev->block_len);
728 }
729
/* Command Complete handler for HCI_OP_READ_CLOCK.
 *
 * Depending on the "which" field of the sent command, the returned clock
 * is either the local clock (stored on the device) or a piconet clock
 * (stored on the connection matching the returned handle, together with
 * its accuracy). The skb length is validated before touching the reply.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 means the local clock was requested */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
764
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO.
 *
 * On success, cache all AMP controller capabilities on the device.
 * The A2MP get-info response is sent in every case (including failure),
 * which is why the error path falls through to a2mp_send_getinfo_rsp().
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
789
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_ASSOC.
 *
 * The AMP assoc data may arrive in several fragments. While the reported
 * remaining length exceeds the fragment in this reply, accumulate the
 * fragment into hdev->loc_assoc and request the next one. Once the final
 * fragment arrives (or on error), send the A2MP responses.
 */
static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
	struct amp_assoc *assoc = &hdev->loc_assoc;
	size_t rem_len, frag_len;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	frag_len = skb->len - sizeof(*rp);
	rem_len = __le16_to_cpu(rp->rem_len);

	if (rem_len > frag_len) {
		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);

		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
		assoc->offset += frag_len;

		/* Read other fragments */
		amp_read_loc_assoc_frag(hdev, rp->phy_handle);

		return;
	}

	/* Last fragment: finalize total length and reset write offset */
	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
	assoc->len = assoc->offset + rem_len;
	assoc->offset = 0;

a2mp_rsp:
	/* Send A2MP Rsp when all fragments are received */
	a2mp_send_getampassoc_rsp(hdev, rp->status);
	a2mp_send_create_phy_link_req(hdev, rp->status);
}
826
827 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
828 struct sk_buff *skb)
829 {
830 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
831
832 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
833
834 if (rp->status)
835 return;
836
837 hdev->inq_tx_power = rp->tx_power;
838 }
839
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY.
 *
 * Always let mgmt (if active) resolve its pending PIN reply first, even
 * on failure. On success, remember the PIN length on the ACL connection
 * so it can later feed key-type decisions.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
867
/* Command Complete handler for HCI_OP_PIN_CODE_NEG_REPLY.
 * Forwards the outcome to mgmt (if active) so its pending negative PIN
 * reply command can complete.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
882
883 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
884 struct sk_buff *skb)
885 {
886 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
887
888 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
889
890 if (rp->status)
891 return;
892
893 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
894 hdev->le_pkts = rp->le_max_pkt;
895
896 hdev->le_cnt = hdev->le_pkts;
897
898 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
899 }
900
901 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
902 struct sk_buff *skb)
903 {
904 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
905
906 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
907
908 if (rp->status)
909 return;
910
911 memcpy(hdev->le_features, rp->features, 8);
912 }
913
914 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
915 struct sk_buff *skb)
916 {
917 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
918
919 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
920
921 if (rp->status)
922 return;
923
924 hdev->adv_tx_power = rp->tx_power;
925 }
926
/* Command Complete handler for HCI_OP_USER_CONFIRM_REPLY.
 * Forwards the outcome to mgmt (if active) so its pending user-confirm
 * reply can complete. BR/EDR only, hence ACL_LINK / addr type 0.
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
941
/* Command Complete handler for HCI_OP_USER_CONFIRM_NEG_REPLY.
 * Forwards the outcome to mgmt (if active) so its pending negative
 * user-confirm reply can complete.
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
957
/* Command Complete handler for HCI_OP_USER_PASSKEY_REPLY.
 * Forwards the outcome to mgmt (if active) so its pending user-passkey
 * reply can complete.
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
972
/* Command Complete handler for HCI_OP_USER_PASSKEY_NEG_REPLY.
 * Forwards the outcome to mgmt (if active) so its pending negative
 * user-passkey reply can complete.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
988
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_DATA.
 * Hands the 192-bit hash/randomizer (no 256-bit variants for this
 * command, hence the NULLs) plus status to mgmt.
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
					  rp->status);
	hci_dev_unlock(hdev);
}
1001
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_EXT_DATA.
 * Extended variant: hands both the 192-bit and 256-bit (Secure
 * Connections) hash/randomizer pairs plus status to mgmt.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
					  rp->hash256, rp->rand256,
					  rp->status);
	hci_dev_unlock(hdev);
}
1015
1016
/* Command Complete handler for HCI_OP_LE_SET_RANDOM_ADDR.
 * On success, mirror the random address that was programmed (taken from
 * the sent command) into hdev->random_addr under the device lock.
 */
static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}
1037
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE.
 *
 * On success, track advertising state in HCI_LE_ADV. When advertising
 * was just enabled and an LE connection attempt is pending (we are the
 * peripheral waiting to be connected to), arm its connection timeout so
 * the attempt cannot hang forever.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		set_bit(HCI_LE_ADV, &hdev->dev_flags);

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		clear_bit(HCI_LE_ADV, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
1072
/* Command Complete handler for HCI_OP_LE_SET_SCAN_PARAM.
 * On success, remember the scan type (active/passive) that was
 * programmed, taken from the sent command.
 */
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}
1093
1094 static bool has_pending_adv_report(struct hci_dev *hdev)
1095 {
1096 struct discovery_state *d = &hdev->discovery;
1097
1098 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1099 }
1100
1101 static void clear_pending_adv_report(struct hci_dev *hdev)
1102 {
1103 struct discovery_state *d = &hdev->discovery;
1104
1105 bacpy(&d->last_adv_addr, BDADDR_ANY);
1106 d->last_adv_data_len = 0;
1107 }
1108
1109 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1110 u8 bdaddr_type, s8 rssi, u32 flags,
1111 u8 *data, u8 len)
1112 {
1113 struct discovery_state *d = &hdev->discovery;
1114
1115 bacpy(&d->last_adv_addr, bdaddr);
1116 d->last_adv_addr_type = bdaddr_type;
1117 d->last_adv_rssi = rssi;
1118 d->last_adv_flags = flags;
1119 memcpy(d->last_adv_data, data, len);
1120 d->last_adv_data_len = len;
1121 }
1122
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * Enable: set HCI_LE_SCAN and, for active scans, discard any buffered
 * advertising report from a previous scan.
 *
 * Disable: flush a still-buffered advertising report to mgmt, cancel
 * the scan-disable timer, clear HCI_LE_SCAN, and either finish
 * discovery (scan interrupted by a connect request) or re-enable
 * advertising that active scanning had suppressed.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1188
1189 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1190 struct sk_buff *skb)
1191 {
1192 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1193
1194 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1195
1196 if (rp->status)
1197 return;
1198
1199 hdev->le_white_list_size = rp->size;
1200 }
1201
1202 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1203 struct sk_buff *skb)
1204 {
1205 __u8 status = *((__u8 *) skb->data);
1206
1207 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1208
1209 if (status)
1210 return;
1211
1212 hci_bdaddr_list_clear(&hdev->le_white_list);
1213 }
1214
/* Command complete handler for HCI_OP_LE_ADD_TO_WHITE_LIST.
 *
 * On success, mirror the controller's white list change in the host's
 * le_white_list so the two stay in sync.
 */
static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_add_to_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The complete event carries no address; recover it from the
	 * command parameters we originally sent.
	 */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
1233
/* Command complete handler for HCI_OP_LE_DEL_FROM_WHITE_LIST.
 *
 * On success, remove the same entry from the host-side mirror of the
 * controller's white list.
 */
static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The address being removed is only available in the command
	 * parameters we originally sent.
	 */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
1252
1253 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1254 struct sk_buff *skb)
1255 {
1256 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1257
1258 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1259
1260 if (rp->status)
1261 return;
1262
1263 memcpy(hdev->le_states, rp->le_states, 8);
1264 }
1265
/* Command complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * On success, update the cached host feature bits (features[1][0]) and the
 * corresponding dev_flags to match the LE / simultaneous LE+BR/EDR settings
 * that were just written to the controller.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the values we wrote from the sent command parameters. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
	} else {
		/* Disabling LE also invalidates any advertising state. */
		hdev->features[1][0] &= ~LMP_HOST_LE;
		clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
}
1295
/* Command complete handler for HCI_OP_LE_SET_ADV_PARAM: on success, record
 * the own-address type that advertising will now use.
 */
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The address type is only available in the command we sent. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}
1314
/* Command complete handler for writing a remote AMP association fragment:
 * on success, continue with the next fragment for this physical link.
 */
static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
	       hdev->name, rp->status, rp->phy_handle);

	if (rp->status)
		return;

	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
}
1328
/* Command complete handler for HCI_OP_READ_RSSI: store the reported RSSI
 * on the connection identified by the returned handle.
 */
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	/* The connection may have gone away since the command was sent. */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}
1347
/* Command complete handler for HCI_OP_READ_TX_POWER.
 *
 * The reply does not echo which power level was requested, so the type is
 * recovered from the sent command: 0x00 updates the current transmit power,
 * 0x01 the maximum transmit power of the connection.
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1381
1382 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1383 {
1384 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1385
1386 if (status) {
1387 hci_conn_check_pending(hdev);
1388 return;
1389 }
1390
1391 set_bit(HCI_INQUIRY, &hdev->flags);
1392 }
1393
/* Command status handler for HCI_OP_CREATE_CONN.
 *
 * On failure, either tear the pending connection down or park it in
 * BT_CONNECT2 for a retry; on success, make sure a hci_conn object exists
 * for the upcoming Connection Complete event.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c is "Command Disallowed": keep the connection
			 * around for up to two retries before giving up.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1431
/* Command status handler for HCI_OP_ADD_SCO.
 *
 * Only failures need handling here: find the SCO connection hanging off
 * the ACL link the command referenced, notify the upper layer and delete
 * it. Success is handled by the later (e)SCO Connection Complete event.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	/* cp->handle refers to the underlying ACL link, not the SCO. */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1466
/* Command status handler for HCI_OP_AUTH_REQUESTED.
 *
 * Only failures matter here: if authentication was requested while the
 * connection was still in BT_CONFIG, inform the upper layer and drop the
 * reference held for the configuration phase.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1493
/* Command status handler for HCI_OP_SET_CONN_ENCRYPT.
 *
 * Mirrors hci_cs_auth_requested(): on failure during BT_CONFIG, notify
 * the upper layer and release the config-phase reference.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1520
/* Decide whether an outgoing connection in the configuration phase still
 * needs authentication before it can be reported as established.
 *
 * Returns 1 if an Authentication Requested command should be sent, 0
 * otherwise.
 */
static int hci_outgoing_auth_needed(struct hci_dev *hdev,
				    struct hci_conn *conn)
{
	/* Only outgoing connections still being configured qualify. */
	if (conn->state != BT_CONFIG || !conn->out)
		return 0;

	/* SDP-only security never requires authentication. */
	if (conn->pending_sec_level == BT_SECURITY_SDP)
		return 0;

	/* Only request authentication for SSP connections or non-SSP
	 * devices with sec_level MEDIUM or HIGH or if MITM protection
	 * is requested (bit 0 of auth_type).
	 */
	if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
	    conn->pending_sec_level != BT_SECURITY_FIPS &&
	    conn->pending_sec_level != BT_SECURITY_HIGH &&
	    conn->pending_sec_level != BT_SECURITY_MEDIUM)
		return 0;

	return 1;
}
1542
/* Issue a Remote Name Request for an inquiry cache entry, reusing the
 * page-scan parameters learned during inquiry to speed up paging.
 *
 * Returns 0 on success or a negative error from hci_send_cmd().
 */
static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
1557
/* Kick off name resolution for the next discovered device that still
 * needs its name resolved.
 *
 * Returns true if a Remote Name Request was successfully sent (the entry
 * is then marked NAME_PENDING), false if nothing is left to resolve or
 * the request could not be sent.
 */
static bool hci_resolve_next_name(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (list_empty(&discov->resolve))
		return false;

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (!e)
		return false;

	if (hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		return true;
	}

	return false;
}
1577
/* Process the outcome of a remote name request during discovery.
 *
 * Reports the device to mgmt as connected if appropriate, records the
 * resolved (or unresolved) name, and either continues with the next
 * pending name or marks discovery as stopped.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		/* name == NULL means the request failed. */
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1626
/* Command status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only failures are handled here: update the pending-name bookkeeping and,
 * for an outgoing connection that still needs it, fall back to requesting
 * authentication directly.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* NULL name signals the resolution failed. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1669
/* Command status handler for HCI_OP_READ_REMOTE_FEATURES.
 *
 * On failure while the connection is still being configured, notify the
 * upper layer and release the config-phase reference.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1696
/* Command status handler for HCI_OP_READ_REMOTE_EXT_FEATURES.
 *
 * Same failure handling as hci_cs_read_remote_features(): abort the
 * configuration phase on error.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1723
/* Command status handler for HCI_OP_SETUP_SYNC_CONN.
 *
 * Only failures need handling: locate the SCO/eSCO connection attached to
 * the referenced ACL link, notify the upper layer and delete it. Success
 * is handled by the Synchronous Connection Complete event.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	/* cp->handle refers to the underlying ACL link. */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1758
/* Command status handler for HCI_OP_SNIFF_MODE.
 *
 * On failure, clear the pending mode-change flag and, if a SCO setup was
 * waiting on the mode change, let it proceed (reporting the error).
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1785
/* Command status handler for HCI_OP_EXIT_SNIFF_MODE.
 *
 * Mirrors hci_cs_sniff_mode(): on failure, clear the pending mode-change
 * flag and resume any SCO setup that was waiting on it.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1812
/* Command status handler for HCI_OP_DISCONNECT: on failure, report the
 * failed disconnect attempt to mgmt so userspace is not left waiting.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn)
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

	hci_dev_unlock(hdev);
}
1834
/* Command status handler for HCI_OP_CREATE_PHY_LINK (AMP).
 *
 * On failure, delete the connection created for the physical link; on
 * success, continue by writing the remote AMP association.
 */
static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_create_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (status) {
		struct hci_conn *hcon;

		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
		if (hcon)
			hci_conn_del(hcon);
	} else {
		amp_write_remote_assoc(hdev, cp->phy_handle);
	}

	hci_dev_unlock(hdev);
}
1859
/* Command status handler for HCI_OP_ACCEPT_PHY_LINK (AMP): on success,
 * continue by writing the remote AMP association for the accepted link.
 */
static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_accept_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
	if (!cp)
		return;

	amp_write_remote_assoc(hdev, cp->phy_handle);
}
1875
/* Command status handler for HCI_OP_LE_CREATE_CONN.
 *
 * On success, record the initiator/responder address information needed by
 * SMP and, for directed (non-white-list) connects, arm a timeout so the
 * attempt does not linger forever.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1926
/* Command status handler for HCI_OP_LE_START_ENC.
 *
 * If starting LE encryption failed on an established connection, tear the
 * link down with an authentication-failure reason rather than leaving an
 * unencrypted connection that was supposed to be encrypted.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
1956
/* Command status handler for HCI_OP_SWITCH_ROLE: if the role switch
 * request failed, clear the pending role-switch flag on the connection.
 */
static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_switch_role *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);

	hci_dev_unlock(hdev);
}
1979
/* Handle the Inquiry Complete event.
 *
 * Clears the HCI_INQUIRY flag (waking anyone waiting on it) and, when
 * mgmt-driven discovery is active, either moves on to name resolution or
 * marks discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state handling below only applies to mgmt users. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving names of discovered devices; if the first
	 * request cannot be sent, give up and stop discovery.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
2020
/* Handle the (legacy, non-RSSI) Inquiry Result event.
 *
 * The event payload is a response count byte followed by that many
 * inquiry_info records. Each record updates the inquiry cache and is
 * reported to mgmt as a found device.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results from periodic inquiry are ignored here. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		/* This event format carries neither RSSI nor SSP data. */
		data.rssi = 0x00;
		data.ssp_mode = 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, 0, flags, NULL, 0, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
2057
/* Handle the Connection Complete event for BR/EDR ACL and SCO links.
 *
 * On success the connection object is transitioned to BT_CONFIG (ACL) or
 * BT_CONNECTED (SCO), sysfs entries are added, auth/encrypt flags are
 * seeded from the adapter state, and follow-up commands (remote features,
 * packet type change) are issued. On failure the connection is closed and
 * the failure reported.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A controller may report SCO_LINK for a connection we
		 * requested as ESCO_LINK; retry the lookup and fix up
		 * the type.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) connections without a
			 * stored link key get the longer pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_page_scan(hdev, NULL);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2141
2142 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2143 {
2144 struct hci_cp_reject_conn_req cp;
2145
2146 bacpy(&cp.bdaddr, bdaddr);
2147 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2148 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2149 }
2150
/* Handle the Connection Request event.
 *
 * Applies accept/reject policy (protocol veto, blacklist, connectable
 * state vs. whitelist), then either accepts the ACL/SCO connection with
 * the appropriate command or defers the decision to the upper layer.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layers veto or defer the connection. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* When not connectable, only whitelisted devices may connect. */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		/* Default eSCO parameters: 8 kB/s in both directions, no
		 * latency limit, voice setting from the adapter, and
		 * "don't care" retransmission effort.
		 */
		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Upper layer asked to defer: let it confirm later. */
		conn->state = BT_CONNECT2;
		hci_proto_connect_cfm(conn, 0);
	}
}
2240
2241 static u8 hci_to_mgmt_reason(u8 err)
2242 {
2243 switch (err) {
2244 case HCI_ERROR_CONNECTION_TIMEOUT:
2245 return MGMT_DEV_DISCONN_TIMEOUT;
2246 case HCI_ERROR_REMOTE_USER_TERM:
2247 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2248 case HCI_ERROR_REMOTE_POWER_OFF:
2249 return MGMT_DEV_DISCONN_REMOTE;
2250 case HCI_ERROR_LOCAL_HOST_TERM:
2251 return MGMT_DEV_DISCONN_LOCAL_HOST;
2252 default:
2253 return MGMT_DEV_DISCONN_UNKNOWN;
2254 }
2255 }
2256
/* Handle the Disconnection Complete event.
 *
 * Reports the disconnect to mgmt, cleans up link keys and page scanning
 * for ACL links, requeues auto-connect parameters for LE devices that
 * should be reconnected, tears the connection down, and re-enables LE
 * advertising if the disconnect freed it up.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report a mgmt disconnect if we previously reported the
	 * device as connected.
	 */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_page_scan(hdev, NULL);
	}

	/* Re-arm auto-connection for devices configured for it. */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Save the link type before conn is freed below. */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2334
/* Handle the Authentication Complete event.
 *
 * Updates the connection's auth flags and security level, notifies mgmt
 * on failure, and drives the next step: start encryption (for SSP links
 * in BT_CONFIG, or when encryption was pending) or complete the
 * connection/authentication towards the upper layers.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy devices report success on re-authentication even
		 * though it is not possible; do not upgrade the security
		 * state in that case.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP mandates encryption after authentication. */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2399
/* Handle the Remote Name Request Complete event.
 *
 * Feeds the resolved name (or NULL on failure) into the pending-name
 * discovery bookkeeping and, for outgoing connections that still need it,
 * proceeds with authentication.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2441
/* Handle HCI Encryption Change event.
 *
 * Updates the connection's encryption/authentication state bits and
 * completes any pending security request.  On failure the connection
 * is torn down with an authentication-failure reason, and for LE links
 * the local RPA is invalidated so a fresh one gets generated.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR;
			 * encrypted LE links always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2513
2514 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2515 struct sk_buff *skb)
2516 {
2517 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2518 struct hci_conn *conn;
2519
2520 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2521
2522 hci_dev_lock(hdev);
2523
2524 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2525 if (conn) {
2526 if (!ev->status)
2527 set_bit(HCI_CONN_SECURE, &conn->flags);
2528
2529 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2530
2531 hci_key_change_cfm(conn, ev->status);
2532 }
2533
2534 hci_dev_unlock(hdev);
2535 }
2536
/* Handle HCI Read Remote Supported Features Complete event.
 *
 * Stores page 0 of the remote features and continues connection setup:
 * reads extended features when both sides support SSP, otherwise starts
 * remote-name resolution (or notifies mgmt of the connection) and, if
 * no outgoing authentication is needed, finalizes the connection.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* Remaining steps only apply while the connection is being set up */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2584
/* Handle HCI Command Complete event.
 *
 * Dispatches the embedded command-specific return parameters to the
 * matching hci_cc_* handler, completes the request tracking for the
 * opcode, and re-arms command-queue processing based on the number of
 * command credits the controller reports.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	u8 status = skb->data[sizeof(*ev)];	/* first return parameter */
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* HCI_OP_NOP completions do not correspond to a sent command,
	 * so there is no command timeout to cancel for them.
	 */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* A non-zero credit count allows sending the next queued command */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2864
/* Handle HCI Command Status event.
 *
 * Dispatches the status to the matching hci_cs_* handler for commands
 * that complete asynchronously, completes request tracking when the
 * command failed (or no follow-up event is expected), and re-arms
 * command-queue processing based on the reported credit count.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* HCI_OP_NOP status events carry no command; nothing to cancel */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete the request now if the command failed, or if it was
	 * not waiting for a specific completion event.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* A non-zero credit count allows sending the next queued command */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2961
2962 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
2963 {
2964 struct hci_ev_hardware_error *ev = (void *) skb->data;
2965
2966 BT_ERR("%s hardware error 0x%2.2x", hdev->name, ev->code);
2967 }
2968
2969 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2970 {
2971 struct hci_ev_role_change *ev = (void *) skb->data;
2972 struct hci_conn *conn;
2973
2974 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2975
2976 hci_dev_lock(hdev);
2977
2978 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2979 if (conn) {
2980 if (!ev->status)
2981 conn->role = ev->role;
2982
2983 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2984
2985 hci_role_switch_cfm(conn, ev->status, ev->role);
2986 }
2987
2988 hci_dev_unlock(hdev);
2989 }
2990
/* Handle HCI Number Of Completed Packets event (packet-based flow
 * control).  Returns controller buffer credits for each listed
 * connection handle and reschedules the TX work queue.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only valid in packet-based flow control mode;
	 * block-based controllers use Number Of Completed Blocks.
	 */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the handle array really fits in the received frame */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Credit the per-link-type quota, clamped to the
		 * controller-advertised maximum.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* LE shares the ACL quota when the controller
			 * has no dedicated LE buffers (le_pkts == 0).
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3056
3057 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3058 __u16 handle)
3059 {
3060 struct hci_chan *chan;
3061
3062 switch (hdev->dev_type) {
3063 case HCI_BREDR:
3064 return hci_conn_hash_lookup_handle(hdev, handle);
3065 case HCI_AMP:
3066 chan = hci_chan_lookup_handle(hdev, handle);
3067 if (chan)
3068 return chan->conn;
3069 break;
3070 default:
3071 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3072 break;
3073 }
3074
3075 return NULL;
3076 }
3077
/* Handle HCI Number Of Completed Data Blocks event (block-based flow
 * control).  Returns controller data-block credits for each listed
 * handle and reschedules the TX work queue.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* Only valid for controllers using block-based flow control */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the handle array really fits in the received frame */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handles may refer to AMP logical links, hence the
		 * dev_type-aware lookup helper.
		 */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Credit the shared block pool, clamped to the
			 * controller-advertised maximum.
			 */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3127
3128 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3129 {
3130 struct hci_ev_mode_change *ev = (void *) skb->data;
3131 struct hci_conn *conn;
3132
3133 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3134
3135 hci_dev_lock(hdev);
3136
3137 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3138 if (conn) {
3139 conn->mode = ev->mode;
3140
3141 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3142 &conn->flags)) {
3143 if (conn->mode == HCI_CM_ACTIVE)
3144 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3145 else
3146 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3147 }
3148
3149 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3150 hci_sco_setup(conn, ev->status);
3151 }
3152
3153 hci_dev_unlock(hdev);
3154 }
3155
/* Handle HCI PIN Code Request event.
 *
 * Rejects the request when the device is not bondable and the remote
 * side initiated authentication; otherwise forwards the request to
 * user space through mgmt, indicating whether a 16-digit PIN is
 * required for the pending security level.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* Keep the connection alive while pairing is in progress */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security requires a 16-digit PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3193
3194 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3195 {
3196 if (key_type == HCI_LK_CHANGED_COMBINATION)
3197 return;
3198
3199 conn->pin_length = pin_len;
3200 conn->key_type = key_type;
3201
3202 switch (key_type) {
3203 case HCI_LK_LOCAL_UNIT:
3204 case HCI_LK_REMOTE_UNIT:
3205 case HCI_LK_DEBUG_COMBINATION:
3206 return;
3207 case HCI_LK_COMBINATION:
3208 if (pin_len == 16)
3209 conn->pending_sec_level = BT_SECURITY_HIGH;
3210 else
3211 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3212 break;
3213 case HCI_LK_UNAUTH_COMBINATION_P192:
3214 case HCI_LK_UNAUTH_COMBINATION_P256:
3215 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3216 break;
3217 case HCI_LK_AUTH_COMBINATION_P192:
3218 conn->pending_sec_level = BT_SECURITY_HIGH;
3219 break;
3220 case HCI_LK_AUTH_COMBINATION_P256:
3221 conn->pending_sec_level = BT_SECURITY_FIPS;
3222 break;
3223 }
3224 }
3225
/* Handle HCI Link Key Request event.
 *
 * Looks up the stored link key for the peer and replies with it,
 * unless the key is too weak for the security level the pending
 * connection requires -- in that case a negative reply forces a fresh
 * pairing.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage is only maintained when mgmt is in use */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* Unauthenticated keys are not acceptable when MITM
		 * protection was requested (auth_type bit 0), unless
		 * the auth_type has not been set up yet (0xff).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Combination keys derived from short PINs do not meet
		 * high/FIPS security requirements.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3283
/* Handle HCI Link Key Notification event.
 *
 * Records the new link key in the kernel key list, notifies user space
 * via mgmt, and updates the connection's cached key information.
 * Debug keys are dropped from the kernel list unless explicitly kept.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	/* NOTE(review): pin_len stays 0 here even though conn_set_key()
	 * below uses conn->pin_length -- confirm the stored key is
	 * intentionally recorded with a zero PIN length.
	 */
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the link up long enough for the pairing dialogue */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	conn_set_key(conn, ev->key_type, conn->pin_length);

	/* Key storage is only maintained when mgmt is in use */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
3342
3343 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3344 {
3345 struct hci_ev_clock_offset *ev = (void *) skb->data;
3346 struct hci_conn *conn;
3347
3348 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3349
3350 hci_dev_lock(hdev);
3351
3352 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3353 if (conn && !ev->status) {
3354 struct inquiry_entry *ie;
3355
3356 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3357 if (ie) {
3358 ie->data.clock_offset = ev->clock_offset;
3359 ie->timestamp = jiffies;
3360 }
3361 }
3362
3363 hci_dev_unlock(hdev);
3364 }
3365
3366 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3367 {
3368 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3369 struct hci_conn *conn;
3370
3371 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3372
3373 hci_dev_lock(hdev);
3374
3375 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3376 if (conn && !ev->status)
3377 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3378
3379 hci_dev_unlock(hdev);
3380 }
3381
3382 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3383 {
3384 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3385 struct inquiry_entry *ie;
3386
3387 BT_DBG("%s", hdev->name);
3388
3389 hci_dev_lock(hdev);
3390
3391 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3392 if (ie) {
3393 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3394 ie->timestamp = jiffies;
3395 }
3396
3397 hci_dev_unlock(hdev);
3398 }
3399
/* Handle HCI Inquiry Result with RSSI event.
 *
 * Two wire formats exist: with and without an extra pscan_mode field
 * per response.  The format is detected from the per-response size;
 * each entry is merged into the inquiry cache and reported to mgmt.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results from periodic inquiry are intentionally ignored */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Per-response size mismatch means the variant that carries a
	 * pscan_mode field is in use.
	 */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3463
/* Handle HCI Read Remote Extended Features Complete event.
 *
 * Caches the requested feature page and, for page 1, updates the
 * SSP/SC state derived from the remote host features.  While the
 * connection is still in BT_CONFIG, continues setup with remote-name
 * resolution or finalizes the connection.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* Remaining steps only apply while the connection is being set up */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3527
/* Handle HCI Synchronous Connection Complete event.
 *
 * Finalizes SCO/eSCO connection setup.  For a set of well-known
 * negotiation failures, retries with a downgraded (SCO-compatible)
 * packet type before giving up on the connection.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An eSCO request may have been downgraded to SCO by
		 * the controller; match the pending eSCO connection
		 * and adjust its type.
		 */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x10: /* Connection Accept Timeout */
	case 0x0d: /* Connection Rejected due to Limited Resources */
	case 0x11: /* Unsupported Feature or Parameter Value */
	case 0x1c: /* SCO interval rejected */
	case 0x1a: /* Unsupported Remote Feature */
	case 0x1f: /* Unspecified error */
	case 0x20: /* Unsupported LMP Parameter value */
		if (conn->out) {
			/* Retry with a less demanding packet type */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3585
3586 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3587 {
3588 size_t parsed = 0;
3589
3590 while (parsed < eir_len) {
3591 u8 field_len = eir[0];
3592
3593 if (field_len == 0)
3594 return parsed;
3595
3596 parsed += field_len + 1;
3597 eir += field_len + 1;
3598 }
3599
3600 return eir_len;
3601 }
3602
/* Handle HCI Extended Inquiry Result event.
 *
 * Merges each response (including its EIR payload) into the inquiry
 * cache and reports the found devices to mgmt, noting whether the EIR
 * data already contains the complete device name.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results from periodic inquiry are intentionally ignored */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;

		/* Skip a later name request if the EIR data already
		 * carries the complete name.
		 */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
3652
/* Handle HCI Encryption Key Refresh Complete event.
 *
 * For LE links this finalizes a pending security level change: on
 * success the negotiated security level takes effect, on failure the
 * established link is disconnected with an authentication failure.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link means encryption is
	 * broken, so tear the connection down.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Keep the connection alive for the disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3702
3703 static u8 hci_get_auth_req(struct hci_conn *conn)
3704 {
3705 /* If remote requests no-bonding follow that lead */
3706 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3707 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3708 return conn->remote_auth | (conn->auth_type & 0x01);
3709
3710 /* If both remote and local have enough IO capabilities, require
3711 * MITM protection
3712 */
3713 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3714 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3715 return conn->remote_auth | 0x01;
3716
3717 /* No MITM protection possible so ignore remote requirement */
3718 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3719 }
3720
/* Handle HCI IO Capability Request event.
 *
 * Decides whether pairing is allowed for the connection and responds
 * with either an IO Capability Reply (carrying local IO capability,
 * authentication requirement and OOB availability) or a negative
 * reply rejecting the pairing.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;

		/* Advertise OOB availability when data is stored and we
		 * are either the initiator or the remote indicated OOB.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3794
3795 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3796 {
3797 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3798 struct hci_conn *conn;
3799
3800 BT_DBG("%s", hdev->name);
3801
3802 hci_dev_lock(hdev);
3803
3804 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3805 if (!conn)
3806 goto unlock;
3807
3808 conn->remote_cap = ev->capability;
3809 conn->remote_auth = ev->authentication;
3810 if (ev->oob_data)
3811 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3812
3813 unlock:
3814 hci_dev_unlock(hdev);
3815 }
3816
/* Handle HCI User Confirmation Request event.
 *
 * Implements the numeric comparison decision logic: rejects the
 * request when required MITM protection cannot be provided,
 * auto-accepts (possibly after a configurable delay) when neither
 * side requires MITM, and otherwise forwards the request to user
 * space via mgmt for explicit confirmation.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* With a configured delay, accept from a delayed work
		 * item instead of replying immediately.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3891
3892 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3893 struct sk_buff *skb)
3894 {
3895 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3896
3897 BT_DBG("%s", hdev->name);
3898
3899 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3900 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3901 }
3902
3903 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3904 struct sk_buff *skb)
3905 {
3906 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3907 struct hci_conn *conn;
3908
3909 BT_DBG("%s", hdev->name);
3910
3911 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3912 if (!conn)
3913 return;
3914
3915 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3916 conn->passkey_entered = 0;
3917
3918 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3919 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3920 conn->dst_type, conn->passkey_notify,
3921 conn->passkey_entered);
3922 }
3923
3924 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3925 {
3926 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3927 struct hci_conn *conn;
3928
3929 BT_DBG("%s", hdev->name);
3930
3931 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3932 if (!conn)
3933 return;
3934
3935 switch (ev->type) {
3936 case HCI_KEYPRESS_STARTED:
3937 conn->passkey_entered = 0;
3938 return;
3939
3940 case HCI_KEYPRESS_ENTERED:
3941 conn->passkey_entered++;
3942 break;
3943
3944 case HCI_KEYPRESS_ERASED:
3945 conn->passkey_entered--;
3946 break;
3947
3948 case HCI_KEYPRESS_CLEARED:
3949 conn->passkey_entered = 0;
3950 break;
3951
3952 case HCI_KEYPRESS_COMPLETED:
3953 return;
3954 }
3955
3956 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3957 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3958 conn->dst_type, conn->passkey_notify,
3959 conn->passkey_entered);
3960 }
3961
/* Handle HCI Simple Pairing Complete event.
 *
 * Resets the cached remote authentication requirement and, when we
 * were not the initiator of authentication, reports pairing failures
 * to user space.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	/* Drops the reference taken in hci_io_capa_request_evt() */
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
3992
3993 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3994 struct sk_buff *skb)
3995 {
3996 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3997 struct inquiry_entry *ie;
3998 struct hci_conn *conn;
3999
4000 BT_DBG("%s", hdev->name);
4001
4002 hci_dev_lock(hdev);
4003
4004 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4005 if (conn)
4006 memcpy(conn->features[1], ev->features, 8);
4007
4008 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4009 if (ie)
4010 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4011
4012 hci_dev_unlock(hdev);
4013 }
4014
/* Handle HCI Remote OOB Data Request event.
 *
 * Replies with the locally stored out-of-band pairing data for the
 * remote device: the extended reply (192- and 256-bit values) when
 * BR/EDR Secure Connections is enabled, the legacy reply otherwise,
 * or a negative reply when no OOB data is stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		if (bredr_sc_enabled(hdev)) {
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
			memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.rand, data->rand192, sizeof(cp.rand));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4062
4063 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4064 struct sk_buff *skb)
4065 {
4066 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4067 struct hci_conn *hcon, *bredr_hcon;
4068
4069 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4070 ev->status);
4071
4072 hci_dev_lock(hdev);
4073
4074 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4075 if (!hcon) {
4076 hci_dev_unlock(hdev);
4077 return;
4078 }
4079
4080 if (ev->status) {
4081 hci_conn_del(hcon);
4082 hci_dev_unlock(hdev);
4083 return;
4084 }
4085
4086 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4087
4088 hcon->state = BT_CONNECTED;
4089 bacpy(&hcon->dst, &bredr_hcon->dst);
4090
4091 hci_conn_hold(hcon);
4092 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4093 hci_conn_drop(hcon);
4094
4095 hci_conn_add_sysfs(hcon);
4096
4097 amp_physical_cfm(bredr_hcon, hcon);
4098
4099 hci_dev_unlock(hdev);
4100 }
4101
/* Handle AMP Logical Link Complete event.
 *
 * Creates an hci_chan for the new logical link and, when a BR/EDR
 * L2CAP channel is waiting on the AMP manager, confirms the logical
 * link to L2CAP.
 *
 * NOTE(review): unlike the neighbouring event handlers this one does
 * not take hci_dev_lock while looking up and modifying state —
 * presumably protected by another mechanism; confirm.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Switch the channel MTU to the AMP controller's block
		 * MTU and confirm the logical link to L2CAP.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4139
4140 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4141 struct sk_buff *skb)
4142 {
4143 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4144 struct hci_chan *hchan;
4145
4146 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4147 le16_to_cpu(ev->handle), ev->status);
4148
4149 if (ev->status)
4150 return;
4151
4152 hci_dev_lock(hdev);
4153
4154 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4155 if (!hchan)
4156 goto unlock;
4157
4158 amp_destroy_logical_link(hchan, ev->reason);
4159
4160 unlock:
4161 hci_dev_unlock(hdev);
4162 }
4163
4164 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4165 struct sk_buff *skb)
4166 {
4167 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4168 struct hci_conn *hcon;
4169
4170 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4171
4172 if (ev->status)
4173 return;
4174
4175 hci_dev_lock(hdev);
4176
4177 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4178 if (hcon) {
4179 hcon->state = BT_CLOSED;
4180 hci_conn_del(hcon);
4181 }
4182
4183 hci_dev_unlock(hdev);
4184 }
4185
/* Handle HCI LE Connection Complete event.
 *
 * Finds or creates the hci_conn for the new LE link, fills in the
 * initiator/responder addresses, resolves the peer's identity address
 * via its IRK, and either fails the connection (non-zero status) or
 * completes the setup, notifies mgmt/L2CAP and clears any pending
 * auto-connect parameters for the device.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* An existing pending connection completed; stop its
		 * connection attempt timeout.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The device is now connected, so drop it from the pending
	 * auto-connection list and release the reference that was
	 * taken when the attempt was triggered.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	/* Re-evaluate passive scanning regardless of the outcome */
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4320
4321 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4322 struct sk_buff *skb)
4323 {
4324 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4325 struct hci_conn *conn;
4326
4327 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4328
4329 if (ev->status)
4330 return;
4331
4332 hci_dev_lock(hdev);
4333
4334 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4335 if (conn) {
4336 conn->le_conn_interval = le16_to_cpu(ev->interval);
4337 conn->le_conn_latency = le16_to_cpu(ev->latency);
4338 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4339 }
4340
4341 hci_dev_unlock(hdev);
4342 }
4343
/* This function requires the caller holds hdev->lock.
 *
 * Checks whether an advertising report from @addr should trigger an
 * auto-connection and, if so, initiates it. Returns the new hci_conn
 * on a started attempt, NULL otherwise (including the benign -EBUSY
 * case where another LE connection attempt is already in progress).
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return NULL;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return NULL;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connectioms from slave device are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		return NULL;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = hci_conn_get(conn);
		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
4423
/* Process a single LE advertising report.
 *
 * Resolves the advertiser's identity address, triggers a pending
 * auto-connection if one is configured for the device, and then
 * generates mgmt device_found events — merging an ADV_IND/ADV_SCAN_IND
 * with its subsequent SCAN_RSP when active scanning is in use.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
	if (conn && type == LE_ADV_IND) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4555
/* Handle HCI LE Advertising Report event.
 *
 * Iterates over the individual reports in the event and hands each
 * one to process_adv_report() together with the RSSI octet that
 * follows the advertising data.
 *
 * NOTE(review): num_reports and ev->length come straight from the
 * packet and are not validated against skb->len — presumably the
 * controller is trusted to send well-formed events; confirm.
 */
static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;
		s8 rssi;

		/* The RSSI octet directly follows the variable-length
		 * advertising data.
		 */
		rssi = ev->data[ev->length];
		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, rssi, ev->data, ev->length);

		ptr += sizeof(*ev) + ev->length + 1;
	}

	hci_dev_unlock(hdev);
}
4576
/* Handle HCI LE Long Term Key Request event.
 *
 * Looks up the LTK stored for the connection's peer and either hands
 * it to the controller with an LTK Reply (also updating the pending
 * security level and key size), or sends a negative reply when no
 * matching key is found.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4639
4640 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4641 u8 reason)
4642 {
4643 struct hci_cp_le_conn_param_req_neg_reply cp;
4644
4645 cp.handle = cpu_to_le16(handle);
4646 cp.reason = reason;
4647
4648 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4649 &cp);
4650 }
4651
4652 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4653 struct sk_buff *skb)
4654 {
4655 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4656 struct hci_cp_le_conn_param_req_reply cp;
4657 struct hci_conn *hcon;
4658 u16 handle, min, max, latency, timeout;
4659
4660 handle = le16_to_cpu(ev->handle);
4661 min = le16_to_cpu(ev->interval_min);
4662 max = le16_to_cpu(ev->interval_max);
4663 latency = le16_to_cpu(ev->latency);
4664 timeout = le16_to_cpu(ev->timeout);
4665
4666 hcon = hci_conn_hash_lookup_handle(hdev, handle);
4667 if (!hcon || hcon->state != BT_CONNECTED)
4668 return send_conn_param_neg_reply(hdev, handle,
4669 HCI_ERROR_UNKNOWN_CONN_ID);
4670
4671 if (hci_check_conn_params(min, max, latency, timeout))
4672 return send_conn_param_neg_reply(hdev, handle,
4673 HCI_ERROR_INVALID_LL_PARAMS);
4674
4675 if (hcon->role == HCI_ROLE_MASTER) {
4676 struct hci_conn_params *params;
4677 u8 store_hint;
4678
4679 hci_dev_lock(hdev);
4680
4681 params = hci_conn_params_lookup(hdev, &hcon->dst,
4682 hcon->dst_type);
4683 if (params) {
4684 params->conn_min_interval = min;
4685 params->conn_max_interval = max;
4686 params->conn_latency = latency;
4687 params->supervision_timeout = timeout;
4688 store_hint = 0x01;
4689 } else{
4690 store_hint = 0x00;
4691 }
4692
4693 hci_dev_unlock(hdev);
4694
4695 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4696 store_hint, min, max, latency, timeout);
4697 }
4698
4699 cp.handle = ev->handle;
4700 cp.interval_min = ev->interval_min;
4701 cp.interval_max = ev->interval_max;
4702 cp.latency = ev->latency;
4703 cp.timeout = ev->timeout;
4704 cp.min_ce_len = 0;
4705 cp.max_ce_len = 0;
4706
4707 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4708 }
4709
4710 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4711 {
4712 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4713
4714 skb_pull(skb, sizeof(*le_ev));
4715
4716 switch (le_ev->subevent) {
4717 case HCI_EV_LE_CONN_COMPLETE:
4718 hci_le_conn_complete_evt(hdev, skb);
4719 break;
4720
4721 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4722 hci_le_conn_update_complete_evt(hdev, skb);
4723 break;
4724
4725 case HCI_EV_LE_ADVERTISING_REPORT:
4726 hci_le_adv_report_evt(hdev, skb);
4727 break;
4728
4729 case HCI_EV_LE_LTK_REQ:
4730 hci_le_ltk_request_evt(hdev, skb);
4731 break;
4732
4733 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4734 hci_le_remote_conn_param_req_evt(hdev, skb);
4735 break;
4736
4737 default:
4738 break;
4739 }
4740 }
4741
4742 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4743 {
4744 struct hci_ev_channel_selected *ev = (void *) skb->data;
4745 struct hci_conn *hcon;
4746
4747 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4748
4749 skb_pull(skb, sizeof(*ev));
4750
4751 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4752 if (!hcon)
4753 return;
4754
4755 amp_read_loc_assoc_final_data(hdev, hcon);
4756 }
4757
/* Main HCI event entry point: called for every event packet received
 * from the controller. Optionally caches the event for a pending
 * synchronous request, completes the matching sent command if this
 * event is the one it was waiting for, and then dispatches to the
 * per-event handler. Consumes the skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hci_req_pending(hdev)) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	/* Strip the event header; handlers expect skb->data to point at
	 * their event-specific parameters.
	 */
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the last sent command was waiting for exactly this event,
	 * complete the request it belongs to.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	/* Dispatch to the per-event handler; unknown events are only
	 * logged at debug level.
	 */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}