/* Bluetooth: Trigger SMP for the appropriate LE CoC errors
 * [deliverable/linux.git] / net / bluetooth / hci_event.c
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "a2mp.h"
34 #include "amp.h"
35 #include "smp.h"
36
37 /* Handle HCI Event packets */
38
39 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
40 {
41 __u8 status = *((__u8 *) skb->data);
42
43 BT_DBG("%s status 0x%2.2x", hdev->name, status);
44
45 if (status)
46 return;
47
48 clear_bit(HCI_INQUIRY, &hdev->flags);
49 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
50 wake_up_bit(&hdev->flags, HCI_INQUIRY);
51
52 hci_dev_lock(hdev);
53 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
54 hci_dev_unlock(hdev);
55
56 hci_conn_check_pending(hdev);
57 }
58
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60 {
61 __u8 status = *((__u8 *) skb->data);
62
63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
64
65 if (status)
66 return;
67
68 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69 }
70
71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 {
73 __u8 status = *((__u8 *) skb->data);
74
75 BT_DBG("%s status 0x%2.2x", hdev->name, status);
76
77 if (status)
78 return;
79
80 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81
82 hci_conn_check_pending(hdev);
83 }
84
85 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
86 struct sk_buff *skb)
87 {
88 BT_DBG("%s", hdev->name);
89 }
90
91 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
92 {
93 struct hci_rp_role_discovery *rp = (void *) skb->data;
94 struct hci_conn *conn;
95
96 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
97
98 if (rp->status)
99 return;
100
101 hci_dev_lock(hdev);
102
103 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104 if (conn)
105 conn->role = rp->role;
106
107 hci_dev_unlock(hdev);
108 }
109
110 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
111 {
112 struct hci_rp_read_link_policy *rp = (void *) skb->data;
113 struct hci_conn *conn;
114
115 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
116
117 if (rp->status)
118 return;
119
120 hci_dev_lock(hdev);
121
122 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
123 if (conn)
124 conn->link_policy = __le16_to_cpu(rp->policy);
125
126 hci_dev_unlock(hdev);
127 }
128
129 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
130 {
131 struct hci_rp_write_link_policy *rp = (void *) skb->data;
132 struct hci_conn *conn;
133 void *sent;
134
135 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
136
137 if (rp->status)
138 return;
139
140 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
141 if (!sent)
142 return;
143
144 hci_dev_lock(hdev);
145
146 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
147 if (conn)
148 conn->link_policy = get_unaligned_le16(sent + 2);
149
150 hci_dev_unlock(hdev);
151 }
152
153 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
154 struct sk_buff *skb)
155 {
156 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
157
158 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
159
160 if (rp->status)
161 return;
162
163 hdev->link_policy = __le16_to_cpu(rp->policy);
164 }
165
166 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
167 struct sk_buff *skb)
168 {
169 __u8 status = *((__u8 *) skb->data);
170 void *sent;
171
172 BT_DBG("%s status 0x%2.2x", hdev->name, status);
173
174 if (status)
175 return;
176
177 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
178 if (!sent)
179 return;
180
181 hdev->link_policy = get_unaligned_le16(sent);
182 }
183
184 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
185 {
186 __u8 status = *((__u8 *) skb->data);
187
188 BT_DBG("%s status 0x%2.2x", hdev->name, status);
189
190 clear_bit(HCI_RESET, &hdev->flags);
191
192 if (status)
193 return;
194
195 /* Reset all non-persistent flags */
196 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
197
198 hdev->discovery.state = DISCOVERY_STOPPED;
199 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
200 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
201
202 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
203 hdev->adv_data_len = 0;
204
205 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
206 hdev->scan_rsp_data_len = 0;
207
208 hdev->le_scan_type = LE_SCAN_PASSIVE;
209
210 hdev->ssp_debug_mode = 0;
211
212 hci_bdaddr_list_clear(&hdev->le_white_list);
213 }
214
215 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
216 {
217 __u8 status = *((__u8 *) skb->data);
218 void *sent;
219
220 BT_DBG("%s status 0x%2.2x", hdev->name, status);
221
222 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
223 if (!sent)
224 return;
225
226 hci_dev_lock(hdev);
227
228 if (test_bit(HCI_MGMT, &hdev->dev_flags))
229 mgmt_set_local_name_complete(hdev, sent, status);
230 else if (!status)
231 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
232
233 hci_dev_unlock(hdev);
234 }
235
236 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
237 {
238 struct hci_rp_read_local_name *rp = (void *) skb->data;
239
240 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
241
242 if (rp->status)
243 return;
244
245 if (test_bit(HCI_SETUP, &hdev->dev_flags))
246 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
247 }
248
249 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
250 {
251 __u8 status = *((__u8 *) skb->data);
252 void *sent;
253
254 BT_DBG("%s status 0x%2.2x", hdev->name, status);
255
256 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
257 if (!sent)
258 return;
259
260 if (!status) {
261 __u8 param = *((__u8 *) sent);
262
263 if (param == AUTH_ENABLED)
264 set_bit(HCI_AUTH, &hdev->flags);
265 else
266 clear_bit(HCI_AUTH, &hdev->flags);
267 }
268
269 if (test_bit(HCI_MGMT, &hdev->dev_flags))
270 mgmt_auth_enable_complete(hdev, status);
271 }
272
273 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
274 {
275 __u8 status = *((__u8 *) skb->data);
276 __u8 param;
277 void *sent;
278
279 BT_DBG("%s status 0x%2.2x", hdev->name, status);
280
281 if (status)
282 return;
283
284 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
285 if (!sent)
286 return;
287
288 param = *((__u8 *) sent);
289
290 if (param)
291 set_bit(HCI_ENCRYPT, &hdev->flags);
292 else
293 clear_bit(HCI_ENCRYPT, &hdev->flags);
294 }
295
296 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
297 {
298 __u8 status = *((__u8 *) skb->data);
299 __u8 param;
300 void *sent;
301
302 BT_DBG("%s status 0x%2.2x", hdev->name, status);
303
304 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
305 if (!sent)
306 return;
307
308 param = *((__u8 *) sent);
309
310 hci_dev_lock(hdev);
311
312 if (status) {
313 hdev->discov_timeout = 0;
314 goto done;
315 }
316
317 if (param & SCAN_INQUIRY)
318 set_bit(HCI_ISCAN, &hdev->flags);
319 else
320 clear_bit(HCI_ISCAN, &hdev->flags);
321
322 if (param & SCAN_PAGE)
323 set_bit(HCI_PSCAN, &hdev->flags);
324 else
325 clear_bit(HCI_PSCAN, &hdev->flags);
326
327 done:
328 hci_dev_unlock(hdev);
329 }
330
331 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
332 {
333 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
334
335 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
336
337 if (rp->status)
338 return;
339
340 memcpy(hdev->dev_class, rp->dev_class, 3);
341
342 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
343 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
344 }
345
346 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
347 {
348 __u8 status = *((__u8 *) skb->data);
349 void *sent;
350
351 BT_DBG("%s status 0x%2.2x", hdev->name, status);
352
353 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
354 if (!sent)
355 return;
356
357 hci_dev_lock(hdev);
358
359 if (status == 0)
360 memcpy(hdev->dev_class, sent, 3);
361
362 if (test_bit(HCI_MGMT, &hdev->dev_flags))
363 mgmt_set_class_of_dev_complete(hdev, sent, status);
364
365 hci_dev_unlock(hdev);
366 }
367
368 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
369 {
370 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
371 __u16 setting;
372
373 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
374
375 if (rp->status)
376 return;
377
378 setting = __le16_to_cpu(rp->voice_setting);
379
380 if (hdev->voice_setting == setting)
381 return;
382
383 hdev->voice_setting = setting;
384
385 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
386
387 if (hdev->notify)
388 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
389 }
390
391 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
392 struct sk_buff *skb)
393 {
394 __u8 status = *((__u8 *) skb->data);
395 __u16 setting;
396 void *sent;
397
398 BT_DBG("%s status 0x%2.2x", hdev->name, status);
399
400 if (status)
401 return;
402
403 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
404 if (!sent)
405 return;
406
407 setting = get_unaligned_le16(sent);
408
409 if (hdev->voice_setting == setting)
410 return;
411
412 hdev->voice_setting = setting;
413
414 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
415
416 if (hdev->notify)
417 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
418 }
419
420 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
421 struct sk_buff *skb)
422 {
423 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
424
425 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
426
427 if (rp->status)
428 return;
429
430 hdev->num_iac = rp->num_iac;
431
432 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
433 }
434
435 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
436 {
437 __u8 status = *((__u8 *) skb->data);
438 struct hci_cp_write_ssp_mode *sent;
439
440 BT_DBG("%s status 0x%2.2x", hdev->name, status);
441
442 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
443 if (!sent)
444 return;
445
446 if (!status) {
447 if (sent->mode)
448 hdev->features[1][0] |= LMP_HOST_SSP;
449 else
450 hdev->features[1][0] &= ~LMP_HOST_SSP;
451 }
452
453 if (test_bit(HCI_MGMT, &hdev->dev_flags))
454 mgmt_ssp_enable_complete(hdev, sent->mode, status);
455 else if (!status) {
456 if (sent->mode)
457 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
458 else
459 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
460 }
461 }
462
463 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
464 {
465 u8 status = *((u8 *) skb->data);
466 struct hci_cp_write_sc_support *sent;
467
468 BT_DBG("%s status 0x%2.2x", hdev->name, status);
469
470 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
471 if (!sent)
472 return;
473
474 if (!status) {
475 if (sent->support)
476 hdev->features[1][0] |= LMP_HOST_SC;
477 else
478 hdev->features[1][0] &= ~LMP_HOST_SC;
479 }
480
481 if (test_bit(HCI_MGMT, &hdev->dev_flags))
482 mgmt_sc_enable_complete(hdev, sent->support, status);
483 else if (!status) {
484 if (sent->support)
485 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
486 else
487 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
488 }
489 }
490
491 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
492 {
493 struct hci_rp_read_local_version *rp = (void *) skb->data;
494
495 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
496
497 if (rp->status)
498 return;
499
500 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
501 hdev->hci_ver = rp->hci_ver;
502 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
503 hdev->lmp_ver = rp->lmp_ver;
504 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
505 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
506 }
507 }
508
509 static void hci_cc_read_local_commands(struct hci_dev *hdev,
510 struct sk_buff *skb)
511 {
512 struct hci_rp_read_local_commands *rp = (void *) skb->data;
513
514 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
515
516 if (rp->status)
517 return;
518
519 if (test_bit(HCI_SETUP, &hdev->dev_flags))
520 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
521 }
522
523 static void hci_cc_read_local_features(struct hci_dev *hdev,
524 struct sk_buff *skb)
525 {
526 struct hci_rp_read_local_features *rp = (void *) skb->data;
527
528 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
529
530 if (rp->status)
531 return;
532
533 memcpy(hdev->features, rp->features, 8);
534
535 /* Adjust default settings according to features
536 * supported by device. */
537
538 if (hdev->features[0][0] & LMP_3SLOT)
539 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
540
541 if (hdev->features[0][0] & LMP_5SLOT)
542 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
543
544 if (hdev->features[0][1] & LMP_HV2) {
545 hdev->pkt_type |= (HCI_HV2);
546 hdev->esco_type |= (ESCO_HV2);
547 }
548
549 if (hdev->features[0][1] & LMP_HV3) {
550 hdev->pkt_type |= (HCI_HV3);
551 hdev->esco_type |= (ESCO_HV3);
552 }
553
554 if (lmp_esco_capable(hdev))
555 hdev->esco_type |= (ESCO_EV3);
556
557 if (hdev->features[0][4] & LMP_EV4)
558 hdev->esco_type |= (ESCO_EV4);
559
560 if (hdev->features[0][4] & LMP_EV5)
561 hdev->esco_type |= (ESCO_EV5);
562
563 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
564 hdev->esco_type |= (ESCO_2EV3);
565
566 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
567 hdev->esco_type |= (ESCO_3EV3);
568
569 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
570 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
571 }
572
573 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
574 struct sk_buff *skb)
575 {
576 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
577
578 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
579
580 if (rp->status)
581 return;
582
583 if (hdev->max_page < rp->max_page)
584 hdev->max_page = rp->max_page;
585
586 if (rp->page < HCI_MAX_PAGES)
587 memcpy(hdev->features[rp->page], rp->features, 8);
588 }
589
590 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
591 struct sk_buff *skb)
592 {
593 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
594
595 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
596
597 if (rp->status)
598 return;
599
600 hdev->flow_ctl_mode = rp->mode;
601 }
602
603 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
604 {
605 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
606
607 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
608
609 if (rp->status)
610 return;
611
612 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
613 hdev->sco_mtu = rp->sco_mtu;
614 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
615 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
616
617 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
618 hdev->sco_mtu = 64;
619 hdev->sco_pkts = 8;
620 }
621
622 hdev->acl_cnt = hdev->acl_pkts;
623 hdev->sco_cnt = hdev->sco_pkts;
624
625 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
626 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
627 }
628
629 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
630 {
631 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
632
633 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
634
635 if (rp->status)
636 return;
637
638 if (test_bit(HCI_INIT, &hdev->flags))
639 bacpy(&hdev->bdaddr, &rp->bdaddr);
640
641 if (test_bit(HCI_SETUP, &hdev->dev_flags))
642 bacpy(&hdev->setup_addr, &rp->bdaddr);
643 }
644
645 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
646 struct sk_buff *skb)
647 {
648 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
649
650 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
651
652 if (rp->status)
653 return;
654
655 if (test_bit(HCI_INIT, &hdev->flags)) {
656 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
657 hdev->page_scan_window = __le16_to_cpu(rp->window);
658 }
659 }
660
661 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
662 struct sk_buff *skb)
663 {
664 u8 status = *((u8 *) skb->data);
665 struct hci_cp_write_page_scan_activity *sent;
666
667 BT_DBG("%s status 0x%2.2x", hdev->name, status);
668
669 if (status)
670 return;
671
672 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
673 if (!sent)
674 return;
675
676 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
677 hdev->page_scan_window = __le16_to_cpu(sent->window);
678 }
679
680 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
681 struct sk_buff *skb)
682 {
683 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
684
685 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
686
687 if (rp->status)
688 return;
689
690 if (test_bit(HCI_INIT, &hdev->flags))
691 hdev->page_scan_type = rp->type;
692 }
693
694 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
695 struct sk_buff *skb)
696 {
697 u8 status = *((u8 *) skb->data);
698 u8 *type;
699
700 BT_DBG("%s status 0x%2.2x", hdev->name, status);
701
702 if (status)
703 return;
704
705 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
706 if (type)
707 hdev->page_scan_type = *type;
708 }
709
710 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
711 struct sk_buff *skb)
712 {
713 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
714
715 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
716
717 if (rp->status)
718 return;
719
720 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
721 hdev->block_len = __le16_to_cpu(rp->block_len);
722 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
723
724 hdev->block_cnt = hdev->num_blocks;
725
726 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
727 hdev->block_cnt, hdev->block_len);
728 }
729
730 static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
731 {
732 struct hci_rp_read_clock *rp = (void *) skb->data;
733 struct hci_cp_read_clock *cp;
734 struct hci_conn *conn;
735
736 BT_DBG("%s", hdev->name);
737
738 if (skb->len < sizeof(*rp))
739 return;
740
741 if (rp->status)
742 return;
743
744 hci_dev_lock(hdev);
745
746 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
747 if (!cp)
748 goto unlock;
749
750 if (cp->which == 0x00) {
751 hdev->clock = le32_to_cpu(rp->clock);
752 goto unlock;
753 }
754
755 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
756 if (conn) {
757 conn->clock = le32_to_cpu(rp->clock);
758 conn->clock_accuracy = le16_to_cpu(rp->accuracy);
759 }
760
761 unlock:
762 hci_dev_unlock(hdev);
763 }
764
765 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
766 struct sk_buff *skb)
767 {
768 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
769
770 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
771
772 if (rp->status)
773 goto a2mp_rsp;
774
775 hdev->amp_status = rp->amp_status;
776 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
777 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
778 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
779 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
780 hdev->amp_type = rp->amp_type;
781 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
782 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
783 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
784 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
785
786 a2mp_rsp:
787 a2mp_send_getinfo_rsp(hdev);
788 }
789
790 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
791 struct sk_buff *skb)
792 {
793 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
794 struct amp_assoc *assoc = &hdev->loc_assoc;
795 size_t rem_len, frag_len;
796
797 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
798
799 if (rp->status)
800 goto a2mp_rsp;
801
802 frag_len = skb->len - sizeof(*rp);
803 rem_len = __le16_to_cpu(rp->rem_len);
804
805 if (rem_len > frag_len) {
806 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
807
808 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
809 assoc->offset += frag_len;
810
811 /* Read other fragments */
812 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
813
814 return;
815 }
816
817 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
818 assoc->len = assoc->offset + rem_len;
819 assoc->offset = 0;
820
821 a2mp_rsp:
822 /* Send A2MP Rsp when all fragments are received */
823 a2mp_send_getampassoc_rsp(hdev, rp->status);
824 a2mp_send_create_phy_link_req(hdev, rp->status);
825 }
826
827 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
828 struct sk_buff *skb)
829 {
830 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
831
832 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
833
834 if (rp->status)
835 return;
836
837 hdev->inq_tx_power = rp->tx_power;
838 }
839
840 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
841 {
842 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
843 struct hci_cp_pin_code_reply *cp;
844 struct hci_conn *conn;
845
846 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
847
848 hci_dev_lock(hdev);
849
850 if (test_bit(HCI_MGMT, &hdev->dev_flags))
851 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
852
853 if (rp->status)
854 goto unlock;
855
856 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
857 if (!cp)
858 goto unlock;
859
860 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
861 if (conn)
862 conn->pin_length = cp->pin_len;
863
864 unlock:
865 hci_dev_unlock(hdev);
866 }
867
868 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
869 {
870 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
871
872 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
873
874 hci_dev_lock(hdev);
875
876 if (test_bit(HCI_MGMT, &hdev->dev_flags))
877 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
878 rp->status);
879
880 hci_dev_unlock(hdev);
881 }
882
883 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
884 struct sk_buff *skb)
885 {
886 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
887
888 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
889
890 if (rp->status)
891 return;
892
893 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
894 hdev->le_pkts = rp->le_max_pkt;
895
896 hdev->le_cnt = hdev->le_pkts;
897
898 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
899 }
900
901 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
902 struct sk_buff *skb)
903 {
904 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
905
906 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
907
908 if (rp->status)
909 return;
910
911 memcpy(hdev->le_features, rp->features, 8);
912 }
913
914 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
915 struct sk_buff *skb)
916 {
917 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
918
919 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
920
921 if (rp->status)
922 return;
923
924 hdev->adv_tx_power = rp->tx_power;
925 }
926
927 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
928 {
929 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
930
931 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
932
933 hci_dev_lock(hdev);
934
935 if (test_bit(HCI_MGMT, &hdev->dev_flags))
936 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
937 rp->status);
938
939 hci_dev_unlock(hdev);
940 }
941
942 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
943 struct sk_buff *skb)
944 {
945 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
946
947 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
948
949 hci_dev_lock(hdev);
950
951 if (test_bit(HCI_MGMT, &hdev->dev_flags))
952 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
953 ACL_LINK, 0, rp->status);
954
955 hci_dev_unlock(hdev);
956 }
957
958 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
959 {
960 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
961
962 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
963
964 hci_dev_lock(hdev);
965
966 if (test_bit(HCI_MGMT, &hdev->dev_flags))
967 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
968 0, rp->status);
969
970 hci_dev_unlock(hdev);
971 }
972
973 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
974 struct sk_buff *skb)
975 {
976 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
977
978 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
979
980 hci_dev_lock(hdev);
981
982 if (test_bit(HCI_MGMT, &hdev->dev_flags))
983 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
984 ACL_LINK, 0, rp->status);
985
986 hci_dev_unlock(hdev);
987 }
988
989 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
990 struct sk_buff *skb)
991 {
992 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
993
994 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
995
996 hci_dev_lock(hdev);
997 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
998 NULL, NULL, rp->status);
999 hci_dev_unlock(hdev);
1000 }
1001
1002 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1003 struct sk_buff *skb)
1004 {
1005 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1006
1007 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1008
1009 hci_dev_lock(hdev);
1010 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
1011 rp->hash256, rp->randomizer256,
1012 rp->status);
1013 hci_dev_unlock(hdev);
1014 }
1015
1016
1017 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1018 {
1019 __u8 status = *((__u8 *) skb->data);
1020 bdaddr_t *sent;
1021
1022 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1023
1024 if (status)
1025 return;
1026
1027 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1028 if (!sent)
1029 return;
1030
1031 hci_dev_lock(hdev);
1032
1033 bacpy(&hdev->random_addr, sent);
1034
1035 hci_dev_unlock(hdev);
1036 }
1037
1038 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
1039 {
1040 __u8 *sent, status = *((__u8 *) skb->data);
1041
1042 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1043
1044 if (status)
1045 return;
1046
1047 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
1048 if (!sent)
1049 return;
1050
1051 hci_dev_lock(hdev);
1052
1053 /* If we're doing connection initiation as peripheral. Set a
1054 * timeout in case something goes wrong.
1055 */
1056 if (*sent) {
1057 struct hci_conn *conn;
1058
1059 set_bit(HCI_LE_ADV, &hdev->dev_flags);
1060
1061 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1062 if (conn)
1063 queue_delayed_work(hdev->workqueue,
1064 &conn->le_conn_timeout,
1065 conn->conn_timeout);
1066 } else {
1067 clear_bit(HCI_LE_ADV, &hdev->dev_flags);
1068 }
1069
1070 hci_dev_unlock(hdev);
1071 }
1072
1073 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1074 {
1075 struct hci_cp_le_set_scan_param *cp;
1076 __u8 status = *((__u8 *) skb->data);
1077
1078 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1079
1080 if (status)
1081 return;
1082
1083 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1084 if (!cp)
1085 return;
1086
1087 hci_dev_lock(hdev);
1088
1089 hdev->le_scan_type = cp->type;
1090
1091 hci_dev_unlock(hdev);
1092 }
1093
1094 static bool has_pending_adv_report(struct hci_dev *hdev)
1095 {
1096 struct discovery_state *d = &hdev->discovery;
1097
1098 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1099 }
1100
1101 static void clear_pending_adv_report(struct hci_dev *hdev)
1102 {
1103 struct discovery_state *d = &hdev->discovery;
1104
1105 bacpy(&d->last_adv_addr, BDADDR_ANY);
1106 d->last_adv_data_len = 0;
1107 }
1108
1109 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1110 u8 bdaddr_type, s8 rssi, u32 flags,
1111 u8 *data, u8 len)
1112 {
1113 struct discovery_state *d = &hdev->discovery;
1114
1115 bacpy(&d->last_adv_addr, bdaddr);
1116 d->last_adv_addr_type = bdaddr_type;
1117 d->last_adv_rssi = rssi;
1118 d->last_adv_flags = flags;
1119 memcpy(d->last_adv_data, data, len);
1120 d->last_adv_data_len = len;
1121 }
1122
1123 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1124 struct sk_buff *skb)
1125 {
1126 struct hci_cp_le_set_scan_enable *cp;
1127 __u8 status = *((__u8 *) skb->data);
1128
1129 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1130
1131 if (status)
1132 return;
1133
1134 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1135 if (!cp)
1136 return;
1137
1138 switch (cp->enable) {
1139 case LE_SCAN_ENABLE:
1140 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1141 if (hdev->le_scan_type == LE_SCAN_ACTIVE)
1142 clear_pending_adv_report(hdev);
1143 break;
1144
1145 case LE_SCAN_DISABLE:
1146 /* We do this here instead of when setting DISCOVERY_STOPPED
1147 * since the latter would potentially require waiting for
1148 * inquiry to stop too.
1149 */
1150 if (has_pending_adv_report(hdev)) {
1151 struct discovery_state *d = &hdev->discovery;
1152
1153 mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
1154 d->last_adv_addr_type, NULL,
1155 d->last_adv_rssi, d->last_adv_flags,
1156 d->last_adv_data,
1157 d->last_adv_data_len, NULL, 0);
1158 }
1159
1160 /* Cancel this timer so that we don't try to disable scanning
1161 * when it's already disabled.
1162 */
1163 cancel_delayed_work(&hdev->le_scan_disable);
1164
1165 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1166
1167 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1168 * interrupted scanning due to a connect request. Mark
1169 * therefore discovery as stopped. If this was not
1170 * because of a connect request advertising might have
1171 * been disabled because of active scanning, so
1172 * re-enable it again if necessary.
1173 */
1174 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1175 &hdev->dev_flags))
1176 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1177 else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
1178 hdev->discovery.state == DISCOVERY_FINDING)
1179 mgmt_reenable_advertising(hdev);
1180
1181 break;
1182
1183 default:
1184 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1185 break;
1186 }
1187 }
1188
1189 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1190 struct sk_buff *skb)
1191 {
1192 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1193
1194 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1195
1196 if (rp->status)
1197 return;
1198
1199 hdev->le_white_list_size = rp->size;
1200 }
1201
1202 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1203 struct sk_buff *skb)
1204 {
1205 __u8 status = *((__u8 *) skb->data);
1206
1207 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1208
1209 if (status)
1210 return;
1211
1212 hci_bdaddr_list_clear(&hdev->le_white_list);
1213 }
1214
1215 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1216 struct sk_buff *skb)
1217 {
1218 struct hci_cp_le_add_to_white_list *sent;
1219 __u8 status = *((__u8 *) skb->data);
1220
1221 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1222
1223 if (status)
1224 return;
1225
1226 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1227 if (!sent)
1228 return;
1229
1230 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1231 sent->bdaddr_type);
1232 }
1233
1234 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1235 struct sk_buff *skb)
1236 {
1237 struct hci_cp_le_del_from_white_list *sent;
1238 __u8 status = *((__u8 *) skb->data);
1239
1240 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1241
1242 if (status)
1243 return;
1244
1245 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1246 if (!sent)
1247 return;
1248
1249 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1250 sent->bdaddr_type);
1251 }
1252
1253 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1254 struct sk_buff *skb)
1255 {
1256 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1257
1258 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1259
1260 if (rp->status)
1261 return;
1262
1263 memcpy(hdev->le_states, rp->le_states, 8);
1264 }
1265
1266 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1267 struct sk_buff *skb)
1268 {
1269 struct hci_cp_write_le_host_supported *sent;
1270 __u8 status = *((__u8 *) skb->data);
1271
1272 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1273
1274 if (status)
1275 return;
1276
1277 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1278 if (!sent)
1279 return;
1280
1281 if (sent->le) {
1282 hdev->features[1][0] |= LMP_HOST_LE;
1283 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1284 } else {
1285 hdev->features[1][0] &= ~LMP_HOST_LE;
1286 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1287 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1288 }
1289
1290 if (sent->simul)
1291 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1292 else
1293 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1294 }
1295
1296 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1297 {
1298 struct hci_cp_le_set_adv_param *cp;
1299 u8 status = *((u8 *) skb->data);
1300
1301 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1302
1303 if (status)
1304 return;
1305
1306 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1307 if (!cp)
1308 return;
1309
1310 hci_dev_lock(hdev);
1311 hdev->adv_addr_type = cp->own_address_type;
1312 hci_dev_unlock(hdev);
1313 }
1314
1315 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1316 struct sk_buff *skb)
1317 {
1318 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1319
1320 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1321 hdev->name, rp->status, rp->phy_handle);
1322
1323 if (rp->status)
1324 return;
1325
1326 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1327 }
1328
1329 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1330 {
1331 struct hci_rp_read_rssi *rp = (void *) skb->data;
1332 struct hci_conn *conn;
1333
1334 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1335
1336 if (rp->status)
1337 return;
1338
1339 hci_dev_lock(hdev);
1340
1341 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1342 if (conn)
1343 conn->rssi = rp->rssi;
1344
1345 hci_dev_unlock(hdev);
1346 }
1347
1348 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1349 {
1350 struct hci_cp_read_tx_power *sent;
1351 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1352 struct hci_conn *conn;
1353
1354 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1355
1356 if (rp->status)
1357 return;
1358
1359 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1360 if (!sent)
1361 return;
1362
1363 hci_dev_lock(hdev);
1364
1365 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1366 if (!conn)
1367 goto unlock;
1368
1369 switch (sent->type) {
1370 case 0x00:
1371 conn->tx_power = rp->tx_power;
1372 break;
1373 case 0x01:
1374 conn->max_tx_power = rp->tx_power;
1375 break;
1376 }
1377
1378 unlock:
1379 hci_dev_unlock(hdev);
1380 }
1381
1382 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1383 {
1384 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1385
1386 if (status) {
1387 hci_conn_check_pending(hdev);
1388 return;
1389 }
1390
1391 set_bit(HCI_INQUIRY, &hdev->flags);
1392 }
1393
/* Command status handler for HCI_OP_CREATE_CONN.
 *
 * On failure, tear down the pending connection object unless the error
 * is 0x0c (Command Disallowed) with attempts remaining, in which case
 * the connection is parked in BT_CONNECT2 for a retry. On success,
 * make sure a connection object exists for the address.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c: Command Disallowed - allow up to two
			 * retries before giving up on the connection.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1431
1432 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1433 {
1434 struct hci_cp_add_sco *cp;
1435 struct hci_conn *acl, *sco;
1436 __u16 handle;
1437
1438 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1439
1440 if (!status)
1441 return;
1442
1443 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1444 if (!cp)
1445 return;
1446
1447 handle = __le16_to_cpu(cp->handle);
1448
1449 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1450
1451 hci_dev_lock(hdev);
1452
1453 acl = hci_conn_hash_lookup_handle(hdev, handle);
1454 if (acl) {
1455 sco = acl->link;
1456 if (sco) {
1457 sco->state = BT_CLOSED;
1458
1459 hci_proto_connect_cfm(sco, status);
1460 hci_conn_del(sco);
1461 }
1462 }
1463
1464 hci_dev_unlock(hdev);
1465 }
1466
1467 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1468 {
1469 struct hci_cp_auth_requested *cp;
1470 struct hci_conn *conn;
1471
1472 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1473
1474 if (!status)
1475 return;
1476
1477 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1478 if (!cp)
1479 return;
1480
1481 hci_dev_lock(hdev);
1482
1483 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1484 if (conn) {
1485 if (conn->state == BT_CONFIG) {
1486 hci_proto_connect_cfm(conn, status);
1487 hci_conn_drop(conn);
1488 }
1489 }
1490
1491 hci_dev_unlock(hdev);
1492 }
1493
1494 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1495 {
1496 struct hci_cp_set_conn_encrypt *cp;
1497 struct hci_conn *conn;
1498
1499 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1500
1501 if (!status)
1502 return;
1503
1504 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1505 if (!cp)
1506 return;
1507
1508 hci_dev_lock(hdev);
1509
1510 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1511 if (conn) {
1512 if (conn->state == BT_CONFIG) {
1513 hci_proto_connect_cfm(conn, status);
1514 hci_conn_drop(conn);
1515 }
1516 }
1517
1518 hci_dev_unlock(hdev);
1519 }
1520
1521 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1522 struct hci_conn *conn)
1523 {
1524 if (conn->state != BT_CONFIG || !conn->out)
1525 return 0;
1526
1527 if (conn->pending_sec_level == BT_SECURITY_SDP)
1528 return 0;
1529
1530 /* Only request authentication for SSP connections or non-SSP
1531 * devices with sec_level MEDIUM or HIGH or if MITM protection
1532 * is requested.
1533 */
1534 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1535 conn->pending_sec_level != BT_SECURITY_FIPS &&
1536 conn->pending_sec_level != BT_SECURITY_HIGH &&
1537 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1538 return 0;
1539
1540 return 1;
1541 }
1542
1543 static int hci_resolve_name(struct hci_dev *hdev,
1544 struct inquiry_entry *e)
1545 {
1546 struct hci_cp_remote_name_req cp;
1547
1548 memset(&cp, 0, sizeof(cp));
1549
1550 bacpy(&cp.bdaddr, &e->data.bdaddr);
1551 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1552 cp.pscan_mode = e->data.pscan_mode;
1553 cp.clock_offset = e->data.clock_offset;
1554
1555 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1556 }
1557
1558 static bool hci_resolve_next_name(struct hci_dev *hdev)
1559 {
1560 struct discovery_state *discov = &hdev->discovery;
1561 struct inquiry_entry *e;
1562
1563 if (list_empty(&discov->resolve))
1564 return false;
1565
1566 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1567 if (!e)
1568 return false;
1569
1570 if (hci_resolve_name(hdev, e) == 0) {
1571 e->name_state = NAME_PENDING;
1572 return true;
1573 }
1574
1575 return false;
1576 }
1577
/* Process a (possibly failed) remote name result against the discovery
 * state machine: report the device/name over mgmt, advance to the next
 * pending resolution, or finish discovery.
 *
 * @conn may be NULL or not-yet-connected; @name is NULL when resolution
 * failed.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* Discovery is being torn down: go straight to completion */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		/* Resolution failed; remember so we don't retry */
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names pending: keep resolving, completion comes later */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1626
/* Command status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only failures are handled here: notify the discovery state machine
 * that the name is unavailable and, if the connection still needs it,
 * fall back to requesting authentication directly.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* mgmt users get a "name unknown" update via the pending-name path */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1669
1670 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1671 {
1672 struct hci_cp_read_remote_features *cp;
1673 struct hci_conn *conn;
1674
1675 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1676
1677 if (!status)
1678 return;
1679
1680 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1681 if (!cp)
1682 return;
1683
1684 hci_dev_lock(hdev);
1685
1686 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1687 if (conn) {
1688 if (conn->state == BT_CONFIG) {
1689 hci_proto_connect_cfm(conn, status);
1690 hci_conn_drop(conn);
1691 }
1692 }
1693
1694 hci_dev_unlock(hdev);
1695 }
1696
1697 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1698 {
1699 struct hci_cp_read_remote_ext_features *cp;
1700 struct hci_conn *conn;
1701
1702 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1703
1704 if (!status)
1705 return;
1706
1707 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1708 if (!cp)
1709 return;
1710
1711 hci_dev_lock(hdev);
1712
1713 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1714 if (conn) {
1715 if (conn->state == BT_CONFIG) {
1716 hci_proto_connect_cfm(conn, status);
1717 hci_conn_drop(conn);
1718 }
1719 }
1720
1721 hci_dev_unlock(hdev);
1722 }
1723
1724 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1725 {
1726 struct hci_cp_setup_sync_conn *cp;
1727 struct hci_conn *acl, *sco;
1728 __u16 handle;
1729
1730 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1731
1732 if (!status)
1733 return;
1734
1735 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1736 if (!cp)
1737 return;
1738
1739 handle = __le16_to_cpu(cp->handle);
1740
1741 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1742
1743 hci_dev_lock(hdev);
1744
1745 acl = hci_conn_hash_lookup_handle(hdev, handle);
1746 if (acl) {
1747 sco = acl->link;
1748 if (sco) {
1749 sco->state = BT_CLOSED;
1750
1751 hci_proto_connect_cfm(sco, status);
1752 hci_conn_del(sco);
1753 }
1754 }
1755
1756 hci_dev_unlock(hdev);
1757 }
1758
1759 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1760 {
1761 struct hci_cp_sniff_mode *cp;
1762 struct hci_conn *conn;
1763
1764 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1765
1766 if (!status)
1767 return;
1768
1769 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1770 if (!cp)
1771 return;
1772
1773 hci_dev_lock(hdev);
1774
1775 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1776 if (conn) {
1777 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1778
1779 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1780 hci_sco_setup(conn, status);
1781 }
1782
1783 hci_dev_unlock(hdev);
1784 }
1785
1786 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1787 {
1788 struct hci_cp_exit_sniff_mode *cp;
1789 struct hci_conn *conn;
1790
1791 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1792
1793 if (!status)
1794 return;
1795
1796 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1797 if (!cp)
1798 return;
1799
1800 hci_dev_lock(hdev);
1801
1802 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1803 if (conn) {
1804 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1805
1806 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1807 hci_sco_setup(conn, status);
1808 }
1809
1810 hci_dev_unlock(hdev);
1811 }
1812
1813 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1814 {
1815 struct hci_cp_disconnect *cp;
1816 struct hci_conn *conn;
1817
1818 if (!status)
1819 return;
1820
1821 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1822 if (!cp)
1823 return;
1824
1825 hci_dev_lock(hdev);
1826
1827 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1828 if (conn)
1829 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1830 conn->dst_type, status);
1831
1832 hci_dev_unlock(hdev);
1833 }
1834
1835 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1836 {
1837 struct hci_cp_create_phy_link *cp;
1838
1839 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1840
1841 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1842 if (!cp)
1843 return;
1844
1845 hci_dev_lock(hdev);
1846
1847 if (status) {
1848 struct hci_conn *hcon;
1849
1850 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1851 if (hcon)
1852 hci_conn_del(hcon);
1853 } else {
1854 amp_write_remote_assoc(hdev, cp->phy_handle);
1855 }
1856
1857 hci_dev_unlock(hdev);
1858 }
1859
1860 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1861 {
1862 struct hci_cp_accept_phy_link *cp;
1863
1864 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1865
1866 if (status)
1867 return;
1868
1869 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1870 if (!cp)
1871 return;
1872
1873 amp_write_remote_assoc(hdev, cp->phy_handle);
1874 }
1875
/* Command status handler for HCI_OP_LE_CREATE_CONN.
 *
 * On success, record the initiator/responder addresses (needed by SMP
 * later) and arm a connection timeout when not connecting via the
 * white list.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1926
1927 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1928 {
1929 struct hci_cp_le_start_enc *cp;
1930 struct hci_conn *conn;
1931
1932 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1933
1934 if (!status)
1935 return;
1936
1937 hci_dev_lock(hdev);
1938
1939 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1940 if (!cp)
1941 goto unlock;
1942
1943 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1944 if (!conn)
1945 goto unlock;
1946
1947 if (conn->state != BT_CONNECTED)
1948 goto unlock;
1949
1950 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1951 hci_conn_drop(conn);
1952
1953 unlock:
1954 hci_dev_unlock(hdev);
1955 }
1956
1957 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
1958 {
1959 struct hci_cp_switch_role *cp;
1960 struct hci_conn *conn;
1961
1962 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1963
1964 if (!status)
1965 return;
1966
1967 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
1968 if (!cp)
1969 return;
1970
1971 hci_dev_lock(hdev);
1972
1973 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1974 if (conn)
1975 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
1976
1977 hci_dev_unlock(hdev);
1978 }
1979
/* Handle the Inquiry Complete event: clear the inquiry flag, wake any
 * waiters, and advance mgmt-driven discovery either into name resolution
 * or to the stopped state.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Connection attempts may have been deferred while inquiring */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state handling below only applies to mgmt users */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving names of discovered devices; if the first
	 * request cannot be sent, discovery is done.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
2020
2021 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2022 {
2023 struct inquiry_data data;
2024 struct inquiry_info *info = (void *) (skb->data + 1);
2025 int num_rsp = *((__u8 *) skb->data);
2026
2027 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2028
2029 if (!num_rsp)
2030 return;
2031
2032 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2033 return;
2034
2035 hci_dev_lock(hdev);
2036
2037 for (; num_rsp; num_rsp--, info++) {
2038 u32 flags;
2039
2040 bacpy(&data.bdaddr, &info->bdaddr);
2041 data.pscan_rep_mode = info->pscan_rep_mode;
2042 data.pscan_period_mode = info->pscan_period_mode;
2043 data.pscan_mode = info->pscan_mode;
2044 memcpy(data.dev_class, info->dev_class, 3);
2045 data.clock_offset = info->clock_offset;
2046 data.rssi = 0x00;
2047 data.ssp_mode = 0x00;
2048
2049 flags = hci_inquiry_cache_update(hdev, &data, false);
2050
2051 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2052 info->dev_class, 0, flags, NULL, 0, NULL, 0);
2053 }
2054
2055 hci_dev_unlock(hdev);
2056 }
2057
/* Handle the Connection Complete event for BR/EDR ACL and SCO links:
 * finish connection setup on success (state, sysfs, remote features,
 * packet type) or tear the connection down on failure.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO link may have been requested as eSCO; retry the
		 * lookup under ESCO_LINK and downgrade the type.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy-pairing connections without a
			 * stored link key get the longer pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_page_scan(hdev, NULL);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	/* Let a waiting SCO setup proceed (or fail) with this result */
	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* ACL links are confirmed later, after remote features */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2141
2142 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2143 {
2144 struct hci_cp_reject_conn_req cp;
2145
2146 bacpy(&cp.bdaddr, bdaddr);
2147 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2148 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2149 }
2150
/* Handle the Connection Request event: apply the accept/reject policy
 * (protocol indication, blacklist, connectable/whitelist), then accept
 * the link either immediately (ACL / legacy SCO) or via the deferred
 * path where the protocol layer decides later.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layers veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* When not connectable, only whitelisted devices may connect */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
			&ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* eSCO-capable controller: accept with sync parameters */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Deferred: the protocol layer accepts or rejects later */
		conn->state = BT_CONNECT2;
		hci_proto_connect_cfm(conn, 0);
	}
}
2240
2241 static u8 hci_to_mgmt_reason(u8 err)
2242 {
2243 switch (err) {
2244 case HCI_ERROR_CONNECTION_TIMEOUT:
2245 return MGMT_DEV_DISCONN_TIMEOUT;
2246 case HCI_ERROR_REMOTE_USER_TERM:
2247 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2248 case HCI_ERROR_REMOTE_POWER_OFF:
2249 return MGMT_DEV_DISCONN_REMOTE;
2250 case HCI_ERROR_LOCAL_HOST_TERM:
2251 return MGMT_DEV_DISCONN_LOCAL_HOST;
2252 default:
2253 return MGMT_DEV_DISCONN_UNKNOWN;
2254 }
2255 }
2256
/* Handle the Disconnection Complete event: notify mgmt, clean up link
 * keys and page scan state, re-queue auto-connect parameters, delete the
 * connection and, for LE links, re-enable advertising if needed.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report the disconnect to mgmt if the connect was reported */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_page_scan(hdev, NULL);
	}

	/* Devices configured for auto-connect go back on the pending
	 * LE connections list so the background scan picks them up.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Remember the link type; conn is freed below */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2334
/* Handle the Authentication Complete event: update the connection's auth
 * state, notify mgmt on failure, and continue with encryption setup or
 * connection confirmation depending on the connection state.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (non-SSP) devices cannot be re-authenticated;
		 * keep the existing security level in that case.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* SSP connections continue with turning on encryption
		 * before being reported connected.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* hold/drop pair refreshes the disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* A pending encryption request was waiting on authentication */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2399
/* Handle the Remote Name Request Complete event: feed the result (or the
 * failure) into the discovery pending-name machinery and, if the related
 * connection still needs it, request authentication.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Pending-name processing only applies to mgmt users */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2441
/* Encryption Change event: track the link's encryption/authentication
 * state, enforce Secure Connections Only policy, and complete or tear
 * down connection setup depending on the reported status.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM when encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Failed encryption on an established link is fatal: disconnect */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2513
2514 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2515 struct sk_buff *skb)
2516 {
2517 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2518 struct hci_conn *conn;
2519
2520 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2521
2522 hci_dev_lock(hdev);
2523
2524 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2525 if (conn) {
2526 if (!ev->status)
2527 set_bit(HCI_CONN_SECURE, &conn->flags);
2528
2529 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2530
2531 hci_key_change_cfm(conn, ev->status);
2532 }
2533
2534 hci_dev_unlock(hdev);
2535 }
2536
/* Read Remote Supported Features Complete event: cache LMP feature
 * page 0 and continue connection setup for links still in BT_CONFIG.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides support SSP, fetch the extended feature page
	 * first; setup then continues in the ext-features handler.
	 */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before telling mgmt about the new
	 * connection; otherwise notify mgmt exactly once right away.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* No outgoing authentication needed: setup is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2584
/* Command Complete event: dispatch to the per-opcode completion handler,
 * cancel the command timeout, complete any matching HCI request and
 * restart the command queue according to the controller's ncmd credit.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	u8 status = skb->data[sizeof(*ev)]; /* first byte of return params */
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* Any real command completion disarms the command timeout */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* Controller granted new command credit: resume the command queue
	 * (unless a reset is in flight).
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2864
/* Command Status event: dispatch to the per-opcode status handler,
 * cancel the command timeout, complete the request on error (or when
 * no further event is expected) and restart the command queue.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* Any real command status disarms the command timeout */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete the request now if it failed outright, or if the
	 * sent command is not waiting on a follow-up event.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* New command credit from the controller: resume the queue */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2961
2962 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
2963 {
2964 struct hci_ev_hardware_error *ev = (void *) skb->data;
2965
2966 BT_ERR("%s hardware error 0x%2.2x", hdev->name, ev->code);
2967 }
2968
2969 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2970 {
2971 struct hci_ev_role_change *ev = (void *) skb->data;
2972 struct hci_conn *conn;
2973
2974 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2975
2976 hci_dev_lock(hdev);
2977
2978 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2979 if (conn) {
2980 if (!ev->status)
2981 conn->role = ev->role;
2982
2983 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2984
2985 hci_role_switch_cfm(conn, ev->status, ev->role);
2986 }
2987
2988 hci_dev_unlock(hdev);
2989 }
2990
/* Number of Completed Packets event: return flow-control credits to
 * the per-link-type packet counters and kick the TX work queue.
 * Only valid in packet-based flow control mode.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Ensure the event actually carries num_hndl handle/count pairs */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Credit the counter matching the link type, clamped to
		 * the controller-advertised buffer count.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* LE shares the ACL buffer pool when the controller
			 * reports no dedicated LE buffers (le_pkts == 0).
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3056
3057 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3058 __u16 handle)
3059 {
3060 struct hci_chan *chan;
3061
3062 switch (hdev->dev_type) {
3063 case HCI_BREDR:
3064 return hci_conn_hash_lookup_handle(hdev, handle);
3065 case HCI_AMP:
3066 chan = hci_chan_lookup_handle(hdev, handle);
3067 if (chan)
3068 return chan->conn;
3069 break;
3070 default:
3071 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3072 break;
3073 }
3074
3075 return NULL;
3076 }
3077
/* Number of Completed Data Blocks event: return block-based flow
 * control credits and kick the TX work queue. Only valid in
 * block-based flow control mode.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Ensure the event actually carries num_hndl entries */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle may belong to an AMP logical channel */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Credit the shared block pool, clamped to the
			 * controller-advertised block count.
			 */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3127
3128 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3129 {
3130 struct hci_ev_mode_change *ev = (void *) skb->data;
3131 struct hci_conn *conn;
3132
3133 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3134
3135 hci_dev_lock(hdev);
3136
3137 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3138 if (conn) {
3139 conn->mode = ev->mode;
3140
3141 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3142 &conn->flags)) {
3143 if (conn->mode == HCI_CM_ACTIVE)
3144 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3145 else
3146 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3147 }
3148
3149 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3150 hci_sco_setup(conn, ev->status);
3151 }
3152
3153 hci_dev_unlock(hdev);
3154 }
3155
/* PIN Code Request event: auto-reject if we are not bondable and did
 * not initiate authentication, otherwise ask userspace (mgmt) for a
 * PIN, indicating whether a 16-digit secure PIN is required.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of pairing */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security requires a 16-digit PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3193
/* Link Key Request event: look up a stored link key for the peer and
 * reply with it, unless the key's strength is insufficient for the
 * requested security level, in which case a negative reply is sent.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage is only maintained when mgmt is in use */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* Refuse an unauthenticated key when MITM protection is
		 * being requested (auth_type bit 0).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A legacy combination key from a short PIN is too weak
		 * for high or FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3252
/* Link Key Notification event: store the new link key, notify
 * userspace, and decide whether the key is kept persistently or
 * flushed when the connection goes down.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed-combination key keeps the original key type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del(&key->list);
		kfree(key);
	} else if (conn) {
		if (persistent)
			clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
		else
			set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	}

unlock:
	hci_dev_unlock(hdev);
}
3306
3307 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3308 {
3309 struct hci_ev_clock_offset *ev = (void *) skb->data;
3310 struct hci_conn *conn;
3311
3312 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3313
3314 hci_dev_lock(hdev);
3315
3316 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3317 if (conn && !ev->status) {
3318 struct inquiry_entry *ie;
3319
3320 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3321 if (ie) {
3322 ie->data.clock_offset = ev->clock_offset;
3323 ie->timestamp = jiffies;
3324 }
3325 }
3326
3327 hci_dev_unlock(hdev);
3328 }
3329
3330 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3331 {
3332 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3333 struct hci_conn *conn;
3334
3335 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3336
3337 hci_dev_lock(hdev);
3338
3339 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3340 if (conn && !ev->status)
3341 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3342
3343 hci_dev_unlock(hdev);
3344 }
3345
3346 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3347 {
3348 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3349 struct inquiry_entry *ie;
3350
3351 BT_DBG("%s", hdev->name);
3352
3353 hci_dev_lock(hdev);
3354
3355 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3356 if (ie) {
3357 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3358 ie->timestamp = jiffies;
3359 }
3360
3361 hci_dev_unlock(hdev);
3362 }
3363
/* Inquiry Result with RSSI event: parse either of the two response
 * formats (with or without the page scan mode field, distinguished by
 * record size), update the inquiry cache and notify mgmt per device.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not reported to userspace */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Record size reveals which of the two formats was sent */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3427
/* Read Remote Extended Features Complete event: cache the feature
 * page, derive SSP/SC state from host features, and continue setup
 * for connections still in BT_CONFIG.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host's SSP/SC support bits */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name before telling mgmt about the new
	 * connection; otherwise notify mgmt exactly once right away.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	/* No outgoing authentication needed: setup is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3491
/* Synchronous Connection Complete event: finalize an (e)SCO link, or
 * retry an outgoing eSCO setup with a downgraded packet type for a set
 * of error codes known to be caused by parameter rejection.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An eSCO request may have been downgraded by the
		 * controller and completed as plain SCO.
		 */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x10: /* Connection Accept Timeout */
	case 0x0d: /* Connection Rejected due to Limited Resources */
	case 0x11: /* Unsupported Feature or Parameter Value */
	case 0x1c: /* SCO interval rejected */
	case 0x1a: /* Unsupported Remote Feature */
	case 0x1f: /* Unspecified error */
	case 0x20: /* Unsupported LMP Parameter value */
		/* For outgoing connections, retry with less demanding
		 * packet types; a successful re-issue keeps the
		 * connection object alive.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3549
3550 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3551 {
3552 size_t parsed = 0;
3553
3554 while (parsed < eir_len) {
3555 u8 field_len = eir[0];
3556
3557 if (field_len == 0)
3558 return parsed;
3559
3560 parsed += field_len + 1;
3561 eir += field_len + 1;
3562 }
3563
3564 return eir_len;
3565 }
3566
/* Extended Inquiry Result event: update the inquiry cache and forward
 * each found device, including its EIR data, to mgmt.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not reported to userspace */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;

		/* The EIR data may already carry the complete name, in
		 * which case no separate name resolution is needed.
		 */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
3616
/* Handle the HCI Encryption Key Refresh Complete event.
 *
 * Only LE links are processed here; for BR/EDR the same work is done
 * from the auth_complete handler. On success the pending security
 * level is committed; on failure the connection is torn down or the
 * failure is propagated to the upper layers, depending on state.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	/* Commit the security level that was being established */
	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A refresh failure on an established link means encryption is
	 * gone; disconnect rather than keep an insecure link.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Re-arm the disconnect timeout reference */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3666
3667 static u8 hci_get_auth_req(struct hci_conn *conn)
3668 {
3669 /* If remote requests no-bonding follow that lead */
3670 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3671 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3672 return conn->remote_auth | (conn->auth_type & 0x01);
3673
3674 /* If both remote and local have enough IO capabilities, require
3675 * MITM protection
3676 */
3677 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3678 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3679 return conn->remote_auth | 0x01;
3680
3681 /* No MITM protection possible so ignore remote requirement */
3682 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3683 }
3684
/* Handle the HCI IO Capability Request event.
 *
 * Decides whether pairing is allowed for this connection and replies
 * with either an IO Capability Reply (with our capability, auth
 * requirements and OOB availability) or a negative reply rejecting
 * the pairing.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep a reference for the duration of the pairing procedure */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;

		/* Advertise OOB data only when we actually have it and
		 * either initiated the connection or the remote indicated
		 * OOB support.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3758
3759 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3760 {
3761 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3762 struct hci_conn *conn;
3763
3764 BT_DBG("%s", hdev->name);
3765
3766 hci_dev_lock(hdev);
3767
3768 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3769 if (!conn)
3770 goto unlock;
3771
3772 conn->remote_cap = ev->capability;
3773 conn->remote_auth = ev->authentication;
3774 if (ev->oob_data)
3775 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3776
3777 unlock:
3778 hci_dev_unlock(hdev);
3779 }
3780
/* Handle the HCI User Confirmation Request event.
 *
 * Depending on the local/remote MITM requirements and IO capabilities
 * this either rejects the request, auto-accepts it (optionally after
 * a configured delay), or forwards it to user space via mgmt for an
 * explicit confirmation.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Defer the accept when a delay has been configured */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3855
3856 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3857 struct sk_buff *skb)
3858 {
3859 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3860
3861 BT_DBG("%s", hdev->name);
3862
3863 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3864 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3865 }
3866
3867 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3868 struct sk_buff *skb)
3869 {
3870 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3871 struct hci_conn *conn;
3872
3873 BT_DBG("%s", hdev->name);
3874
3875 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3876 if (!conn)
3877 return;
3878
3879 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3880 conn->passkey_entered = 0;
3881
3882 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3883 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3884 conn->dst_type, conn->passkey_notify,
3885 conn->passkey_entered);
3886 }
3887
3888 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3889 {
3890 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3891 struct hci_conn *conn;
3892
3893 BT_DBG("%s", hdev->name);
3894
3895 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3896 if (!conn)
3897 return;
3898
3899 switch (ev->type) {
3900 case HCI_KEYPRESS_STARTED:
3901 conn->passkey_entered = 0;
3902 return;
3903
3904 case HCI_KEYPRESS_ENTERED:
3905 conn->passkey_entered++;
3906 break;
3907
3908 case HCI_KEYPRESS_ERASED:
3909 conn->passkey_entered--;
3910 break;
3911
3912 case HCI_KEYPRESS_CLEARED:
3913 conn->passkey_entered = 0;
3914 break;
3915
3916 case HCI_KEYPRESS_COMPLETED:
3917 return;
3918 }
3919
3920 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3921 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3922 conn->dst_type, conn->passkey_notify,
3923 conn->passkey_entered);
3924 }
3925
/* Handle the HCI Simple Pairing Complete event.
 *
 * Resets the cached remote auth requirement, reports pairing failures
 * to user space (unless an auth_complete event will do so) and drops
 * the reference taken when the IO capability request arrived.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(conn, ev->status);

	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
3956
/* Handle the HCI Remote Host Supported Features Notification event.
 *
 * Caches the remote host feature page on an existing connection (if
 * any) and records the remote's SSP host support in the inquiry
 * cache entry for that address.
 */
static void hci_remote_host_features_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_remote_host_features *ev = (void *) skb->data;
	struct inquiry_entry *ie;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* Store the host features as feature page 1 */
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn)
		memcpy(conn->features[1], ev->features, 8);

	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

	hci_dev_unlock(hdev);
}
3978
/* Handle the HCI Remote OOB Data Request event.
 *
 * Replies with stored out-of-band pairing data for the remote
 * address. When Secure Connections is enabled the extended reply
 * carrying both P-192 and P-256 values is used; otherwise only the
 * P-192 values are sent. Without stored data a negative reply is
 * issued.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* OOB data is only managed through the mgmt interface */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.randomizer192, data->randomizer192,
			       sizeof(cp.randomizer192));
			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
			memcpy(cp.randomizer256, data->randomizer256,
			       sizeof(cp.randomizer256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.randomizer, data->randomizer192,
			       sizeof(cp.randomizer));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4029
/* Handle the HCI Physical Link Complete event (AMP).
 *
 * On failure the pending AMP connection object is deleted. On success
 * the AMP link is marked connected, inherits the peer address from
 * the associated BR/EDR link and the AMP manager is informed so
 * channel setup can continue.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* The BR/EDR link that triggered the AMP physical link creation */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* Re-arm the disconnect timeout reference */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}
4068
/* Handle the HCI Logical Link Complete event (AMP).
 *
 * Creates an hci_chan for the new logical link and, when an L2CAP
 * channel is waiting on the AMP manager, confirms the logical link
 * to L2CAP so the channel move/creation can proceed.
 *
 * NOTE(review): unlike the sibling handlers this one performs the
 * connection hash lookup without taking hci_dev_lock - TODO confirm
 * this is intentional/safe in this context.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP links use the block-based MTU of the controller */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4106
4107 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4108 struct sk_buff *skb)
4109 {
4110 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4111 struct hci_chan *hchan;
4112
4113 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4114 le16_to_cpu(ev->handle), ev->status);
4115
4116 if (ev->status)
4117 return;
4118
4119 hci_dev_lock(hdev);
4120
4121 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4122 if (!hchan)
4123 goto unlock;
4124
4125 amp_destroy_logical_link(hchan, ev->reason);
4126
4127 unlock:
4128 hci_dev_unlock(hdev);
4129 }
4130
4131 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4132 struct sk_buff *skb)
4133 {
4134 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4135 struct hci_conn *hcon;
4136
4137 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4138
4139 if (ev->status)
4140 return;
4141
4142 hci_dev_lock(hdev);
4143
4144 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4145 if (hcon) {
4146 hcon->state = BT_CLOSED;
4147 hci_conn_del(hcon);
4148 }
4149
4150 hci_dev_unlock(hdev);
4151 }
4152
/* Handle the HCI LE Connection Complete event.
 *
 * Finds or creates the hci_conn for the new link, fills in the
 * initiator/responder addresses, resolves RPAs back to identity
 * addresses, enforces the blacklist, notifies mgmt/L2CAP and clears
 * any pending auto-connect parameters for the peer.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The connection attempt succeeded; stop its timeout */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The pending auto-connect action for this peer is now done;
	 * release the reference it held on the connection.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4287
4288 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4289 struct sk_buff *skb)
4290 {
4291 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4292 struct hci_conn *conn;
4293
4294 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4295
4296 if (ev->status)
4297 return;
4298
4299 hci_dev_lock(hdev);
4300
4301 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4302 if (conn) {
4303 conn->le_conn_interval = le16_to_cpu(ev->interval);
4304 conn->le_conn_latency = le16_to_cpu(ev->latency);
4305 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4306 }
4307
4308 hci_dev_unlock(hdev);
4309 }
4310
/* This function requires the caller holds hdev->lock */
/* Decide whether an advertising report should trigger an outgoing LE
 * connection attempt, and start one if so.
 *
 * Returns the new connection (with a reference stored in the matching
 * hci_conn_params) or NULL when no attempt was made or it failed.
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return NULL;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return NULL;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connectioms from slave device are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		return NULL;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = hci_conn_get(conn);
		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
4390
/* Process a single LE advertising report.
 *
 * Resolves RPAs to identity addresses, triggers pending auto-connect
 * attempts, and turns the report into mgmt device-found events. For
 * active scanning, ADV_IND/ADV_SCAN_IND reports are buffered so a
 * following SCAN_RSP can be merged into a single event.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
	if (conn && type == LE_ADV_IND) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4522
/* Handle the HCI LE Advertising Report event, which can carry several
 * reports; each one is handed to process_adv_report().
 *
 * NOTE(review): the per-report lengths are trusted without checking
 * them against skb->len - TODO confirm the event payload is
 * length-validated before this handler runs.
 */
static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];

	hci_dev_lock(hdev);

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;
		s8 rssi;

		/* The RSSI octet immediately follows the variable data */
		rssi = ev->data[ev->length];
		process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
				   ev->bdaddr_type, rssi, ev->data, ev->length);

		/* Advance past this report including its RSSI octet */
		ptr += sizeof(*ev) + ev->length + 1;
	}

	hci_dev_unlock(hdev);
}
4543
/* Handle the HCI LE Long Term Key Request event.
 *
 * Looks up a stored LTK matching the connection, EDiv and Rand. If
 * found, replies with the key and records the resulting security
 * level and key size; otherwise a negative reply is sent.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->role);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* Security level is committed once encryption completes */
	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del(&ltk->list);
		kfree(ltk);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4596
4597 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4598 u8 reason)
4599 {
4600 struct hci_cp_le_conn_param_req_neg_reply cp;
4601
4602 cp.handle = cpu_to_le16(handle);
4603 cp.reason = reason;
4604
4605 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4606 &cp);
4607 }
4608
/* Handle the HCI LE Remote Connection Parameter Request event.
 *
 * Rejects requests for unknown connections or out-of-range
 * parameters. As master, the parameters are also stored/reported via
 * mgmt so user space can persist them. Finally the request is
 * accepted with a positive reply echoing the requested values.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Validate the requested parameters against the spec ranges */
	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		/* Update any stored parameters for this peer and hint
		 * user space whether it should persist the new values.
		 */
		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else{
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept the request as-is; ce_len values are left to the
	 * controller's discretion.
	 */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
4666
4667 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4668 {
4669 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4670
4671 skb_pull(skb, sizeof(*le_ev));
4672
4673 switch (le_ev->subevent) {
4674 case HCI_EV_LE_CONN_COMPLETE:
4675 hci_le_conn_complete_evt(hdev, skb);
4676 break;
4677
4678 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4679 hci_le_conn_update_complete_evt(hdev, skb);
4680 break;
4681
4682 case HCI_EV_LE_ADVERTISING_REPORT:
4683 hci_le_adv_report_evt(hdev, skb);
4684 break;
4685
4686 case HCI_EV_LE_LTK_REQ:
4687 hci_le_ltk_request_evt(hdev, skb);
4688 break;
4689
4690 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4691 hci_le_remote_conn_param_req_evt(hdev, skb);
4692 break;
4693
4694 default:
4695 break;
4696 }
4697 }
4698
4699 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4700 {
4701 struct hci_ev_channel_selected *ev = (void *) skb->data;
4702 struct hci_conn *hcon;
4703
4704 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4705
4706 skb_pull(skb, sizeof(*ev));
4707
4708 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4709 if (!hcon)
4710 return;
4711
4712 amp_read_loc_assoc_final_data(hdev, hcon);
4713 }
4714
/* Top-level HCI event demultiplexer.  Reads the event code from the
 * packet header, dispatches the skb to the matching handler and then
 * frees it, counting the event in the device's RX statistics.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hci_req_pending(hdev)) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	/* Strip the event header; handlers expect skb->data to point at
	 * the event parameters.
	 */
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the last sent command was waiting for exactly this event,
	 * complete it in the request framework before dispatching.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	/* LE events arrive wrapped in the LE Meta event and are further
	 * demultiplexed by their subevent code.
	 */
	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	/* AMP physical/logical link events */
	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		/* Unknown/unsupported events are logged and dropped */
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
This page took 0.202266 seconds and 6 git commands to generate.