Bluetooth: Introduce a flag to track who really initiates authentication
[deliverable/linux.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "a2mp.h"
34 #include "amp.h"
35 #include "smp.h"
36
37 /* Handle HCI Event packets */
38
/* Handle Command Complete for HCI_Inquiry_Cancel.
 *
 * On success: clear the inquiry flag, wake any waiter blocked on it,
 * mark discovery stopped, and retry connection requests that were
 * deferred while the inquiry was running.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Clear before waking so waiters observe the updated flag */
	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
58
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60 {
61 __u8 status = *((__u8 *) skb->data);
62
63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
64
65 if (status)
66 return;
67
68 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69 }
70
71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 {
73 __u8 status = *((__u8 *) skb->data);
74
75 BT_DBG("%s status 0x%2.2x", hdev->name, status);
76
77 if (status)
78 return;
79
80 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81
82 hci_conn_check_pending(hdev);
83 }
84
/* Handle Command Complete for HCI_Remote_Name_Request_Cancel.
 * Nothing to update; just trace the event.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
90
/* Handle Command Complete for HCI_Role_Discovery: record our current
 * role (master/slave) on the matching connection.
 */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}
109
/* Handle Command Complete for HCI_Read_Link_Policy_Settings: cache the
 * policy on the matching connection.
 */
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}
128
/* Handle Command Complete for HCI_Write_Link_Policy_Settings: on
 * success mirror the policy we sent into the matching connection.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* offset 2 skips the 16-bit handle in the command params —
		 * presumably struct hci_cp_write_link_policy; verify in hci.h
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
152
153 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
154 struct sk_buff *skb)
155 {
156 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
157
158 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
159
160 if (rp->status)
161 return;
162
163 hdev->link_policy = __le16_to_cpu(rp->policy);
164 }
165
166 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
167 struct sk_buff *skb)
168 {
169 __u8 status = *((__u8 *) skb->data);
170 void *sent;
171
172 BT_DBG("%s status 0x%2.2x", hdev->name, status);
173
174 if (status)
175 return;
176
177 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
178 if (!sent)
179 return;
180
181 hdev->link_policy = get_unaligned_le16(sent);
182 }
183
/* Handle Command Complete for HCI_Reset.
 *
 * Drop all volatile host state that a controller reset invalidates.
 * Note the status byte is only logged: the bookkeeping below is done
 * unconditionally.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Forget any cached advertising and scan response data */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;
}
209
/* Handle Command Complete for HCI_Write_Local_Name.
 *
 * With mgmt in use, mgmt is told about both success and failure;
 * otherwise the name is cached locally on success only.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
230
/* Handle Command Complete for HCI_Read_Local_Name: the controller name
 * is only cached while the device is still in setup.
 */
static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
243
/* Handle Command Complete for HCI_Write_Authentication_Enable: keep
 * the HCI_AUTH flag in sync and notify mgmt of the outcome.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	/* mgmt is informed about both success and failure */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);
}
267
268 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
269 {
270 __u8 status = *((__u8 *) skb->data);
271 __u8 param;
272 void *sent;
273
274 BT_DBG("%s status 0x%2.2x", hdev->name, status);
275
276 if (status)
277 return;
278
279 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
280 if (!sent)
281 return;
282
283 param = *((__u8 *) sent);
284
285 if (param)
286 set_bit(HCI_ENCRYPT, &hdev->flags);
287 else
288 clear_bit(HCI_ENCRYPT, &hdev->flags);
289 }
290
291 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
292 {
293 __u8 status = *((__u8 *) skb->data);
294 __u8 param;
295 void *sent;
296
297 BT_DBG("%s status 0x%2.2x", hdev->name, status);
298
299 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
300 if (!sent)
301 return;
302
303 param = *((__u8 *) sent);
304
305 hci_dev_lock(hdev);
306
307 if (status) {
308 hdev->discov_timeout = 0;
309 goto done;
310 }
311
312 if (param & SCAN_INQUIRY)
313 set_bit(HCI_ISCAN, &hdev->flags);
314 else
315 clear_bit(HCI_ISCAN, &hdev->flags);
316
317 if (param & SCAN_PAGE)
318 set_bit(HCI_PSCAN, &hdev->flags);
319 else
320 clear_bit(HCI_ISCAN, &hdev->flags);
321
322 done:
323 hci_dev_unlock(hdev);
324 }
325
/* Handle Command Complete for HCI_Read_Class_of_Device: cache the
 * 3-byte class of device (logged most-significant byte first).
 */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
340
/* Handle Command Complete for HCI_Write_Class_of_Device: on success
 * cache the class we sent; mgmt is told about either outcome.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
362
363 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
364 {
365 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
366 __u16 setting;
367
368 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
369
370 if (rp->status)
371 return;
372
373 setting = __le16_to_cpu(rp->voice_setting);
374
375 if (hdev->voice_setting == setting)
376 return;
377
378 hdev->voice_setting = setting;
379
380 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
381
382 if (hdev->notify)
383 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
384 }
385
386 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
387 struct sk_buff *skb)
388 {
389 __u8 status = *((__u8 *) skb->data);
390 __u16 setting;
391 void *sent;
392
393 BT_DBG("%s status 0x%2.2x", hdev->name, status);
394
395 if (status)
396 return;
397
398 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
399 if (!sent)
400 return;
401
402 setting = get_unaligned_le16(sent);
403
404 if (hdev->voice_setting == setting)
405 return;
406
407 hdev->voice_setting = setting;
408
409 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
410
411 if (hdev->notify)
412 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
413 }
414
415 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
416 struct sk_buff *skb)
417 {
418 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
419
420 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
421
422 if (rp->status)
423 return;
424
425 hdev->num_iac = rp->num_iac;
426
427 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
428 }
429
/* Handle Command Complete for HCI_Write_Simple_Pairing_Mode: keep the
 * host SSP feature bit and HCI_SSP_ENABLED flag in sync with the mode
 * written; mgmt (if active) is told about either outcome.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
457
/* Handle Command Complete for HCI_Write_Secure_Connections_Host_Support:
 * keep the host SC feature bit and HCI_SC_ENABLED flag in sync; mgmt
 * (if active) is told about either outcome.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_sc_enable_complete(hdev, sent->support, status);
	else if (!status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}
}
485
/* Handle Command Complete for HCI_Read_Local_Version_Information:
 * version data is only cached while the device is in setup.
 */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}
503
504 static void hci_cc_read_local_commands(struct hci_dev *hdev,
505 struct sk_buff *skb)
506 {
507 struct hci_rp_read_local_commands *rp = (void *) skb->data;
508
509 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
510
511 if (rp->status)
512 return;
513
514 if (test_bit(HCI_SETUP, &hdev->dev_flags))
515 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
516 }
517
/* Handle Command Complete for HCI_Read_Local_Supported_Features.
 *
 * Cache feature page 0 and derive the ACL packet types and (e)SCO
 * link types the controller can use from the advertised LMP features.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO HV2/HV3 packets */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
567
/* Handle Command Complete for HCI_Read_Local_Extended_Features: track
 * the highest feature page advertised and cache the requested page.
 */
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}
584
585 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
586 struct sk_buff *skb)
587 {
588 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
589
590 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
591
592 if (rp->status)
593 return;
594
595 hdev->flow_ctl_mode = rp->mode;
596 }
597
/* Handle Command Complete for HCI_Read_Buffer_Size: cache ACL/SCO MTUs
 * and packet counts and initialise the outstanding-packet counters.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report bogus SCO values; override them */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
623
/* Handle Command Complete for HCI_Read_BD_ADDR: cache the public
 * address during init, and remember it separately during setup.
 */
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}
639
/* Handle Command Complete for HCI_Read_Page_Scan_Activity: cache the
 * interval/window, but only while initialising the device.
 */
static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}
655
656 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
657 struct sk_buff *skb)
658 {
659 u8 status = *((u8 *) skb->data);
660 struct hci_cp_write_page_scan_activity *sent;
661
662 BT_DBG("%s status 0x%2.2x", hdev->name, status);
663
664 if (status)
665 return;
666
667 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
668 if (!sent)
669 return;
670
671 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
672 hdev->page_scan_window = __le16_to_cpu(sent->window);
673 }
674
675 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
676 struct sk_buff *skb)
677 {
678 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
679
680 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
681
682 if (rp->status)
683 return;
684
685 if (test_bit(HCI_INIT, &hdev->flags))
686 hdev->page_scan_type = rp->type;
687 }
688
689 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
690 struct sk_buff *skb)
691 {
692 u8 status = *((u8 *) skb->data);
693 u8 *type;
694
695 BT_DBG("%s status 0x%2.2x", hdev->name, status);
696
697 if (status)
698 return;
699
700 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
701 if (type)
702 hdev->page_scan_type = *type;
703 }
704
/* Handle Command Complete for HCI_Read_Data_Block_Size: cache the
 * block-based flow control parameters and reset the block counter.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
724
/* Handle Command Complete for HCI_Read_Clock.
 *
 * which == 0x00 requested the local clock; any other value means the
 * reply carries the clock of the connection identified by the handle
 * (per the HCI specification's Which_Clock parameter).
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against a truncated event before touching rp fields */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
759
/* Handle Command Complete for HCI_Read_Local_AMP_Info.
 *
 * Cache the AMP controller parameters on success; in either case the
 * pending A2MP Get Info response is sent.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
784
/* Handle Command Complete for HCI_Read_Local_AMP_ASSOC.
 *
 * The AMP assoc may span several events: while the remaining length
 * exceeds this fragment, accumulate it and request the next fragment.
 * Once complete (or on error) send the A2MP responses.
 */
static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
	struct amp_assoc *assoc = &hdev->loc_assoc;
	size_t rem_len, frag_len;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	frag_len = skb->len - sizeof(*rp);
	rem_len = __le16_to_cpu(rp->rem_len);

	if (rem_len > frag_len) {
		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);

		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
		assoc->offset += frag_len;

		/* Read other fragments */
		amp_read_loc_assoc_frag(hdev, rp->phy_handle);

		return;
	}

	/* Final fragment: finish the assoc and reset the offset */
	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
	assoc->len = assoc->offset + rem_len;
	assoc->offset = 0;

a2mp_rsp:
	/* Send A2MP Rsp when all fragments are received */
	a2mp_send_getampassoc_rsp(hdev, rp->status);
	a2mp_send_create_phy_link_req(hdev, rp->status);
}
821
822 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
823 struct sk_buff *skb)
824 {
825 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
826
827 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
828
829 if (rp->status)
830 return;
831
832 hdev->inq_tx_power = rp->tx_power;
833 }
834
/* Handle Command Complete for HCI_PIN_Code_Request_Reply.
 *
 * mgmt is told about either outcome; on success the PIN length is
 * remembered on the connection (used later for key-type decisions).
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
862
/* Handle Command Complete for HCI_PIN_Code_Request_Negative_Reply:
 * just forward the outcome to mgmt when it is active.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
877
/* Handle Command Complete for HCI_LE_Read_Buffer_Size: cache the LE
 * ACL MTU/packet count and initialise the LE packet counter.
 */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}
895
896 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
897 struct sk_buff *skb)
898 {
899 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
900
901 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
902
903 if (rp->status)
904 return;
905
906 memcpy(hdev->le_features, rp->features, 8);
907 }
908
909 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
910 struct sk_buff *skb)
911 {
912 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
913
914 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
915
916 if (rp->status)
917 return;
918
919 hdev->adv_tx_power = rp->tx_power;
920 }
921
/* Handle Command Complete for HCI_User_Confirmation_Request_Reply:
 * forward the outcome to mgmt when it is active.
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
936
/* Handle Command Complete for the negative variant of
 * HCI_User_Confirmation_Request_Reply: forward the outcome to mgmt.
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
952
/* Handle Command Complete for HCI_User_Passkey_Request_Reply:
 * forward the outcome to mgmt when it is active.
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
967
/* Handle Command Complete for the negative variant of
 * HCI_User_Passkey_Request_Reply: forward the outcome to mgmt.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
983
/* Handle Command Complete for HCI_Read_Local_OOB_Data: hand the P-192
 * hash/randomizer to mgmt (no P-256 values with this command).
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
					  NULL, NULL, rp->status);
	hci_dev_unlock(hdev);
}
996
/* Handle Command Complete for HCI_Read_Local_OOB_Extended_Data: hand
 * both the P-192 and P-256 hash/randomizer pairs to mgmt.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
					  rp->hash256, rp->randomizer256,
					  rp->status);
	hci_dev_unlock(hdev);
}
1010
1011
1012 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1013 {
1014 __u8 status = *((__u8 *) skb->data);
1015 bdaddr_t *sent;
1016
1017 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1018
1019 if (status)
1020 return;
1021
1022 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1023 if (!sent)
1024 return;
1025
1026 hci_dev_lock(hdev);
1027
1028 bacpy(&hdev->random_addr, sent);
1029
1030 hci_dev_unlock(hdev);
1031 }
1032
/* Handle Command Complete for HCI_LE_Set_Advertise_Enable.
 *
 * Track the HCI_LE_ADV flag, and when advertising was enabled while a
 * connection is being initiated as peripheral, arm a timeout so a
 * stalled connect attempt gets cleaned up.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		set_bit(HCI_LE_ADV, &hdev->dev_flags);

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		clear_bit(HCI_LE_ADV, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
1067
1068 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1069 {
1070 struct hci_cp_le_set_scan_param *cp;
1071 __u8 status = *((__u8 *) skb->data);
1072
1073 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1074
1075 if (status)
1076 return;
1077
1078 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1079 if (!cp)
1080 return;
1081
1082 hci_dev_lock(hdev);
1083
1084 hdev->le_scan_type = cp->type;
1085
1086 hci_dev_unlock(hdev);
1087 }
1088
1089 static bool has_pending_adv_report(struct hci_dev *hdev)
1090 {
1091 struct discovery_state *d = &hdev->discovery;
1092
1093 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1094 }
1095
1096 static void clear_pending_adv_report(struct hci_dev *hdev)
1097 {
1098 struct discovery_state *d = &hdev->discovery;
1099
1100 bacpy(&d->last_adv_addr, BDADDR_ANY);
1101 d->last_adv_data_len = 0;
1102 }
1103
1104 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1105 u8 bdaddr_type, s8 rssi, u32 flags,
1106 u8 *data, u8 len)
1107 {
1108 struct discovery_state *d = &hdev->discovery;
1109
1110 bacpy(&d->last_adv_addr, bdaddr);
1111 d->last_adv_addr_type = bdaddr_type;
1112 d->last_adv_rssi = rssi;
1113 d->last_adv_flags = flags;
1114 memcpy(d->last_adv_data, data, len);
1115 d->last_adv_data_len = len;
1116 }
1117
/* Handle Command Complete for HCI_LE_Set_Scan_Enable.
 *
 * Keep the HCI_LE_SCAN flag in sync, flush any pending advertising
 * report on scan stop, and reconcile the discovery state machine with
 * why scanning stopped.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* Active scans buffer adv reports to merge with scan
		 * responses, so start with a clean slate.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1183
1184 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1185 struct sk_buff *skb)
1186 {
1187 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1188
1189 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1190
1191 if (rp->status)
1192 return;
1193
1194 hdev->le_white_list_size = rp->size;
1195 }
1196
1197 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1198 struct sk_buff *skb)
1199 {
1200 __u8 status = *((__u8 *) skb->data);
1201
1202 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1203
1204 if (status)
1205 return;
1206
1207 hci_bdaddr_list_clear(&hdev->le_white_list);
1208 }
1209
1210 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1211 struct sk_buff *skb)
1212 {
1213 struct hci_cp_le_add_to_white_list *sent;
1214 __u8 status = *((__u8 *) skb->data);
1215
1216 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1217
1218 if (status)
1219 return;
1220
1221 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1222 if (!sent)
1223 return;
1224
1225 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1226 sent->bdaddr_type);
1227 }
1228
1229 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1230 struct sk_buff *skb)
1231 {
1232 struct hci_cp_le_del_from_white_list *sent;
1233 __u8 status = *((__u8 *) skb->data);
1234
1235 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1236
1237 if (status)
1238 return;
1239
1240 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1241 if (!sent)
1242 return;
1243
1244 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1245 sent->bdaddr_type);
1246 }
1247
1248 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1249 struct sk_buff *skb)
1250 {
1251 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1252
1253 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1254
1255 if (rp->status)
1256 return;
1257
1258 memcpy(hdev->le_states, rp->le_states, 8);
1259 }
1260
/* Command Complete handler for HCI_Write_LE_Host_Supported.
 *
 * Mirror the host feature values we successfully wrote to the
 * controller into the cached extended features page 1 and the
 * corresponding dev_flags, keeping the stack's view of LE support in
 * sync with the controller.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The event only carries the status; recover the values from the
	 * command we actually sent.
	 */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
		/* Advertising cannot remain enabled once LE is disabled */
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
}
1290
1291 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1292 {
1293 struct hci_cp_le_set_adv_param *cp;
1294 u8 status = *((u8 *) skb->data);
1295
1296 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1297
1298 if (status)
1299 return;
1300
1301 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1302 if (!cp)
1303 return;
1304
1305 hci_dev_lock(hdev);
1306 hdev->adv_addr_type = cp->own_address_type;
1307 hci_dev_unlock(hdev);
1308 }
1309
1310 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1311 struct sk_buff *skb)
1312 {
1313 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1314
1315 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1316 hdev->name, rp->status, rp->phy_handle);
1317
1318 if (rp->status)
1319 return;
1320
1321 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1322 }
1323
/* Command Complete handler for HCI_Read_RSSI.
 *
 * Cache the reported RSSI on the connection identified by the handle
 * in the return parameters.
 */
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}
1342
/* Command Complete handler for HCI_Read_Transmit_Power_Level.
 *
 * The reply carries only the handle and the power level; which level
 * was requested (current vs. maximum) must be recovered from the
 * parameters of the command we sent.
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:	/* current transmit power level */
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:	/* maximum transmit power level */
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1376
1377 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1378 {
1379 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1380
1381 if (status) {
1382 hci_conn_check_pending(hdev);
1383 return;
1384 }
1385
1386 set_bit(HCI_INQUIRY, &hdev->flags);
1387 }
1388
/* Command Status handler for HCI_Create_Connection.
 *
 * On failure, tear down (or retry) the pending connection object; on
 * success, make sure a connection object exists so the subsequent
 * Connection Complete event has something to attach to.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c (Command Disallowed) is retried for up
			 * to two attempts by parking the connection in
			 * BT_CONNECT2; any other failure closes it down.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1426
1427 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1428 {
1429 struct hci_cp_add_sco *cp;
1430 struct hci_conn *acl, *sco;
1431 __u16 handle;
1432
1433 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1434
1435 if (!status)
1436 return;
1437
1438 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1439 if (!cp)
1440 return;
1441
1442 handle = __le16_to_cpu(cp->handle);
1443
1444 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1445
1446 hci_dev_lock(hdev);
1447
1448 acl = hci_conn_hash_lookup_handle(hdev, handle);
1449 if (acl) {
1450 sco = acl->link;
1451 if (sco) {
1452 sco->state = BT_CLOSED;
1453
1454 hci_proto_connect_cfm(sco, status);
1455 hci_conn_del(sco);
1456 }
1457 }
1458
1459 hci_dev_unlock(hdev);
1460 }
1461
1462 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1463 {
1464 struct hci_cp_auth_requested *cp;
1465 struct hci_conn *conn;
1466
1467 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1468
1469 if (!status)
1470 return;
1471
1472 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1473 if (!cp)
1474 return;
1475
1476 hci_dev_lock(hdev);
1477
1478 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1479 if (conn) {
1480 if (conn->state == BT_CONFIG) {
1481 hci_proto_connect_cfm(conn, status);
1482 hci_conn_drop(conn);
1483 }
1484 }
1485
1486 hci_dev_unlock(hdev);
1487 }
1488
1489 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1490 {
1491 struct hci_cp_set_conn_encrypt *cp;
1492 struct hci_conn *conn;
1493
1494 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1495
1496 if (!status)
1497 return;
1498
1499 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1500 if (!cp)
1501 return;
1502
1503 hci_dev_lock(hdev);
1504
1505 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1506 if (conn) {
1507 if (conn->state == BT_CONFIG) {
1508 hci_proto_connect_cfm(conn, status);
1509 hci_conn_drop(conn);
1510 }
1511 }
1512
1513 hci_dev_unlock(hdev);
1514 }
1515
1516 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1517 struct hci_conn *conn)
1518 {
1519 if (conn->state != BT_CONFIG || !conn->out)
1520 return 0;
1521
1522 if (conn->pending_sec_level == BT_SECURITY_SDP)
1523 return 0;
1524
1525 /* Only request authentication for SSP connections or non-SSP
1526 * devices with sec_level MEDIUM or HIGH or if MITM protection
1527 * is requested.
1528 */
1529 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1530 conn->pending_sec_level != BT_SECURITY_FIPS &&
1531 conn->pending_sec_level != BT_SECURITY_HIGH &&
1532 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1533 return 0;
1534
1535 return 1;
1536 }
1537
/* Send a Remote Name Request for the given inquiry cache entry.
 *
 * The page scan parameters cached during inquiry are passed along so
 * the controller can page the remote device faster.
 *
 * Returns the result of hci_send_cmd() (0 on success, negative errno).
 */
static int hci_resolve_name(struct hci_dev *hdev,
				   struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	/* memset (rather than designated init) also clears padding, since
	 * the struct goes out on the wire.
	 */
	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
1552
1553 static bool hci_resolve_next_name(struct hci_dev *hdev)
1554 {
1555 struct discovery_state *discov = &hdev->discovery;
1556 struct inquiry_entry *e;
1557
1558 if (list_empty(&discov->resolve))
1559 return false;
1560
1561 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1562 if (!e)
1563 return false;
1564
1565 if (hci_resolve_name(hdev, e) == 0) {
1566 e->name_state = NAME_PENDING;
1567 return true;
1568 }
1569
1570 return false;
1571 }
1572
/* Deliver a resolved (or failed) remote name to mgmt and advance the
 * name resolution phase of an ongoing discovery.
 *
 * @conn may be NULL; when present and not yet reported, the device is
 * first announced to mgmt as connected. @name is NULL when resolution
 * failed.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* Discovery is being stopped: finish up without resolving more */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names pending: keep discovery in the resolving state */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1615
/* Command Status handler for HCI_Remote_Name_Request.
 *
 * A failed request is treated like a completed one: update any pending
 * discovery state and, for outgoing connections that still require it,
 * start authentication immediately instead of waiting for a name.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		/* Record that the local side initiated authentication */
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1658
1659 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1660 {
1661 struct hci_cp_read_remote_features *cp;
1662 struct hci_conn *conn;
1663
1664 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1665
1666 if (!status)
1667 return;
1668
1669 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1670 if (!cp)
1671 return;
1672
1673 hci_dev_lock(hdev);
1674
1675 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1676 if (conn) {
1677 if (conn->state == BT_CONFIG) {
1678 hci_proto_connect_cfm(conn, status);
1679 hci_conn_drop(conn);
1680 }
1681 }
1682
1683 hci_dev_unlock(hdev);
1684 }
1685
1686 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1687 {
1688 struct hci_cp_read_remote_ext_features *cp;
1689 struct hci_conn *conn;
1690
1691 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1692
1693 if (!status)
1694 return;
1695
1696 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1697 if (!cp)
1698 return;
1699
1700 hci_dev_lock(hdev);
1701
1702 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1703 if (conn) {
1704 if (conn->state == BT_CONFIG) {
1705 hci_proto_connect_cfm(conn, status);
1706 hci_conn_drop(conn);
1707 }
1708 }
1709
1710 hci_dev_unlock(hdev);
1711 }
1712
1713 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1714 {
1715 struct hci_cp_setup_sync_conn *cp;
1716 struct hci_conn *acl, *sco;
1717 __u16 handle;
1718
1719 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1720
1721 if (!status)
1722 return;
1723
1724 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1725 if (!cp)
1726 return;
1727
1728 handle = __le16_to_cpu(cp->handle);
1729
1730 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1731
1732 hci_dev_lock(hdev);
1733
1734 acl = hci_conn_hash_lookup_handle(hdev, handle);
1735 if (acl) {
1736 sco = acl->link;
1737 if (sco) {
1738 sco->state = BT_CLOSED;
1739
1740 hci_proto_connect_cfm(sco, status);
1741 hci_conn_del(sco);
1742 }
1743 }
1744
1745 hci_dev_unlock(hdev);
1746 }
1747
/* Command Status handler for HCI_Sniff_Mode.
 *
 * On failure, clear the pending mode-change flag and complete any SCO
 * setup that was deferred behind the mode change.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1774
/* Command Status handler for HCI_Exit_Sniff_Mode.
 *
 * On failure, clear the pending mode-change flag and complete any SCO
 * setup that was deferred behind the mode change.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1801
1802 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1803 {
1804 struct hci_cp_disconnect *cp;
1805 struct hci_conn *conn;
1806
1807 if (!status)
1808 return;
1809
1810 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1811 if (!cp)
1812 return;
1813
1814 hci_dev_lock(hdev);
1815
1816 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1817 if (conn)
1818 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1819 conn->dst_type, status);
1820
1821 hci_dev_unlock(hdev);
1822 }
1823
1824 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1825 {
1826 struct hci_cp_create_phy_link *cp;
1827
1828 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1829
1830 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1831 if (!cp)
1832 return;
1833
1834 hci_dev_lock(hdev);
1835
1836 if (status) {
1837 struct hci_conn *hcon;
1838
1839 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1840 if (hcon)
1841 hci_conn_del(hcon);
1842 } else {
1843 amp_write_remote_assoc(hdev, cp->phy_handle);
1844 }
1845
1846 hci_dev_unlock(hdev);
1847 }
1848
1849 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1850 {
1851 struct hci_cp_accept_phy_link *cp;
1852
1853 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1854
1855 if (status)
1856 return;
1857
1858 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1859 if (!cp)
1860 return;
1861
1862 amp_write_remote_assoc(hdev, cp->phy_handle);
1863 }
1864
/* Command Status handler for HCI_LE_Create_Connection.
 *
 * On a successful command status, record the initiator/responder
 * address information needed by SMP and arm a connection timeout for
 * directed (non-white-list) connection attempts.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1915
/* Command Status handler for HCI_LE_Start_Encryption.
 *
 * Only failures are handled here; a successful status means the
 * Encryption Change event will follow. If encryption could not even
 * be started, the link can no longer be trusted, so disconnect it.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
1945
/* Handle the HCI Inquiry Complete event.
 *
 * Clears the inquiry state, wakes any waiters, and — when mgmt is in
 * use — transitions discovery to name resolution if there are entries
 * whose names still need resolving, or to stopped otherwise.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving the first name that is still needed */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1986
/* Handle the HCI Inquiry Result event.
 *
 * Each response updates the inquiry cache and is forwarded to mgmt as
 * a found device. Results from periodic inquiry are ignored.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		/* Basic inquiry results carry no RSSI or SSP information */
		data.rssi = 0x00;
		data.ssp_mode = 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, 0, flags, NULL, 0, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
2023
/* Handle the HCI Connection Complete event.
 *
 * On success, moves the connection into BT_CONFIG (ACL) or
 * BT_CONNECTED (SCO/eSCO), mirrors the adapter's auth/encrypt policy
 * onto the link and kicks off remote feature discovery. On failure,
 * notifies mgmt and tears the connection object down.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		/* An eSCO request may have been downgraded to SCO by the
		 * controller; retry the lookup under the other link type.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy connections without a stored link
			 * key are expected to pair; give them the longer
			 * pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2105
/* Reject an incoming connection request from @bdaddr with reason
 * "Connection Rejected due to Unacceptable BD_ADDR".
 */
static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct hci_cp_reject_conn_req cp;

	bacpy(&cp.bdaddr, bdaddr);
	cp.reason = HCI_ERROR_REJ_BAD_ADDR;
	hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
}
2114
/* Handle the HCI Connection Request event.
 *
 * The request is rejected when no protocol accepts it, when the peer
 * is blacklisted, or when the adapter is not connectable and the peer
 * is not whitelisted. Otherwise a connection object is (re)used and
 * the request is either accepted immediately or deferred to the
 * protocol layer (HCI_PROTO_DEFER).
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class from the event */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		/* Default eSCO parameters: 8 kB/s in both directions */
		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Defer the accept decision to the protocol layer */
		conn->state = BT_CONNECT2;
		hci_proto_connect_cfm(conn, 0);
	}
}
2204
2205 static u8 hci_to_mgmt_reason(u8 err)
2206 {
2207 switch (err) {
2208 case HCI_ERROR_CONNECTION_TIMEOUT:
2209 return MGMT_DEV_DISCONN_TIMEOUT;
2210 case HCI_ERROR_REMOTE_USER_TERM:
2211 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2212 case HCI_ERROR_REMOTE_POWER_OFF:
2213 return MGMT_DEV_DISCONN_REMOTE;
2214 case HCI_ERROR_LOCAL_HOST_TERM:
2215 return MGMT_DEV_DISCONN_LOCAL_HOST;
2216 default:
2217 return MGMT_DEV_DISCONN_UNKNOWN;
2218 }
2219 }
2220
/* Handle the HCI Disconnection Complete event.
 *
 * Notifies mgmt, optionally removes the link key, re-queues auto
 * connect parameters, tears the connection object down and re-enables
 * advertising for LE links if needed.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report the disconnect to mgmt if it ever saw the connect */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK &&
	    test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	/* Re-arm auto-connection for devices configured for it */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2294
/* Handle the HCI Authentication Complete event.
 *
 * Updates the connection's auth state, notifies mgmt on failure and
 * continues with encryption setup when that was pending or when the
 * connection is an SSP link still in the config phase.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy devices report success on re-auth without actually
		 * re-authenticating; don't raise the security level then.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP links must be encrypted before use */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2360
/* Handle the HCI Remote Name Request Complete event.
 *
 * Feeds the resolved (or failed) name into discovery bookkeeping and,
 * for outgoing connections that still require it, starts
 * authentication now that the name request is out of the way.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		/* Record that the local side initiated authentication */
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2402
/* Handle the HCI Encryption Change event.
 *
 * Updates the connection's auth/encrypt/FIPS/AES-CCM flags, enforces
 * Secure Connections Only policy during connection setup, and informs
 * the protocol layers of the result.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2468
2469 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2470 struct sk_buff *skb)
2471 {
2472 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2473 struct hci_conn *conn;
2474
2475 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2476
2477 hci_dev_lock(hdev);
2478
2479 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2480 if (conn) {
2481 if (!ev->status)
2482 set_bit(HCI_CONN_SECURE, &conn->flags);
2483
2484 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2485
2486 hci_key_change_cfm(conn, ev->status);
2487 }
2488
2489 hci_dev_unlock(hdev);
2490 }
2491
/* Read Remote Supported Features Complete event.
 *
 * Stores feature page 0 and, while the link is still in BT_CONFIG,
 * continues connection setup: fetch extended features when both sides
 * support SSP, otherwise request the remote name or notify mgmt, and
 * finally complete the connection unless outgoing authentication is
 * still required.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* Only drive setup while the connection is being configured */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both sides SSP capable: page 0x01 carries the host features */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2541
/* Command Complete event.
 *
 * Dispatches the per-opcode completion handler, then performs common
 * bookkeeping: cancel the command timeout, complete any pending HCI
 * request, and restart the command queue when the controller granted
 * new command credits (ev->ncmd).
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters is the command status */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command completed, so the timeout watchdog can be stopped */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* Controller granted command credits: resume the command queue */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2821
/* Command Status event.
 *
 * Dispatches the per-opcode status handler for commands that complete
 * asynchronously, then performs the same bookkeeping as Command
 * Complete: cancel the command timeout, finish the pending request when
 * appropriate, and restart the command queue on new credits.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete the request now on failure, or when the sent command
	 * is not waiting for a further event to finish it.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* Controller granted command credits: resume the command queue */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2914
2915 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2916 {
2917 struct hci_ev_role_change *ev = (void *) skb->data;
2918 struct hci_conn *conn;
2919
2920 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2921
2922 hci_dev_lock(hdev);
2923
2924 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2925 if (conn) {
2926 if (!ev->status)
2927 conn->role = ev->role;
2928
2929 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2930
2931 hci_role_switch_cfm(conn, ev->status, ev->role);
2932 }
2933
2934 hci_dev_unlock(hdev);
2935 }
2936
/* Number Of Completed Packets event (packet-based flow control).
 *
 * Returns transmit credits for each listed connection handle: decrement
 * the connection's in-flight count and refill the per-link-type quota
 * (clamped to the controller's advertised maximum), then kick the TX
 * worker so queued traffic can be sent.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the handle array really fits in the packet */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL quota (le_pkts == 0).
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3002
3003 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3004 __u16 handle)
3005 {
3006 struct hci_chan *chan;
3007
3008 switch (hdev->dev_type) {
3009 case HCI_BREDR:
3010 return hci_conn_hash_lookup_handle(hdev, handle);
3011 case HCI_AMP:
3012 chan = hci_chan_lookup_handle(hdev, handle);
3013 if (chan)
3014 return chan->conn;
3015 break;
3016 default:
3017 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3018 break;
3019 }
3020
3021 return NULL;
3022 }
3023
/* Number Of Completed Data Blocks event (block-based flow control).
 *
 * Block-based counterpart of hci_num_comp_pkts_evt: returns data-block
 * credits per handle, refills the shared block pool (clamped to the
 * controller maximum) and kicks the TX worker.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the handle array really fits in the packet */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle may name an AMP logical channel, not a BR/EDR link */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3073
/* Mode Change event (active/sniff/hold).
 *
 * Stores the new link mode.  For mode changes the host did not request
 * itself (HCI_CONN_MODE_CHANGE_PEND not set) the power-save flag is
 * synced to the mode; a pending SCO setup deferred until the link was
 * active is resumed afterwards.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		/* Remote-initiated change: track power-save state */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
3101
3102 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3103 {
3104 struct hci_ev_pin_code_req *ev = (void *) skb->data;
3105 struct hci_conn *conn;
3106
3107 BT_DBG("%s", hdev->name);
3108
3109 hci_dev_lock(hdev);
3110
3111 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3112 if (!conn)
3113 goto unlock;
3114
3115 if (conn->state == BT_CONNECTED) {
3116 hci_conn_hold(conn);
3117 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
3118 hci_conn_drop(conn);
3119 }
3120
3121 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
3122 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
3123 sizeof(ev->bdaddr), &ev->bdaddr);
3124 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
3125 u8 secure;
3126
3127 if (conn->pending_sec_level == BT_SECURITY_HIGH)
3128 secure = 1;
3129 else
3130 secure = 0;
3131
3132 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
3133 }
3134
3135 unlock:
3136 hci_dev_unlock(hdev);
3137 }
3138
3139 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3140 {
3141 struct hci_ev_link_key_req *ev = (void *) skb->data;
3142 struct hci_cp_link_key_reply cp;
3143 struct hci_conn *conn;
3144 struct link_key *key;
3145
3146 BT_DBG("%s", hdev->name);
3147
3148 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3149 return;
3150
3151 hci_dev_lock(hdev);
3152
3153 key = hci_find_link_key(hdev, &ev->bdaddr);
3154 if (!key) {
3155 BT_DBG("%s link key not found for %pMR", hdev->name,
3156 &ev->bdaddr);
3157 goto not_found;
3158 }
3159
3160 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
3161 &ev->bdaddr);
3162
3163 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3164 if (conn) {
3165 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
3166 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
3167 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
3168 BT_DBG("%s ignoring unauthenticated key", hdev->name);
3169 goto not_found;
3170 }
3171
3172 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
3173 (conn->pending_sec_level == BT_SECURITY_HIGH ||
3174 conn->pending_sec_level == BT_SECURITY_FIPS)) {
3175 BT_DBG("%s ignoring key unauthenticated for high security",
3176 hdev->name);
3177 goto not_found;
3178 }
3179
3180 conn->key_type = key->type;
3181 conn->pin_length = key->pin_len;
3182 }
3183
3184 bacpy(&cp.bdaddr, &ev->bdaddr);
3185 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
3186
3187 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
3188
3189 hci_dev_unlock(hdev);
3190
3191 return;
3192
3193 not_found:
3194 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
3195 hci_dev_unlock(hdev);
3196 }
3197
/* Link Key Notification event.
 *
 * A new link key was created during pairing.  Store it (when mgmt is
 * active), notify user space, and decide its lifetime: debug keys are
 * discarded unless HCI_KEEP_DEBUG_KEYS is set, and non-persistent keys
 * are flagged for flushing when the connection goes down.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed-combination key keeps the original key type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del(&key->list);
		kfree(key);
	} else if (conn) {
		if (persistent)
			clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
		else
			set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	}

unlock:
	hci_dev_unlock(hdev);
}
3251
3252 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3253 {
3254 struct hci_ev_clock_offset *ev = (void *) skb->data;
3255 struct hci_conn *conn;
3256
3257 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3258
3259 hci_dev_lock(hdev);
3260
3261 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3262 if (conn && !ev->status) {
3263 struct inquiry_entry *ie;
3264
3265 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3266 if (ie) {
3267 ie->data.clock_offset = ev->clock_offset;
3268 ie->timestamp = jiffies;
3269 }
3270 }
3271
3272 hci_dev_unlock(hdev);
3273 }
3274
3275 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3276 {
3277 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3278 struct hci_conn *conn;
3279
3280 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3281
3282 hci_dev_lock(hdev);
3283
3284 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3285 if (conn && !ev->status)
3286 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3287
3288 hci_dev_unlock(hdev);
3289 }
3290
3291 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3292 {
3293 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3294 struct inquiry_entry *ie;
3295
3296 BT_DBG("%s", hdev->name);
3297
3298 hci_dev_lock(hdev);
3299
3300 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3301 if (ie) {
3302 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3303 ie->timestamp = jiffies;
3304 }
3305
3306 hci_dev_unlock(hdev);
3307 }
3308
/* Inquiry Result with RSSI event.
 *
 * Two wire formats exist for this event: one with an extra pscan_mode
 * field and one without.  The per-response size of the payload is used
 * to tell them apart; each response is folded into the inquiry cache
 * and reported to mgmt as a found device.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results of a periodic inquiry are not reported */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Per-response size decides which struct layout was sent */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3372
/* Read Remote Extended Features Complete event.
 *
 * Stores the requested feature page.  Page 0x01 carries the remote
 * host's feature bits, which decide whether SSP and Secure Connections
 * are usable on this link.  While in BT_CONFIG, setup then continues
 * with a remote name request or mgmt notification and completes the
 * connection unless outgoing authentication is still required.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* Only drive setup while the connection is being configured */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3438
/* Synchronous Connection Complete event (SCO/eSCO).
 *
 * On success records the handle and registers the connection in sysfs.
 * For a set of known rejection/unsupported status codes an outgoing
 * attempt is retried with a fallback packet-type mask; any other error
 * closes the connection.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* An eSCO request may have been downgraded to SCO by the
		 * controller, so also try the pending eSCO entry.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		if (conn->out) {
			/* Retry with a reduced packet-type mask */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3496
3497 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3498 {
3499 size_t parsed = 0;
3500
3501 while (parsed < eir_len) {
3502 u8 field_len = eir[0];
3503
3504 if (field_len == 0)
3505 return parsed;
3506
3507 parsed += field_len + 1;
3508 eir += field_len + 1;
3509 }
3510
3511 return eir_len;
3512 }
3513
/* Extended Inquiry Result event.
 *
 * Each response carries an EIR data blob in addition to the usual
 * inquiry fields.  Responses are folded into the inquiry cache (the
 * name is considered known when the EIR contains a complete name) and
 * reported to mgmt together with the significant EIR bytes.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results of a periodic inquiry are not reported */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		data.ssp_mode		= 0x01;

		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		/* Only pass the significant part of the EIR blob along */
		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
3563
/* Encryption Key Refresh Complete event.
 *
 * For LE links this finishes the security procedure: on success the
 * pending security level takes effect; on failure an established link
 * is disconnected.  Connections still in BT_CONFIG are completed,
 * established ones notify authentication waiters instead.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Failure on an established link: tear the link down */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3613
3614 static u8 hci_get_auth_req(struct hci_conn *conn)
3615 {
3616 /* If remote requests no-bonding follow that lead */
3617 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3618 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3619 return conn->remote_auth | (conn->auth_type & 0x01);
3620
3621 /* If both remote and local have enough IO capabilities, require
3622 * MITM protection
3623 */
3624 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3625 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3626 return conn->remote_auth | 0x01;
3627
3628 /* No MITM protection possible so ignore remote requirement */
3629 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3630 }
3631
/* IO Capability Request event.
 *
 * The controller asks for our IO capabilities during Secure Simple
 * Pairing.  Reply with our capability and authentication requirements,
 * or send a negative reply when pairing is not allowed.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Hold the connection while pairing is in progress; dropped when
	 * the simple pairing complete event arrives.
	 */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Allow pairing when we're pairable or when the remote only asks
	 * for no-bonding (masking out the MITM bit).
	 */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;

			cp.authentication = conn->auth_type;
		} else {
			/* As acceptor, derive our requirements from the
			 * remote's already-known requirements.
			 */
			conn->auth_type = hci_get_auth_req(conn);
			cp.authentication = conn->auth_type;
		}

		/* Advertise OOB data only when it can actually be used:
		 * either we initiated the connection or the remote side
		 * also signalled OOB data.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3696
3697 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3698 {
3699 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3700 struct hci_conn *conn;
3701
3702 BT_DBG("%s", hdev->name);
3703
3704 hci_dev_lock(hdev);
3705
3706 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3707 if (!conn)
3708 goto unlock;
3709
3710 conn->remote_cap = ev->capability;
3711 conn->remote_auth = ev->authentication;
3712 if (ev->oob_data)
3713 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3714
3715 unlock:
3716 hci_dev_unlock(hdev);
3717 }
3718
/* User Confirmation Request event.
 *
 * Decide whether the numeric comparison value of SSP can be accepted
 * automatically or must be confirmed by the user.  Auto-accept is only
 * possible when neither side effectively requires MITM protection.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirements is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 *
		 * NOTE(review): HCI_CONN_AUTH_PEND is used here as a
		 * proxy for "we initiated the authentication" — confirm
		 * it accurately reflects the initiator in all flows.
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		if (hdev->auto_accept_delay > 0) {
			/* Deferred accept gives user space a window to
			 * cancel the pairing.
			 */
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3793
3794 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3795 struct sk_buff *skb)
3796 {
3797 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3798
3799 BT_DBG("%s", hdev->name);
3800
3801 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3802 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3803 }
3804
3805 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3806 struct sk_buff *skb)
3807 {
3808 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3809 struct hci_conn *conn;
3810
3811 BT_DBG("%s", hdev->name);
3812
3813 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3814 if (!conn)
3815 return;
3816
3817 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3818 conn->passkey_entered = 0;
3819
3820 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3821 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3822 conn->dst_type, conn->passkey_notify,
3823 conn->passkey_entered);
3824 }
3825
3826 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3827 {
3828 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3829 struct hci_conn *conn;
3830
3831 BT_DBG("%s", hdev->name);
3832
3833 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3834 if (!conn)
3835 return;
3836
3837 switch (ev->type) {
3838 case HCI_KEYPRESS_STARTED:
3839 conn->passkey_entered = 0;
3840 return;
3841
3842 case HCI_KEYPRESS_ENTERED:
3843 conn->passkey_entered++;
3844 break;
3845
3846 case HCI_KEYPRESS_ERASED:
3847 conn->passkey_entered--;
3848 break;
3849
3850 case HCI_KEYPRESS_CLEARED:
3851 conn->passkey_entered = 0;
3852 break;
3853
3854 case HCI_KEYPRESS_COMPLETED:
3855 return;
3856 }
3857
3858 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3859 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3860 conn->dst_type, conn->passkey_notify,
3861 conn->passkey_entered);
3862 }
3863
/* Simple Pairing Complete event.
 *
 * Report pairing failures to user space unless an auth_complete event
 * will already do so, and release the reference taken when the IO
 * capability request arrived.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	/* Balances the hci_conn_hold() done in hci_io_capa_request_evt() */
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
3895
3896 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3897 struct sk_buff *skb)
3898 {
3899 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3900 struct inquiry_entry *ie;
3901 struct hci_conn *conn;
3902
3903 BT_DBG("%s", hdev->name);
3904
3905 hci_dev_lock(hdev);
3906
3907 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3908 if (conn)
3909 memcpy(conn->features[1], ev->features, 8);
3910
3911 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3912 if (ie)
3913 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3914
3915 hci_dev_unlock(hdev);
3916 }
3917
/* Remote OOB Data Request event.
 *
 * The controller asks for the out-of-band pairing data of the remote
 * device.  Reply with the stored hash/randomizer values (the extended
 * P-192 + P-256 variant when Secure Connections is enabled), or send a
 * negative reply when no OOB data is known for this address.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
			/* Secure Connections: include both P-192 and
			 * P-256 values in the reply.
			 */
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.randomizer192, data->randomizer192,
			       sizeof(cp.randomizer192));
			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
			memcpy(cp.randomizer256, data->randomizer256,
			       sizeof(cp.randomizer256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			/* Legacy SSP: only the P-192 values are used */
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.randomizer, data->randomizer192,
			       sizeof(cp.randomizer));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3968
3969 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3970 struct sk_buff *skb)
3971 {
3972 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3973 struct hci_conn *hcon, *bredr_hcon;
3974
3975 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3976 ev->status);
3977
3978 hci_dev_lock(hdev);
3979
3980 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3981 if (!hcon) {
3982 hci_dev_unlock(hdev);
3983 return;
3984 }
3985
3986 if (ev->status) {
3987 hci_conn_del(hcon);
3988 hci_dev_unlock(hdev);
3989 return;
3990 }
3991
3992 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3993
3994 hcon->state = BT_CONNECTED;
3995 bacpy(&hcon->dst, &bredr_hcon->dst);
3996
3997 hci_conn_hold(hcon);
3998 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3999 hci_conn_drop(hcon);
4000
4001 hci_conn_add_sysfs(hcon);
4002
4003 amp_physical_cfm(bredr_hcon, hcon);
4004
4005 hci_dev_unlock(hdev);
4006 }
4007
/* Logical Link Complete event (AMP).
 *
 * Create an hci_chan for the newly established AMP logical link and,
 * if an L2CAP channel is waiting on the AMP manager, confirm the
 * logical link to L2CAP.
 *
 * NOTE(review): unlike most event handlers this one runs without
 * taking hdev->lock — confirm the connection-hash lookup is safe in
 * this context.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP links use the controller's block-based MTU */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4045
4046 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4047 struct sk_buff *skb)
4048 {
4049 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4050 struct hci_chan *hchan;
4051
4052 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4053 le16_to_cpu(ev->handle), ev->status);
4054
4055 if (ev->status)
4056 return;
4057
4058 hci_dev_lock(hdev);
4059
4060 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4061 if (!hchan)
4062 goto unlock;
4063
4064 amp_destroy_logical_link(hchan, ev->reason);
4065
4066 unlock:
4067 hci_dev_unlock(hdev);
4068 }
4069
4070 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4071 struct sk_buff *skb)
4072 {
4073 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4074 struct hci_conn *hcon;
4075
4076 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4077
4078 if (ev->status)
4079 return;
4080
4081 hci_dev_lock(hdev);
4082
4083 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4084 if (hcon) {
4085 hcon->state = BT_CLOSED;
4086 hci_conn_del(hcon);
4087 }
4088
4089 hci_dev_unlock(hdev);
4090 }
4091
/* LE Connection Complete event.
 *
 * Create or complete the hci_conn for a new LE link, record the
 * initiator/responder addresses, resolve the peer's identity address
 * via its IRK when possible, and notify mgmt and the L2CAP layer.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* Connection attempt finished: stop its timeout timer */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The device is connected now, so remove it from the pending
	 * auto-connect action list.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params)
		list_del_init(&params->action);

unlock:
	/* Re-evaluate passive scanning now that the attempt is over */
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4220
4221 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4222 struct sk_buff *skb)
4223 {
4224 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4225 struct hci_conn *conn;
4226
4227 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4228
4229 if (ev->status)
4230 return;
4231
4232 hci_dev_lock(hdev);
4233
4234 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4235 if (conn) {
4236 conn->le_conn_interval = le16_to_cpu(ev->interval);
4237 conn->le_conn_latency = le16_to_cpu(ev->latency);
4238 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4239 }
4240
4241 hci_dev_unlock(hdev);
4242 }
4243
/* Decide whether an incoming advertising event should trigger an
 * outgoing LE connection attempt (auto-connection), and initiate it
 * when appropriate.
 *
 * This function requires the caller holds hdev->lock.
 */
static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
				  u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return;

	/* If we're connectable, always connect any ADV_DIRECT_IND event */
	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    adv_type == LE_ADV_DIRECT_IND)
		goto connect;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	if (!hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, addr_type))
		return;

connect:
	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn))
		return;

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
	}
}
4293
/* Process a single LE advertising report.
 *
 * Resolves resolvable private addresses to identity addresses, checks
 * whether the report should trigger a pending auto-connection, and
 * implements the merging of ADV_IND/ADV_SCAN_IND reports with their
 * subsequent SCAN_RSP before forwarding device-found events to mgmt.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	bool match;
	u32 flags;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device */
	check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4417
4418 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4419 {
4420 u8 num_reports = skb->data[0];
4421 void *ptr = &skb->data[1];
4422
4423 hci_dev_lock(hdev);
4424
4425 while (num_reports--) {
4426 struct hci_ev_le_advertising_info *ev = ptr;
4427 s8 rssi;
4428
4429 rssi = ev->data[ev->length];
4430 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4431 ev->bdaddr_type, rssi, ev->data, ev->length);
4432
4433 ptr += sizeof(*ev) + ev->length + 1;
4434 }
4435
4436 hci_dev_unlock(hdev);
4437 }
4438
/* LE Long Term Key Request event.
 *
 * The controller needs a key to start encryption.  Look up the LTK
 * matching the EDiv/Rand pair and our role; reply with the key or send
 * a negative reply when none is stored.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->role);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* The security level reached once encryption completes depends
	 * on whether the key is authenticated (MITM protected).
	 */
	if (ltk->authenticated)
		conn->pending_sec_level = BT_SECURITY_HIGH;
	else
		conn->pending_sec_level = BT_SECURITY_MEDIUM;

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		/* STKs are single use: discard after handing it over */
		list_del(&ltk->list);
		kfree(ltk);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4494
4495 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4496 u8 reason)
4497 {
4498 struct hci_cp_le_conn_param_req_neg_reply cp;
4499
4500 cp.handle = cpu_to_le16(handle);
4501 cp.reason = reason;
4502
4503 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4504 &cp);
4505 }
4506
/* LE Remote Connection Parameter Request event.
 *
 * Validate the parameters proposed by the remote device, remember them
 * (and tell user space) when we are master, then accept the request.
 * Invalid parameters or an unknown handle get a negative reply.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	if (hcon->role == HCI_ROLE_MASTER) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		/* Update any stored parameters for this device and tell
		 * user space whether the new values should be persisted.
		 */
		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else{
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept the request with the remote's proposed values */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
4564
/* LE Meta event: strip the meta header and dispatch to the handler
 * for the specific LE subevent.  Unknown subevents are ignored.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	default:
		break;
	}
}
4596
4597 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4598 {
4599 struct hci_ev_channel_selected *ev = (void *) skb->data;
4600 struct hci_conn *hcon;
4601
4602 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4603
4604 skb_pull(skb, sizeof(*ev));
4605
4606 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4607 if (!hcon)
4608 return;
4609
4610 amp_read_loc_assoc_final_data(hdev, hcon);
4611 }
4612
4613 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4614 {
4615 struct hci_event_hdr *hdr = (void *) skb->data;
4616 __u8 event = hdr->evt;
4617
4618 hci_dev_lock(hdev);
4619
4620 /* Received events are (currently) only needed when a request is
4621 * ongoing so avoid unnecessary memory allocation.
4622 */
4623 if (hci_req_pending(hdev)) {
4624 kfree_skb(hdev->recv_evt);
4625 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
4626 }
4627
4628 hci_dev_unlock(hdev);
4629
4630 skb_pull(skb, HCI_EVENT_HDR_SIZE);
4631
4632 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
4633 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
4634 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
4635
4636 hci_req_cmd_complete(hdev, opcode, 0);
4637 }
4638
4639 switch (event) {
4640 case HCI_EV_INQUIRY_COMPLETE:
4641 hci_inquiry_complete_evt(hdev, skb);
4642 break;
4643
4644 case HCI_EV_INQUIRY_RESULT:
4645 hci_inquiry_result_evt(hdev, skb);
4646 break;
4647
4648 case HCI_EV_CONN_COMPLETE:
4649 hci_conn_complete_evt(hdev, skb);
4650 break;
4651
4652 case HCI_EV_CONN_REQUEST:
4653 hci_conn_request_evt(hdev, skb);
4654 break;
4655
4656 case HCI_EV_DISCONN_COMPLETE:
4657 hci_disconn_complete_evt(hdev, skb);
4658 break;
4659
4660 case HCI_EV_AUTH_COMPLETE:
4661 hci_auth_complete_evt(hdev, skb);
4662 break;
4663
4664 case HCI_EV_REMOTE_NAME:
4665 hci_remote_name_evt(hdev, skb);
4666 break;
4667
4668 case HCI_EV_ENCRYPT_CHANGE:
4669 hci_encrypt_change_evt(hdev, skb);
4670 break;
4671
4672 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4673 hci_change_link_key_complete_evt(hdev, skb);
4674 break;
4675
4676 case HCI_EV_REMOTE_FEATURES:
4677 hci_remote_features_evt(hdev, skb);
4678 break;
4679
4680 case HCI_EV_CMD_COMPLETE:
4681 hci_cmd_complete_evt(hdev, skb);
4682 break;
4683
4684 case HCI_EV_CMD_STATUS:
4685 hci_cmd_status_evt(hdev, skb);
4686 break;
4687
4688 case HCI_EV_ROLE_CHANGE:
4689 hci_role_change_evt(hdev, skb);
4690 break;
4691
4692 case HCI_EV_NUM_COMP_PKTS:
4693 hci_num_comp_pkts_evt(hdev, skb);
4694 break;
4695
4696 case HCI_EV_MODE_CHANGE:
4697 hci_mode_change_evt(hdev, skb);
4698 break;
4699
4700 case HCI_EV_PIN_CODE_REQ:
4701 hci_pin_code_request_evt(hdev, skb);
4702 break;
4703
4704 case HCI_EV_LINK_KEY_REQ:
4705 hci_link_key_request_evt(hdev, skb);
4706 break;
4707
4708 case HCI_EV_LINK_KEY_NOTIFY:
4709 hci_link_key_notify_evt(hdev, skb);
4710 break;
4711
4712 case HCI_EV_CLOCK_OFFSET:
4713 hci_clock_offset_evt(hdev, skb);
4714 break;
4715
4716 case HCI_EV_PKT_TYPE_CHANGE:
4717 hci_pkt_type_change_evt(hdev, skb);
4718 break;
4719
4720 case HCI_EV_PSCAN_REP_MODE:
4721 hci_pscan_rep_mode_evt(hdev, skb);
4722 break;
4723
4724 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
4725 hci_inquiry_result_with_rssi_evt(hdev, skb);
4726 break;
4727
4728 case HCI_EV_REMOTE_EXT_FEATURES:
4729 hci_remote_ext_features_evt(hdev, skb);
4730 break;
4731
4732 case HCI_EV_SYNC_CONN_COMPLETE:
4733 hci_sync_conn_complete_evt(hdev, skb);
4734 break;
4735
4736 case HCI_EV_EXTENDED_INQUIRY_RESULT:
4737 hci_extended_inquiry_result_evt(hdev, skb);
4738 break;
4739
4740 case HCI_EV_KEY_REFRESH_COMPLETE:
4741 hci_key_refresh_complete_evt(hdev, skb);
4742 break;
4743
4744 case HCI_EV_IO_CAPA_REQUEST:
4745 hci_io_capa_request_evt(hdev, skb);
4746 break;
4747
4748 case HCI_EV_IO_CAPA_REPLY:
4749 hci_io_capa_reply_evt(hdev, skb);
4750 break;
4751
4752 case HCI_EV_USER_CONFIRM_REQUEST:
4753 hci_user_confirm_request_evt(hdev, skb);
4754 break;
4755
4756 case HCI_EV_USER_PASSKEY_REQUEST:
4757 hci_user_passkey_request_evt(hdev, skb);
4758 break;
4759
4760 case HCI_EV_USER_PASSKEY_NOTIFY:
4761 hci_user_passkey_notify_evt(hdev, skb);
4762 break;
4763
4764 case HCI_EV_KEYPRESS_NOTIFY:
4765 hci_keypress_notify_evt(hdev, skb);
4766 break;
4767
4768 case HCI_EV_SIMPLE_PAIR_COMPLETE:
4769 hci_simple_pair_complete_evt(hdev, skb);
4770 break;
4771
4772 case HCI_EV_REMOTE_HOST_FEATURES:
4773 hci_remote_host_features_evt(hdev, skb);
4774 break;
4775
4776 case HCI_EV_LE_META:
4777 hci_le_meta_evt(hdev, skb);
4778 break;
4779
4780 case HCI_EV_CHANNEL_SELECTED:
4781 hci_chan_selected_evt(hdev, skb);
4782 break;
4783
4784 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4785 hci_remote_oob_data_request_evt(hdev, skb);
4786 break;
4787
4788 case HCI_EV_PHY_LINK_COMPLETE:
4789 hci_phy_link_complete_evt(hdev, skb);
4790 break;
4791
4792 case HCI_EV_LOGICAL_LINK_COMPLETE:
4793 hci_loglink_complete_evt(hdev, skb);
4794 break;
4795
4796 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4797 hci_disconn_loglink_complete_evt(hdev, skb);
4798 break;
4799
4800 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4801 hci_disconn_phylink_complete_evt(hdev, skb);
4802 break;
4803
4804 case HCI_EV_NUM_COMP_BLOCKS:
4805 hci_num_comp_blocks_evt(hdev, skb);
4806 break;
4807
4808 default:
4809 BT_DBG("%s event 0x%2.2x", hdev->name, event);
4810 break;
4811 }
4812
4813 kfree_skb(skb);
4814 hdev->stat.evt_rx++;
4815 }