Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
[deliverable/linux.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "hci_request.h"
34 #include "hci_debugfs.h"
35 #include "a2mp.h"
36 #include "amp.h"
37 #include "smp.h"
38
39 /* Handle HCI Event packets */
40
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * On success, clears the in-progress inquiry state, wakes any waiters
 * blocked on the HCI_INQUIRY bit, and moves discovery to STOPPED.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* Kick off any connection attempts deferred while inquiry ran */
	hci_conn_check_pending(hdev);
}
60
61 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
62 {
63 __u8 status = *((__u8 *) skb->data);
64
65 BT_DBG("%s status 0x%2.2x", hdev->name, status);
66
67 if (status)
68 return;
69
70 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
71 }
72
73 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74 {
75 __u8 status = *((__u8 *) skb->data);
76
77 BT_DBG("%s status 0x%2.2x", hdev->name, status);
78
79 if (status)
80 return;
81
82 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
83
84 hci_conn_check_pending(hdev);
85 }
86
/* Command Complete handler for HCI_OP_REMOTE_NAME_REQ_CANCEL.
 * Nothing to update; the actual result arrives via the Remote Name
 * Request Complete event, so this handler only logs.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
92
93 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94 {
95 struct hci_rp_role_discovery *rp = (void *) skb->data;
96 struct hci_conn *conn;
97
98 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
99
100 if (rp->status)
101 return;
102
103 hci_dev_lock(hdev);
104
105 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 if (conn)
107 conn->role = rp->role;
108
109 hci_dev_unlock(hdev);
110 }
111
112 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
113 {
114 struct hci_rp_read_link_policy *rp = (void *) skb->data;
115 struct hci_conn *conn;
116
117 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
118
119 if (rp->status)
120 return;
121
122 hci_dev_lock(hdev);
123
124 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
125 if (conn)
126 conn->link_policy = __le16_to_cpu(rp->policy);
127
128 hci_dev_unlock(hdev);
129 }
130
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY.
 *
 * The reply carries only handle and status; the policy value that was
 * written is recovered from the command parameters we sent.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* +2 skips the 2-byte handle at the start of the sent
		 * command parameters to reach the policy field.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
154
155 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
156 struct sk_buff *skb)
157 {
158 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
159
160 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
161
162 if (rp->status)
163 return;
164
165 hdev->link_policy = __le16_to_cpu(rp->policy);
166 }
167
168 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
169 struct sk_buff *skb)
170 {
171 __u8 status = *((__u8 *) skb->data);
172 void *sent;
173
174 BT_DBG("%s status 0x%2.2x", hdev->name, status);
175
176 if (status)
177 return;
178
179 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
180 if (!sent)
181 return;
182
183 hdev->link_policy = get_unaligned_le16(sent);
184 }
185
/* Command Complete handler for HCI_OP_RESET.
 *
 * HCI_RESET is cleared even on failure so further reset attempts are
 * not blocked.  On success, all non-persistent state that the
 * controller reset just invalidated is returned to its defaults.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Cleared unconditionally: the reset attempt is over either way */
	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* The controller forgot its white list; drop our mirror of it */
	hci_bdaddr_list_clear(&hdev->le_white_list);
}
216
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 *
 * The status is deliberately not checked up front: when mgmt is in
 * use, it must be told about failures too so a pending set-local-name
 * request can be completed with the error.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		/* No mgmt: just mirror the successfully written name */
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
237
238 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
239 {
240 struct hci_rp_read_local_name *rp = (void *) skb->data;
241
242 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
243
244 if (rp->status)
245 return;
246
247 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
248 test_bit(HCI_CONFIG, &hdev->dev_flags))
249 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
250 }
251
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE.
 *
 * On success, mirrors the written parameter into the HCI_AUTH flag.
 * mgmt is notified regardless of status so a pending request can be
 * completed either way.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);

	hci_dev_unlock(hdev);
}
279
280 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
281 {
282 __u8 status = *((__u8 *) skb->data);
283 __u8 param;
284 void *sent;
285
286 BT_DBG("%s status 0x%2.2x", hdev->name, status);
287
288 if (status)
289 return;
290
291 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
292 if (!sent)
293 return;
294
295 param = *((__u8 *) sent);
296
297 if (param)
298 set_bit(HCI_ENCRYPT, &hdev->flags);
299 else
300 clear_bit(HCI_ENCRYPT, &hdev->flags);
301 }
302
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * On success, mirrors the written scan parameter into the HCI_ISCAN
 * (inquiry scan) and HCI_PSCAN (page scan) flags.  On failure, the
 * discoverable timeout is reset since the requested mode never took
 * effect.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
337
338 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
339 {
340 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
341
342 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
343
344 if (rp->status)
345 return;
346
347 memcpy(hdev->dev_class, rp->dev_class, 3);
348
349 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
350 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
351 }
352
/* Command Complete handler for HCI_OP_WRITE_CLASS_OF_DEV.
 *
 * The cached class is only updated on success, but mgmt is notified
 * regardless of status so a pending request can be completed.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
374
375 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
376 {
377 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
378 __u16 setting;
379
380 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
381
382 if (rp->status)
383 return;
384
385 setting = __le16_to_cpu(rp->voice_setting);
386
387 if (hdev->voice_setting == setting)
388 return;
389
390 hdev->voice_setting = setting;
391
392 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
393
394 if (hdev->notify)
395 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
396 }
397
398 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
399 struct sk_buff *skb)
400 {
401 __u8 status = *((__u8 *) skb->data);
402 __u16 setting;
403 void *sent;
404
405 BT_DBG("%s status 0x%2.2x", hdev->name, status);
406
407 if (status)
408 return;
409
410 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
411 if (!sent)
412 return;
413
414 setting = get_unaligned_le16(sent);
415
416 if (hdev->voice_setting == setting)
417 return;
418
419 hdev->voice_setting = setting;
420
421 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
422
423 if (hdev->notify)
424 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
425 }
426
427 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
428 struct sk_buff *skb)
429 {
430 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
431
432 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
433
434 if (rp->status)
435 return;
436
437 hdev->num_iac = rp->num_iac;
438
439 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
440 }
441
/* Command Complete handler for HCI_OP_WRITE_SSP_MODE.
 *
 * On success, mirrors the new Simple Pairing mode into the host
 * feature bits.  With mgmt in use, the pending request is completed
 * (success or failure); otherwise the HCI_SSP_ENABLED flag is updated
 * directly on success.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* Keep the cached host features in sync */
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
473
/* Command Complete handler for HCI_OP_WRITE_SC_SUPPORT.
 *
 * Same pattern as the SSP mode handler: on success mirror the new
 * Secure Connections support into the host feature bits, then either
 * complete the pending mgmt request (any status) or update the
 * HCI_SC_ENABLED flag directly on success.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* Keep the cached host features in sync */
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_sc_enable_complete(hdev, sent->support, status);
	else if (!status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
505
506 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
507 {
508 struct hci_rp_read_local_version *rp = (void *) skb->data;
509
510 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
511
512 if (rp->status)
513 return;
514
515 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
516 test_bit(HCI_CONFIG, &hdev->dev_flags)) {
517 hdev->hci_ver = rp->hci_ver;
518 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
519 hdev->lmp_ver = rp->lmp_ver;
520 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
521 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
522 }
523 }
524
525 static void hci_cc_read_local_commands(struct hci_dev *hdev,
526 struct sk_buff *skb)
527 {
528 struct hci_rp_read_local_commands *rp = (void *) skb->data;
529
530 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
531
532 if (rp->status)
533 return;
534
535 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
536 test_bit(HCI_CONFIG, &hdev->dev_flags))
537 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
538 }
539
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES.
 *
 * Caches page 0 of the LMP feature mask and translates the individual
 * feature bits into the ACL packet types and (e)SCO link types this
 * controller can use.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packet types */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO/eSCO packet types implied by the HV2/HV3 feature bits */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
589
590 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
591 struct sk_buff *skb)
592 {
593 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
594
595 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
596
597 if (rp->status)
598 return;
599
600 if (hdev->max_page < rp->max_page)
601 hdev->max_page = rp->max_page;
602
603 if (rp->page < HCI_MAX_PAGES)
604 memcpy(hdev->features[rp->page], rp->features, 8);
605 }
606
607 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
608 struct sk_buff *skb)
609 {
610 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
611
612 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
613
614 if (rp->status)
615 return;
616
617 hdev->flow_ctl_mode = rp->mode;
618 }
619
/* Command Complete handler for HCI_OP_READ_BUFFER_SIZE.
 *
 * Records the controller's ACL/SCO MTUs and buffer counts, which
 * drive the host-side flow control accounting (acl_cnt/sco_cnt).
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu  = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu  = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	/* Some controllers report bogus SCO buffer values; the quirk
	 * overrides them with known-good defaults.
	 */
	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu  = 64;
		hdev->sco_pkts = 8;
	}

	/* Start with the full buffer quota available */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
645
646 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
647 {
648 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
649
650 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
651
652 if (rp->status)
653 return;
654
655 if (test_bit(HCI_INIT, &hdev->flags))
656 bacpy(&hdev->bdaddr, &rp->bdaddr);
657
658 if (test_bit(HCI_SETUP, &hdev->dev_flags))
659 bacpy(&hdev->setup_addr, &rp->bdaddr);
660 }
661
662 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
663 struct sk_buff *skb)
664 {
665 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
666
667 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
668
669 if (rp->status)
670 return;
671
672 if (test_bit(HCI_INIT, &hdev->flags)) {
673 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
674 hdev->page_scan_window = __le16_to_cpu(rp->window);
675 }
676 }
677
678 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
679 struct sk_buff *skb)
680 {
681 u8 status = *((u8 *) skb->data);
682 struct hci_cp_write_page_scan_activity *sent;
683
684 BT_DBG("%s status 0x%2.2x", hdev->name, status);
685
686 if (status)
687 return;
688
689 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
690 if (!sent)
691 return;
692
693 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
694 hdev->page_scan_window = __le16_to_cpu(sent->window);
695 }
696
697 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
698 struct sk_buff *skb)
699 {
700 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
701
702 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
703
704 if (rp->status)
705 return;
706
707 if (test_bit(HCI_INIT, &hdev->flags))
708 hdev->page_scan_type = rp->type;
709 }
710
711 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
712 struct sk_buff *skb)
713 {
714 u8 status = *((u8 *) skb->data);
715 u8 *type;
716
717 BT_DBG("%s status 0x%2.2x", hdev->name, status);
718
719 if (status)
720 return;
721
722 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
723 if (type)
724 hdev->page_scan_type = *type;
725 }
726
727 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
728 struct sk_buff *skb)
729 {
730 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
731
732 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
733
734 if (rp->status)
735 return;
736
737 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
738 hdev->block_len = __le16_to_cpu(rp->block_len);
739 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
740
741 hdev->block_cnt = hdev->num_blocks;
742
743 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
744 hdev->block_cnt, hdev->block_len);
745 }
746
/* Command Complete handler for HCI_OP_READ_CLOCK.
 *
 * Depending on the "which" parameter of the command we sent, the
 * returned clock is either the local clock (stored on hdev) or a
 * piconet clock tied to a specific connection.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against a truncated reply before touching rp fields */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 requests the local clock */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
781
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO.
 *
 * On success, caches the AMP controller capabilities.  In all cases
 * (including failure) the pending A2MP Get Info response is sent, so
 * the peer is never left waiting.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
806
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_ASSOC.
 *
 * The local AMP assoc can span multiple HCI replies; fragments are
 * accumulated into hdev->loc_assoc until rem_len fits in the current
 * fragment, at which point the A2MP responses are sent.
 *
 * NOTE(review): assumes assoc->offset + frag_len never exceeds the
 * size of assoc->data, i.e. that the controller's rem_len is bounded
 * by the assoc-size negotiated earlier — TODO confirm callers enforce
 * this.
 */
static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
	struct amp_assoc *assoc = &hdev->loc_assoc;
	size_t rem_len, frag_len;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	frag_len = skb->len - sizeof(*rp);
	rem_len = __le16_to_cpu(rp->rem_len);

	if (rem_len > frag_len) {
		/* More data outstanding than this reply carries:
		 * stash the fragment and request the next one.
		 */
		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);

		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
		assoc->offset += frag_len;

		/* Read other fragments */
		amp_read_loc_assoc_frag(hdev, rp->phy_handle);

		return;
	}

	/* Final fragment: complete the assoc and reset for reuse */
	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
	assoc->len = assoc->offset + rem_len;
	assoc->offset = 0;

a2mp_rsp:
	/* Send A2MP Rsp when all fragments are received */
	a2mp_send_getampassoc_rsp(hdev, rp->status);
	a2mp_send_create_phy_link_req(hdev, rp->status);
}
843
844 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
845 struct sk_buff *skb)
846 {
847 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
848
849 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
850
851 if (rp->status)
852 return;
853
854 hdev->inq_tx_power = rp->tx_power;
855 }
856
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY.
 *
 * mgmt is notified before the status check so a pending request is
 * completed on failure too.  On success, the PIN length from the sent
 * command is recorded on the matching ACL connection (used later for
 * key-type decisions).
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
884
885 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
886 {
887 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
888
889 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
890
891 hci_dev_lock(hdev);
892
893 if (test_bit(HCI_MGMT, &hdev->dev_flags))
894 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
895 rp->status);
896
897 hci_dev_unlock(hdev);
898 }
899
900 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
901 struct sk_buff *skb)
902 {
903 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
904
905 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
906
907 if (rp->status)
908 return;
909
910 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
911 hdev->le_pkts = rp->le_max_pkt;
912
913 hdev->le_cnt = hdev->le_pkts;
914
915 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
916 }
917
918 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
919 struct sk_buff *skb)
920 {
921 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
922
923 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
924
925 if (rp->status)
926 return;
927
928 memcpy(hdev->le_features, rp->features, 8);
929 }
930
931 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
932 struct sk_buff *skb)
933 {
934 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
935
936 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
937
938 if (rp->status)
939 return;
940
941 hdev->adv_tx_power = rp->tx_power;
942 }
943
944 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
945 {
946 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
947
948 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
949
950 hci_dev_lock(hdev);
951
952 if (test_bit(HCI_MGMT, &hdev->dev_flags))
953 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
954 rp->status);
955
956 hci_dev_unlock(hdev);
957 }
958
959 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
960 struct sk_buff *skb)
961 {
962 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
963
964 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
965
966 hci_dev_lock(hdev);
967
968 if (test_bit(HCI_MGMT, &hdev->dev_flags))
969 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
970 ACL_LINK, 0, rp->status);
971
972 hci_dev_unlock(hdev);
973 }
974
975 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
976 {
977 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
978
979 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
980
981 hci_dev_lock(hdev);
982
983 if (test_bit(HCI_MGMT, &hdev->dev_flags))
984 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
985 0, rp->status);
986
987 hci_dev_unlock(hdev);
988 }
989
990 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
991 struct sk_buff *skb)
992 {
993 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
994
995 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
996
997 hci_dev_lock(hdev);
998
999 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1000 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1001 ACL_LINK, 0, rp->status);
1002
1003 hci_dev_unlock(hdev);
1004 }
1005
1006 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1007 struct sk_buff *skb)
1008 {
1009 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1010
1011 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1012
1013 hci_dev_lock(hdev);
1014 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
1015 rp->status);
1016 hci_dev_unlock(hdev);
1017 }
1018
1019 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1020 struct sk_buff *skb)
1021 {
1022 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1023
1024 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1025
1026 hci_dev_lock(hdev);
1027 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
1028 rp->hash256, rp->rand256,
1029 rp->status);
1030 hci_dev_unlock(hdev);
1031 }
1032
1033
1034 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1035 {
1036 __u8 status = *((__u8 *) skb->data);
1037 bdaddr_t *sent;
1038
1039 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1040
1041 if (status)
1042 return;
1043
1044 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1045 if (!sent)
1046 return;
1047
1048 hci_dev_lock(hdev);
1049
1050 bacpy(&hdev->random_addr, sent);
1051
1052 hci_dev_unlock(hdev);
1053 }
1054
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE.
 *
 * Tracks the HCI_LE_ADV flag according to the value we sent.  When
 * advertising was just enabled while an LE connection attempt is in
 * BT_CONNECT state, arm that connection's timeout so a peripheral-role
 * connect that never completes gets cleaned up.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		set_bit(HCI_LE_ADV, &hdev->dev_flags);

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		clear_bit(HCI_LE_ADV, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
1089
1090 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1091 {
1092 struct hci_cp_le_set_scan_param *cp;
1093 __u8 status = *((__u8 *) skb->data);
1094
1095 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1096
1097 if (status)
1098 return;
1099
1100 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1101 if (!cp)
1102 return;
1103
1104 hci_dev_lock(hdev);
1105
1106 hdev->le_scan_type = cp->type;
1107
1108 hci_dev_unlock(hdev);
1109 }
1110
1111 static bool has_pending_adv_report(struct hci_dev *hdev)
1112 {
1113 struct discovery_state *d = &hdev->discovery;
1114
1115 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1116 }
1117
1118 static void clear_pending_adv_report(struct hci_dev *hdev)
1119 {
1120 struct discovery_state *d = &hdev->discovery;
1121
1122 bacpy(&d->last_adv_addr, BDADDR_ANY);
1123 d->last_adv_data_len = 0;
1124 }
1125
1126 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1127 u8 bdaddr_type, s8 rssi, u32 flags,
1128 u8 *data, u8 len)
1129 {
1130 struct discovery_state *d = &hdev->discovery;
1131
1132 bacpy(&d->last_adv_addr, bdaddr);
1133 d->last_adv_addr_type = bdaddr_type;
1134 d->last_adv_rssi = rssi;
1135 d->last_adv_flags = flags;
1136 memcpy(d->last_adv_data, data, len);
1137 d->last_adv_data_len = len;
1138 }
1139
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * Tracks the HCI_LE_SCAN flag according to the value we sent.  On
 * disable it also flushes any advertising report still pending a scan
 * response, cancels the scan-disable timer, and either marks discovery
 * stopped (if scanning was interrupted for a connect) or re-enables
 * advertising that active scanning had displaced.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* Active scanning merges adv reports with scan
		 * responses, so start with an empty pending slot.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1209
1210 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1211 struct sk_buff *skb)
1212 {
1213 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1214
1215 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1216
1217 if (rp->status)
1218 return;
1219
1220 hdev->le_white_list_size = rp->size;
1221 }
1222
1223 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1224 struct sk_buff *skb)
1225 {
1226 __u8 status = *((__u8 *) skb->data);
1227
1228 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1229
1230 if (status)
1231 return;
1232
1233 hci_bdaddr_list_clear(&hdev->le_white_list);
1234 }
1235
1236 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1237 struct sk_buff *skb)
1238 {
1239 struct hci_cp_le_add_to_white_list *sent;
1240 __u8 status = *((__u8 *) skb->data);
1241
1242 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1243
1244 if (status)
1245 return;
1246
1247 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1248 if (!sent)
1249 return;
1250
1251 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1252 sent->bdaddr_type);
1253 }
1254
1255 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1256 struct sk_buff *skb)
1257 {
1258 struct hci_cp_le_del_from_white_list *sent;
1259 __u8 status = *((__u8 *) skb->data);
1260
1261 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1262
1263 if (status)
1264 return;
1265
1266 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1267 if (!sent)
1268 return;
1269
1270 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1271 sent->bdaddr_type);
1272 }
1273
1274 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1275 struct sk_buff *skb)
1276 {
1277 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1278
1279 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1280
1281 if (rp->status)
1282 return;
1283
1284 memcpy(hdev->le_states, rp->le_states, 8);
1285 }
1286
1287 static void hci_cc_le_read_def_data_len(struct hci_dev *hdev,
1288 struct sk_buff *skb)
1289 {
1290 struct hci_rp_le_read_def_data_len *rp = (void *) skb->data;
1291
1292 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1293
1294 if (rp->status)
1295 return;
1296
1297 hdev->le_def_tx_len = le16_to_cpu(rp->tx_len);
1298 hdev->le_def_tx_time = le16_to_cpu(rp->tx_time);
1299 }
1300
1301 static void hci_cc_le_write_def_data_len(struct hci_dev *hdev,
1302 struct sk_buff *skb)
1303 {
1304 struct hci_cp_le_write_def_data_len *sent;
1305 __u8 status = *((__u8 *) skb->data);
1306
1307 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1308
1309 if (status)
1310 return;
1311
1312 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN);
1313 if (!sent)
1314 return;
1315
1316 hdev->le_def_tx_len = le16_to_cpu(sent->tx_len);
1317 hdev->le_def_tx_time = le16_to_cpu(sent->tx_time);
1318 }
1319
1320 static void hci_cc_le_read_max_data_len(struct hci_dev *hdev,
1321 struct sk_buff *skb)
1322 {
1323 struct hci_rp_le_read_max_data_len *rp = (void *) skb->data;
1324
1325 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1326
1327 if (rp->status)
1328 return;
1329
1330 hdev->le_max_tx_len = le16_to_cpu(rp->tx_len);
1331 hdev->le_max_tx_time = le16_to_cpu(rp->tx_time);
1332 hdev->le_max_rx_len = le16_to_cpu(rp->rx_len);
1333 hdev->le_max_rx_time = le16_to_cpu(rp->rx_time);
1334 }
1335
1336 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1337 struct sk_buff *skb)
1338 {
1339 struct hci_cp_write_le_host_supported *sent;
1340 __u8 status = *((__u8 *) skb->data);
1341
1342 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1343
1344 if (status)
1345 return;
1346
1347 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1348 if (!sent)
1349 return;
1350
1351 hci_dev_lock(hdev);
1352
1353 if (sent->le) {
1354 hdev->features[1][0] |= LMP_HOST_LE;
1355 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1356 } else {
1357 hdev->features[1][0] &= ~LMP_HOST_LE;
1358 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1359 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1360 }
1361
1362 if (sent->simul)
1363 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1364 else
1365 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1366
1367 hci_dev_unlock(hdev);
1368 }
1369
1370 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1371 {
1372 struct hci_cp_le_set_adv_param *cp;
1373 u8 status = *((u8 *) skb->data);
1374
1375 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1376
1377 if (status)
1378 return;
1379
1380 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1381 if (!cp)
1382 return;
1383
1384 hci_dev_lock(hdev);
1385 hdev->adv_addr_type = cp->own_address_type;
1386 hci_dev_unlock(hdev);
1387 }
1388
1389 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1390 struct sk_buff *skb)
1391 {
1392 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1393
1394 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1395 hdev->name, rp->status, rp->phy_handle);
1396
1397 if (rp->status)
1398 return;
1399
1400 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1401 }
1402
1403 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1404 {
1405 struct hci_rp_read_rssi *rp = (void *) skb->data;
1406 struct hci_conn *conn;
1407
1408 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1409
1410 if (rp->status)
1411 return;
1412
1413 hci_dev_lock(hdev);
1414
1415 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1416 if (conn)
1417 conn->rssi = rp->rssi;
1418
1419 hci_dev_unlock(hdev);
1420 }
1421
1422 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1423 {
1424 struct hci_cp_read_tx_power *sent;
1425 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1426 struct hci_conn *conn;
1427
1428 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1429
1430 if (rp->status)
1431 return;
1432
1433 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1434 if (!sent)
1435 return;
1436
1437 hci_dev_lock(hdev);
1438
1439 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1440 if (!conn)
1441 goto unlock;
1442
1443 switch (sent->type) {
1444 case 0x00:
1445 conn->tx_power = rp->tx_power;
1446 break;
1447 case 0x01:
1448 conn->max_tx_power = rp->tx_power;
1449 break;
1450 }
1451
1452 unlock:
1453 hci_dev_unlock(hdev);
1454 }
1455
1456 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1457 {
1458 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1459
1460 if (status) {
1461 hci_conn_check_pending(hdev);
1462 return;
1463 }
1464
1465 set_bit(HCI_INQUIRY, &hdev->flags);
1466 }
1467
/* Command Status handler for HCI Create Connection.
 *
 * On failure, tear down the pending conn object — except for status
 * 0x0c (Command Disallowed) on an early attempt, where the conn is
 * parked in BT_CONNECT2 so the attempt can be retried. On success,
 * make sure a conn object exists for the peer so the later
 * Connection Complete event has something to attach to.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Recover the peer address from the command we sent */
	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c = Command Disallowed: retry up to two
			 * attempts before giving up for good.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1505
1506 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1507 {
1508 struct hci_cp_add_sco *cp;
1509 struct hci_conn *acl, *sco;
1510 __u16 handle;
1511
1512 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1513
1514 if (!status)
1515 return;
1516
1517 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1518 if (!cp)
1519 return;
1520
1521 handle = __le16_to_cpu(cp->handle);
1522
1523 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1524
1525 hci_dev_lock(hdev);
1526
1527 acl = hci_conn_hash_lookup_handle(hdev, handle);
1528 if (acl) {
1529 sco = acl->link;
1530 if (sco) {
1531 sco->state = BT_CLOSED;
1532
1533 hci_proto_connect_cfm(sco, status);
1534 hci_conn_del(sco);
1535 }
1536 }
1537
1538 hci_dev_unlock(hdev);
1539 }
1540
1541 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1542 {
1543 struct hci_cp_auth_requested *cp;
1544 struct hci_conn *conn;
1545
1546 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1547
1548 if (!status)
1549 return;
1550
1551 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1552 if (!cp)
1553 return;
1554
1555 hci_dev_lock(hdev);
1556
1557 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1558 if (conn) {
1559 if (conn->state == BT_CONFIG) {
1560 hci_proto_connect_cfm(conn, status);
1561 hci_conn_drop(conn);
1562 }
1563 }
1564
1565 hci_dev_unlock(hdev);
1566 }
1567
1568 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1569 {
1570 struct hci_cp_set_conn_encrypt *cp;
1571 struct hci_conn *conn;
1572
1573 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1574
1575 if (!status)
1576 return;
1577
1578 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1579 if (!cp)
1580 return;
1581
1582 hci_dev_lock(hdev);
1583
1584 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1585 if (conn) {
1586 if (conn->state == BT_CONFIG) {
1587 hci_proto_connect_cfm(conn, status);
1588 hci_conn_drop(conn);
1589 }
1590 }
1591
1592 hci_dev_unlock(hdev);
1593 }
1594
1595 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1596 struct hci_conn *conn)
1597 {
1598 if (conn->state != BT_CONFIG || !conn->out)
1599 return 0;
1600
1601 if (conn->pending_sec_level == BT_SECURITY_SDP)
1602 return 0;
1603
1604 /* Only request authentication for SSP connections or non-SSP
1605 * devices with sec_level MEDIUM or HIGH or if MITM protection
1606 * is requested.
1607 */
1608 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1609 conn->pending_sec_level != BT_SECURITY_FIPS &&
1610 conn->pending_sec_level != BT_SECURITY_HIGH &&
1611 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1612 return 0;
1613
1614 return 1;
1615 }
1616
1617 static int hci_resolve_name(struct hci_dev *hdev,
1618 struct inquiry_entry *e)
1619 {
1620 struct hci_cp_remote_name_req cp;
1621
1622 memset(&cp, 0, sizeof(cp));
1623
1624 bacpy(&cp.bdaddr, &e->data.bdaddr);
1625 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1626 cp.pscan_mode = e->data.pscan_mode;
1627 cp.clock_offset = e->data.clock_offset;
1628
1629 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1630 }
1631
1632 static bool hci_resolve_next_name(struct hci_dev *hdev)
1633 {
1634 struct discovery_state *discov = &hdev->discovery;
1635 struct inquiry_entry *e;
1636
1637 if (list_empty(&discov->resolve))
1638 return false;
1639
1640 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1641 if (!e)
1642 return false;
1643
1644 if (hci_resolve_name(hdev, e) == 0) {
1645 e->name_state = NAME_PENDING;
1646 return true;
1647 }
1648
1649 return false;
1650 }
1651
/* Process the outcome of a remote name lookup for @bdaddr.
 *
 * @conn may be NULL; @name is NULL when resolution failed. Updates
 * mgmt connected state if needed, records the result in the inquiry
 * cache, and advances (or completes) the discovery state machine.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device is not in the list of devices with a pending
	 * name resolution, there is no need to continue resolving the
	 * next name: that will happen when another Remote Name Request
	 * Complete event arrives.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* Keep discovery running while there are more names to resolve */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1700
/* Command Status handler for HCI Remote Name Request.
 *
 * Only failures are handled here; a successful request is resolved by
 * the later Remote Name Request Complete event. On failure, the
 * pending name lookup is cancelled and — because name requests are
 * often issued as part of connection setup — any required outgoing
 * authentication is started now instead.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report the failed lookup (name == NULL) to the discovery
	 * state machine when mgmt is in charge.
	 */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1743
1744 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1745 {
1746 struct hci_cp_read_remote_features *cp;
1747 struct hci_conn *conn;
1748
1749 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1750
1751 if (!status)
1752 return;
1753
1754 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1755 if (!cp)
1756 return;
1757
1758 hci_dev_lock(hdev);
1759
1760 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1761 if (conn) {
1762 if (conn->state == BT_CONFIG) {
1763 hci_proto_connect_cfm(conn, status);
1764 hci_conn_drop(conn);
1765 }
1766 }
1767
1768 hci_dev_unlock(hdev);
1769 }
1770
1771 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1772 {
1773 struct hci_cp_read_remote_ext_features *cp;
1774 struct hci_conn *conn;
1775
1776 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1777
1778 if (!status)
1779 return;
1780
1781 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1782 if (!cp)
1783 return;
1784
1785 hci_dev_lock(hdev);
1786
1787 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1788 if (conn) {
1789 if (conn->state == BT_CONFIG) {
1790 hci_proto_connect_cfm(conn, status);
1791 hci_conn_drop(conn);
1792 }
1793 }
1794
1795 hci_dev_unlock(hdev);
1796 }
1797
1798 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1799 {
1800 struct hci_cp_setup_sync_conn *cp;
1801 struct hci_conn *acl, *sco;
1802 __u16 handle;
1803
1804 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1805
1806 if (!status)
1807 return;
1808
1809 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1810 if (!cp)
1811 return;
1812
1813 handle = __le16_to_cpu(cp->handle);
1814
1815 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1816
1817 hci_dev_lock(hdev);
1818
1819 acl = hci_conn_hash_lookup_handle(hdev, handle);
1820 if (acl) {
1821 sco = acl->link;
1822 if (sco) {
1823 sco->state = BT_CLOSED;
1824
1825 hci_proto_connect_cfm(sco, status);
1826 hci_conn_del(sco);
1827 }
1828 }
1829
1830 hci_dev_unlock(hdev);
1831 }
1832
1833 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1834 {
1835 struct hci_cp_sniff_mode *cp;
1836 struct hci_conn *conn;
1837
1838 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1839
1840 if (!status)
1841 return;
1842
1843 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1844 if (!cp)
1845 return;
1846
1847 hci_dev_lock(hdev);
1848
1849 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1850 if (conn) {
1851 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1852
1853 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1854 hci_sco_setup(conn, status);
1855 }
1856
1857 hci_dev_unlock(hdev);
1858 }
1859
1860 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1861 {
1862 struct hci_cp_exit_sniff_mode *cp;
1863 struct hci_conn *conn;
1864
1865 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1866
1867 if (!status)
1868 return;
1869
1870 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1871 if (!cp)
1872 return;
1873
1874 hci_dev_lock(hdev);
1875
1876 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1877 if (conn) {
1878 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1879
1880 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1881 hci_sco_setup(conn, status);
1882 }
1883
1884 hci_dev_unlock(hdev);
1885 }
1886
1887 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1888 {
1889 struct hci_cp_disconnect *cp;
1890 struct hci_conn *conn;
1891
1892 if (!status)
1893 return;
1894
1895 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1896 if (!cp)
1897 return;
1898
1899 hci_dev_lock(hdev);
1900
1901 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1902 if (conn)
1903 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1904 conn->dst_type, status);
1905
1906 hci_dev_unlock(hdev);
1907 }
1908
1909 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1910 {
1911 struct hci_cp_create_phy_link *cp;
1912
1913 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1914
1915 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1916 if (!cp)
1917 return;
1918
1919 hci_dev_lock(hdev);
1920
1921 if (status) {
1922 struct hci_conn *hcon;
1923
1924 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1925 if (hcon)
1926 hci_conn_del(hcon);
1927 } else {
1928 amp_write_remote_assoc(hdev, cp->phy_handle);
1929 }
1930
1931 hci_dev_unlock(hdev);
1932 }
1933
1934 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1935 {
1936 struct hci_cp_accept_phy_link *cp;
1937
1938 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1939
1940 if (status)
1941 return;
1942
1943 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1944 if (!cp)
1945 return;
1946
1947 amp_write_remote_assoc(hdev, cp->phy_handle);
1948 }
1949
/* Command Status handler for HCI LE Create Connection.
 *
 * On success, record the initiator/responder addresses on the conn
 * (SMP needs them for pairing) and, for directed connects, arm a
 * timeout so the attempt cannot hang forever.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
2000
2001 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
2002 {
2003 struct hci_cp_le_start_enc *cp;
2004 struct hci_conn *conn;
2005
2006 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2007
2008 if (!status)
2009 return;
2010
2011 hci_dev_lock(hdev);
2012
2013 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
2014 if (!cp)
2015 goto unlock;
2016
2017 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
2018 if (!conn)
2019 goto unlock;
2020
2021 if (conn->state != BT_CONNECTED)
2022 goto unlock;
2023
2024 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2025 hci_conn_drop(conn);
2026
2027 unlock:
2028 hci_dev_unlock(hdev);
2029 }
2030
2031 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
2032 {
2033 struct hci_cp_switch_role *cp;
2034 struct hci_conn *conn;
2035
2036 BT_DBG("%s status 0x%2.2x", hdev->name, status);
2037
2038 if (!status)
2039 return;
2040
2041 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
2042 if (!cp)
2043 return;
2044
2045 hci_dev_lock(hdev);
2046
2047 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
2048 if (conn)
2049 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2050
2051 hci_dev_unlock(hdev);
2052 }
2053
/* Handle the HCI Inquiry Complete event.
 *
 * Clears the HCI_INQUIRY flag (waking any waiters) and, when mgmt is
 * driving discovery, either moves on to name resolution for the found
 * devices or marks discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Connection attempts may have been deferred while inquiring */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Found devices with unknown names: start resolving them, or
	 * stop discovery if the name request could not be sent.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
2094
2095 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2096 {
2097 struct inquiry_data data;
2098 struct inquiry_info *info = (void *) (skb->data + 1);
2099 int num_rsp = *((__u8 *) skb->data);
2100
2101 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2102
2103 if (!num_rsp)
2104 return;
2105
2106 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2107 return;
2108
2109 hci_dev_lock(hdev);
2110
2111 for (; num_rsp; num_rsp--, info++) {
2112 u32 flags;
2113
2114 bacpy(&data.bdaddr, &info->bdaddr);
2115 data.pscan_rep_mode = info->pscan_rep_mode;
2116 data.pscan_period_mode = info->pscan_period_mode;
2117 data.pscan_mode = info->pscan_mode;
2118 memcpy(data.dev_class, info->dev_class, 3);
2119 data.clock_offset = info->clock_offset;
2120 data.rssi = HCI_RSSI_INVALID;
2121 data.ssp_mode = 0x00;
2122
2123 flags = hci_inquiry_cache_update(hdev, &data, false);
2124
2125 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2126 info->dev_class, HCI_RSSI_INVALID,
2127 flags, NULL, 0, NULL, 0);
2128 }
2129
2130 hci_dev_unlock(hdev);
2131 }
2132
/* Handle the HCI Connection Complete event.
 *
 * Finalizes an outgoing or incoming BR/EDR (or SCO/eSCO) connection
 * attempt: on success the conn is moved into BT_CONFIG/BT_CONNECTED
 * and follow-up commands are issued; on failure the conn is torn down
 * and mgmt is notified.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An eSCO request may have completed as plain SCO:
		 * retry the lookup as ESCO_LINK and downgrade the type.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) links without a
			 * stored key are likely about to pair; give
			 * them the longer pairing disconnect timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_page_scan(hdev);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		/* Failed attempt: confirm failure upward, then free */
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* ACL links defer connect_cfm until features arrive */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2217
2218 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2219 {
2220 struct hci_cp_reject_conn_req cp;
2221
2222 bacpy(&cp.bdaddr, bdaddr);
2223 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2224 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2225 }
2226
/* Handle the HCI Connection Request event.
 *
 * Applies the accept policy (protocol consent, blacklist, and — under
 * mgmt — connectable/whitelist rules), creates or reuses a conn
 * object, and either accepts the request immediately or defers the
 * decision to the upper protocol (HCI_PROTO_DEFER).
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Ask the upper protocol whether (and how) to accept; it may
	 * also set HCI_PROTO_DEFER in flags.
	 */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class for this peer, if known */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		/* ACL, or sync link on a controller without eSCO: plain
		 * accept with an optional role switch request.
		 */
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		/* eSCO-capable sync accept with default audio settings */
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Deferred: upper protocol decides later (BT_CONNECT2) */
		conn->state = BT_CONNECT2;
		hci_proto_connect_cfm(conn, 0);
	}
}
2321
2322 static u8 hci_to_mgmt_reason(u8 err)
2323 {
2324 switch (err) {
2325 case HCI_ERROR_CONNECTION_TIMEOUT:
2326 return MGMT_DEV_DISCONN_TIMEOUT;
2327 case HCI_ERROR_REMOTE_USER_TERM:
2328 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2329 case HCI_ERROR_REMOTE_POWER_OFF:
2330 return MGMT_DEV_DISCONN_REMOTE;
2331 case HCI_ERROR_LOCAL_HOST_TERM:
2332 return MGMT_DEV_DISCONN_LOCAL_HOST;
2333 default:
2334 return MGMT_DEV_DISCONN_UNKNOWN;
2335 }
2336 }
2337
/* Handle the HCI Disconnection Complete event.
 *
 * Notifies mgmt, cleans up link keys and page scanning for ACL links,
 * requeues auto-connect LE peers for background scanning, tears down
 * the conn object, and re-enables LE advertising if the dropped
 * connection had suppressed it.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		/* The disconnect itself failed; the link stays up */
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report a mgmt disconnect if a connect was reported */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_page_scan(hdev);
	}

	/* Requeue auto-connect peers so background scan picks them up
	 * again; LINK_LOSS entries only reconnect after a timeout.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Remember the link type: conn is freed just below */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2415
/* Handle the HCI Authentication Complete event.
 *
 * Records the new security level (or reports the failure to mgmt),
 * clears the pending auth flags, advances connections in BT_CONFIG
 * toward encryption or completion, and services any encryption
 * request that was waiting on authentication.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* A legacy (non-SSP) link cannot be re-authenticated;
		 * the "success" here does not raise the security level.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* SSP setup continues with encryption; otherwise the
		 * connection is complete (possibly with an error).
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Balanced by the timeout-driven drop below */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* An encryption request was queued behind this authentication */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2480
/* Handle HCI Remote Name Request Complete event.
 *
 * Forwards the resolved name (or the failure) to the management core
 * and, if outgoing authentication is still needed on the connection,
 * sends an Authentication Requested command.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Without the management interface there is nobody to deliver
	 * the name to; only the authentication step below applies.
	 */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		/* We are the side requesting authentication. */
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2522
/* Handle HCI Encryption Change event.
 *
 * Updates the connection's encryption/authentication flags from the
 * event, disconnects on encryption failure of an established link,
 * and either completes connection setup or notifies hci_encrypt_cfm()
 * depending on the connection state.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 on BR/EDR, or any LE link,
			 * marks the link as AES-CCM encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Encryption failure on an established link: tear it down with
	 * an authentication failure reason.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2594
2595 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2596 struct sk_buff *skb)
2597 {
2598 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2599 struct hci_conn *conn;
2600
2601 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2602
2603 hci_dev_lock(hdev);
2604
2605 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2606 if (conn) {
2607 if (!ev->status)
2608 set_bit(HCI_CONN_SECURE, &conn->flags);
2609
2610 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2611
2612 hci_key_change_cfm(conn, ev->status);
2613 }
2614
2615 hci_dev_unlock(hdev);
2616 }
2617
/* Handle HCI Read Remote Supported Features Complete event.
 *
 * Caches remote feature page 0 and, while the connection is still in
 * BT_CONFIG, continues setup: read the extended features when both
 * sides are SSP capable, otherwise resolve the remote name and/or
 * complete the connection.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both controllers support SSP: fetch feature page 1 next. */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2665
/* Handle HCI Command Complete event.
 *
 * Dispatches the completion to the matching per-opcode hci_cc_*
 * handler, completes any pending request for the opcode and, when the
 * controller grants more command credit, kicks the command queue.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* The command's status byte immediately follows the fixed
	 * event header.
	 */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_LE_READ_DEF_DATA_LEN:
		hci_cc_le_read_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_WRITE_DEF_DATA_LEN:
		hci_cc_le_write_def_data_len(hdev, skb);
		break;

	case HCI_OP_LE_READ_MAX_DATA_LEN:
		hci_cc_le_read_max_data_len(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* HCI_OP_NOP completions are not tied to a sent command, so
	 * they must not cancel the command timeout.
	 */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* Num_HCI_Command_Packets > 0: the controller accepts another
	 * command, so restore the credit and flush the queue.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2957
/* Handle HCI Command Status event.
 *
 * Dispatches to the matching per-opcode hci_cs_* handler and, like
 * Command Complete, replenishes the command credit. The pending
 * request is only completed here when the command failed or no
 * dedicated follow-up event is expected for it.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* HCI_OP_NOP status events are not tied to a sent command. */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete the request on failure, or when the command that was
	 * sent is not waiting for a dedicated completion event.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* Num_HCI_Command_Packets > 0: restore the command credit and
	 * flush the queue.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
3054
3055 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
3056 {
3057 struct hci_ev_hardware_error *ev = (void *) skb->data;
3058
3059 BT_ERR("%s hardware error 0x%2.2x", hdev->name, ev->code);
3060 }
3061
3062 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3063 {
3064 struct hci_ev_role_change *ev = (void *) skb->data;
3065 struct hci_conn *conn;
3066
3067 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3068
3069 hci_dev_lock(hdev);
3070
3071 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3072 if (conn) {
3073 if (!ev->status)
3074 conn->role = ev->role;
3075
3076 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3077
3078 hci_role_switch_cfm(conn, ev->status, ev->role);
3079 }
3080
3081 hci_dev_unlock(hdev);
3082 }
3083
/* Handle HCI Number Of Completed Packets event.
 *
 * Returns transmit credit for each reported connection handle and
 * schedules the TX work queue. Only valid when the controller uses
 * packet-based flow control.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* The claimed number of handle/count pairs must fit in the skb. */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			/* Clamp the credit to the controller's pool size. */
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* LE shares the ACL pool when the controller has
			 * no dedicated LE buffer pool (le_pkts == 0).
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3149
3150 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3151 __u16 handle)
3152 {
3153 struct hci_chan *chan;
3154
3155 switch (hdev->dev_type) {
3156 case HCI_BREDR:
3157 return hci_conn_hash_lookup_handle(hdev, handle);
3158 case HCI_AMP:
3159 chan = hci_chan_lookup_handle(hdev, handle);
3160 if (chan)
3161 return chan->conn;
3162 break;
3163 default:
3164 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3165 break;
3166 }
3167
3168 return NULL;
3169 }
3170
/* Handle HCI Number Of Completed Data Blocks event.
 *
 * Block-based flow control counterpart of the Number Of Completed
 * Packets handler: returns block credit per reported handle and kicks
 * the TX work queue.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* The claimed number of handle records must fit in the skb. */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* On AMP controllers the handle refers to a logical
		 * channel, hence the indirect lookup.
		 */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Clamp the credit to the controller's pool size. */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3220
/* Handle HCI Mode Change event.
 *
 * Records the new link mode, updates the power-save flag only for
 * unsolicited mode changes, and completes any deferred SCO setup.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		/* Only touch the power-save flag when the change was not
		 * locally requested (no mode-change pending).
		 */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		/* SCO setup was waiting for this mode change. */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
3248
/* Handle HCI PIN Code Request event.
 *
 * Sends a negative reply when the device is not bondable and the
 * remote side initiated authentication; otherwise asks user space for
 * a PIN via the management interface.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Use the pairing disconnect timeout while pairing runs. */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* Hint user space that a secure PIN is wanted for high
		 * security levels.
		 */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3286
/* Record a link key's type and PIN length on the connection and derive
 * the pending security level from the key type. Changed-combination
 * keys are skipped: they carry no usable type information here.
 */
static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
{
	if (key_type == HCI_LK_CHANGED_COMBINATION)
		return;

	conn->pin_length = pin_len;
	conn->key_type = key_type;

	switch (key_type) {
	case HCI_LK_LOCAL_UNIT:
	case HCI_LK_REMOTE_UNIT:
	case HCI_LK_DEBUG_COMBINATION:
		/* Unit and debug keys do not change the security level. */
		return;
	case HCI_LK_COMBINATION:
		/* Only a full 16 character PIN yields high security. */
		if (pin_len == 16)
			conn->pending_sec_level = BT_SECURITY_HIGH;
		else
			conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_UNAUTH_COMBINATION_P192:
	case HCI_LK_UNAUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_MEDIUM;
		break;
	case HCI_LK_AUTH_COMBINATION_P192:
		conn->pending_sec_level = BT_SECURITY_HIGH;
		break;
	case HCI_LK_AUTH_COMBINATION_P256:
		conn->pending_sec_level = BT_SECURITY_FIPS;
		break;
	}
}
3318
/* Handle HCI Link Key Request event.
 *
 * Replies with a stored link key when one exists and is strong enough
 * for any active connection's security requirements; otherwise sends
 * a negative reply so the controller falls back to pairing.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Refuse unauthenticated keys when the connection's
		 * authentication requirements have the low bit set
		 * (MITM protection requested).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key from a short PIN is too weak for
		 * high or FIPS security.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3378
/* Handle HCI Link Key Notification event.
 *
 * Stores the new link key, updates the connection's key bookkeeping
 * and notifies the management core. Debug keys are removed from the
 * kernel list unless HCI_KEEP_DEBUG_KEYS is set.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	/* NOTE(review): pin_len stays 0 and is what gets passed to
	 * hci_add_link_key() below, even though conn->pin_length is
	 * available at that point — confirm this is intentional.
	 */
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Restore the normal disconnect timeout now that pairing is
	 * producing a key.
	 */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys are flushed when the link goes down. */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
3438
3439 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3440 {
3441 struct hci_ev_clock_offset *ev = (void *) skb->data;
3442 struct hci_conn *conn;
3443
3444 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3445
3446 hci_dev_lock(hdev);
3447
3448 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3449 if (conn && !ev->status) {
3450 struct inquiry_entry *ie;
3451
3452 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3453 if (ie) {
3454 ie->data.clock_offset = ev->clock_offset;
3455 ie->timestamp = jiffies;
3456 }
3457 }
3458
3459 hci_dev_unlock(hdev);
3460 }
3461
3462 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3463 {
3464 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3465 struct hci_conn *conn;
3466
3467 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3468
3469 hci_dev_lock(hdev);
3470
3471 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3472 if (conn && !ev->status)
3473 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3474
3475 hci_dev_unlock(hdev);
3476 }
3477
3478 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3479 {
3480 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3481 struct inquiry_entry *ie;
3482
3483 BT_DBG("%s", hdev->name);
3484
3485 hci_dev_lock(hdev);
3486
3487 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3488 if (ie) {
3489 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3490 ie->timestamp = jiffies;
3491 }
3492
3493 hci_dev_unlock(hdev);
3494 }
3495
/* Handle HCI Inquiry Result with RSSI event.
 *
 * Two wire formats exist: one with and one without a pscan_mode byte
 * per response. The format is inferred from the per-response record
 * size; each response updates the inquiry cache and is reported to
 * the management core as a found device.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results of a periodic inquiry are not reported. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Record size other than the plain RSSI format implies the
	 * variant that also carries pscan_mode.
	 */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3559
/* Handle HCI Read Remote Extended Features Complete event.
 *
 * Caches the reported feature page; page 1 carries the remote host's
 * SSP and Secure Connections support bits. While the connection is in
 * BT_CONFIG, continues setup with a remote name request or completes
 * the connection.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3623
/* Synchronous Connection Complete event: finalize (e)SCO link setup.
 * On a set of well-known rejection errors, outgoing links are retried
 * with the controller's supported (e)SCO packet types before giving
 * up.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* The event reports an SCO link but the connection was
		 * stored as eSCO: look it up as eSCO and downgrade the
		 * stored type to match what actually got established.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		/* Success: record the handle and mark the link up */
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state = BT_CONNECTED;

		hci_debugfs_create_conn(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x10: /* Connection Accept Timeout */
	case 0x0d: /* Connection Rejected due to Limited Resources */
	case 0x11: /* Unsupported Feature or Parameter Value */
	case 0x1c: /* SCO interval rejected */
	case 0x1a: /* Unsupported Remote Feature */
	case 0x1f: /* Unspecified error */
	case 0x20: /* Unsupported LMP Parameter value */
		if (conn->out) {
			/* Retry the setup using the packet types the
			 * controller advertises; if the retry command
			 * was issued, wait for its completion event
			 * instead of tearing the connection down.
			 */
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3682
3683 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3684 {
3685 size_t parsed = 0;
3686
3687 while (parsed < eir_len) {
3688 u8 field_len = eir[0];
3689
3690 if (field_len == 0)
3691 return parsed;
3692
3693 parsed += field_len + 1;
3694 eir += field_len + 1;
3695 }
3696
3697 return eir_len;
3698 }
3699
3700 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3701 struct sk_buff *skb)
3702 {
3703 struct inquiry_data data;
3704 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3705 int num_rsp = *((__u8 *) skb->data);
3706 size_t eir_len;
3707
3708 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3709
3710 if (!num_rsp)
3711 return;
3712
3713 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3714 return;
3715
3716 hci_dev_lock(hdev);
3717
3718 for (; num_rsp; num_rsp--, info++) {
3719 u32 flags;
3720 bool name_known;
3721
3722 bacpy(&data.bdaddr, &info->bdaddr);
3723 data.pscan_rep_mode = info->pscan_rep_mode;
3724 data.pscan_period_mode = info->pscan_period_mode;
3725 data.pscan_mode = 0x00;
3726 memcpy(data.dev_class, info->dev_class, 3);
3727 data.clock_offset = info->clock_offset;
3728 data.rssi = info->rssi;
3729 data.ssp_mode = 0x01;
3730
3731 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3732 name_known = eir_has_data_type(info->data,
3733 sizeof(info->data),
3734 EIR_NAME_COMPLETE);
3735 else
3736 name_known = true;
3737
3738 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3739
3740 eir_len = eir_get_length(info->data, sizeof(info->data));
3741
3742 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3743 info->dev_class, info->rssi,
3744 flags, info->data, eir_len, NULL, 0);
3745 }
3746
3747 hci_dev_unlock(hdev);
3748 }
3749
/* Encryption Key Refresh Complete event: for LE links, promote the
 * pending security level on success, or disconnect on failure. The
 * connection state decides whether the result is reported through the
 * connect or the auth confirmation path.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A refresh failure on an established link is treated as an
	 * authentication failure and the link is torn down.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		/* Still in setup: finish it and report via connect_cfm */
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Keep the connection around for the disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3799
3800 static u8 hci_get_auth_req(struct hci_conn *conn)
3801 {
3802 /* If remote requests no-bonding follow that lead */
3803 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3804 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3805 return conn->remote_auth | (conn->auth_type & 0x01);
3806
3807 /* If both remote and local have enough IO capabilities, require
3808 * MITM protection
3809 */
3810 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3811 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3812 return conn->remote_auth | 0x01;
3813
3814 /* No MITM protection possible so ignore remote requirement */
3815 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3816 }
3817
/* IO Capability Request event: decide whether pairing with the remote
 * device is allowed and answer with either an IO Capability Reply
 * (carrying our capability, auth requirements and OOB availability) or
 * a Negative Reply rejecting the pairing.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive across the pairing exchange */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;

		/* Advertise OOB data only if we have it stored and we
		 * either initiated the connection or the remote side
		 * announced OOB data of its own.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3891
3892 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3893 {
3894 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3895 struct hci_conn *conn;
3896
3897 BT_DBG("%s", hdev->name);
3898
3899 hci_dev_lock(hdev);
3900
3901 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3902 if (!conn)
3903 goto unlock;
3904
3905 conn->remote_cap = ev->capability;
3906 conn->remote_auth = ev->authentication;
3907 if (ev->oob_data)
3908 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3909
3910 unlock:
3911 hci_dev_unlock(hdev);
3912 }
3913
/* User Confirmation Request event: apply the MITM policy to decide
 * between rejecting the pairing, auto-accepting (optionally after a
 * configurable delay), or forwarding the numeric-comparison request to
 * user space through mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* With a delay configured, defer the accept to the
		 * connection's auto_accept_work instead of replying now.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	/* Let user space confirm (or authorize, if confirm_hint == 1) */
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3988
3989 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3990 struct sk_buff *skb)
3991 {
3992 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3993
3994 BT_DBG("%s", hdev->name);
3995
3996 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3997 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3998 }
3999
4000 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
4001 struct sk_buff *skb)
4002 {
4003 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
4004 struct hci_conn *conn;
4005
4006 BT_DBG("%s", hdev->name);
4007
4008 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4009 if (!conn)
4010 return;
4011
4012 conn->passkey_notify = __le32_to_cpu(ev->passkey);
4013 conn->passkey_entered = 0;
4014
4015 if (test_bit(HCI_MGMT, &hdev->dev_flags))
4016 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4017 conn->dst_type, conn->passkey_notify,
4018 conn->passkey_entered);
4019 }
4020
4021 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
4022 {
4023 struct hci_ev_keypress_notify *ev = (void *) skb->data;
4024 struct hci_conn *conn;
4025
4026 BT_DBG("%s", hdev->name);
4027
4028 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4029 if (!conn)
4030 return;
4031
4032 switch (ev->type) {
4033 case HCI_KEYPRESS_STARTED:
4034 conn->passkey_entered = 0;
4035 return;
4036
4037 case HCI_KEYPRESS_ENTERED:
4038 conn->passkey_entered++;
4039 break;
4040
4041 case HCI_KEYPRESS_ERASED:
4042 conn->passkey_entered--;
4043 break;
4044
4045 case HCI_KEYPRESS_CLEARED:
4046 conn->passkey_entered = 0;
4047 break;
4048
4049 case HCI_KEYPRESS_COMPLETED:
4050 return;
4051 }
4052
4053 if (test_bit(HCI_MGMT, &hdev->dev_flags))
4054 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
4055 conn->dst_type, conn->passkey_notify,
4056 conn->passkey_entered);
4057 }
4058
4059 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
4060 struct sk_buff *skb)
4061 {
4062 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
4063 struct hci_conn *conn;
4064
4065 BT_DBG("%s", hdev->name);
4066
4067 hci_dev_lock(hdev);
4068
4069 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4070 if (!conn)
4071 goto unlock;
4072
4073 /* Reset the authentication requirement to unknown */
4074 conn->remote_auth = 0xff;
4075
4076 /* To avoid duplicate auth_failed events to user space we check
4077 * the HCI_CONN_AUTH_PEND flag which will be set if we
4078 * initiated the authentication. A traditional auth_complete
4079 * event gets always produced as initiator and is also mapped to
4080 * the mgmt_auth_failed event */
4081 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4082 mgmt_auth_failed(conn, ev->status);
4083
4084 hci_conn_drop(conn);
4085
4086 unlock:
4087 hci_dev_unlock(hdev);
4088 }
4089
4090 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4091 struct sk_buff *skb)
4092 {
4093 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4094 struct inquiry_entry *ie;
4095 struct hci_conn *conn;
4096
4097 BT_DBG("%s", hdev->name);
4098
4099 hci_dev_lock(hdev);
4100
4101 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4102 if (conn)
4103 memcpy(conn->features[1], ev->features, 8);
4104
4105 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4106 if (ie)
4107 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4108
4109 hci_dev_unlock(hdev);
4110 }
4111
4112 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
4113 struct sk_buff *skb)
4114 {
4115 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
4116 struct oob_data *data;
4117
4118 BT_DBG("%s", hdev->name);
4119
4120 hci_dev_lock(hdev);
4121
4122 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
4123 goto unlock;
4124
4125 data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
4126 if (data) {
4127 if (bredr_sc_enabled(hdev)) {
4128 struct hci_cp_remote_oob_ext_data_reply cp;
4129
4130 bacpy(&cp.bdaddr, &ev->bdaddr);
4131 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
4132 memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
4133 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
4134 memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));
4135
4136 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
4137 sizeof(cp), &cp);
4138 } else {
4139 struct hci_cp_remote_oob_data_reply cp;
4140
4141 bacpy(&cp.bdaddr, &ev->bdaddr);
4142 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
4143 memcpy(cp.rand, data->rand192, sizeof(cp.rand));
4144
4145 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
4146 sizeof(cp), &cp);
4147 }
4148 } else {
4149 struct hci_cp_remote_oob_data_neg_reply cp;
4150
4151 bacpy(&cp.bdaddr, &ev->bdaddr);
4152 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
4153 sizeof(cp), &cp);
4154 }
4155
4156 unlock:
4157 hci_dev_unlock(hdev);
4158 }
4159
4160 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4161 struct sk_buff *skb)
4162 {
4163 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4164 struct hci_conn *hcon, *bredr_hcon;
4165
4166 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4167 ev->status);
4168
4169 hci_dev_lock(hdev);
4170
4171 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4172 if (!hcon) {
4173 hci_dev_unlock(hdev);
4174 return;
4175 }
4176
4177 if (ev->status) {
4178 hci_conn_del(hcon);
4179 hci_dev_unlock(hdev);
4180 return;
4181 }
4182
4183 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4184
4185 hcon->state = BT_CONNECTED;
4186 bacpy(&hcon->dst, &bredr_hcon->dst);
4187
4188 hci_conn_hold(hcon);
4189 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4190 hci_conn_drop(hcon);
4191
4192 hci_debugfs_create_conn(hcon);
4193 hci_conn_add_sysfs(hcon);
4194
4195 amp_physical_cfm(bredr_hcon, hcon);
4196
4197 hci_dev_unlock(hdev);
4198 }
4199
/* Logical Link Complete event (AMP): create an hci_chan for the new
 * logical link and, if an AMP manager with a pending BR/EDR channel
 * exists, confirm the logical link to L2CAP.
 *
 * NOTE(review): unlike the neighbouring event handlers this one does
 * not take hci_dev_lock() around the connection lookup — confirm
 * whether that is intentional.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Switch the channel's MTU to the AMP controller's
		 * block MTU and hand the new logical link to L2CAP.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4237
4238 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4239 struct sk_buff *skb)
4240 {
4241 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4242 struct hci_chan *hchan;
4243
4244 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4245 le16_to_cpu(ev->handle), ev->status);
4246
4247 if (ev->status)
4248 return;
4249
4250 hci_dev_lock(hdev);
4251
4252 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4253 if (!hchan)
4254 goto unlock;
4255
4256 amp_destroy_logical_link(hchan, ev->reason);
4257
4258 unlock:
4259 hci_dev_unlock(hdev);
4260 }
4261
4262 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4263 struct sk_buff *skb)
4264 {
4265 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4266 struct hci_conn *hcon;
4267
4268 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4269
4270 if (ev->status)
4271 return;
4272
4273 hci_dev_lock(hdev);
4274
4275 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4276 if (hcon) {
4277 hcon->state = BT_CLOSED;
4278 hci_conn_del(hcon);
4279 }
4280
4281 hci_dev_unlock(hdev);
4282 }
4283
/* LE Connection Complete event: bind the event to an existing pending
 * hci_conn (or create one for white-list initiated connections), fix
 * up the initiator/responder address bookkeeping, resolve the identity
 * address via the IRK store and finish connection establishment —
 * including mgmt notification and pending-connection-parameter
 * cleanup.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The connection attempt completed; stop its timeout */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	/* Record the negotiated connection parameters */
	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_debugfs_create_conn(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* A pending auto-connect entry for this device has now served
	 * its purpose: take it off the action list and release the
	 * reference it held on the connection object.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	/* Re-evaluate background scanning now that the connection
	 * attempt has concluded (success or failure).
	 */
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4419
4420 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4421 struct sk_buff *skb)
4422 {
4423 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4424 struct hci_conn *conn;
4425
4426 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4427
4428 if (ev->status)
4429 return;
4430
4431 hci_dev_lock(hdev);
4432
4433 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4434 if (conn) {
4435 conn->le_conn_interval = le16_to_cpu(ev->interval);
4436 conn->le_conn_latency = le16_to_cpu(ev->latency);
4437 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4438 }
4439
4440 hci_dev_unlock(hdev);
4441 }
4442
/* This function requires the caller holds hdev->lock.
 *
 * Decide, based on an incoming advertising report, whether we should
 * initiate an LE connection to the advertiser. Returns the created
 * hci_conn on success, or NULL when no connection attempt is made
 * (not connectable advertising, blocked device, existing slave link,
 * no matching pending-connection entry, or a tolerated -EBUSY from an
 * already ongoing attempt).
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return NULL;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return NULL;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connectioms from slave device are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		return NULL;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = hci_conn_get(conn);
		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
4522
/* Process a single LE advertising (or direct advertising) report:
 * validate direct-address reports against our own IRK, resolve the
 * advertiser's identity address, trigger pending auto-connections,
 * and deliver device-found events to mgmt — merging ADV_IND /
 * ADV_SCAN_IND reports with their subsequent SCAN_RSP where possible.
 *
 * @direct_addr: initiator address from an LE Direct Advertising
 *               Report, or NULL for a regular advertising report.
 * @data/@len:   the raw advertising data of this report.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!test_bit(HCI_PRIVACY, &hdev->dev_flags))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
	if (conn && type == LE_ADV_IND) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4681
4682 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4683 {
4684 u8 num_reports = skb->data[0];
4685 void *ptr = &skb->data[1];
4686
4687 hci_dev_lock(hdev);
4688
4689 while (num_reports--) {
4690 struct hci_ev_le_advertising_info *ev = ptr;
4691 s8 rssi;
4692
4693 rssi = ev->data[ev->length];
4694 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4695 ev->bdaddr_type, NULL, 0, rssi,
4696 ev->data, ev->length);
4697
4698 ptr += sizeof(*ev) + ev->length + 1;
4699 }
4700
4701 hci_dev_unlock(hdev);
4702 }
4703
/* Handle the LE Long Term Key Request event: the controller asks the
 * host for the LTK needed to encrypt the given connection. Reply with
 * the stored key when one matches the request, otherwise send a
 * negative reply so the controller can fail the encryption attempt.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	/* Look up the key by peer address, address type and our role */
	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* Security level the link will have once encryption completes */
	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		/* STKs are single-use: drop the key now that it has
		 * been handed to the controller.
		 */
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4766
4767 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4768 u8 reason)
4769 {
4770 struct hci_cp_le_conn_param_req_neg_reply cp;
4771
4772 cp.handle = cpu_to_le16(handle);
4773 cp.reason = reason;
4774
4775 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4776 &cp);
4777 }
4778
4779 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4780 struct sk_buff *skb)
4781 {
4782 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4783 struct hci_cp_le_conn_param_req_reply cp;
4784 struct hci_conn *hcon;
4785 u16 handle, min, max, latency, timeout;
4786
4787 handle = le16_to_cpu(ev->handle);
4788 min = le16_to_cpu(ev->interval_min);
4789 max = le16_to_cpu(ev->interval_max);
4790 latency = le16_to_cpu(ev->latency);
4791 timeout = le16_to_cpu(ev->timeout);
4792
4793 hcon = hci_conn_hash_lookup_handle(hdev, handle);
4794 if (!hcon || hcon->state != BT_CONNECTED)
4795 return send_conn_param_neg_reply(hdev, handle,
4796 HCI_ERROR_UNKNOWN_CONN_ID);
4797
4798 if (hci_check_conn_params(min, max, latency, timeout))
4799 return send_conn_param_neg_reply(hdev, handle,
4800 HCI_ERROR_INVALID_LL_PARAMS);
4801
4802 if (hcon->role == HCI_ROLE_MASTER) {
4803 struct hci_conn_params *params;
4804 u8 store_hint;
4805
4806 hci_dev_lock(hdev);
4807
4808 params = hci_conn_params_lookup(hdev, &hcon->dst,
4809 hcon->dst_type);
4810 if (params) {
4811 params->conn_min_interval = min;
4812 params->conn_max_interval = max;
4813 params->conn_latency = latency;
4814 params->supervision_timeout = timeout;
4815 store_hint = 0x01;
4816 } else{
4817 store_hint = 0x00;
4818 }
4819
4820 hci_dev_unlock(hdev);
4821
4822 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4823 store_hint, min, max, latency, timeout);
4824 }
4825
4826 cp.handle = ev->handle;
4827 cp.interval_min = ev->interval_min;
4828 cp.interval_max = ev->interval_max;
4829 cp.latency = ev->latency;
4830 cp.timeout = ev->timeout;
4831 cp.min_ce_len = 0;
4832 cp.max_ce_len = 0;
4833
4834 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4835 }
4836
4837 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
4838 struct sk_buff *skb)
4839 {
4840 u8 num_reports = skb->data[0];
4841 void *ptr = &skb->data[1];
4842
4843 hci_dev_lock(hdev);
4844
4845 while (num_reports--) {
4846 struct hci_ev_le_direct_adv_info *ev = ptr;
4847
4848 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4849 ev->bdaddr_type, &ev->direct_addr,
4850 ev->direct_addr_type, ev->rssi, NULL, 0);
4851
4852 ptr += sizeof(*ev);
4853 }
4854
4855 hci_dev_unlock(hdev);
4856 }
4857
4858 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4859 {
4860 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4861
4862 skb_pull(skb, sizeof(*le_ev));
4863
4864 switch (le_ev->subevent) {
4865 case HCI_EV_LE_CONN_COMPLETE:
4866 hci_le_conn_complete_evt(hdev, skb);
4867 break;
4868
4869 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4870 hci_le_conn_update_complete_evt(hdev, skb);
4871 break;
4872
4873 case HCI_EV_LE_ADVERTISING_REPORT:
4874 hci_le_adv_report_evt(hdev, skb);
4875 break;
4876
4877 case HCI_EV_LE_LTK_REQ:
4878 hci_le_ltk_request_evt(hdev, skb);
4879 break;
4880
4881 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4882 hci_le_remote_conn_param_req_evt(hdev, skb);
4883 break;
4884
4885 case HCI_EV_LE_DIRECT_ADV_REPORT:
4886 hci_le_direct_adv_report_evt(hdev, skb);
4887 break;
4888
4889 default:
4890 break;
4891 }
4892 }
4893
4894 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4895 {
4896 struct hci_ev_channel_selected *ev = (void *) skb->data;
4897 struct hci_conn *hcon;
4898
4899 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4900
4901 skb_pull(skb, sizeof(*ev));
4902
4903 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4904 if (!hcon)
4905 return;
4906
4907 amp_read_loc_assoc_final_data(hdev, hcon);
4908 }
4909
/* Main HCI event dispatcher. Called for every event packet received
 * from the controller; routes the packet to the per-event handler and
 * consumes the skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hci_req_pending(hdev)) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the last sent command was waiting for exactly this event,
	 * complete the pending request for its opcode.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	/* Dispatch to the handler for this event code. Handlers see the
	 * skb with the event header already stripped.
	 */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		/* Unknown events are logged and dropped */
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	/* The dispatcher owns the skb: free it and count the event */
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
This page took 0.195632 seconds and 6 git commands to generate.