/* Merge tag 'for-linus-3' of git://git.code.sf.net/p/openipmi/linux-ipmi
 * [deliverable/linux.git] / net / bluetooth / hci_event.c
 */
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "a2mp.h"
34 #include "amp.h"
35 #include "smp.h"
36
37 /* Handle HCI Event packets */
38
/* Command complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * On success, clear the inquiry flag, wake any waiter on that bit,
 * move discovery to DISCOVERY_STOPPED and process connection requests
 * that were held back while the inquiry was running.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	/* The barrier pairs clear_bit() with wake_up_bit() so the waiter
	 * observes the cleared bit before being woken.
	 */
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
58
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60 {
61 __u8 status = *((__u8 *) skb->data);
62
63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
64
65 if (status)
66 return;
67
68 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69 }
70
71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 {
73 __u8 status = *((__u8 *) skb->data);
74
75 BT_DBG("%s status 0x%2.2x", hdev->name, status);
76
77 if (status)
78 return;
79
80 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81
82 hci_conn_check_pending(hdev);
83 }
84
/* Command complete handler for HCI_OP_REMOTE_NAME_REQ_CANCEL.
 * No state to update; only trace the event.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
90
91 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
92 {
93 struct hci_rp_role_discovery *rp = (void *) skb->data;
94 struct hci_conn *conn;
95
96 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
97
98 if (rp->status)
99 return;
100
101 hci_dev_lock(hdev);
102
103 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104 if (conn)
105 conn->role = rp->role;
106
107 hci_dev_unlock(hdev);
108 }
109
110 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
111 {
112 struct hci_rp_read_link_policy *rp = (void *) skb->data;
113 struct hci_conn *conn;
114
115 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
116
117 if (rp->status)
118 return;
119
120 hci_dev_lock(hdev);
121
122 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
123 if (conn)
124 conn->link_policy = __le16_to_cpu(rp->policy);
125
126 hci_dev_unlock(hdev);
127 }
128
129 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
130 {
131 struct hci_rp_write_link_policy *rp = (void *) skb->data;
132 struct hci_conn *conn;
133 void *sent;
134
135 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
136
137 if (rp->status)
138 return;
139
140 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
141 if (!sent)
142 return;
143
144 hci_dev_lock(hdev);
145
146 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
147 if (conn)
148 conn->link_policy = get_unaligned_le16(sent + 2);
149
150 hci_dev_unlock(hdev);
151 }
152
153 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
154 struct sk_buff *skb)
155 {
156 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
157
158 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
159
160 if (rp->status)
161 return;
162
163 hdev->link_policy = __le16_to_cpu(rp->policy);
164 }
165
166 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
167 struct sk_buff *skb)
168 {
169 __u8 status = *((__u8 *) skb->data);
170 void *sent;
171
172 BT_DBG("%s status 0x%2.2x", hdev->name, status);
173
174 if (status)
175 return;
176
177 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
178 if (!sent)
179 return;
180
181 hdev->link_policy = get_unaligned_le16(sent);
182 }
183
/* Command complete handler for HCI_OP_RESET.
 *
 * The HCI_RESET flag is cleared regardless of status; on success all
 * non-persistent runtime state is returned to its post-reset defaults.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	if (status)
		return;

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Advertising and scan response data do not survive a reset */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;

	/* The controller's LE white list is emptied by the reset */
	hci_bdaddr_list_clear(&hdev->le_white_list);
}
214
215 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
216 {
217 __u8 status = *((__u8 *) skb->data);
218 void *sent;
219
220 BT_DBG("%s status 0x%2.2x", hdev->name, status);
221
222 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
223 if (!sent)
224 return;
225
226 hci_dev_lock(hdev);
227
228 if (test_bit(HCI_MGMT, &hdev->dev_flags))
229 mgmt_set_local_name_complete(hdev, sent, status);
230 else if (!status)
231 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
232
233 hci_dev_unlock(hdev);
234 }
235
236 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
237 {
238 struct hci_rp_read_local_name *rp = (void *) skb->data;
239
240 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
241
242 if (rp->status)
243 return;
244
245 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
246 test_bit(HCI_CONFIG, &hdev->dev_flags))
247 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
248 }
249
250 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
251 {
252 __u8 status = *((__u8 *) skb->data);
253 void *sent;
254
255 BT_DBG("%s status 0x%2.2x", hdev->name, status);
256
257 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
258 if (!sent)
259 return;
260
261 hci_dev_lock(hdev);
262
263 if (!status) {
264 __u8 param = *((__u8 *) sent);
265
266 if (param == AUTH_ENABLED)
267 set_bit(HCI_AUTH, &hdev->flags);
268 else
269 clear_bit(HCI_AUTH, &hdev->flags);
270 }
271
272 if (test_bit(HCI_MGMT, &hdev->dev_flags))
273 mgmt_auth_enable_complete(hdev, status);
274
275 hci_dev_unlock(hdev);
276 }
277
278 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
279 {
280 __u8 status = *((__u8 *) skb->data);
281 __u8 param;
282 void *sent;
283
284 BT_DBG("%s status 0x%2.2x", hdev->name, status);
285
286 if (status)
287 return;
288
289 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
290 if (!sent)
291 return;
292
293 param = *((__u8 *) sent);
294
295 if (param)
296 set_bit(HCI_ENCRYPT, &hdev->flags);
297 else
298 clear_bit(HCI_ENCRYPT, &hdev->flags);
299 }
300
301 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
302 {
303 __u8 status = *((__u8 *) skb->data);
304 __u8 param;
305 void *sent;
306
307 BT_DBG("%s status 0x%2.2x", hdev->name, status);
308
309 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
310 if (!sent)
311 return;
312
313 param = *((__u8 *) sent);
314
315 hci_dev_lock(hdev);
316
317 if (status) {
318 hdev->discov_timeout = 0;
319 goto done;
320 }
321
322 if (param & SCAN_INQUIRY)
323 set_bit(HCI_ISCAN, &hdev->flags);
324 else
325 clear_bit(HCI_ISCAN, &hdev->flags);
326
327 if (param & SCAN_PAGE)
328 set_bit(HCI_PSCAN, &hdev->flags);
329 else
330 clear_bit(HCI_PSCAN, &hdev->flags);
331
332 done:
333 hci_dev_unlock(hdev);
334 }
335
336 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
337 {
338 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
339
340 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
341
342 if (rp->status)
343 return;
344
345 memcpy(hdev->dev_class, rp->dev_class, 3);
346
347 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
348 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
349 }
350
351 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
352 {
353 __u8 status = *((__u8 *) skb->data);
354 void *sent;
355
356 BT_DBG("%s status 0x%2.2x", hdev->name, status);
357
358 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
359 if (!sent)
360 return;
361
362 hci_dev_lock(hdev);
363
364 if (status == 0)
365 memcpy(hdev->dev_class, sent, 3);
366
367 if (test_bit(HCI_MGMT, &hdev->dev_flags))
368 mgmt_set_class_of_dev_complete(hdev, sent, status);
369
370 hci_dev_unlock(hdev);
371 }
372
373 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
374 {
375 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
376 __u16 setting;
377
378 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
379
380 if (rp->status)
381 return;
382
383 setting = __le16_to_cpu(rp->voice_setting);
384
385 if (hdev->voice_setting == setting)
386 return;
387
388 hdev->voice_setting = setting;
389
390 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
391
392 if (hdev->notify)
393 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
394 }
395
396 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
397 struct sk_buff *skb)
398 {
399 __u8 status = *((__u8 *) skb->data);
400 __u16 setting;
401 void *sent;
402
403 BT_DBG("%s status 0x%2.2x", hdev->name, status);
404
405 if (status)
406 return;
407
408 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
409 if (!sent)
410 return;
411
412 setting = get_unaligned_le16(sent);
413
414 if (hdev->voice_setting == setting)
415 return;
416
417 hdev->voice_setting = setting;
418
419 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
420
421 if (hdev->notify)
422 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
423 }
424
425 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
426 struct sk_buff *skb)
427 {
428 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
429
430 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
431
432 if (rp->status)
433 return;
434
435 hdev->num_iac = rp->num_iac;
436
437 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
438 }
439
/* Command complete handler for HCI_OP_WRITE_SSP_MODE.
 *
 * On success the host-SSP feature bit is kept in sync with the mode we
 * requested. mgmt is always told the outcome when active; without mgmt
 * the HCI_SSP_ENABLED flag is updated directly on success.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* features[1][0] mirrors the host features page */
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
471
/* Command complete handler for HCI_OP_WRITE_SC_SUPPORT.
 *
 * Mirrors hci_cc_write_ssp_mode() for Secure Connections: keep the
 * host-SC feature bit in sync, report to mgmt when active, otherwise
 * toggle HCI_SC_ENABLED directly on success.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (!status) {
		/* features[1][0] mirrors the host features page */
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_sc_enable_complete(hdev, sent->support, status);
	else if (!status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
503
504 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
505 {
506 struct hci_rp_read_local_version *rp = (void *) skb->data;
507
508 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
509
510 if (rp->status)
511 return;
512
513 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
514 test_bit(HCI_CONFIG, &hdev->dev_flags)) {
515 hdev->hci_ver = rp->hci_ver;
516 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
517 hdev->lmp_ver = rp->lmp_ver;
518 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
519 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
520 }
521 }
522
523 static void hci_cc_read_local_commands(struct hci_dev *hdev,
524 struct sk_buff *skb)
525 {
526 struct hci_rp_read_local_commands *rp = (void *) skb->data;
527
528 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
529
530 if (rp->status)
531 return;
532
533 if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
534 test_bit(HCI_CONFIG, &hdev->dev_flags))
535 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
536 }
537
/* Command complete handler for HCI_OP_READ_LOCAL_FEATURES.
 *
 * Caches LMP feature page 0 and then widens the default ACL packet
 * types and (e)SCO link types according to the feature bits the
 * controller advertises.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* SCO packet types */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO packet types */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
587
588 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
589 struct sk_buff *skb)
590 {
591 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
592
593 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
594
595 if (rp->status)
596 return;
597
598 if (hdev->max_page < rp->max_page)
599 hdev->max_page = rp->max_page;
600
601 if (rp->page < HCI_MAX_PAGES)
602 memcpy(hdev->features[rp->page], rp->features, 8);
603 }
604
605 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
606 struct sk_buff *skb)
607 {
608 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
609
610 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
611
612 if (rp->status)
613 return;
614
615 hdev->flow_ctl_mode = rp->mode;
616 }
617
618 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
619 {
620 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
621
622 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
623
624 if (rp->status)
625 return;
626
627 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
628 hdev->sco_mtu = rp->sco_mtu;
629 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
630 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
631
632 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
633 hdev->sco_mtu = 64;
634 hdev->sco_pkts = 8;
635 }
636
637 hdev->acl_cnt = hdev->acl_pkts;
638 hdev->sco_cnt = hdev->sco_pkts;
639
640 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
641 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
642 }
643
644 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
645 {
646 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
647
648 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
649
650 if (rp->status)
651 return;
652
653 if (test_bit(HCI_INIT, &hdev->flags))
654 bacpy(&hdev->bdaddr, &rp->bdaddr);
655
656 if (test_bit(HCI_SETUP, &hdev->dev_flags))
657 bacpy(&hdev->setup_addr, &rp->bdaddr);
658 }
659
660 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
661 struct sk_buff *skb)
662 {
663 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
664
665 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
666
667 if (rp->status)
668 return;
669
670 if (test_bit(HCI_INIT, &hdev->flags)) {
671 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
672 hdev->page_scan_window = __le16_to_cpu(rp->window);
673 }
674 }
675
676 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
677 struct sk_buff *skb)
678 {
679 u8 status = *((u8 *) skb->data);
680 struct hci_cp_write_page_scan_activity *sent;
681
682 BT_DBG("%s status 0x%2.2x", hdev->name, status);
683
684 if (status)
685 return;
686
687 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
688 if (!sent)
689 return;
690
691 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
692 hdev->page_scan_window = __le16_to_cpu(sent->window);
693 }
694
695 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
696 struct sk_buff *skb)
697 {
698 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
699
700 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
701
702 if (rp->status)
703 return;
704
705 if (test_bit(HCI_INIT, &hdev->flags))
706 hdev->page_scan_type = rp->type;
707 }
708
709 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
710 struct sk_buff *skb)
711 {
712 u8 status = *((u8 *) skb->data);
713 u8 *type;
714
715 BT_DBG("%s status 0x%2.2x", hdev->name, status);
716
717 if (status)
718 return;
719
720 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
721 if (type)
722 hdev->page_scan_type = *type;
723 }
724
725 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
726 struct sk_buff *skb)
727 {
728 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
729
730 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
731
732 if (rp->status)
733 return;
734
735 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
736 hdev->block_len = __le16_to_cpu(rp->block_len);
737 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
738
739 hdev->block_cnt = hdev->num_blocks;
740
741 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
742 hdev->block_cnt, hdev->block_len);
743 }
744
/* Command complete handler for HCI_OP_READ_CLOCK.
 *
 * Depending on the "which" field of the command we sent, the reply
 * carries either the local clock (stored on hdev) or a piconet clock
 * plus accuracy (stored on the matching connection).
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against a truncated reply before touching rp fields */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 requests the local clock */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
779
780 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
781 struct sk_buff *skb)
782 {
783 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
784
785 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
786
787 if (rp->status)
788 goto a2mp_rsp;
789
790 hdev->amp_status = rp->amp_status;
791 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
792 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
793 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
794 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
795 hdev->amp_type = rp->amp_type;
796 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
797 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
798 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
799 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
800
801 a2mp_rsp:
802 a2mp_send_getinfo_rsp(hdev);
803 }
804
805 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
806 struct sk_buff *skb)
807 {
808 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
809 struct amp_assoc *assoc = &hdev->loc_assoc;
810 size_t rem_len, frag_len;
811
812 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
813
814 if (rp->status)
815 goto a2mp_rsp;
816
817 frag_len = skb->len - sizeof(*rp);
818 rem_len = __le16_to_cpu(rp->rem_len);
819
820 if (rem_len > frag_len) {
821 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
822
823 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
824 assoc->offset += frag_len;
825
826 /* Read other fragments */
827 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
828
829 return;
830 }
831
832 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
833 assoc->len = assoc->offset + rem_len;
834 assoc->offset = 0;
835
836 a2mp_rsp:
837 /* Send A2MP Rsp when all fragments are received */
838 a2mp_send_getampassoc_rsp(hdev, rp->status);
839 a2mp_send_create_phy_link_req(hdev, rp->status);
840 }
841
842 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
843 struct sk_buff *skb)
844 {
845 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
846
847 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
848
849 if (rp->status)
850 return;
851
852 hdev->inq_tx_power = rp->tx_power;
853 }
854
855 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
856 {
857 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
858 struct hci_cp_pin_code_reply *cp;
859 struct hci_conn *conn;
860
861 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
862
863 hci_dev_lock(hdev);
864
865 if (test_bit(HCI_MGMT, &hdev->dev_flags))
866 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
867
868 if (rp->status)
869 goto unlock;
870
871 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
872 if (!cp)
873 goto unlock;
874
875 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
876 if (conn)
877 conn->pin_length = cp->pin_len;
878
879 unlock:
880 hci_dev_unlock(hdev);
881 }
882
883 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
884 {
885 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
886
887 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
888
889 hci_dev_lock(hdev);
890
891 if (test_bit(HCI_MGMT, &hdev->dev_flags))
892 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
893 rp->status);
894
895 hci_dev_unlock(hdev);
896 }
897
898 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
899 struct sk_buff *skb)
900 {
901 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
902
903 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
904
905 if (rp->status)
906 return;
907
908 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
909 hdev->le_pkts = rp->le_max_pkt;
910
911 hdev->le_cnt = hdev->le_pkts;
912
913 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
914 }
915
916 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
917 struct sk_buff *skb)
918 {
919 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
920
921 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
922
923 if (rp->status)
924 return;
925
926 memcpy(hdev->le_features, rp->features, 8);
927 }
928
929 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
930 struct sk_buff *skb)
931 {
932 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
933
934 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
935
936 if (rp->status)
937 return;
938
939 hdev->adv_tx_power = rp->tx_power;
940 }
941
942 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
943 {
944 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
945
946 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
947
948 hci_dev_lock(hdev);
949
950 if (test_bit(HCI_MGMT, &hdev->dev_flags))
951 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
952 rp->status);
953
954 hci_dev_unlock(hdev);
955 }
956
957 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
958 struct sk_buff *skb)
959 {
960 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
961
962 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
963
964 hci_dev_lock(hdev);
965
966 if (test_bit(HCI_MGMT, &hdev->dev_flags))
967 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
968 ACL_LINK, 0, rp->status);
969
970 hci_dev_unlock(hdev);
971 }
972
973 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
974 {
975 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
976
977 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
978
979 hci_dev_lock(hdev);
980
981 if (test_bit(HCI_MGMT, &hdev->dev_flags))
982 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
983 0, rp->status);
984
985 hci_dev_unlock(hdev);
986 }
987
988 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
989 struct sk_buff *skb)
990 {
991 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
992
993 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
994
995 hci_dev_lock(hdev);
996
997 if (test_bit(HCI_MGMT, &hdev->dev_flags))
998 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
999 ACL_LINK, 0, rp->status);
1000
1001 hci_dev_unlock(hdev);
1002 }
1003
1004 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
1005 struct sk_buff *skb)
1006 {
1007 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1008
1009 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1010
1011 hci_dev_lock(hdev);
1012 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->rand, NULL, NULL,
1013 rp->status);
1014 hci_dev_unlock(hdev);
1015 }
1016
1017 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1018 struct sk_buff *skb)
1019 {
1020 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1021
1022 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1023
1024 hci_dev_lock(hdev);
1025 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->rand192,
1026 rp->hash256, rp->rand256,
1027 rp->status);
1028 hci_dev_unlock(hdev);
1029 }
1030
1031
1032 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1033 {
1034 __u8 status = *((__u8 *) skb->data);
1035 bdaddr_t *sent;
1036
1037 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1038
1039 if (status)
1040 return;
1041
1042 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1043 if (!sent)
1044 return;
1045
1046 hci_dev_lock(hdev);
1047
1048 bacpy(&hdev->random_addr, sent);
1049
1050 hci_dev_unlock(hdev);
1051 }
1052
/* Command complete handler for HCI_OP_LE_SET_ADV_ENABLE.
 *
 * Tracks the HCI_LE_ADV flag according to the enable value we sent.
 * When advertising was enabled while an LE connection attempt is in
 * BT_CONNECT state, a connection timeout is armed so a peripheral-role
 * connect attempt cannot hang forever.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		set_bit(HCI_LE_ADV, &hdev->dev_flags);

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		clear_bit(HCI_LE_ADV, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
1087
1088 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1089 {
1090 struct hci_cp_le_set_scan_param *cp;
1091 __u8 status = *((__u8 *) skb->data);
1092
1093 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1094
1095 if (status)
1096 return;
1097
1098 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1099 if (!cp)
1100 return;
1101
1102 hci_dev_lock(hdev);
1103
1104 hdev->le_scan_type = cp->type;
1105
1106 hci_dev_unlock(hdev);
1107 }
1108
1109 static bool has_pending_adv_report(struct hci_dev *hdev)
1110 {
1111 struct discovery_state *d = &hdev->discovery;
1112
1113 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1114 }
1115
1116 static void clear_pending_adv_report(struct hci_dev *hdev)
1117 {
1118 struct discovery_state *d = &hdev->discovery;
1119
1120 bacpy(&d->last_adv_addr, BDADDR_ANY);
1121 d->last_adv_data_len = 0;
1122 }
1123
1124 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1125 u8 bdaddr_type, s8 rssi, u32 flags,
1126 u8 *data, u8 len)
1127 {
1128 struct discovery_state *d = &hdev->discovery;
1129
1130 bacpy(&d->last_adv_addr, bdaddr);
1131 d->last_adv_addr_type = bdaddr_type;
1132 d->last_adv_rssi = rssi;
1133 d->last_adv_flags = flags;
1134 memcpy(d->last_adv_data, data, len);
1135 d->last_adv_data_len = len;
1136 }
1137
/* Command complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * Keeps HCI_LE_SCAN in sync with the enable value we sent. On scan
 * disable, any deferred advertising report is flushed to mgmt, the
 * scan-disable timer is cancelled and the discovery state machine is
 * advanced (or advertising re-enabled) depending on why scanning
 * stopped.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* Active scanning merges adv reports with scan responses,
		 * so start with a clean slate.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}

	hci_dev_unlock(hdev);
}
1207
1208 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1209 struct sk_buff *skb)
1210 {
1211 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1212
1213 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1214
1215 if (rp->status)
1216 return;
1217
1218 hdev->le_white_list_size = rp->size;
1219 }
1220
1221 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1222 struct sk_buff *skb)
1223 {
1224 __u8 status = *((__u8 *) skb->data);
1225
1226 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1227
1228 if (status)
1229 return;
1230
1231 hci_bdaddr_list_clear(&hdev->le_white_list);
1232 }
1233
1234 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1235 struct sk_buff *skb)
1236 {
1237 struct hci_cp_le_add_to_white_list *sent;
1238 __u8 status = *((__u8 *) skb->data);
1239
1240 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1241
1242 if (status)
1243 return;
1244
1245 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1246 if (!sent)
1247 return;
1248
1249 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1250 sent->bdaddr_type);
1251 }
1252
1253 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1254 struct sk_buff *skb)
1255 {
1256 struct hci_cp_le_del_from_white_list *sent;
1257 __u8 status = *((__u8 *) skb->data);
1258
1259 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1260
1261 if (status)
1262 return;
1263
1264 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1265 if (!sent)
1266 return;
1267
1268 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1269 sent->bdaddr_type);
1270 }
1271
1272 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1273 struct sk_buff *skb)
1274 {
1275 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1276
1277 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1278
1279 if (rp->status)
1280 return;
1281
1282 memcpy(hdev->le_states, rp->le_states, 8);
1283 }
1284
/* Command complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * On success, mirror the LE Supported (Host) and Simultaneous LE and
 * BR/EDR (Host) bits that we just wrote into the cached host features
 * and the matching dev_flags, all under the device lock.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the values we asked the controller to set. */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
		/* Advertising cannot remain enabled without LE support. */
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;

	hci_dev_unlock(hdev);
}
1318
1319 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1320 {
1321 struct hci_cp_le_set_adv_param *cp;
1322 u8 status = *((u8 *) skb->data);
1323
1324 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1325
1326 if (status)
1327 return;
1328
1329 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1330 if (!cp)
1331 return;
1332
1333 hci_dev_lock(hdev);
1334 hdev->adv_addr_type = cp->own_address_type;
1335 hci_dev_unlock(hdev);
1336 }
1337
1338 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1339 struct sk_buff *skb)
1340 {
1341 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1342
1343 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1344 hdev->name, rp->status, rp->phy_handle);
1345
1346 if (rp->status)
1347 return;
1348
1349 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1350 }
1351
1352 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1353 {
1354 struct hci_rp_read_rssi *rp = (void *) skb->data;
1355 struct hci_conn *conn;
1356
1357 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1358
1359 if (rp->status)
1360 return;
1361
1362 hci_dev_lock(hdev);
1363
1364 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1365 if (conn)
1366 conn->rssi = rp->rssi;
1367
1368 hci_dev_unlock(hdev);
1369 }
1370
1371 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1372 {
1373 struct hci_cp_read_tx_power *sent;
1374 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1375 struct hci_conn *conn;
1376
1377 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1378
1379 if (rp->status)
1380 return;
1381
1382 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1383 if (!sent)
1384 return;
1385
1386 hci_dev_lock(hdev);
1387
1388 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1389 if (!conn)
1390 goto unlock;
1391
1392 switch (sent->type) {
1393 case 0x00:
1394 conn->tx_power = rp->tx_power;
1395 break;
1396 case 0x01:
1397 conn->max_tx_power = rp->tx_power;
1398 break;
1399 }
1400
1401 unlock:
1402 hci_dev_unlock(hdev);
1403 }
1404
1405 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1406 {
1407 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1408
1409 if (status) {
1410 hci_conn_check_pending(hdev);
1411 return;
1412 }
1413
1414 set_bit(HCI_INQUIRY, &hdev->flags);
1415 }
1416
/* Command status handler for HCI_OP_CREATE_CONN.
 *
 * On failure, tear down (or park for a retry) the connection object
 * created for the attempt. On success, make sure a connection object
 * exists so the later Connection Complete event can attach to it.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c (Command Disallowed) is retried for up
			 * to two attempts by parking the connection in
			 * BT_CONNECT2; anything else closes and deletes it.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1454
1455 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1456 {
1457 struct hci_cp_add_sco *cp;
1458 struct hci_conn *acl, *sco;
1459 __u16 handle;
1460
1461 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1462
1463 if (!status)
1464 return;
1465
1466 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1467 if (!cp)
1468 return;
1469
1470 handle = __le16_to_cpu(cp->handle);
1471
1472 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1473
1474 hci_dev_lock(hdev);
1475
1476 acl = hci_conn_hash_lookup_handle(hdev, handle);
1477 if (acl) {
1478 sco = acl->link;
1479 if (sco) {
1480 sco->state = BT_CLOSED;
1481
1482 hci_proto_connect_cfm(sco, status);
1483 hci_conn_del(sco);
1484 }
1485 }
1486
1487 hci_dev_unlock(hdev);
1488 }
1489
1490 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1491 {
1492 struct hci_cp_auth_requested *cp;
1493 struct hci_conn *conn;
1494
1495 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1496
1497 if (!status)
1498 return;
1499
1500 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1501 if (!cp)
1502 return;
1503
1504 hci_dev_lock(hdev);
1505
1506 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1507 if (conn) {
1508 if (conn->state == BT_CONFIG) {
1509 hci_proto_connect_cfm(conn, status);
1510 hci_conn_drop(conn);
1511 }
1512 }
1513
1514 hci_dev_unlock(hdev);
1515 }
1516
1517 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1518 {
1519 struct hci_cp_set_conn_encrypt *cp;
1520 struct hci_conn *conn;
1521
1522 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1523
1524 if (!status)
1525 return;
1526
1527 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1528 if (!cp)
1529 return;
1530
1531 hci_dev_lock(hdev);
1532
1533 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1534 if (conn) {
1535 if (conn->state == BT_CONFIG) {
1536 hci_proto_connect_cfm(conn, status);
1537 hci_conn_drop(conn);
1538 }
1539 }
1540
1541 hci_dev_unlock(hdev);
1542 }
1543
1544 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1545 struct hci_conn *conn)
1546 {
1547 if (conn->state != BT_CONFIG || !conn->out)
1548 return 0;
1549
1550 if (conn->pending_sec_level == BT_SECURITY_SDP)
1551 return 0;
1552
1553 /* Only request authentication for SSP connections or non-SSP
1554 * devices with sec_level MEDIUM or HIGH or if MITM protection
1555 * is requested.
1556 */
1557 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1558 conn->pending_sec_level != BT_SECURITY_FIPS &&
1559 conn->pending_sec_level != BT_SECURITY_HIGH &&
1560 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1561 return 0;
1562
1563 return 1;
1564 }
1565
1566 static int hci_resolve_name(struct hci_dev *hdev,
1567 struct inquiry_entry *e)
1568 {
1569 struct hci_cp_remote_name_req cp;
1570
1571 memset(&cp, 0, sizeof(cp));
1572
1573 bacpy(&cp.bdaddr, &e->data.bdaddr);
1574 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1575 cp.pscan_mode = e->data.pscan_mode;
1576 cp.clock_offset = e->data.clock_offset;
1577
1578 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1579 }
1580
1581 static bool hci_resolve_next_name(struct hci_dev *hdev)
1582 {
1583 struct discovery_state *discov = &hdev->discovery;
1584 struct inquiry_entry *e;
1585
1586 if (list_empty(&discov->resolve))
1587 return false;
1588
1589 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1590 if (!e)
1591 return false;
1592
1593 if (hci_resolve_name(hdev, e) == 0) {
1594 e->name_state = NAME_PENDING;
1595 return true;
1596 }
1597
1598 return false;
1599 }
1600
/* Deliver a resolved remote name (or a resolution failure) to mgmt and
 * advance the name-resolution phase of discovery.
 *
 * @conn may be NULL; @name is NULL when resolution failed. Caller holds
 * the device lock.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Update the mgmt connected state if necessary. Be careful with
	 * conn objects that exist but are not (yet) connected however.
	 * Only those in BT_CONFIG or BT_CONNECTED states can be
	 * considered connected.
	 */
	if (conn &&
	    (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) &&
	    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, name, name_len);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of
	 * which are pending, there is no need to continue resolving a next
	 * name as it will be done upon receiving another Remote Name Request
	 * Complete Event.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* Keep resolving; only declare discovery done when nothing is left. */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1649
/* Command status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only failures are handled here: flush any pending name-resolution
 * bookkeeping for the address and, if the connection still needs it,
 * fall back to requesting authentication directly.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report the failed resolution (name == NULL) to mgmt. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1692
1693 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1694 {
1695 struct hci_cp_read_remote_features *cp;
1696 struct hci_conn *conn;
1697
1698 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1699
1700 if (!status)
1701 return;
1702
1703 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1704 if (!cp)
1705 return;
1706
1707 hci_dev_lock(hdev);
1708
1709 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1710 if (conn) {
1711 if (conn->state == BT_CONFIG) {
1712 hci_proto_connect_cfm(conn, status);
1713 hci_conn_drop(conn);
1714 }
1715 }
1716
1717 hci_dev_unlock(hdev);
1718 }
1719
1720 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1721 {
1722 struct hci_cp_read_remote_ext_features *cp;
1723 struct hci_conn *conn;
1724
1725 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1726
1727 if (!status)
1728 return;
1729
1730 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1731 if (!cp)
1732 return;
1733
1734 hci_dev_lock(hdev);
1735
1736 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1737 if (conn) {
1738 if (conn->state == BT_CONFIG) {
1739 hci_proto_connect_cfm(conn, status);
1740 hci_conn_drop(conn);
1741 }
1742 }
1743
1744 hci_dev_unlock(hdev);
1745 }
1746
1747 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1748 {
1749 struct hci_cp_setup_sync_conn *cp;
1750 struct hci_conn *acl, *sco;
1751 __u16 handle;
1752
1753 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1754
1755 if (!status)
1756 return;
1757
1758 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1759 if (!cp)
1760 return;
1761
1762 handle = __le16_to_cpu(cp->handle);
1763
1764 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1765
1766 hci_dev_lock(hdev);
1767
1768 acl = hci_conn_hash_lookup_handle(hdev, handle);
1769 if (acl) {
1770 sco = acl->link;
1771 if (sco) {
1772 sco->state = BT_CLOSED;
1773
1774 hci_proto_connect_cfm(sco, status);
1775 hci_conn_del(sco);
1776 }
1777 }
1778
1779 hci_dev_unlock(hdev);
1780 }
1781
1782 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1783 {
1784 struct hci_cp_sniff_mode *cp;
1785 struct hci_conn *conn;
1786
1787 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1788
1789 if (!status)
1790 return;
1791
1792 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1793 if (!cp)
1794 return;
1795
1796 hci_dev_lock(hdev);
1797
1798 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1799 if (conn) {
1800 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1801
1802 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1803 hci_sco_setup(conn, status);
1804 }
1805
1806 hci_dev_unlock(hdev);
1807 }
1808
1809 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1810 {
1811 struct hci_cp_exit_sniff_mode *cp;
1812 struct hci_conn *conn;
1813
1814 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1815
1816 if (!status)
1817 return;
1818
1819 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1820 if (!cp)
1821 return;
1822
1823 hci_dev_lock(hdev);
1824
1825 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1826 if (conn) {
1827 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1828
1829 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1830 hci_sco_setup(conn, status);
1831 }
1832
1833 hci_dev_unlock(hdev);
1834 }
1835
1836 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1837 {
1838 struct hci_cp_disconnect *cp;
1839 struct hci_conn *conn;
1840
1841 if (!status)
1842 return;
1843
1844 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1845 if (!cp)
1846 return;
1847
1848 hci_dev_lock(hdev);
1849
1850 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1851 if (conn)
1852 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1853 conn->dst_type, status);
1854
1855 hci_dev_unlock(hdev);
1856 }
1857
1858 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1859 {
1860 struct hci_cp_create_phy_link *cp;
1861
1862 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1863
1864 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1865 if (!cp)
1866 return;
1867
1868 hci_dev_lock(hdev);
1869
1870 if (status) {
1871 struct hci_conn *hcon;
1872
1873 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1874 if (hcon)
1875 hci_conn_del(hcon);
1876 } else {
1877 amp_write_remote_assoc(hdev, cp->phy_handle);
1878 }
1879
1880 hci_dev_unlock(hdev);
1881 }
1882
1883 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1884 {
1885 struct hci_cp_accept_phy_link *cp;
1886
1887 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1888
1889 if (status)
1890 return;
1891
1892 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1893 if (!cp)
1894 return;
1895
1896 amp_write_remote_assoc(hdev, cp->phy_handle);
1897 }
1898
/* Command status handler for HCI_OP_LE_CREATE_CONN.
 *
 * On success, record the initiator/responder address pair on the
 * connection (needed later by SMP) and arm a timeout for directed
 * connection attempts.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1949
1950 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1951 {
1952 struct hci_cp_le_start_enc *cp;
1953 struct hci_conn *conn;
1954
1955 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1956
1957 if (!status)
1958 return;
1959
1960 hci_dev_lock(hdev);
1961
1962 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1963 if (!cp)
1964 goto unlock;
1965
1966 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1967 if (!conn)
1968 goto unlock;
1969
1970 if (conn->state != BT_CONNECTED)
1971 goto unlock;
1972
1973 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1974 hci_conn_drop(conn);
1975
1976 unlock:
1977 hci_dev_unlock(hdev);
1978 }
1979
1980 static void hci_cs_switch_role(struct hci_dev *hdev, u8 status)
1981 {
1982 struct hci_cp_switch_role *cp;
1983 struct hci_conn *conn;
1984
1985 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1986
1987 if (!status)
1988 return;
1989
1990 cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE);
1991 if (!cp)
1992 return;
1993
1994 hci_dev_lock(hdev);
1995
1996 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1997 if (conn)
1998 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
1999
2000 hci_dev_unlock(hdev);
2001 }
2002
/* Inquiry Complete event handler.
 *
 * Clears HCI_INQUIRY, wakes anyone waiting on that bit, and then moves
 * the mgmt discovery state machine on: either into name resolution (if
 * any cache entries still need their name) or to stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Connection attempts may have been deferred while inquiring. */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* The discovery state machine only exists for mgmt users. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving names for devices found during inquiry. */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
2043
/* Inquiry Result event handler.
 *
 * The event payload is a response count byte followed by that many
 * struct inquiry_info records. Each record updates the inquiry cache
 * and is reported to mgmt as a found device (RSSI unavailable in this
 * event format).
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results of a periodic inquiry are not forwarded. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		/* This event format carries neither RSSI nor SSP info. */
		data.rssi = HCI_RSSI_INVALID;
		data.ssp_mode = 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, HCI_RSSI_INVALID,
				  flags, NULL, 0, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
2081
/* Connection Complete event handler.
 *
 * Attaches the new handle to the matching connection object and, on
 * success, moves ACL links into BT_CONFIG (kicking off remote feature
 * discovery) and SCO links straight to BT_CONNECTED. On failure the
 * connection object is confirmed as failed and deleted.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may belong to an object we created as
		 * ESCO; retry the lookup and fix up the type.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) links without a stored
			 * link key get the shorter pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_page_scan(hdev, NULL);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* ACL links get their connect_cfm after feature discovery. */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2165
2166 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2167 {
2168 struct hci_cp_reject_conn_req cp;
2169
2170 bacpy(&cp.bdaddr, bdaddr);
2171 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2172 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2173 }
2174
/* Connection Request event handler.
 *
 * Applies the accept policy (protocol mask, blacklist, and — for mgmt
 * users — the connectable flag or a whitelist entry), then either
 * accepts the ACL/SCO request, accepts an eSCO request, or defers the
 * decision to the protocol layer via HCI_PROTO_DEFER.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layer veto or defer the connection. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Require HCI_CONNECTABLE or a whitelist entry to accept the
	 * connection. These features are only touched through mgmt so
	 * only do the checks if HCI_MGMT is set.
	 */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class while we have it. */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Protocol layer asked to defer; let it decide later. */
		conn->state = BT_CONNECT2;
		hci_proto_connect_cfm(conn, 0);
	}
}
2269
2270 static u8 hci_to_mgmt_reason(u8 err)
2271 {
2272 switch (err) {
2273 case HCI_ERROR_CONNECTION_TIMEOUT:
2274 return MGMT_DEV_DISCONN_TIMEOUT;
2275 case HCI_ERROR_REMOTE_USER_TERM:
2276 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2277 case HCI_ERROR_REMOTE_POWER_OFF:
2278 return MGMT_DEV_DISCONN_REMOTE;
2279 case HCI_ERROR_LOCAL_HOST_TERM:
2280 return MGMT_DEV_DISCONN_LOCAL_HOST;
2281 default:
2282 return MGMT_DEV_DISCONN_UNKNOWN;
2283 }
2284 }
2285
/* Disconnection Complete event handler.
 *
 * Reports the disconnect (or its failure) to mgmt, removes the link
 * key for flush-marked ACL links, re-queues auto-connect parameters
 * for background scanning, deletes the connection and, for LE links,
 * re-enables advertising if the connection had implicitly stopped it.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report a mgmt disconnect if we reported the connect. */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_page_scan(hdev, NULL);
	}

	/* Devices marked for auto-connection go back onto the pending
	 * LE connections list so background scan picks them up again.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Remember the link type; conn is gone after hci_conn_del(). */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2363
/* Authentication Complete event handler.
 *
 * Updates the connection's auth state, completes the configuration
 * phase (requesting encryption for SSP links), and kicks off any
 * encryption change that was waiting on authentication.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy devices cannot be re-authenticated; keep the old
		 * auth state in that case.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(conn, ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP links are encrypted before being reported up. */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Reset the disconnect timer via a hold/drop cycle. */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Continue a pending encryption change now that auth finished. */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2428
/* Handle the HCI Remote Name Request Complete event.
 *
 * Delivers the resolved (or failed) remote name to any pending mgmt
 * request and, for an existing ACL connection, issues an outgoing
 * authentication request if one is still needed.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* A name request may have been blocking other pending connection
	 * attempts; give them a chance to proceed now.
	 */
	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	/* Name resolution results are only propagated via mgmt. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Start authentication only if it is not already in flight. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2470
/* Handle the HCI Encryption Change event.
 *
 * Updates the connection's security flags according to the new
 * encryption state, enforces Secure Connections Only policy during
 * connection setup, and confirms the change to the upper layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 on BR/EDR means AES-CCM; LE links
			 * always use AES-CCM when encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	/* We should disregard the current RPA and generate a new one
	 * whenever the encryption procedure fails.
	 */
	if (ev->status && conn->type == LE_LINK)
		set_bit(HCI_RPA_EXPIRED, &hdev->dev_flags);

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed encryption attempt on an established link is treated
	 * as an authentication failure and the link is torn down.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2542
2543 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2544 struct sk_buff *skb)
2545 {
2546 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2547 struct hci_conn *conn;
2548
2549 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2550
2551 hci_dev_lock(hdev);
2552
2553 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2554 if (conn) {
2555 if (!ev->status)
2556 set_bit(HCI_CONN_SECURE, &conn->flags);
2557
2558 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2559
2560 hci_key_change_cfm(conn, ev->status);
2561 }
2562
2563 hci_dev_unlock(hdev);
2564 }
2565
/* Handle the HCI Read Remote Supported Features Complete event.
 *
 * Stores page 0 of the remote feature mask and continues connection
 * setup: if both sides are SSP capable the extended features (page 1)
 * are fetched next, otherwise the remote name is requested (or mgmt is
 * told about the connection) and the link is completed if no outgoing
 * authentication is required.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* Setup continuation below only applies while still configuring. */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2613
/* Handle the HCI Command Complete event.
 *
 * Dispatches to the per-command hci_cc_* handler based on the opcode,
 * completes any HCI request waiting on this command and, if the
 * controller advertises room for further commands (ev->ncmd), resumes
 * the command queue.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters is the command status. */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	/* Strip the event header so handlers see only return parameters. */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command completed, so the command timeout watchdog can be
	 * disarmed (HCI_OP_NOP completions are controller-generated).
	 */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* Resume sending queued commands if the controller has credits
	 * and we are not in the middle of a reset.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2893
/* Handle the HCI Command Status event.
 *
 * Dispatches to the per-command hci_cs_* handler based on the opcode,
 * completes a waiting HCI request when appropriate and resumes the
 * command queue if the controller has command credits available.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_SWITCH_ROLE:
		hci_cs_switch_role(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command was acknowledged; disarm the command timeout
	 * (HCI_OP_NOP status events are controller-generated).
	 */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete the pending request now unless the request is waiting
	 * for a specific follow-up event (req.event) and the command
	 * merely started successfully.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* Resume the command queue when the controller has credits and
	 * we are not resetting.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2990
2991 static void hci_hardware_error_evt(struct hci_dev *hdev, struct sk_buff *skb)
2992 {
2993 struct hci_ev_hardware_error *ev = (void *) skb->data;
2994
2995 BT_ERR("%s hardware error 0x%2.2x", hdev->name, ev->code);
2996 }
2997
2998 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2999 {
3000 struct hci_ev_role_change *ev = (void *) skb->data;
3001 struct hci_conn *conn;
3002
3003 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3004
3005 hci_dev_lock(hdev);
3006
3007 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3008 if (conn) {
3009 if (!ev->status)
3010 conn->role = ev->role;
3011
3012 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
3013
3014 hci_role_switch_cfm(conn, ev->status, ev->role);
3015 }
3016
3017 hci_dev_unlock(hdev);
3018 }
3019
/* Handle the HCI Number Of Completed Packets event (packet-based flow
 * control).
 *
 * For each reported handle, credits the per-link sent counter back and
 * replenishes the matching per-type transmit quota (capped at the
 * controller-advertised maximum), then kicks the TX work to send more
 * queued data.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only meaningful in packet-based flow control
	 * mode; block-based controllers use Number Of Completed Blocks.
	 */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate the header first, then the handle array length
	 * (num_hndl is only read once the header fits).
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * (le_pkts == 0) share the ACL quota.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3085
3086 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3087 __u16 handle)
3088 {
3089 struct hci_chan *chan;
3090
3091 switch (hdev->dev_type) {
3092 case HCI_BREDR:
3093 return hci_conn_hash_lookup_handle(hdev, handle);
3094 case HCI_AMP:
3095 chan = hci_chan_lookup_handle(hdev, handle);
3096 if (chan)
3097 return chan->conn;
3098 break;
3099 default:
3100 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3101 break;
3102 }
3103
3104 return NULL;
3105 }
3106
/* Handle the HCI Number Of Completed Data Blocks event (block-based
 * flow control).
 *
 * Credits the per-link sent counter and the global block quota for
 * each reported handle, then kicks the TX work.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* Only valid for controllers using block-based flow control. */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Header length is checked before num_hndl is dereferenced. */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle may be a connection or an AMP logical channel. */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3156
/* Handle the HCI Mode Change event (active/sniff).
 *
 * Tracks the link's power-save state and, when a SCO setup was waiting
 * for the mode change, continues it now.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		/* Only update the power-save flag for unsolicited mode
		 * changes; a locally requested change already handled it.
		 */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
3184
/* Handle the HCI PIN Code Request event.
 *
 * Rejects the request outright when the device is not bondable and we
 * did not initiate authentication; otherwise forwards the request to
 * user space via mgmt (indicating whether a 16-digit secure PIN is
 * required).
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the link alive long enough for the user to enter a PIN. */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security requires a full 16-digit PIN. */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3222
3223 static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len)
3224 {
3225 if (key_type == HCI_LK_CHANGED_COMBINATION)
3226 return;
3227
3228 conn->pin_length = pin_len;
3229 conn->key_type = key_type;
3230
3231 switch (key_type) {
3232 case HCI_LK_LOCAL_UNIT:
3233 case HCI_LK_REMOTE_UNIT:
3234 case HCI_LK_DEBUG_COMBINATION:
3235 return;
3236 case HCI_LK_COMBINATION:
3237 if (pin_len == 16)
3238 conn->pending_sec_level = BT_SECURITY_HIGH;
3239 else
3240 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3241 break;
3242 case HCI_LK_UNAUTH_COMBINATION_P192:
3243 case HCI_LK_UNAUTH_COMBINATION_P256:
3244 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3245 break;
3246 case HCI_LK_AUTH_COMBINATION_P192:
3247 conn->pending_sec_level = BT_SECURITY_HIGH;
3248 break;
3249 case HCI_LK_AUTH_COMBINATION_P256:
3250 conn->pending_sec_level = BT_SECURITY_FIPS;
3251 break;
3252 }
3253 }
3254
/* Handle the HCI Link Key Request event.
 *
 * Looks the key up in the stored key list and replies with it unless
 * the key's strength is insufficient for the security level pending on
 * the connection, in which case a negative reply forces fresh pairing.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Stored keys are only managed when mgmt is in use. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);

		/* Reject unauthenticated keys when MITM protection was
		 * requested (auth_type bit 0) for this connection.
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key from a short PIN cannot satisfy a
		 * high/FIPS pending security level.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn_set_key(conn, key->type, key->pin_len);
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3314
/* Handle the HCI Link Key Notification event.
 *
 * Records the new key on the connection, stores it via the key list
 * (notifying mgmt), and decides whether the key should persist past
 * disconnect based on the store hint and key type.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	/* NOTE(review): pin_len is passed as 0 to hci_add_link_key even
	 * though conn->pin_length is available above — confirm this is
	 * intentional (hci_add_link_key may fix it up internally).
	 */
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Re-arm the disconnect timeout on the link. */
	hci_conn_hold(conn);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(conn);

	set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags);
	conn_set_key(conn, ev->key_type, conn->pin_length);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	/* Update connection information since adding the key will have
	 * fixed up the type in the case of changed combination keys.
	 */
	if (ev->key_type == HCI_LK_CHANGED_COMBINATION)
		conn_set_key(conn, key->type, key->pin_len);

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del_rcu(&key->list);
		kfree_rcu(key, rcu);
		goto unlock;
	}

	/* Non-persistent keys are flushed when the link goes down. */
	if (persistent)
		clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	else
		set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);

unlock:
	hci_dev_unlock(hdev);
}
3374
3375 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3376 {
3377 struct hci_ev_clock_offset *ev = (void *) skb->data;
3378 struct hci_conn *conn;
3379
3380 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3381
3382 hci_dev_lock(hdev);
3383
3384 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3385 if (conn && !ev->status) {
3386 struct inquiry_entry *ie;
3387
3388 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3389 if (ie) {
3390 ie->data.clock_offset = ev->clock_offset;
3391 ie->timestamp = jiffies;
3392 }
3393 }
3394
3395 hci_dev_unlock(hdev);
3396 }
3397
3398 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3399 {
3400 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3401 struct hci_conn *conn;
3402
3403 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3404
3405 hci_dev_lock(hdev);
3406
3407 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3408 if (conn && !ev->status)
3409 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3410
3411 hci_dev_unlock(hdev);
3412 }
3413
3414 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3415 {
3416 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3417 struct inquiry_entry *ie;
3418
3419 BT_DBG("%s", hdev->name);
3420
3421 hci_dev_lock(hdev);
3422
3423 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3424 if (ie) {
3425 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3426 ie->timestamp = jiffies;
3427 }
3428
3429 hci_dev_unlock(hdev);
3430 }
3431
3432 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
3433 struct sk_buff *skb)
3434 {
3435 struct inquiry_data data;
3436 int num_rsp = *((__u8 *) skb->data);
3437
3438 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3439
3440 if (!num_rsp)
3441 return;
3442
3443 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3444 return;
3445
3446 hci_dev_lock(hdev);
3447
3448 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
3449 struct inquiry_info_with_rssi_and_pscan_mode *info;
3450 info = (void *) (skb->data + 1);
3451
3452 for (; num_rsp; num_rsp--, info++) {
3453 u32 flags;
3454
3455 bacpy(&data.bdaddr, &info->bdaddr);
3456 data.pscan_rep_mode = info->pscan_rep_mode;
3457 data.pscan_period_mode = info->pscan_period_mode;
3458 data.pscan_mode = info->pscan_mode;
3459 memcpy(data.dev_class, info->dev_class, 3);
3460 data.clock_offset = info->clock_offset;
3461 data.rssi = info->rssi;
3462 data.ssp_mode = 0x00;
3463
3464 flags = hci_inquiry_cache_update(hdev, &data, false);
3465
3466 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3467 info->dev_class, info->rssi,
3468 flags, NULL, 0, NULL, 0);
3469 }
3470 } else {
3471 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
3472
3473 for (; num_rsp; num_rsp--, info++) {
3474 u32 flags;
3475
3476 bacpy(&data.bdaddr, &info->bdaddr);
3477 data.pscan_rep_mode = info->pscan_rep_mode;
3478 data.pscan_period_mode = info->pscan_period_mode;
3479 data.pscan_mode = 0x00;
3480 memcpy(data.dev_class, info->dev_class, 3);
3481 data.clock_offset = info->clock_offset;
3482 data.rssi = info->rssi;
3483 data.ssp_mode = 0x00;
3484
3485 flags = hci_inquiry_cache_update(hdev, &data, false);
3486
3487 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3488 info->dev_class, info->rssi,
3489 flags, NULL, 0, NULL, 0);
3490 }
3491 }
3492
3493 hci_dev_unlock(hdev);
3494 }
3495
/* Handle the HCI Read Remote Extended Features Complete event.
 *
 * Stores the requested feature page and, for page 1, mirrors the
 * remote host's SSP/SC support into the connection flags.  While the
 * connection is still being configured, continues setup the same way
 * as the basic remote-features handler.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		/* Cache the remote host's SSP capability for later
		 * inquiry handling.
		 */
		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3559
/* Handle the HCI Synchronous Connection Complete event (SCO/eSCO).
 *
 * Matches the event to an outstanding SCO or eSCO connection attempt,
 * retries eSCO failures as plain SCO with a reduced packet-type mask
 * for a set of known rejection codes, and confirms the final outcome
 * to the upper layers.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* The controller may have downgraded an eSCO attempt to
		 * SCO; retry the lookup as eSCO and retype the conn.
		 */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* For outgoing attempts, retry with a restricted packet
		 * type mask before giving up.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3617
3618 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3619 {
3620 size_t parsed = 0;
3621
3622 while (parsed < eir_len) {
3623 u8 field_len = eir[0];
3624
3625 if (field_len == 0)
3626 return parsed;
3627
3628 parsed += field_len + 1;
3629 eir += field_len + 1;
3630 }
3631
3632 return eir_len;
3633 }
3634
3635 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3636 struct sk_buff *skb)
3637 {
3638 struct inquiry_data data;
3639 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3640 int num_rsp = *((__u8 *) skb->data);
3641 size_t eir_len;
3642
3643 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3644
3645 if (!num_rsp)
3646 return;
3647
3648 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3649 return;
3650
3651 hci_dev_lock(hdev);
3652
3653 for (; num_rsp; num_rsp--, info++) {
3654 u32 flags;
3655 bool name_known;
3656
3657 bacpy(&data.bdaddr, &info->bdaddr);
3658 data.pscan_rep_mode = info->pscan_rep_mode;
3659 data.pscan_period_mode = info->pscan_period_mode;
3660 data.pscan_mode = 0x00;
3661 memcpy(data.dev_class, info->dev_class, 3);
3662 data.clock_offset = info->clock_offset;
3663 data.rssi = info->rssi;
3664 data.ssp_mode = 0x01;
3665
3666 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3667 name_known = eir_has_data_type(info->data,
3668 sizeof(info->data),
3669 EIR_NAME_COMPLETE);
3670 else
3671 name_known = true;
3672
3673 flags = hci_inquiry_cache_update(hdev, &data, name_known);
3674
3675 eir_len = eir_get_length(info->data, sizeof(info->data));
3676
3677 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3678 info->dev_class, info->rssi,
3679 flags, info->data, eir_len, NULL, 0);
3680 }
3681
3682 hci_dev_unlock(hdev);
3683 }
3684
/* Encryption Key Refresh Complete event.
 *
 * Only LE links are processed here (BR/EDR security state is driven
 * from the auth_complete event).  On success the pending security
 * level is committed; a failure on an established link tears the
 * connection down with an authentication-failure reason.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on a live link is treated as an
	 * authentication failure and the link is dropped.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		/* Connection setup still in progress: finish it with
		 * the refresh status.
		 */
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3734
3735 static u8 hci_get_auth_req(struct hci_conn *conn)
3736 {
3737 /* If remote requests no-bonding follow that lead */
3738 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3739 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3740 return conn->remote_auth | (conn->auth_type & 0x01);
3741
3742 /* If both remote and local have enough IO capabilities, require
3743 * MITM protection
3744 */
3745 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3746 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3747 return conn->remote_auth | 0x01;
3748
3749 /* No MITM protection possible so ignore remote requirement */
3750 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3751 }
3752
/* IO Capability Request event: the controller asks how pairing for
 * this ACL link should proceed.  When pairing is acceptable we reply
 * with our IO capability, authentication requirements and OOB-data
 * availability; otherwise a negative reply with "pairing not allowed"
 * is sent.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of the pairing
	 * procedure (presumably balanced by the drop in the
	 * pairing-complete path -- TODO confirm).
	 */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;

		/* Advertise OOB data only when we have it stored and
		 * either initiated the connection or saw the remote
		 * claim OOB support.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3826
3827 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3828 {
3829 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3830 struct hci_conn *conn;
3831
3832 BT_DBG("%s", hdev->name);
3833
3834 hci_dev_lock(hdev);
3835
3836 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3837 if (!conn)
3838 goto unlock;
3839
3840 conn->remote_cap = ev->capability;
3841 conn->remote_auth = ev->authentication;
3842 if (ev->oob_data)
3843 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3844
3845 unlock:
3846 hci_dev_unlock(hdev);
3847 }
3848
/* User Confirm Request event (numeric comparison pairing).
 *
 * Decides between three outcomes: rejecting the request outright
 * (remote cannot satisfy our MITM requirement), auto-accepting
 * (optionally after a configurable delay), or forwarding the
 * confirmation to userspace via mgmt with an appropriate hint.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag. */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Delayed auto-accept is done from a workqueue so the
		 * user still has a window to observe the pairing.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3923
3924 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3925 struct sk_buff *skb)
3926 {
3927 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3928
3929 BT_DBG("%s", hdev->name);
3930
3931 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3932 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3933 }
3934
3935 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3936 struct sk_buff *skb)
3937 {
3938 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3939 struct hci_conn *conn;
3940
3941 BT_DBG("%s", hdev->name);
3942
3943 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3944 if (!conn)
3945 return;
3946
3947 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3948 conn->passkey_entered = 0;
3949
3950 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3951 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3952 conn->dst_type, conn->passkey_notify,
3953 conn->passkey_entered);
3954 }
3955
3956 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3957 {
3958 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3959 struct hci_conn *conn;
3960
3961 BT_DBG("%s", hdev->name);
3962
3963 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3964 if (!conn)
3965 return;
3966
3967 switch (ev->type) {
3968 case HCI_KEYPRESS_STARTED:
3969 conn->passkey_entered = 0;
3970 return;
3971
3972 case HCI_KEYPRESS_ENTERED:
3973 conn->passkey_entered++;
3974 break;
3975
3976 case HCI_KEYPRESS_ERASED:
3977 conn->passkey_entered--;
3978 break;
3979
3980 case HCI_KEYPRESS_CLEARED:
3981 conn->passkey_entered = 0;
3982 break;
3983
3984 case HCI_KEYPRESS_COMPLETED:
3985 return;
3986 }
3987
3988 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3989 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3990 conn->dst_type, conn->passkey_notify,
3991 conn->passkey_entered);
3992 }
3993
3994 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3995 struct sk_buff *skb)
3996 {
3997 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3998 struct hci_conn *conn;
3999
4000 BT_DBG("%s", hdev->name);
4001
4002 hci_dev_lock(hdev);
4003
4004 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4005 if (!conn)
4006 goto unlock;
4007
4008 /* Reset the authentication requirement to unknown */
4009 conn->remote_auth = 0xff;
4010
4011 /* To avoid duplicate auth_failed events to user space we check
4012 * the HCI_CONN_AUTH_PEND flag which will be set if we
4013 * initiated the authentication. A traditional auth_complete
4014 * event gets always produced as initiator and is also mapped to
4015 * the mgmt_auth_failed event */
4016 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
4017 mgmt_auth_failed(conn, ev->status);
4018
4019 hci_conn_drop(conn);
4020
4021 unlock:
4022 hci_dev_unlock(hdev);
4023 }
4024
4025 static void hci_remote_host_features_evt(struct hci_dev *hdev,
4026 struct sk_buff *skb)
4027 {
4028 struct hci_ev_remote_host_features *ev = (void *) skb->data;
4029 struct inquiry_entry *ie;
4030 struct hci_conn *conn;
4031
4032 BT_DBG("%s", hdev->name);
4033
4034 hci_dev_lock(hdev);
4035
4036 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
4037 if (conn)
4038 memcpy(conn->features[1], ev->features, 8);
4039
4040 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
4041 if (ie)
4042 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
4043
4044 hci_dev_unlock(hdev);
4045 }
4046
/* Remote OOB Data Request event: the controller needs the out-of-band
 * hash/randomizer values for the remote device.  Replies with the
 * stored OOB data -- the extended (192+256 bit) variant when BR/EDR
 * Secure Connections is enabled -- or a negative reply when no OOB
 * data is known for this peer.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR);
	if (data) {
		if (bredr_sc_enabled(hdev)) {
			/* Secure Connections: send both the P-192 and
			 * P-256 hash/randomizer pairs.
			 */
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.rand192, data->rand192, sizeof(cp.rand192));
			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
			memcpy(cp.rand256, data->rand256, sizeof(cp.rand256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			/* Legacy pairing only uses the P-192 values. */
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.rand, data->rand192, sizeof(cp.rand));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
4094
4095 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
4096 struct sk_buff *skb)
4097 {
4098 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
4099 struct hci_conn *hcon, *bredr_hcon;
4100
4101 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
4102 ev->status);
4103
4104 hci_dev_lock(hdev);
4105
4106 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4107 if (!hcon) {
4108 hci_dev_unlock(hdev);
4109 return;
4110 }
4111
4112 if (ev->status) {
4113 hci_conn_del(hcon);
4114 hci_dev_unlock(hdev);
4115 return;
4116 }
4117
4118 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4119
4120 hcon->state = BT_CONNECTED;
4121 bacpy(&hcon->dst, &bredr_hcon->dst);
4122
4123 hci_conn_hold(hcon);
4124 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4125 hci_conn_drop(hcon);
4126
4127 hci_conn_add_sysfs(hcon);
4128
4129 amp_physical_cfm(bredr_hcon, hcon);
4130
4131 hci_dev_unlock(hdev);
4132 }
4133
/* Logical Link Complete event (AMP): create the hci_chan backing the
 * new logical link and, when a BR/EDR L2CAP channel is waiting on the
 * AMP manager, confirm the logical link to L2CAP.
 *
 * NOTE(review): unlike most handlers in this file no hci_dev_lock is
 * taken here -- confirm this is intentional for the AMP path.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* The L2CAP MTU follows the AMP controller's block MTU
		 * once traffic moves to the AMP link.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4171
4172 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4173 struct sk_buff *skb)
4174 {
4175 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4176 struct hci_chan *hchan;
4177
4178 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4179 le16_to_cpu(ev->handle), ev->status);
4180
4181 if (ev->status)
4182 return;
4183
4184 hci_dev_lock(hdev);
4185
4186 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4187 if (!hchan)
4188 goto unlock;
4189
4190 amp_destroy_logical_link(hchan, ev->reason);
4191
4192 unlock:
4193 hci_dev_unlock(hdev);
4194 }
4195
4196 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4197 struct sk_buff *skb)
4198 {
4199 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4200 struct hci_conn *hcon;
4201
4202 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4203
4204 if (ev->status)
4205 return;
4206
4207 hci_dev_lock(hdev);
4208
4209 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4210 if (hcon) {
4211 hcon->state = BT_CLOSED;
4212 hci_conn_del(hcon);
4213 }
4214
4215 hci_dev_unlock(hdev);
4216 }
4217
/* LE Connection Complete event.
 *
 * Finds (or creates) the hci_conn for the new LE link, fills in the
 * initiator/responder addresses, resolves RPAs back to identity
 * addresses, enforces the blacklist and finally marks the connection
 * established and notifies mgmt/L2CAP.  The background scan state is
 * refreshed on every exit path.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* An existing BT_CONNECT conn means a connection
		 * attempt was in flight; its timeout is now obsolete.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, conn, 0, NULL, 0);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The pending auto-connect action (if any) has fired; drop the
	 * reference it was holding on the connection object.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	/* Re-evaluate passive scanning on every exit path, since the
	 * pending-connection state may have changed.
	 */
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4352
4353 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4354 struct sk_buff *skb)
4355 {
4356 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4357 struct hci_conn *conn;
4358
4359 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4360
4361 if (ev->status)
4362 return;
4363
4364 hci_dev_lock(hdev);
4365
4366 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4367 if (conn) {
4368 conn->le_conn_interval = le16_to_cpu(ev->interval);
4369 conn->le_conn_latency = le16_to_cpu(ev->latency);
4370 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4371 }
4372
4373 hci_dev_unlock(hdev);
4374 }
4375
/* This function requires the caller holds hdev->lock */
/* Decide, based on an incoming advertising report, whether to start a
 * connection attempt to the advertiser.  Returns the new hci_conn on
 * success, NULL when no attempt is made (not connectable advertising,
 * blocked device, no matching pending params, or -EBUSY from an
 * already ongoing attempt).
 */
static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,
					      bdaddr_t *addr,
					      u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return NULL;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return NULL;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return NULL;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return NULL;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return NULL;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connectioms from slave device are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		return NULL;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = hci_conn_get(conn);
		return conn;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
		return NULL;
	}

	return NULL;
}
4455
/* Core handler for a single advertising or scan-response report.
 *
 * Validates LE Direct Advertising Reports against our own IRK,
 * resolves the advertiser's RPA to an identity address, triggers a
 * pending auto-connection when requested, and feeds mgmt device-found
 * events.  During active scanning, ADV_IND/ADV_SCAN_IND reports are
 * cached so they can be merged with the SCAN_RSP that follows.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, bdaddr_t *direct_addr,
			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	struct hci_conn *conn;
	bool match;
	u32 flags;

	/* If the direct address is present, then this report is from
	 * a LE Direct Advertising Report event. In that case it is
	 * important to see if the address is matching the local
	 * controller address.
	 */
	if (direct_addr) {
		/* Only resolvable random addresses are valid for these
		 * kind of reports and others can be ignored.
		 */
		if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type))
			return;

		/* If the controller is not using resolvable random
		 * addresses, then this report can be ignored.
		 */
		if (!test_bit(HCI_PRIVACY, &hdev->dev_flags))
			return;

		/* If the local IRK of the controller does not match
		 * with the resolvable random address provided, then
		 * this report can be ignored.
		 */
		if (!smp_irk_matches(hdev, hdev->irk, direct_addr))
			return;
	}

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device */
	conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);
	if (conn && type == LE_ADV_IND) {
		/* Store report for later inclusion by
		 * mgmt_device_connected
		 */
		memcpy(conn->le_adv_data, data, len);
		conn->le_adv_data_len = len;
	}

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4614
4615 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4616 {
4617 u8 num_reports = skb->data[0];
4618 void *ptr = &skb->data[1];
4619
4620 hci_dev_lock(hdev);
4621
4622 while (num_reports--) {
4623 struct hci_ev_le_advertising_info *ev = ptr;
4624 s8 rssi;
4625
4626 rssi = ev->data[ev->length];
4627 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4628 ev->bdaddr_type, NULL, 0, rssi,
4629 ev->data, ev->length);
4630
4631 ptr += sizeof(*ev) + ev->length + 1;
4632 }
4633
4634 hci_dev_unlock(hdev);
4635 }
4636
/* LE Long Term Key Request event: the controller asks for the LTK to
 * start encryption on the given connection.  The key store is
 * consulted -- EDiv/Rand must match for legacy keys and must be zero
 * for Secure Connections keys -- and either an LTK reply or a
 * negative reply is sent.  Short term keys (STKs) are single-use and
 * removed from the store once handed to the controller.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role);
	if (!ltk)
		goto not_found;

	if (smp_ltk_is_sc(ltk)) {
		/* With SC both EDiv and Rand are set to zero */
		if (ev->ediv || ev->rand)
			goto not_found;
	} else {
		/* For non-SC keys check that EDiv and Rand match */
		if (ev->ediv != ltk->ediv || ev->rand != ltk->rand)
			goto not_found;
	}

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* The security level the link will reach once encryption with
	 * this key completes.
	 */
	conn->pending_sec_level = smp_ltk_sec_level(ltk);

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del_rcu(&ltk->list);
		kfree_rcu(ltk, rcu);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4699
4700 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4701 u8 reason)
4702 {
4703 struct hci_cp_le_conn_param_req_neg_reply cp;
4704
4705 cp.handle = cpu_to_le16(handle);
4706 cp.reason = reason;
4707
4708 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4709 &cp);
4710 }
4711
4712 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4713 struct sk_buff *skb)
4714 {
4715 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4716 struct hci_cp_le_conn_param_req_reply cp;
4717 struct hci_conn *hcon;
4718 u16 handle, min, max, latency, timeout;
4719
4720 handle = le16_to_cpu(ev->handle);
4721 min = le16_to_cpu(ev->interval_min);
4722 max = le16_to_cpu(ev->interval_max);
4723 latency = le16_to_cpu(ev->latency);
4724 timeout = le16_to_cpu(ev->timeout);
4725
4726 hcon = hci_conn_hash_lookup_handle(hdev, handle);
4727 if (!hcon || hcon->state != BT_CONNECTED)
4728 return send_conn_param_neg_reply(hdev, handle,
4729 HCI_ERROR_UNKNOWN_CONN_ID);
4730
4731 if (hci_check_conn_params(min, max, latency, timeout))
4732 return send_conn_param_neg_reply(hdev, handle,
4733 HCI_ERROR_INVALID_LL_PARAMS);
4734
4735 if (hcon->role == HCI_ROLE_MASTER) {
4736 struct hci_conn_params *params;
4737 u8 store_hint;
4738
4739 hci_dev_lock(hdev);
4740
4741 params = hci_conn_params_lookup(hdev, &hcon->dst,
4742 hcon->dst_type);
4743 if (params) {
4744 params->conn_min_interval = min;
4745 params->conn_max_interval = max;
4746 params->conn_latency = latency;
4747 params->supervision_timeout = timeout;
4748 store_hint = 0x01;
4749 } else{
4750 store_hint = 0x00;
4751 }
4752
4753 hci_dev_unlock(hdev);
4754
4755 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4756 store_hint, min, max, latency, timeout);
4757 }
4758
4759 cp.handle = ev->handle;
4760 cp.interval_min = ev->interval_min;
4761 cp.interval_max = ev->interval_max;
4762 cp.latency = ev->latency;
4763 cp.timeout = ev->timeout;
4764 cp.min_ce_len = 0;
4765 cp.max_ce_len = 0;
4766
4767 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4768 }
4769
4770 static void hci_le_direct_adv_report_evt(struct hci_dev *hdev,
4771 struct sk_buff *skb)
4772 {
4773 u8 num_reports = skb->data[0];
4774 void *ptr = &skb->data[1];
4775
4776 hci_dev_lock(hdev);
4777
4778 while (num_reports--) {
4779 struct hci_ev_le_direct_adv_info *ev = ptr;
4780
4781 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4782 ev->bdaddr_type, &ev->direct_addr,
4783 ev->direct_addr_type, ev->rssi, NULL, 0);
4784
4785 ptr += sizeof(*ev);
4786 }
4787
4788 hci_dev_unlock(hdev);
4789 }
4790
/* Demultiplex an HCI LE Meta event to its subevent-specific handler.
 * The meta header is stripped first so handlers see the subevent
 * payload directly at skb->data.
 */
static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_meta *le_ev = (void *) skb->data;

	skb_pull(skb, sizeof(*le_ev));

	switch (le_ev->subevent) {
	case HCI_EV_LE_CONN_COMPLETE:
		hci_le_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_CONN_UPDATE_COMPLETE:
		hci_le_conn_update_complete_evt(hdev, skb);
		break;

	case HCI_EV_LE_ADVERTISING_REPORT:
		hci_le_adv_report_evt(hdev, skb);
		break;

	case HCI_EV_LE_LTK_REQ:
		hci_le_ltk_request_evt(hdev, skb);
		break;

	case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
		hci_le_remote_conn_param_req_evt(hdev, skb);
		break;

	case HCI_EV_LE_DIRECT_ADV_REPORT:
		hci_le_direct_adv_report_evt(hdev, skb);
		break;

	default:
		/* Unknown subevents are silently ignored. */
		break;
	}
}
4826
4827 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4828 {
4829 struct hci_ev_channel_selected *ev = (void *) skb->data;
4830 struct hci_conn *hcon;
4831
4832 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4833
4834 skb_pull(skb, sizeof(*ev));
4835
4836 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4837 if (!hcon)
4838 return;
4839
4840 amp_read_loc_assoc_final_data(hdev, hcon);
4841 }
4842
/* Main HCI event demultiplexer. Called for every event packet received
 * from the controller: optionally clones the event for a pending
 * synchronous request, completes the matching sent command, dispatches
 * to the event-specific handler and finally frees the skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hci_req_pending(hdev)) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the outstanding command was waiting for exactly this event,
	 * complete its request now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_HARDWARE_ERROR:
		hci_hardware_error_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		/* Unknown events are logged and dropped. */
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
This page took 0.126074 seconds and 6 git commands to generate.