Bluetooth: Add support for external configuration with UART driver
[deliverable/linux.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "a2mp.h"
34 #include "amp.h"
35 #include "smp.h"
36
37 /* Handle HCI Event packets */
38
/* Command Complete handler for HCI_Inquiry_Cancel.
 *
 * On success, clears the inquiry-in-progress flag, wakes any waiters
 * blocked on it, moves discovery to STOPPED and kicks off any pending
 * connection attempts that were deferred while inquiry was running.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* A non-zero status means the cancel failed; leave state untouched. */
	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	/* The barrier orders the clear_bit() above before the waiter check
	 * inside wake_up_bit(); without it a waiter could miss the wakeup.
	 */
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
58
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60 {
61 __u8 status = *((__u8 *) skb->data);
62
63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
64
65 if (status)
66 return;
67
68 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69 }
70
71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 {
73 __u8 status = *((__u8 *) skb->data);
74
75 BT_DBG("%s status 0x%2.2x", hdev->name, status);
76
77 if (status)
78 return;
79
80 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81
82 hci_conn_check_pending(hdev);
83 }
84
/* Command Complete handler for HCI_Remote_Name_Request_Cancel.
 * Only traces; no state is updated here — presumably the Remote Name
 * Request Complete event path performs the cleanup (NOTE(review):
 * confirm against the remote-name event handler).
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
90
91 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
92 {
93 struct hci_rp_role_discovery *rp = (void *) skb->data;
94 struct hci_conn *conn;
95
96 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
97
98 if (rp->status)
99 return;
100
101 hci_dev_lock(hdev);
102
103 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104 if (conn) {
105 if (rp->role)
106 clear_bit(HCI_CONN_MASTER, &conn->flags);
107 else
108 set_bit(HCI_CONN_MASTER, &conn->flags);
109 }
110
111 hci_dev_unlock(hdev);
112 }
113
114 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
115 {
116 struct hci_rp_read_link_policy *rp = (void *) skb->data;
117 struct hci_conn *conn;
118
119 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
120
121 if (rp->status)
122 return;
123
124 hci_dev_lock(hdev);
125
126 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
127 if (conn)
128 conn->link_policy = __le16_to_cpu(rp->policy);
129
130 hci_dev_unlock(hdev);
131 }
132
/* Command Complete handler for HCI_Write_Link_Policy_Settings.
 *
 * The reply only carries a status and handle; the policy value that was
 * actually written must be recovered from the command we sent.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* sent + 2 skips the 2-byte connection handle at the start
		 * of the command parameters; the policy field follows it.
		 */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
156
157 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
158 struct sk_buff *skb)
159 {
160 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
161
162 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
163
164 if (rp->status)
165 return;
166
167 hdev->link_policy = __le16_to_cpu(rp->policy);
168 }
169
170 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
171 struct sk_buff *skb)
172 {
173 __u8 status = *((__u8 *) skb->data);
174 void *sent;
175
176 BT_DBG("%s status 0x%2.2x", hdev->name, status);
177
178 if (status)
179 return;
180
181 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
182 if (!sent)
183 return;
184
185 hdev->link_policy = get_unaligned_le16(sent);
186 }
187
/* Command Complete handler for HCI_Reset.
 *
 * Note: state is reset regardless of the reported status — only the
 * HCI_RESET flag tracks the command itself. All host-side state that
 * does not survive a controller reset is reverted to its defaults.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Advertising and scan-response payloads are gone after reset. */
	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;
}
213
/* Command Complete handler for HCI_Write_Local_Name.
 *
 * mgmt is notified even when status is non-zero so that a pending
 * Set Local Name mgmt command gets its (possibly failing) response;
 * the cached name is only updated on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
234
235 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
236 {
237 struct hci_rp_read_local_name *rp = (void *) skb->data;
238
239 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
240
241 if (rp->status)
242 return;
243
244 if (test_bit(HCI_SETUP, &hdev->dev_flags))
245 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
246 }
247
/* Command Complete handler for HCI_Write_Authentication_Enable.
 *
 * On success the HCI_AUTH flag mirrors the value that was written;
 * mgmt is notified regardless of status so a pending mgmt command
 * can complete (possibly with an error).
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);
}
271
272 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
273 {
274 __u8 status = *((__u8 *) skb->data);
275 __u8 param;
276 void *sent;
277
278 BT_DBG("%s status 0x%2.2x", hdev->name, status);
279
280 if (status)
281 return;
282
283 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
284 if (!sent)
285 return;
286
287 param = *((__u8 *) sent);
288
289 if (param)
290 set_bit(HCI_ENCRYPT, &hdev->flags);
291 else
292 clear_bit(HCI_ENCRYPT, &hdev->flags);
293 }
294
295 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
296 {
297 __u8 status = *((__u8 *) skb->data);
298 __u8 param;
299 void *sent;
300
301 BT_DBG("%s status 0x%2.2x", hdev->name, status);
302
303 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
304 if (!sent)
305 return;
306
307 param = *((__u8 *) sent);
308
309 hci_dev_lock(hdev);
310
311 if (status) {
312 hdev->discov_timeout = 0;
313 goto done;
314 }
315
316 if (param & SCAN_INQUIRY)
317 set_bit(HCI_ISCAN, &hdev->flags);
318 else
319 clear_bit(HCI_ISCAN, &hdev->flags);
320
321 if (param & SCAN_PAGE)
322 set_bit(HCI_PSCAN, &hdev->flags);
323 else
324 clear_bit(HCI_ISCAN, &hdev->flags);
325
326 done:
327 hci_dev_unlock(hdev);
328 }
329
330 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
331 {
332 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
333
334 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
335
336 if (rp->status)
337 return;
338
339 memcpy(hdev->dev_class, rp->dev_class, 3);
340
341 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
342 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
343 }
344
/* Command Complete handler for HCI_Write_Class_of_Device.
 *
 * The cached class is updated only on success, but mgmt is notified in
 * all cases so a pending Set Class of Device command can complete.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
366
367 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
368 {
369 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
370 __u16 setting;
371
372 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
373
374 if (rp->status)
375 return;
376
377 setting = __le16_to_cpu(rp->voice_setting);
378
379 if (hdev->voice_setting == setting)
380 return;
381
382 hdev->voice_setting = setting;
383
384 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
385
386 if (hdev->notify)
387 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
388 }
389
390 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
391 struct sk_buff *skb)
392 {
393 __u8 status = *((__u8 *) skb->data);
394 __u16 setting;
395 void *sent;
396
397 BT_DBG("%s status 0x%2.2x", hdev->name, status);
398
399 if (status)
400 return;
401
402 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
403 if (!sent)
404 return;
405
406 setting = get_unaligned_le16(sent);
407
408 if (hdev->voice_setting == setting)
409 return;
410
411 hdev->voice_setting = setting;
412
413 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
414
415 if (hdev->notify)
416 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
417 }
418
419 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
420 struct sk_buff *skb)
421 {
422 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
423
424 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
425
426 if (rp->status)
427 return;
428
429 hdev->num_iac = rp->num_iac;
430
431 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
432 }
433
/* Command Complete handler for HCI_Write_Simple_Pairing_Mode.
 *
 * On success the host-features bit for SSP is mirrored first; then
 * either mgmt is notified (regardless of status, so a pending command
 * completes) or, without mgmt, the HCI_SSP_ENABLED flag is updated
 * directly on success.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
461
/* Command Complete handler for HCI_Write_Secure_Connections_Host_Support.
 *
 * Structured identically to the SSP handler above: mirror the host
 * feature bit on success, then notify mgmt (always) or update the
 * HCI_SC_ENABLED flag directly (success only, no mgmt).
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_sc_enable_complete(hdev, sent->support, status);
	else if (!status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}
}
489
490 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
491 {
492 struct hci_rp_read_local_version *rp = (void *) skb->data;
493
494 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
495
496 if (rp->status)
497 return;
498
499 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
500 hdev->hci_ver = rp->hci_ver;
501 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
502 hdev->lmp_ver = rp->lmp_ver;
503 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
504 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
505 }
506 }
507
508 static void hci_cc_read_local_commands(struct hci_dev *hdev,
509 struct sk_buff *skb)
510 {
511 struct hci_rp_read_local_commands *rp = (void *) skb->data;
512
513 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
514
515 if (rp->status)
516 return;
517
518 if (test_bit(HCI_SETUP, &hdev->dev_flags))
519 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
520 }
521
/* Command Complete handler for HCI_Read_Local_Supported_Features.
 *
 * Caches page 0 of the LMP feature mask and derives the ACL packet
 * types and (e)SCO packet types the host may use from the individual
 * feature bits.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	/* Multi-slot ACL packets. */
	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	/* Legacy SCO packet types. */
	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	/* eSCO packet types. */
	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	/* EDR eSCO rates. */
	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
571
572 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
573 struct sk_buff *skb)
574 {
575 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
576
577 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
578
579 if (rp->status)
580 return;
581
582 if (hdev->max_page < rp->max_page)
583 hdev->max_page = rp->max_page;
584
585 if (rp->page < HCI_MAX_PAGES)
586 memcpy(hdev->features[rp->page], rp->features, 8);
587 }
588
589 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
590 struct sk_buff *skb)
591 {
592 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
593
594 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
595
596 if (rp->status)
597 return;
598
599 hdev->flow_ctl_mode = rp->mode;
600 }
601
/* Command Complete handler for HCI_Read_Buffer_Size.
 *
 * Records the controller's ACL/SCO MTUs and buffer counts and seeds the
 * in-flight packet counters from them. Some controllers report bogus
 * SCO values; the FIXUP_BUFFER_SIZE quirk overrides those with sane
 * defaults.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	/* Start with the full buffer quota available. */
	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
627
628 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
629 {
630 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
631
632 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
633
634 if (rp->status)
635 return;
636
637 if (test_bit(HCI_INIT, &hdev->flags))
638 bacpy(&hdev->bdaddr, &rp->bdaddr);
639
640 if (test_bit(HCI_SETUP, &hdev->dev_flags))
641 bacpy(&hdev->setup_addr, &rp->bdaddr);
642 }
643
644 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
645 struct sk_buff *skb)
646 {
647 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
648
649 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
650
651 if (rp->status)
652 return;
653
654 if (test_bit(HCI_INIT, &hdev->flags)) {
655 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
656 hdev->page_scan_window = __le16_to_cpu(rp->window);
657 }
658 }
659
660 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
661 struct sk_buff *skb)
662 {
663 u8 status = *((u8 *) skb->data);
664 struct hci_cp_write_page_scan_activity *sent;
665
666 BT_DBG("%s status 0x%2.2x", hdev->name, status);
667
668 if (status)
669 return;
670
671 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
672 if (!sent)
673 return;
674
675 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
676 hdev->page_scan_window = __le16_to_cpu(sent->window);
677 }
678
679 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
680 struct sk_buff *skb)
681 {
682 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
683
684 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
685
686 if (rp->status)
687 return;
688
689 if (test_bit(HCI_INIT, &hdev->flags))
690 hdev->page_scan_type = rp->type;
691 }
692
693 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
694 struct sk_buff *skb)
695 {
696 u8 status = *((u8 *) skb->data);
697 u8 *type;
698
699 BT_DBG("%s status 0x%2.2x", hdev->name, status);
700
701 if (status)
702 return;
703
704 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
705 if (type)
706 hdev->page_scan_type = *type;
707 }
708
709 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
710 struct sk_buff *skb)
711 {
712 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
713
714 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
715
716 if (rp->status)
717 return;
718
719 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
720 hdev->block_len = __le16_to_cpu(rp->block_len);
721 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
722
723 hdev->block_cnt = hdev->num_blocks;
724
725 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
726 hdev->block_cnt, hdev->block_len);
727 }
728
/* Command Complete handler for HCI_Read_Clock.
 *
 * Depending on the "which" parameter of the command we sent, the reply
 * carries either the local (native) clock — stored on hdev — or a
 * piconet clock for a specific connection, stored on that connection
 * together with its accuracy.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against a truncated reply before touching any field. */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	/* which == 0x00 requests the local clock. */
	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
763
/* Command Complete handler for HCI_Read_Local_AMP_Info.
 *
 * On success the AMP controller parameters are cached on hdev. In all
 * cases (note the goto on failure) an A2MP Get Info response is sent,
 * since the remote side is waiting for one either way.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
788
/* Command Complete handler for HCI_Read_Local_AMP_ASSOC.
 *
 * The AMP assoc structure can be larger than one HCI event, so it
 * arrives in fragments. rem_len reports how much data is still
 * outstanding including this fragment: while rem_len exceeds the bytes
 * carried here, accumulate and request the next fragment; otherwise
 * this is the final fragment and the A2MP responses can be sent.
 */
static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
	struct amp_assoc *assoc = &hdev->loc_assoc;
	size_t rem_len, frag_len;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	frag_len = skb->len - sizeof(*rp);
	rem_len = __le16_to_cpu(rp->rem_len);

	if (rem_len > frag_len) {
		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);

		/* More fragments outstanding: stash this one and ask for
		 * the next. No A2MP response until everything arrived.
		 */
		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
		assoc->offset += frag_len;

		/* Read other fragments */
		amp_read_loc_assoc_frag(hdev, rp->phy_handle);

		return;
	}

	/* Final fragment: complete the assoc and reset the offset for
	 * the next read cycle.
	 */
	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
	assoc->len = assoc->offset + rem_len;
	assoc->offset = 0;

a2mp_rsp:
	/* Send A2MP Rsp when all fragments are received */
	a2mp_send_getampassoc_rsp(hdev, rp->status);
	a2mp_send_create_phy_link_req(hdev, rp->status);
}
825
826 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
827 struct sk_buff *skb)
828 {
829 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
830
831 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
832
833 if (rp->status)
834 return;
835
836 hdev->inq_tx_power = rp->tx_power;
837 }
838
/* Command Complete handler for HCI_PIN_Code_Request_Reply.
 *
 * mgmt is notified first (regardless of status, so a pending mgmt
 * command completes); on success the PIN length from the command we
 * sent is recorded on the matching ACL connection for later key-type
 * decisions.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
866
867 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
868 {
869 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
870
871 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
872
873 hci_dev_lock(hdev);
874
875 if (test_bit(HCI_MGMT, &hdev->dev_flags))
876 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
877 rp->status);
878
879 hci_dev_unlock(hdev);
880 }
881
882 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
883 struct sk_buff *skb)
884 {
885 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
886
887 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
888
889 if (rp->status)
890 return;
891
892 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
893 hdev->le_pkts = rp->le_max_pkt;
894
895 hdev->le_cnt = hdev->le_pkts;
896
897 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
898 }
899
900 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
901 struct sk_buff *skb)
902 {
903 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
904
905 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
906
907 if (rp->status)
908 return;
909
910 memcpy(hdev->le_features, rp->features, 8);
911 }
912
913 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
914 struct sk_buff *skb)
915 {
916 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
917
918 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
919
920 if (rp->status)
921 return;
922
923 hdev->adv_tx_power = rp->tx_power;
924 }
925
926 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
927 {
928 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
929
930 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
931
932 hci_dev_lock(hdev);
933
934 if (test_bit(HCI_MGMT, &hdev->dev_flags))
935 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
936 rp->status);
937
938 hci_dev_unlock(hdev);
939 }
940
941 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
942 struct sk_buff *skb)
943 {
944 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
945
946 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
947
948 hci_dev_lock(hdev);
949
950 if (test_bit(HCI_MGMT, &hdev->dev_flags))
951 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
952 ACL_LINK, 0, rp->status);
953
954 hci_dev_unlock(hdev);
955 }
956
957 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
958 {
959 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
960
961 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
962
963 hci_dev_lock(hdev);
964
965 if (test_bit(HCI_MGMT, &hdev->dev_flags))
966 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
967 0, rp->status);
968
969 hci_dev_unlock(hdev);
970 }
971
972 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
973 struct sk_buff *skb)
974 {
975 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
976
977 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
978
979 hci_dev_lock(hdev);
980
981 if (test_bit(HCI_MGMT, &hdev->dev_flags))
982 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
983 ACL_LINK, 0, rp->status);
984
985 hci_dev_unlock(hdev);
986 }
987
988 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
989 struct sk_buff *skb)
990 {
991 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
992
993 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
994
995 hci_dev_lock(hdev);
996 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
997 NULL, NULL, rp->status);
998 hci_dev_unlock(hdev);
999 }
1000
1001 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
1002 struct sk_buff *skb)
1003 {
1004 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
1005
1006 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1007
1008 hci_dev_lock(hdev);
1009 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
1010 rp->hash256, rp->randomizer256,
1011 rp->status);
1012 hci_dev_unlock(hdev);
1013 }
1014
1015
1016 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
1017 {
1018 __u8 status = *((__u8 *) skb->data);
1019 bdaddr_t *sent;
1020
1021 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1022
1023 if (status)
1024 return;
1025
1026 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
1027 if (!sent)
1028 return;
1029
1030 hci_dev_lock(hdev);
1031
1032 bacpy(&hdev->random_addr, sent);
1033
1034 hci_dev_unlock(hdev);
1035 }
1036
/* Command Complete handler for HCI_LE_Set_Advertising_Enable.
 *
 * Tracks advertising state in HCI_LE_ADV. When advertising was turned
 * on while an LE connection attempt is pending (peripheral-initiated
 * connection), arm the connection timeout so a peer that never
 * connects does not leave us stuck.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		set_bit(HCI_LE_ADV, &hdev->dev_flags);

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		clear_bit(HCI_LE_ADV, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
1071
1072 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1073 {
1074 struct hci_cp_le_set_scan_param *cp;
1075 __u8 status = *((__u8 *) skb->data);
1076
1077 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1078
1079 if (status)
1080 return;
1081
1082 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
1083 if (!cp)
1084 return;
1085
1086 hci_dev_lock(hdev);
1087
1088 hdev->le_scan_type = cp->type;
1089
1090 hci_dev_unlock(hdev);
1091 }
1092
1093 static bool has_pending_adv_report(struct hci_dev *hdev)
1094 {
1095 struct discovery_state *d = &hdev->discovery;
1096
1097 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1098 }
1099
1100 static void clear_pending_adv_report(struct hci_dev *hdev)
1101 {
1102 struct discovery_state *d = &hdev->discovery;
1103
1104 bacpy(&d->last_adv_addr, BDADDR_ANY);
1105 d->last_adv_data_len = 0;
1106 }
1107
1108 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1109 u8 bdaddr_type, s8 rssi, u32 flags,
1110 u8 *data, u8 len)
1111 {
1112 struct discovery_state *d = &hdev->discovery;
1113
1114 bacpy(&d->last_adv_addr, bdaddr);
1115 d->last_adv_addr_type = bdaddr_type;
1116 d->last_adv_rssi = rssi;
1117 d->last_adv_flags = flags;
1118 memcpy(d->last_adv_data, data, len);
1119 d->last_adv_data_len = len;
1120 }
1121
/* Command Complete handler for HCI_LE_Set_Scan_Enable.
 *
 * Keeps HCI_LE_SCAN in sync with the enable value we sent and handles
 * the bookkeeping around starting/stopping scanning: flushing a
 * deferred advertising report, cancelling the scan-disable timer, and
 * deciding whether discovery stopped or advertising must be re-enabled.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* Active scans merge ADV_IND with SCAN_RSP data, so any
		 * stale pending report must be dropped at scan start.
		 */
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1187
1188 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1189 struct sk_buff *skb)
1190 {
1191 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1192
1193 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1194
1195 if (rp->status)
1196 return;
1197
1198 hdev->le_white_list_size = rp->size;
1199 }
1200
1201 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1202 struct sk_buff *skb)
1203 {
1204 __u8 status = *((__u8 *) skb->data);
1205
1206 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1207
1208 if (status)
1209 return;
1210
1211 hci_bdaddr_list_clear(&hdev->le_white_list);
1212 }
1213
1214 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1215 struct sk_buff *skb)
1216 {
1217 struct hci_cp_le_add_to_white_list *sent;
1218 __u8 status = *((__u8 *) skb->data);
1219
1220 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1221
1222 if (status)
1223 return;
1224
1225 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1226 if (!sent)
1227 return;
1228
1229 hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
1230 sent->bdaddr_type);
1231 }
1232
1233 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1234 struct sk_buff *skb)
1235 {
1236 struct hci_cp_le_del_from_white_list *sent;
1237 __u8 status = *((__u8 *) skb->data);
1238
1239 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1240
1241 if (status)
1242 return;
1243
1244 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1245 if (!sent)
1246 return;
1247
1248 hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
1249 sent->bdaddr_type);
1250 }
1251
1252 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1253 struct sk_buff *skb)
1254 {
1255 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1256
1257 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1258
1259 if (rp->status)
1260 return;
1261
1262 memcpy(hdev->le_states, rp->le_states, 8);
1263 }
1264
1265 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1266 struct sk_buff *skb)
1267 {
1268 struct hci_cp_write_le_host_supported *sent;
1269 __u8 status = *((__u8 *) skb->data);
1270
1271 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1272
1273 if (status)
1274 return;
1275
1276 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1277 if (!sent)
1278 return;
1279
1280 if (sent->le) {
1281 hdev->features[1][0] |= LMP_HOST_LE;
1282 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1283 } else {
1284 hdev->features[1][0] &= ~LMP_HOST_LE;
1285 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1286 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1287 }
1288
1289 if (sent->simul)
1290 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1291 else
1292 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1293 }
1294
1295 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1296 {
1297 struct hci_cp_le_set_adv_param *cp;
1298 u8 status = *((u8 *) skb->data);
1299
1300 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1301
1302 if (status)
1303 return;
1304
1305 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1306 if (!cp)
1307 return;
1308
1309 hci_dev_lock(hdev);
1310 hdev->adv_addr_type = cp->own_address_type;
1311 hci_dev_unlock(hdev);
1312 }
1313
1314 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1315 struct sk_buff *skb)
1316 {
1317 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1318
1319 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1320 hdev->name, rp->status, rp->phy_handle);
1321
1322 if (rp->status)
1323 return;
1324
1325 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1326 }
1327
1328 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1329 {
1330 struct hci_rp_read_rssi *rp = (void *) skb->data;
1331 struct hci_conn *conn;
1332
1333 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1334
1335 if (rp->status)
1336 return;
1337
1338 hci_dev_lock(hdev);
1339
1340 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1341 if (conn)
1342 conn->rssi = rp->rssi;
1343
1344 hci_dev_unlock(hdev);
1345 }
1346
1347 static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
1348 {
1349 struct hci_cp_read_tx_power *sent;
1350 struct hci_rp_read_tx_power *rp = (void *) skb->data;
1351 struct hci_conn *conn;
1352
1353 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1354
1355 if (rp->status)
1356 return;
1357
1358 sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
1359 if (!sent)
1360 return;
1361
1362 hci_dev_lock(hdev);
1363
1364 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1365 if (!conn)
1366 goto unlock;
1367
1368 switch (sent->type) {
1369 case 0x00:
1370 conn->tx_power = rp->tx_power;
1371 break;
1372 case 0x01:
1373 conn->max_tx_power = rp->tx_power;
1374 break;
1375 }
1376
1377 unlock:
1378 hci_dev_unlock(hdev);
1379 }
1380
1381 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1382 {
1383 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1384
1385 if (status) {
1386 hci_conn_check_pending(hdev);
1387 return;
1388 }
1389
1390 set_bit(HCI_INQUIRY, &hdev->flags);
1391 }
1392
/* Command status handler for HCI_OP_CREATE_CONN.
 *
 * On failure, tear down (or schedule a retry of) the pending outgoing ACL
 * connection; on success, make sure a hci_conn object exists to track it.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c (Command Disallowed) gets up to two
			 * retries via BT_CONNECT2; any other error, or too
			 * many attempts, kills the connection.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			/* Command accepted but no hci_conn tracks it yet
			 * (e.g. connection initiated via raw ioctl).
			 */
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				set_bit(HCI_CONN_MASTER, &conn->flags);
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1432
1433 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1434 {
1435 struct hci_cp_add_sco *cp;
1436 struct hci_conn *acl, *sco;
1437 __u16 handle;
1438
1439 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1440
1441 if (!status)
1442 return;
1443
1444 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1445 if (!cp)
1446 return;
1447
1448 handle = __le16_to_cpu(cp->handle);
1449
1450 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1451
1452 hci_dev_lock(hdev);
1453
1454 acl = hci_conn_hash_lookup_handle(hdev, handle);
1455 if (acl) {
1456 sco = acl->link;
1457 if (sco) {
1458 sco->state = BT_CLOSED;
1459
1460 hci_proto_connect_cfm(sco, status);
1461 hci_conn_del(sco);
1462 }
1463 }
1464
1465 hci_dev_unlock(hdev);
1466 }
1467
1468 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1469 {
1470 struct hci_cp_auth_requested *cp;
1471 struct hci_conn *conn;
1472
1473 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1474
1475 if (!status)
1476 return;
1477
1478 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1479 if (!cp)
1480 return;
1481
1482 hci_dev_lock(hdev);
1483
1484 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1485 if (conn) {
1486 if (conn->state == BT_CONFIG) {
1487 hci_proto_connect_cfm(conn, status);
1488 hci_conn_drop(conn);
1489 }
1490 }
1491
1492 hci_dev_unlock(hdev);
1493 }
1494
1495 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1496 {
1497 struct hci_cp_set_conn_encrypt *cp;
1498 struct hci_conn *conn;
1499
1500 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1501
1502 if (!status)
1503 return;
1504
1505 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1506 if (!cp)
1507 return;
1508
1509 hci_dev_lock(hdev);
1510
1511 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1512 if (conn) {
1513 if (conn->state == BT_CONFIG) {
1514 hci_proto_connect_cfm(conn, status);
1515 hci_conn_drop(conn);
1516 }
1517 }
1518
1519 hci_dev_unlock(hdev);
1520 }
1521
1522 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1523 struct hci_conn *conn)
1524 {
1525 if (conn->state != BT_CONFIG || !conn->out)
1526 return 0;
1527
1528 if (conn->pending_sec_level == BT_SECURITY_SDP)
1529 return 0;
1530
1531 /* Only request authentication for SSP connections or non-SSP
1532 * devices with sec_level MEDIUM or HIGH or if MITM protection
1533 * is requested.
1534 */
1535 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1536 conn->pending_sec_level != BT_SECURITY_FIPS &&
1537 conn->pending_sec_level != BT_SECURITY_HIGH &&
1538 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1539 return 0;
1540
1541 return 1;
1542 }
1543
1544 static int hci_resolve_name(struct hci_dev *hdev,
1545 struct inquiry_entry *e)
1546 {
1547 struct hci_cp_remote_name_req cp;
1548
1549 memset(&cp, 0, sizeof(cp));
1550
1551 bacpy(&cp.bdaddr, &e->data.bdaddr);
1552 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1553 cp.pscan_mode = e->data.pscan_mode;
1554 cp.clock_offset = e->data.clock_offset;
1555
1556 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1557 }
1558
1559 static bool hci_resolve_next_name(struct hci_dev *hdev)
1560 {
1561 struct discovery_state *discov = &hdev->discovery;
1562 struct inquiry_entry *e;
1563
1564 if (list_empty(&discov->resolve))
1565 return false;
1566
1567 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1568 if (!e)
1569 return false;
1570
1571 if (hci_resolve_name(hdev, e) == 0) {
1572 e->name_state = NAME_PENDING;
1573 return true;
1574 }
1575
1576 return false;
1577 }
1578
/* Deliver a remote name lookup result (or failure) to mgmt and advance
 * the discovery name-resolution state machine.
 *
 * @conn may be NULL; @name is NULL when the lookup failed.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* First time mgmt hears about this connection: announce it now that
	 * (part of) the name is available.
	 */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* If another lookup was started, discovery stays in RESOLVING */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1621
/* Command status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only acts on failure: reports the failed lookup to the discovery
 * machinery and, if the request was part of outgoing connection setup,
 * starts authentication directly since no name event will follow.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Avoid issuing a second request if one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1662
1663 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1664 {
1665 struct hci_cp_read_remote_features *cp;
1666 struct hci_conn *conn;
1667
1668 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1669
1670 if (!status)
1671 return;
1672
1673 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1674 if (!cp)
1675 return;
1676
1677 hci_dev_lock(hdev);
1678
1679 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1680 if (conn) {
1681 if (conn->state == BT_CONFIG) {
1682 hci_proto_connect_cfm(conn, status);
1683 hci_conn_drop(conn);
1684 }
1685 }
1686
1687 hci_dev_unlock(hdev);
1688 }
1689
1690 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1691 {
1692 struct hci_cp_read_remote_ext_features *cp;
1693 struct hci_conn *conn;
1694
1695 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1696
1697 if (!status)
1698 return;
1699
1700 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1701 if (!cp)
1702 return;
1703
1704 hci_dev_lock(hdev);
1705
1706 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1707 if (conn) {
1708 if (conn->state == BT_CONFIG) {
1709 hci_proto_connect_cfm(conn, status);
1710 hci_conn_drop(conn);
1711 }
1712 }
1713
1714 hci_dev_unlock(hdev);
1715 }
1716
1717 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1718 {
1719 struct hci_cp_setup_sync_conn *cp;
1720 struct hci_conn *acl, *sco;
1721 __u16 handle;
1722
1723 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1724
1725 if (!status)
1726 return;
1727
1728 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1729 if (!cp)
1730 return;
1731
1732 handle = __le16_to_cpu(cp->handle);
1733
1734 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1735
1736 hci_dev_lock(hdev);
1737
1738 acl = hci_conn_hash_lookup_handle(hdev, handle);
1739 if (acl) {
1740 sco = acl->link;
1741 if (sco) {
1742 sco->state = BT_CLOSED;
1743
1744 hci_proto_connect_cfm(sco, status);
1745 hci_conn_del(sco);
1746 }
1747 }
1748
1749 hci_dev_unlock(hdev);
1750 }
1751
1752 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1753 {
1754 struct hci_cp_sniff_mode *cp;
1755 struct hci_conn *conn;
1756
1757 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1758
1759 if (!status)
1760 return;
1761
1762 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1763 if (!cp)
1764 return;
1765
1766 hci_dev_lock(hdev);
1767
1768 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1769 if (conn) {
1770 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1771
1772 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1773 hci_sco_setup(conn, status);
1774 }
1775
1776 hci_dev_unlock(hdev);
1777 }
1778
1779 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1780 {
1781 struct hci_cp_exit_sniff_mode *cp;
1782 struct hci_conn *conn;
1783
1784 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1785
1786 if (!status)
1787 return;
1788
1789 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1790 if (!cp)
1791 return;
1792
1793 hci_dev_lock(hdev);
1794
1795 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1796 if (conn) {
1797 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1798
1799 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1800 hci_sco_setup(conn, status);
1801 }
1802
1803 hci_dev_unlock(hdev);
1804 }
1805
1806 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1807 {
1808 struct hci_cp_disconnect *cp;
1809 struct hci_conn *conn;
1810
1811 if (!status)
1812 return;
1813
1814 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1815 if (!cp)
1816 return;
1817
1818 hci_dev_lock(hdev);
1819
1820 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1821 if (conn)
1822 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1823 conn->dst_type, status);
1824
1825 hci_dev_unlock(hdev);
1826 }
1827
1828 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1829 {
1830 struct hci_cp_create_phy_link *cp;
1831
1832 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1833
1834 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1835 if (!cp)
1836 return;
1837
1838 hci_dev_lock(hdev);
1839
1840 if (status) {
1841 struct hci_conn *hcon;
1842
1843 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1844 if (hcon)
1845 hci_conn_del(hcon);
1846 } else {
1847 amp_write_remote_assoc(hdev, cp->phy_handle);
1848 }
1849
1850 hci_dev_unlock(hdev);
1851 }
1852
1853 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1854 {
1855 struct hci_cp_accept_phy_link *cp;
1856
1857 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1858
1859 if (status)
1860 return;
1861
1862 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1863 if (!cp)
1864 return;
1865
1866 amp_write_remote_assoc(hdev, cp->phy_handle);
1867 }
1868
/* Command status handler for HCI_OP_LE_CREATE_CONN.
 *
 * On success, record the address information SMP will need later and arm
 * a timeout for direct (non-white-list) connection attempts.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1919
1920 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1921 {
1922 struct hci_cp_le_start_enc *cp;
1923 struct hci_conn *conn;
1924
1925 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1926
1927 if (!status)
1928 return;
1929
1930 hci_dev_lock(hdev);
1931
1932 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
1933 if (!cp)
1934 goto unlock;
1935
1936 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1937 if (!conn)
1938 goto unlock;
1939
1940 if (conn->state != BT_CONNECTED)
1941 goto unlock;
1942
1943 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1944 hci_conn_drop(conn);
1945
1946 unlock:
1947 hci_dev_unlock(hdev);
1948 }
1949
/* Handle the Inquiry Complete event: clear the inquiry flag, wake any
 * waiters, and move mgmt discovery either into name resolution or to
 * the stopped state.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Retry connection attempts that were held back during inquiry */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* Discovery state handling below only applies to mgmt-driven use */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Names still to resolve: start with the first candidate; if the
	 * request cannot be sent, give up and stop discovery.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1990
1991 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1992 {
1993 struct inquiry_data data;
1994 struct inquiry_info *info = (void *) (skb->data + 1);
1995 int num_rsp = *((__u8 *) skb->data);
1996
1997 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1998
1999 if (!num_rsp)
2000 return;
2001
2002 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2003 return;
2004
2005 hci_dev_lock(hdev);
2006
2007 for (; num_rsp; num_rsp--, info++) {
2008 u32 flags;
2009
2010 bacpy(&data.bdaddr, &info->bdaddr);
2011 data.pscan_rep_mode = info->pscan_rep_mode;
2012 data.pscan_period_mode = info->pscan_period_mode;
2013 data.pscan_mode = info->pscan_mode;
2014 memcpy(data.dev_class, info->dev_class, 3);
2015 data.clock_offset = info->clock_offset;
2016 data.rssi = 0x00;
2017 data.ssp_mode = 0x00;
2018
2019 flags = hci_inquiry_cache_update(hdev, &data, false);
2020
2021 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2022 info->dev_class, 0, flags, NULL, 0, NULL, 0);
2023 }
2024
2025 hci_dev_unlock(hdev);
2026 }
2027
/* Handle the Connection Complete event for BR/EDR ACL and SCO links:
 * finalize the hci_conn state, set timeouts and security flags, and
 * notify the upper layers.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		/* A connection set up as eSCO may complete as plain SCO;
		 * retry the lookup with the eSCO type and fix up the
		 * connection type accordingly.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) connection without a
			 * stored link key may be about to pair, so give it
			 * the longer pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	/* For ACL success, connect_cfm is deferred until remote features
	 * are known; on any failure the connection is torn down here.
	 */
	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2109
2110 static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2111 {
2112 struct hci_cp_reject_conn_req cp;
2113
2114 bacpy(&cp.bdaddr, bdaddr);
2115 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2116 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2117 }
2118
/* Handle an incoming Connection Request event: apply link policy and
 * black/white list filtering, then accept, reject or defer the
 * connection.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layers veto or defer the connection */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connectable: reject only blacklisted peers.
	 * Non-connectable: accept only whitelisted peers.
	 */
	if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags)) {
		if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
					   BDADDR_BREDR)) {
			hci_reject_conn(hdev, &ev->bdaddr);
			return;
		}
	} else {
		if (!hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
					    BDADDR_BREDR)) {
			hci_reject_conn(hdev, &ev->bdaddr);
			return;
		}
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Refresh the cached device class if we know this peer */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		/* Default sync accept parameters: 8000 B/s each way, no
		 * latency limit, any retransmission effort.
		 */
		cp.tx_bandwidth = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth = cpu_to_le32(0x00001f40);
		cp.max_latency = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Upper layer asked to defer: let it decide via connect_cfm */
		conn->state = BT_CONNECT2;
		hci_proto_connect_cfm(conn, 0);
	}
}
2208
2209 static u8 hci_to_mgmt_reason(u8 err)
2210 {
2211 switch (err) {
2212 case HCI_ERROR_CONNECTION_TIMEOUT:
2213 return MGMT_DEV_DISCONN_TIMEOUT;
2214 case HCI_ERROR_REMOTE_USER_TERM:
2215 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2216 case HCI_ERROR_REMOTE_POWER_OFF:
2217 return MGMT_DEV_DISCONN_REMOTE;
2218 case HCI_ERROR_LOCAL_HOST_TERM:
2219 return MGMT_DEV_DISCONN_LOCAL_HOST;
2220 default:
2221 return MGMT_DEV_DISCONN_UNKNOWN;
2222 }
2223 }
2224
/* Handle the Disconnection Complete event: notify mgmt, clean up link
 * keys and auto-connect bookkeeping, and delete the connection object.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report the disconnect to mgmt if it saw the connect */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK &&
	    test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	/* Re-arm auto-connection for peers configured to reconnect */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* conn is freed below; remember the type for the check after */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2298
/* Handle the Authentication Complete event: update security state, report
 * failures to mgmt, and continue with encryption or connection setup as
 * appropriate.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (non-SSP) devices cannot re-authenticate an
		 * existing link, so don't raise the security level then.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* During setup: SSP links continue with encryption; others
		 * are reported connected right away.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* An encryption change was waiting on this authentication */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2364
/* Handle the Remote Name Request Complete event: feed the result into the
 * discovery machinery and, if needed, continue outgoing connection setup
 * with authentication.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Avoid issuing a second request if one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2403
/* Handle the Encryption Change event: update the connection's security
 * flags, enforce Secure Connections Only policy, and notify the upper
 * layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt == 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2469
2470 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2471 struct sk_buff *skb)
2472 {
2473 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2474 struct hci_conn *conn;
2475
2476 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2477
2478 hci_dev_lock(hdev);
2479
2480 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2481 if (conn) {
2482 if (!ev->status)
2483 set_bit(HCI_CONN_SECURE, &conn->flags);
2484
2485 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2486
2487 hci_key_change_cfm(conn, ev->status);
2488 }
2489
2490 hci_dev_unlock(hdev);
2491 }
2492
/* Read Remote Supported Features complete event.
 *
 * Caches page 0 of the remote feature mask and continues connection
 * setup: request extended features when both sides are SSP capable,
 * otherwise resolve the remote name or complete the connection.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache feature page 0 for this connection */
	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* The remaining steps only apply while setup is in progress */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* With SSP on both sides, fetch extended feature page 1 next */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* If no outgoing authentication is required, setup is done */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2542
/* Command Complete event.
 *
 * Dispatches the completion to the per-opcode handler, completes
 * any matching HCI request, and re-arms the command queue when the
 * controller reports it can accept further commands (ev->ncmd).
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First return parameter after the event header is the status */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	/* Strip the event header so handlers see only the parameters */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command completed, so stop the command timeout timer.
	 * HCI_OP_NOP completions are controller-generated and do not
	 * correspond to a command we sent.
	 */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* ncmd > 0 means the controller can accept more commands;
	 * restart command processing unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2822
/* Command Status event.
 *
 * Dispatches the status to the per-opcode handler, completes the
 * pending HCI request on failure (or when no follow-up event is
 * expected), and re-arms the command queue based on ev->ncmd.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	/* Strip the event header so handlers see only the parameters */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command was acknowledged, so stop the timeout timer;
	 * HCI_OP_NOP does not correspond to a command we sent.
	 */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete the request now if the command failed, or if the
	 * sent command was not waiting for a specific follow-up event.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* ncmd > 0 means the controller can accept more commands;
	 * restart command processing unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2915
2916 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2917 {
2918 struct hci_ev_role_change *ev = (void *) skb->data;
2919 struct hci_conn *conn;
2920
2921 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2922
2923 hci_dev_lock(hdev);
2924
2925 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2926 if (conn) {
2927 if (!ev->status) {
2928 if (ev->role)
2929 clear_bit(HCI_CONN_MASTER, &conn->flags);
2930 else
2931 set_bit(HCI_CONN_MASTER, &conn->flags);
2932 }
2933
2934 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2935
2936 hci_role_switch_cfm(conn, ev->status, ev->role);
2937 }
2938
2939 hci_dev_unlock(hdev);
2940 }
2941
/* Number of Completed Packets event.
 *
 * Packet-based flow control: the controller reports, per connection
 * handle, how many outstanding packets it has finished transmitting.
 * The per-link 'sent' counter is decremented and the credits are
 * returned to the matching per-type quota, then the TX work is kicked.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only valid in packet-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the declared handle count fits in the skb */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return credits to the quota for this link type,
		 * clamped to the controller-advertised maximum.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL quota with LE traffic.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3007
3008 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3009 __u16 handle)
3010 {
3011 struct hci_chan *chan;
3012
3013 switch (hdev->dev_type) {
3014 case HCI_BREDR:
3015 return hci_conn_hash_lookup_handle(hdev, handle);
3016 case HCI_AMP:
3017 chan = hci_chan_lookup_handle(hdev, handle);
3018 if (chan)
3019 return chan->conn;
3020 break;
3021 default:
3022 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3023 break;
3024 }
3025
3026 return NULL;
3027 }
3028
/* Number of Completed Data Blocks event.
 *
 * Block-based flow control counterpart of Number of Completed
 * Packets: returns freed data blocks to the shared block quota and
 * kicks the TX work.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only valid in block-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the declared handle count fits in the skb */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle may refer to a connection or an AMP channel */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Return blocks to the quota, clamped to the
			 * controller-advertised maximum.
			 */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3078
3079 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3080 {
3081 struct hci_ev_mode_change *ev = (void *) skb->data;
3082 struct hci_conn *conn;
3083
3084 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3085
3086 hci_dev_lock(hdev);
3087
3088 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3089 if (conn) {
3090 conn->mode = ev->mode;
3091
3092 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3093 &conn->flags)) {
3094 if (conn->mode == HCI_CM_ACTIVE)
3095 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3096 else
3097 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3098 }
3099
3100 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3101 hci_sco_setup(conn, ev->status);
3102 }
3103
3104 hci_dev_unlock(hdev);
3105 }
3106
/* PIN Code Request event.
 *
 * Rejects the request outright when the device is not pairable,
 * otherwise forwards it to the management interface for user input.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout to cover the pairing dialog */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* Not pairable: reject immediately with a negative reply */
	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security requires a 16-digit (secure) PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3143
/* Link Key Request event.
 *
 * Looks up a stored link key for the peer and replies with it,
 * unless security policy forbids using that key for the pending
 * connection (unauthenticated key with MITM required, or a short
 * PIN combination key for high/FIPS security). A negative reply is
 * sent when no acceptable key exists.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage is only maintained when mgmt is in use */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* An unauthenticated key cannot satisfy a connection
		 * that requires MITM protection (auth_type bit 0).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Combination keys from short PINs (< 16 digits) are
		 * too weak for high or FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3202
/* Link Key Notification event.
 *
 * Stores the newly created link key, notifies user space, and
 * decides whether the key is kept in the kernel list and whether
 * the connection should flush it on disconnect.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed-combination key keeps the original type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	/* Keys are only stored when mgmt is in use */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del(&key->list);
		kfree(key);
	} else if (conn) {
		/* Non-persistent keys are flushed when the link drops */
		if (persistent)
			clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
		else
			set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	}

unlock:
	hci_dev_unlock(hdev);
}
3256
3257 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3258 {
3259 struct hci_ev_clock_offset *ev = (void *) skb->data;
3260 struct hci_conn *conn;
3261
3262 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3263
3264 hci_dev_lock(hdev);
3265
3266 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3267 if (conn && !ev->status) {
3268 struct inquiry_entry *ie;
3269
3270 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3271 if (ie) {
3272 ie->data.clock_offset = ev->clock_offset;
3273 ie->timestamp = jiffies;
3274 }
3275 }
3276
3277 hci_dev_unlock(hdev);
3278 }
3279
3280 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3281 {
3282 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3283 struct hci_conn *conn;
3284
3285 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3286
3287 hci_dev_lock(hdev);
3288
3289 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3290 if (conn && !ev->status)
3291 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3292
3293 hci_dev_unlock(hdev);
3294 }
3295
3296 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3297 {
3298 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3299 struct inquiry_entry *ie;
3300
3301 BT_DBG("%s", hdev->name);
3302
3303 hci_dev_lock(hdev);
3304
3305 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3306 if (ie) {
3307 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3308 ie->timestamp = jiffies;
3309 }
3310
3311 hci_dev_unlock(hdev);
3312 }
3313
/* Inquiry Result with RSSI event.
 *
 * Two wire formats exist for this event; they are distinguished by
 * the per-response record size. Each response is added to the
 * inquiry cache and reported to the management interface.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	/* First byte of the event is the number of responses */
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results from periodic inquiry are ignored */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Record size decides between the variant with and without
	 * the pscan_mode field.
	 */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3377
/* Read Remote Extended Features complete event.
 *
 * Caches the requested feature page, derives SSP/SC state from the
 * remote host features (page 1), and continues connection setup in
 * the same way as the basic remote-features handler.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache the feature page if it is within the supported range */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host feature bits (SSP, SC) */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* The remaining steps only apply while setup is in progress */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* If no outgoing authentication is required, setup is done */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3443
/* Synchronous Connection Complete event.
 *
 * Finalizes SCO/eSCO setup. On specific negotiation failures for
 * outgoing links, a retry is attempted with a reduced packet type
 * selection before the connection is given up.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* The controller may have downgraded an eSCO request to
		 * SCO; look for the eSCO connection object and retag it.
		 */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* Retry outgoing setup with a narrower packet type mask */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3501
3502 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3503 {
3504 size_t parsed = 0;
3505
3506 while (parsed < eir_len) {
3507 u8 field_len = eir[0];
3508
3509 if (field_len == 0)
3510 return parsed;
3511
3512 parsed += field_len + 1;
3513 eir += field_len + 1;
3514 }
3515
3516 return eir_len;
3517 }
3518
/* Extended Inquiry Result event.
 *
 * Each response carries EIR data in addition to the usual inquiry
 * fields; responses are cached and forwarded to the management
 * interface together with the significant part of the EIR blob.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	/* First byte of the event is the number of responses */
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results from periodic inquiry are ignored */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		/* Extended inquiry results imply SSP on the remote */
		data.ssp_mode = 0x01;

		/* The name counts as known when the EIR already carries
		 * the complete name (no separate name request needed).
		 */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
3568
/* Encryption Key Refresh Complete event.
 *
 * For LE links, promotes the pending security level on success and
 * completes connection setup or signals authentication results;
 * on failure of an established link, the connection is torn down.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* Failed refresh on an established link drops the connection */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3618
3619 static u8 hci_get_auth_req(struct hci_conn *conn)
3620 {
3621 /* If remote requests no-bonding follow that lead */
3622 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3623 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3624 return conn->remote_auth | (conn->auth_type & 0x01);
3625
3626 /* If both remote and local have enough IO capabilities, require
3627 * MITM protection
3628 */
3629 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3630 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3631 return conn->remote_auth | 0x01;
3632
3633 /* No MITM protection possible so ignore remote requirement */
3634 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3635 }
3636
/* Handle HCI IO Capability Request: answer with our IO capability and
 * authentication requirements, or reject pairing with a negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of the pairing */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Reply only if we are pairable or the remote asked for
	 * no-bonding (its MITM bit masked off); otherwise reject
	 * the pairing with a negative reply.
	 */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			cp.authentication = conn->auth_type;

			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 * conn->auth_type is not updated here since
			 * that might cause the user confirmation to be
			 * rejected in case the remote doesn't have the
			 * IO capabilities for MITM.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    cp.authentication != HCI_AT_NO_BONDING)
				cp.authentication |= 0x01;
		} else {
			/* As responder we know the remote requirements and
			 * can compute the combined authentication request.
			 */
			conn->auth_type = hci_get_auth_req(conn);
			cp.authentication = conn->auth_type;
		}

		/* Advertise OOB data only when we have it stored and are
		 * either the initiator or the remote indicated OOB support.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3705
3706 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3707 {
3708 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3709 struct hci_conn *conn;
3710
3711 BT_DBG("%s", hdev->name);
3712
3713 hci_dev_lock(hdev);
3714
3715 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3716 if (!conn)
3717 goto unlock;
3718
3719 conn->remote_cap = ev->capability;
3720 conn->remote_auth = ev->authentication;
3721 if (ev->oob_data)
3722 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3723
3724 unlock:
3725 hci_dev_unlock(hdev);
3726 }
3727
/* Handle HCI User Confirmation Request: decide between rejecting,
 * auto-accepting (possibly delayed) and forwarding the request to
 * user space, based on local and remote MITM requirements.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation request
	 */
	if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM in which case we do auto-accept.
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* An optional debugfs-configurable delay before accepting */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	/* Let user space compare the passkey and confirm or reject */
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3797
3798 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3799 struct sk_buff *skb)
3800 {
3801 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3802
3803 BT_DBG("%s", hdev->name);
3804
3805 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3806 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3807 }
3808
3809 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3810 struct sk_buff *skb)
3811 {
3812 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3813 struct hci_conn *conn;
3814
3815 BT_DBG("%s", hdev->name);
3816
3817 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3818 if (!conn)
3819 return;
3820
3821 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3822 conn->passkey_entered = 0;
3823
3824 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3825 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3826 conn->dst_type, conn->passkey_notify,
3827 conn->passkey_entered);
3828 }
3829
3830 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3831 {
3832 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3833 struct hci_conn *conn;
3834
3835 BT_DBG("%s", hdev->name);
3836
3837 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3838 if (!conn)
3839 return;
3840
3841 switch (ev->type) {
3842 case HCI_KEYPRESS_STARTED:
3843 conn->passkey_entered = 0;
3844 return;
3845
3846 case HCI_KEYPRESS_ENTERED:
3847 conn->passkey_entered++;
3848 break;
3849
3850 case HCI_KEYPRESS_ERASED:
3851 conn->passkey_entered--;
3852 break;
3853
3854 case HCI_KEYPRESS_CLEARED:
3855 conn->passkey_entered = 0;
3856 break;
3857
3858 case HCI_KEYPRESS_COMPLETED:
3859 return;
3860 }
3861
3862 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3863 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3864 conn->dst_type, conn->passkey_notify,
3865 conn->passkey_entered);
3866 }
3867
3868 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3869 struct sk_buff *skb)
3870 {
3871 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3872 struct hci_conn *conn;
3873
3874 BT_DBG("%s", hdev->name);
3875
3876 hci_dev_lock(hdev);
3877
3878 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3879 if (!conn)
3880 goto unlock;
3881
3882 /* To avoid duplicate auth_failed events to user space we check
3883 * the HCI_CONN_AUTH_PEND flag which will be set if we
3884 * initiated the authentication. A traditional auth_complete
3885 * event gets always produced as initiator and is also mapped to
3886 * the mgmt_auth_failed event */
3887 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3888 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3889 ev->status);
3890
3891 hci_conn_drop(conn);
3892
3893 unlock:
3894 hci_dev_unlock(hdev);
3895 }
3896
3897 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3898 struct sk_buff *skb)
3899 {
3900 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3901 struct inquiry_entry *ie;
3902 struct hci_conn *conn;
3903
3904 BT_DBG("%s", hdev->name);
3905
3906 hci_dev_lock(hdev);
3907
3908 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3909 if (conn)
3910 memcpy(conn->features[1], ev->features, 8);
3911
3912 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3913 if (ie)
3914 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3915
3916 hci_dev_unlock(hdev);
3917 }
3918
/* Handle HCI Remote OOB Data Request: reply with previously stored
 * out-of-band pairing data for this address, or a negative reply when
 * none is stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
			/* Secure Connections enabled: provide both the
			 * P-192 and P-256 hash/randomizer values.
			 */
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.randomizer192, data->randomizer192,
			       sizeof(cp.randomizer192));
			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
			memcpy(cp.randomizer256, data->randomizer256,
			       sizeof(cp.randomizer256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			/* Legacy pairing: only the P-192 values are used */
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.randomizer, data->randomizer192,
			       sizeof(cp.randomizer));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		/* No stored OOB data for this address */
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3969
/* Handle AMP Physical Link Complete: finalize the AMP hci_conn and
 * inform the AMP manager so channel creation can continue.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	/* A failed physical link means the connection object is useless */
	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* The controlling BR/EDR link is reachable via the AMP manager */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* Arm the disconnect timeout on the otherwise idle AMP link */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}
4008
/* Handle AMP Logical Link Complete: create the hci_chan for the new
 * logical link and confirm it towards the waiting L2CAP channel.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* The channel now runs over the AMP controller, so its
		 * MTU is dictated by the AMP block size.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4046
4047 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4048 struct sk_buff *skb)
4049 {
4050 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4051 struct hci_chan *hchan;
4052
4053 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4054 le16_to_cpu(ev->handle), ev->status);
4055
4056 if (ev->status)
4057 return;
4058
4059 hci_dev_lock(hdev);
4060
4061 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4062 if (!hchan)
4063 goto unlock;
4064
4065 amp_destroy_logical_link(hchan, ev->reason);
4066
4067 unlock:
4068 hci_dev_unlock(hdev);
4069 }
4070
4071 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4072 struct sk_buff *skb)
4073 {
4074 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4075 struct hci_conn *hcon;
4076
4077 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4078
4079 if (ev->status)
4080 return;
4081
4082 hci_dev_lock(hdev);
4083
4084 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4085 if (hcon) {
4086 hcon->state = BT_CLOSED;
4087 hci_conn_del(hcon);
4088 }
4089
4090 hci_dev_unlock(hdev);
4091 }
4092
/* Handle LE Connection Complete: create or finalize the hci_conn for
 * the new LE link, resolve RPAs to identity addresses, enforce the
 * blacklist and notify mgmt and upper protocols.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			set_bit(HCI_CONN_MASTER, &conn->flags);
		}

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The connection attempt succeeded, so the failure
		 * timeout is no longer needed.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	/* Remember the parameters the controller actually applied */
	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The device is now connected, so no auto-connect action is
	 * pending for it anymore.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params)
		list_del_init(&params->action);

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4226
4227 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4228 struct sk_buff *skb)
4229 {
4230 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4231 struct hci_conn *conn;
4232
4233 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4234
4235 if (ev->status)
4236 return;
4237
4238 hci_dev_lock(hdev);
4239
4240 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4241 if (conn) {
4242 conn->le_conn_interval = le16_to_cpu(ev->interval);
4243 conn->le_conn_latency = le16_to_cpu(ev->latency);
4244 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4245 }
4246
4247 hci_dev_unlock(hdev);
4248 }
4249
4250 /* This function requires the caller holds hdev->lock */
4251 static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
4252 u8 addr_type, u8 adv_type)
4253 {
4254 struct hci_conn *conn;
4255
4256 /* If the event is not connectable don't proceed further */
4257 if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
4258 return;
4259
4260 /* Ignore if the device is blocked */
4261 if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
4262 return;
4263
4264 /* If we're connectable, always connect any ADV_DIRECT_IND event */
4265 if (test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
4266 adv_type == LE_ADV_DIRECT_IND)
4267 goto connect;
4268
4269 /* If we're not connectable only connect devices that we have in
4270 * our pend_le_conns list.
4271 */
4272 if (!hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, addr_type))
4273 return;
4274
4275 connect:
4276 /* Request connection in master = true role */
4277 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
4278 HCI_LE_AUTOCONN_TIMEOUT, true);
4279 if (!IS_ERR(conn))
4280 return;
4281
4282 switch (PTR_ERR(conn)) {
4283 case -EBUSY:
4284 /* If hci_connect() returns -EBUSY it means there is already
4285 * an LE connection attempt going on. Since controllers don't
4286 * support more than one connection attempt at the time, we
4287 * don't consider this an error case.
4288 */
4289 break;
4290 default:
4291 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
4292 }
4293 }
4294
/* Process one LE advertising report: resolve the address, trigger any
 * pending connection, and either emit a device found event immediately
 * or cache the report so a following scan response can be merged into
 * a single event.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	bool match;
	u32 flags;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device */
	check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		struct hci_conn_params *param;

		if (type == LE_ADV_DIRECT_IND)
			return;

		param = hci_pend_le_action_lookup(&hdev->pend_le_reports,
						  bdaddr, bdaddr_type);
		if (!param)
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4421
4422 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4423 {
4424 u8 num_reports = skb->data[0];
4425 void *ptr = &skb->data[1];
4426
4427 hci_dev_lock(hdev);
4428
4429 while (num_reports--) {
4430 struct hci_ev_le_advertising_info *ev = ptr;
4431 s8 rssi;
4432
4433 rssi = ev->data[ev->length];
4434 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4435 ev->bdaddr_type, rssi, ev->data, ev->length);
4436
4437 ptr += sizeof(*ev) + ev->length + 1;
4438 }
4439
4440 hci_dev_unlock(hdev);
4441 }
4442
/* Handle LE Long Term Key Request: look up a stored LTK (or STK) for
 * the connection and reply with it, or send a negative reply when no
 * matching key is known.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	/* The EDiv/Rand pair from the event identifies the key */
	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* An authenticated key yields a higher resulting security level */
	if (ltk->authenticated)
		conn->pending_sec_level = BT_SECURITY_HIGH;
	else
		conn->pending_sec_level = BT_SECURITY_MEDIUM;

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		/* STKs are single-use: discard after handing it out */
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del(&ltk->list);
		kfree(ltk);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4498
4499 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4500 u8 reason)
4501 {
4502 struct hci_cp_le_conn_param_req_neg_reply cp;
4503
4504 cp.handle = cpu_to_le16(handle);
4505 cp.reason = reason;
4506
4507 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4508 &cp);
4509 }
4510
4511 static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4512 struct sk_buff *skb)
4513 {
4514 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4515 struct hci_cp_le_conn_param_req_reply cp;
4516 struct hci_conn *hcon;
4517 u16 handle, min, max, latency, timeout;
4518
4519 handle = le16_to_cpu(ev->handle);
4520 min = le16_to_cpu(ev->interval_min);
4521 max = le16_to_cpu(ev->interval_max);
4522 latency = le16_to_cpu(ev->latency);
4523 timeout = le16_to_cpu(ev->timeout);
4524
4525 hcon = hci_conn_hash_lookup_handle(hdev, handle);
4526 if (!hcon || hcon->state != BT_CONNECTED)
4527 return send_conn_param_neg_reply(hdev, handle,
4528 HCI_ERROR_UNKNOWN_CONN_ID);
4529
4530 if (hci_check_conn_params(min, max, latency, timeout))
4531 return send_conn_param_neg_reply(hdev, handle,
4532 HCI_ERROR_INVALID_LL_PARAMS);
4533
4534 if (test_bit(HCI_CONN_MASTER, &hcon->flags)) {
4535 struct hci_conn_params *params;
4536 u8 store_hint;
4537
4538 hci_dev_lock(hdev);
4539
4540 params = hci_conn_params_lookup(hdev, &hcon->dst,
4541 hcon->dst_type);
4542 if (params) {
4543 params->conn_min_interval = min;
4544 params->conn_max_interval = max;
4545 params->conn_latency = latency;
4546 params->supervision_timeout = timeout;
4547 store_hint = 0x01;
4548 } else{
4549 store_hint = 0x00;
4550 }
4551
4552 hci_dev_unlock(hdev);
4553
4554 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4555 store_hint, min, max, latency, timeout);
4556 }
4557
4558 cp.handle = ev->handle;
4559 cp.interval_min = ev->interval_min;
4560 cp.interval_max = ev->interval_max;
4561 cp.latency = ev->latency;
4562 cp.timeout = ev->timeout;
4563 cp.min_ce_len = 0;
4564 cp.max_ce_len = 0;
4565
4566 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4567 }
4568
4569 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4570 {
4571 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4572
4573 skb_pull(skb, sizeof(*le_ev));
4574
4575 switch (le_ev->subevent) {
4576 case HCI_EV_LE_CONN_COMPLETE:
4577 hci_le_conn_complete_evt(hdev, skb);
4578 break;
4579
4580 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4581 hci_le_conn_update_complete_evt(hdev, skb);
4582 break;
4583
4584 case HCI_EV_LE_ADVERTISING_REPORT:
4585 hci_le_adv_report_evt(hdev, skb);
4586 break;
4587
4588 case HCI_EV_LE_LTK_REQ:
4589 hci_le_ltk_request_evt(hdev, skb);
4590 break;
4591
4592 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4593 hci_le_remote_conn_param_req_evt(hdev, skb);
4594 break;
4595
4596 default:
4597 break;
4598 }
4599 }
4600
4601 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4602 {
4603 struct hci_ev_channel_selected *ev = (void *) skb->data;
4604 struct hci_conn *hcon;
4605
4606 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4607
4608 skb_pull(skb, sizeof(*ev));
4609
4610 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4611 if (!hcon)
4612 return;
4613
4614 amp_read_loc_assoc_final_data(hdev, hcon);
4615 }
4616
/* Main dispatcher for incoming HCI event packets.
 *
 * @hdev: controller the event arrived on
 * @skb:  event packet, starting with struct hci_event_hdr; ownership is
 *        taken here — the skb is always freed before returning
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hci_req_pending(hdev)) {
		/* Replace any previously stashed event; clone keeps the
		 * original skb intact for the dispatch below.
		 */
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	/* Strip the event header so the per-event handlers see only the
	 * event parameters at skb->data.
	 */
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Some commands complete with a dedicated event rather than
	 * Command Complete/Status. If the outstanding command registered
	 * this event as its terminator, mark the request complete with
	 * status 0 now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	/* Dispatch to the handler for this event code. Handlers receive
	 * the skb with the event header already removed.
	 */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		/* LE events arrive wrapped in a meta event; this handler
		 * demultiplexes on the LE subevent code.
		 */
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		/* Unknown/unhandled event: log at debug level and drop */
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	/* The handlers do not take ownership of the skb (a clone was
	 * stashed above for pending requests); release it here and
	 * account the received event.
	 */
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
This page took 0.125167 seconds and 6 git commands to generate.