Bluetooth: Store Bluetooth address from controller setup
[deliverable/linux.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "a2mp.h"
34 #include "amp.h"
35 #include "smp.h"
36
37 /* Handle HCI Event packets */
38
/* Command Complete: HCI_Inquiry_Cancel.
 *
 * On success, clear the HCI_INQUIRY flag, wake any task sleeping on that
 * bit, mark mgmt discovery as stopped and retry any pending connection
 * attempts that were deferred while inquiry was running.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
58
/* Command Complete: HCI_Periodic_Inquiry_Mode.
 * On success, record that periodic inquiry is now active.
 */
static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
}
70
/* Command Complete: HCI_Exit_Periodic_Inquiry_Mode.
 * On success, clear the periodic-inquiry flag and retry connection
 * attempts that were deferred while inquiry was active.
 */
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);

	hci_conn_check_pending(hdev);
}
84
/* Command Complete: HCI_Remote_Name_Request_Cancel.
 * No state to update; traced for debugging only.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
90
/* Command Complete: HCI_Role_Discovery.
 * Update HCI_CONN_MASTER on the matching connection: role 0x00 means
 * master, so the bit is set when rp->role is zero.
 */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		if (rp->role)
			clear_bit(HCI_CONN_MASTER, &conn->flags);
		else
			set_bit(HCI_CONN_MASTER, &conn->flags);
	}

	hci_dev_unlock(hdev);
}
113
/* Command Complete: HCI_Read_Link_Policy_Settings.
 * Cache the reported policy on the matching connection.
 */
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}
132
/* Command Complete: HCI_Write_Link_Policy_Settings.
 * Re-read the policy we sent (the reply only carries the handle) and
 * cache it on the matching connection.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* policy field follows the 2-byte handle in the command */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
156
/* Command Complete: HCI_Read_Default_Link_Policy_Settings.
 * Cache the controller-wide default link policy.
 */
static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_def_link_policy *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->link_policy = __le16_to_cpu(rp->policy);
}
169
/* Command Complete: HCI_Write_Default_Link_Policy_Settings.
 * On success, mirror the policy value we sent into hdev->link_policy.
 */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}
187
/* Command Complete: HCI_Reset.
 *
 * Runs unconditionally (status is only logged): clear the in-progress
 * reset flag and drop all non-persistent state — discovery, TX power
 * caches, advertising/scan-response data and LE scan type — back to
 * their post-reset defaults.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;
}
213
/* Command Complete: HCI_Write_Local_Name.
 * When mgmt is in charge it is informed of the outcome (even failure);
 * otherwise the name we sent is cached on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
234
/* Command Complete: HCI_Read_Local_Name.
 * Only cached while the controller is still in setup; after setup the
 * host-configured name is authoritative.
 */
static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_name *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
247
/* Command Complete: HCI_Write_Authentication_Enable.
 * On success, mirror the requested mode into the HCI_AUTH flag; mgmt is
 * always told the outcome so a pending command can complete.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);
}
271
/* Command Complete: HCI_Write_Encryption_Mode.
 * On success, mirror the requested mode into the HCI_ENCRYPT flag.
 */
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	if (param)
		set_bit(HCI_ENCRYPT, &hdev->flags);
	else
		clear_bit(HCI_ENCRYPT, &hdev->flags);
}
294
/* Command Complete: HCI_Write_Scan_Enable.
 *
 * Mirror the requested scan mode into the HCI_ISCAN/HCI_PSCAN flags and
 * raise the matching mgmt discoverable/connectable events, but only on
 * transitions (old state != new state). On failure mgmt is notified and
 * any pending discoverable timeout is dropped.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	int old_pscan, old_iscan;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		mgmt_write_scan_failed(hdev, param, status);
		hdev->discov_timeout = 0;
		goto done;
	}

	/* We need to ensure that we set this back on if someone changed
	 * the scan mode through a raw HCI socket.
	 */
	set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
	old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_INQUIRY) {
		set_bit(HCI_ISCAN, &hdev->flags);
		if (!old_iscan)
			mgmt_discoverable(hdev, 1);
	} else if (old_iscan)
		mgmt_discoverable(hdev, 0);

	if (param & SCAN_PAGE) {
		set_bit(HCI_PSCAN, &hdev->flags);
		if (!old_pscan)
			mgmt_connectable(hdev, 1);
	} else if (old_pscan)
		mgmt_connectable(hdev, 0);

done:
	hci_dev_unlock(hdev);
}
343
/* Command Complete: HCI_Read_Class_of_Device.
 * Cache the 3-byte class (stored little-endian, logged MSB first).
 */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
358
/* Command Complete: HCI_Write_Class_of_Device.
 * Cache the class we sent on success; mgmt is informed of the outcome
 * either way so a pending command can complete.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
380
/* Command Complete: HCI_Read_Voice_Setting.
 * Cache the setting and notify the driver only when it actually changed.
 */
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
403
/* Command Complete: HCI_Write_Voice_Setting.
 * Mirror the value we sent and notify the driver only on change.
 */
static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
432
/* Command Complete: HCI_Read_Number_Of_Supported_IAC.
 * Cache how many inquiry access codes the controller supports.
 */
static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}
447
/* Command Complete: HCI_Write_Simple_Pairing_Mode.
 * On success, mirror the mode into the host-features bitmap; then either
 * complete the pending mgmt command or toggle HCI_SSP_ENABLED directly.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
475
/* Command Complete: HCI_Write_Secure_Connections_Host_Support.
 * Mirrors hci_cc_write_ssp_mode for the Secure Connections host bit.
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_sc_enable_complete(hdev, sent->support, status);
	else if (!status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}
}
503
/* Command Complete: HCI_Read_Local_Version_Information.
 * Version data is only captured during initial controller setup.
 */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}
521
/* Command Complete: HCI_Read_Local_Supported_Commands.
 * The supported-commands bitmap is only captured during setup.
 */
static void hci_cc_read_local_commands(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_commands *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}
535
/* Command Complete: HCI_Read_Local_Supported_Features.
 *
 * Cache page 0 of the LMP feature bitmap and derive the ACL packet
 * types and (e)SCO link types the controller can use from it.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
585
/* Command Complete: HCI_Read_Local_Extended_Features.
 * Track the highest available feature page and cache the returned page
 * (pages >= HCI_MAX_PAGES are ignored, not stored).
 */
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}
602
/* Command Complete: HCI_Read_Flow_Control_Mode.
 * Cache whether the controller uses packet- or block-based flow control.
 */
static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->flow_ctl_mode = rp->mode;
}
615
/* Command Complete: HCI_Read_Buffer_Size.
 *
 * Cache the controller's ACL/SCO MTUs and packet counts, apply the
 * fixup quirk for controllers reporting bogus SCO buffer sizes, and
 * initialise the outstanding-packet counters to the full capacity.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
641
/* Command Complete: HCI_Read_BD_ADDR.
 *
 * During init the address becomes the device's bdaddr; additionally,
 * while still in setup it is stored in setup_addr so the address the
 * controller came up with is preserved even if bdaddr is later
 * overwritten by user space.
 */
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}
657
/* Command Complete: HCI_Read_Page_Scan_Activity.
 * Interval/window are only captured while the device is initialising.
 */
static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}
673
/* Command Complete: HCI_Write_Page_Scan_Activity.
 * On success, mirror the interval/window values we sent.
 */
static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}
692
/* Command Complete: HCI_Read_Page_Scan_Type.
 * The scan type is only captured while the device is initialising.
 */
static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_type *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->page_scan_type = rp->type;
}
706
/* Command Complete: HCI_Write_Page_Scan_Type.
 * On success, mirror the type value we sent.
 */
static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}
722
/* Command Complete: HCI_Read_Data_Block_Size.
 * Cache the block-based flow control parameters and start the free
 * block counter at full capacity.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
742
/* Command Complete: HCI_Read_Clock.
 *
 * The "which" parameter of the command we sent selects the target:
 * 0x00 stores the local (native) clock on hdev, otherwise the clock and
 * accuracy are stored on the connection matching the returned handle.
 * The reply length is validated before any field is read.
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
777
/* Command Complete: HCI_Read_Local_AMP_Info.
 * Cache the AMP controller capabilities; the A2MP getinfo response is
 * sent regardless of status so the remote side is never left waiting.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
802
803 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
804 struct sk_buff *skb)
805 {
806 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
807 struct amp_assoc *assoc = &hdev->loc_assoc;
808 size_t rem_len, frag_len;
809
810 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
811
812 if (rp->status)
813 goto a2mp_rsp;
814
815 frag_len = skb->len - sizeof(*rp);
816 rem_len = __le16_to_cpu(rp->rem_len);
817
818 if (rem_len > frag_len) {
819 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
820
821 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
822 assoc->offset += frag_len;
823
824 /* Read other fragments */
825 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
826
827 return;
828 }
829
830 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
831 assoc->len = assoc->offset + rem_len;
832 assoc->offset = 0;
833
834 a2mp_rsp:
835 /* Send A2MP Rsp when all fragments are received */
836 a2mp_send_getampassoc_rsp(hdev, rp->status);
837 a2mp_send_create_phy_link_req(hdev, rp->status);
838 }
839
/* Command Complete: HCI_Read_Inquiry_Response_TX_Power_Level.
 * Cache the inquiry-response TX power.
 */
static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->inq_tx_power = rp->tx_power;
}
852
/* Command Complete: HCI_PIN_Code_Request_Reply.
 * Tell mgmt the outcome (even on failure), then on success record the
 * PIN length we sent on the matching ACL connection.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
880
/* Command Complete: HCI_PIN_Code_Request_Negative_Reply.
 * Simply forward the outcome to mgmt when it is managing the device.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
895
/* Command Complete: HCI_LE_Read_Buffer_Size.
 * Cache the LE ACL MTU and packet count and start the outstanding-packet
 * counter at full capacity.
 */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}
913
/* Command Complete: HCI_LE_Read_Local_Supported_Features.
 * Cache the 8-byte LE feature bitmap.
 */
static void hci_cc_le_read_local_features(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_le_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->le_features, rp->features, 8);
}
926
/* Command Complete: HCI_LE_Read_Advertising_Channel_TX_Power.
 * Cache the advertising TX power.
 */
static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->adv_tx_power = rp->tx_power;
}
939
/* Command Complete: HCI_User_Confirmation_Request_Reply.
 * Forward the outcome to mgmt when it is managing the device.
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
954
/* Command Complete: HCI_User_Confirmation_Request_Negative_Reply.
 * Forward the outcome to mgmt when it is managing the device.
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
970
/* Command Complete: HCI_User_Passkey_Request_Reply.
 * Forward the outcome to mgmt when it is managing the device.
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
985
/* Command Complete: HCI_User_Passkey_Request_Negative_Reply.
 * Forward the outcome to mgmt when it is managing the device.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
1001
/* Command Complete: HCI_Read_Local_OOB_Data.
 * Hand the P-192 hash/randomizer to mgmt; the 256-bit fields are NULL
 * since this command variant does not provide them.
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
					  NULL, NULL, rp->status);
	hci_dev_unlock(hdev);
}
1014
/* Command Complete: HCI_Read_Local_OOB_Extended_Data.
 * Hand both the P-192 and P-256 hash/randomizer pairs to mgmt.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
					  rp->hash256, rp->randomizer256,
					  rp->status);
	hci_dev_unlock(hdev);
}
1028
1029
/* Command Complete: HCI_LE_Set_Random_Address.
 * On success, cache the random address we programmed.
 */
static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}
1050
/* Command Complete: HCI_LE_Set_Advertise_Enable.
 * When advertising was enabled and an LE connection attempt is pending,
 * arm the connection timeout; mgmt is told of the new advertising state.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral. Set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   HCI_LE_CONN_TIMEOUT);
	}

	mgmt_advertising(hdev, *sent);

	hci_dev_unlock(hdev);
}
1083
/* Command Complete: HCI_LE_Set_Scan_Parameters.
 * On success, remember the scan type (active/passive) we configured.
 */
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}
1104
1105 static bool has_pending_adv_report(struct hci_dev *hdev)
1106 {
1107 struct discovery_state *d = &hdev->discovery;
1108
1109 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1110 }
1111
1112 static void clear_pending_adv_report(struct hci_dev *hdev)
1113 {
1114 struct discovery_state *d = &hdev->discovery;
1115
1116 bacpy(&d->last_adv_addr, BDADDR_ANY);
1117 d->last_adv_data_len = 0;
1118 }
1119
1120 static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1121 u8 bdaddr_type, s8 rssi, u32 flags,
1122 u8 *data, u8 len)
1123 {
1124 struct discovery_state *d = &hdev->discovery;
1125
1126 bacpy(&d->last_adv_addr, bdaddr);
1127 d->last_adv_addr_type = bdaddr_type;
1128 d->last_adv_rssi = rssi;
1129 d->last_adv_flags = flags;
1130 memcpy(d->last_adv_data, data, len);
1131 d->last_adv_data_len = len;
1132 }
1133
/* Command Complete: HCI_LE_Set_Scan_Enable.
 *
 * On enable: set HCI_LE_SCAN and, for active scans, drop any stale
 * buffered advertising report. On disable: flush a still-pending
 * advertising report to mgmt, cancel the scan-disable timer, clear
 * HCI_LE_SCAN and, if scanning was interrupted by a connect request,
 * mark discovery as stopped.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1191
/* Command Complete: HCI_LE_Read_White_List_Size.
 * Cache the controller's white list capacity.
 */
static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_white_list_size = rp->size;
}
1204
1205 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1206 struct sk_buff *skb)
1207 {
1208 __u8 status = *((__u8 *) skb->data);
1209
1210 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1211
1212 if (status)
1213 return;
1214
1215 hci_white_list_clear(hdev);
1216 }
1217
1218 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1219 struct sk_buff *skb)
1220 {
1221 struct hci_cp_le_add_to_white_list *sent;
1222 __u8 status = *((__u8 *) skb->data);
1223
1224 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1225
1226 if (status)
1227 return;
1228
1229 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1230 if (!sent)
1231 return;
1232
1233 hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type);
1234 }
1235
1236 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1237 struct sk_buff *skb)
1238 {
1239 struct hci_cp_le_del_from_white_list *sent;
1240 __u8 status = *((__u8 *) skb->data);
1241
1242 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1243
1244 if (status)
1245 return;
1246
1247 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1248 if (!sent)
1249 return;
1250
1251 hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type);
1252 }
1253
1254 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1255 struct sk_buff *skb)
1256 {
1257 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1258
1259 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1260
1261 if (rp->status)
1262 return;
1263
1264 memcpy(hdev->le_states, rp->le_states, 8);
1265 }
1266
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * Mirrors the host-side LE support settings that were just written to
 * the controller into the local features[1][0] bits and device flags.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* Recover the parameters this command was sent with */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
		/* Advertising cannot stay enabled without LE support */
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
	}

	/* Track simultaneous LE and BR/EDR host support */
	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
}
1296
1297 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1298 {
1299 struct hci_cp_le_set_adv_param *cp;
1300 u8 status = *((u8 *) skb->data);
1301
1302 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1303
1304 if (status)
1305 return;
1306
1307 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1308 if (!cp)
1309 return;
1310
1311 hci_dev_lock(hdev);
1312 hdev->adv_addr_type = cp->own_address_type;
1313 hci_dev_unlock(hdev);
1314 }
1315
1316 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1317 struct sk_buff *skb)
1318 {
1319 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1320
1321 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1322 hdev->name, rp->status, rp->phy_handle);
1323
1324 if (rp->status)
1325 return;
1326
1327 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1328 }
1329
1330 static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
1331 {
1332 struct hci_rp_read_rssi *rp = (void *) skb->data;
1333 struct hci_conn *conn;
1334
1335 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1336
1337 if (rp->status)
1338 return;
1339
1340 hci_dev_lock(hdev);
1341
1342 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
1343 if (conn)
1344 conn->rssi = rp->rssi;
1345
1346 hci_dev_unlock(hdev);
1347 }
1348
/* Command Complete handler for HCI_OP_READ_TX_POWER.
 *
 * Stores the reported transmit power on the matching connection. The
 * "type" field of the original command selects which value was read
 * (0x00: current TX power, 0x01: maximum TX power).
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* Need the original command to know which type was requested */
	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1382
1383 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1384 {
1385 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1386
1387 if (status) {
1388 hci_conn_check_pending(hdev);
1389 return;
1390 }
1391
1392 set_bit(HCI_INQUIRY, &hdev->flags);
1393 }
1394
/* Command Status handler for HCI_OP_CREATE_CONN.
 *
 * On failure, tears down (or re-queues) the pending ACL connection; on
 * success, makes sure a connection object exists for the peer so the
 * later Connection Complete event has something to attach to.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c (Command Disallowed) is retried by
			 * going back to BT_CONNECT2, but only for the
			 * first two attempts; anything else closes the
			 * connection.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			/* No existing object for this outgoing connection
			 * yet - create one.
			 */
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				set_bit(HCI_CONN_MASTER, &conn->flags);
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1434
1435 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1436 {
1437 struct hci_cp_add_sco *cp;
1438 struct hci_conn *acl, *sco;
1439 __u16 handle;
1440
1441 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1442
1443 if (!status)
1444 return;
1445
1446 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1447 if (!cp)
1448 return;
1449
1450 handle = __le16_to_cpu(cp->handle);
1451
1452 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1453
1454 hci_dev_lock(hdev);
1455
1456 acl = hci_conn_hash_lookup_handle(hdev, handle);
1457 if (acl) {
1458 sco = acl->link;
1459 if (sco) {
1460 sco->state = BT_CLOSED;
1461
1462 hci_proto_connect_cfm(sco, status);
1463 hci_conn_del(sco);
1464 }
1465 }
1466
1467 hci_dev_unlock(hdev);
1468 }
1469
1470 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1471 {
1472 struct hci_cp_auth_requested *cp;
1473 struct hci_conn *conn;
1474
1475 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1476
1477 if (!status)
1478 return;
1479
1480 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1481 if (!cp)
1482 return;
1483
1484 hci_dev_lock(hdev);
1485
1486 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1487 if (conn) {
1488 if (conn->state == BT_CONFIG) {
1489 hci_proto_connect_cfm(conn, status);
1490 hci_conn_drop(conn);
1491 }
1492 }
1493
1494 hci_dev_unlock(hdev);
1495 }
1496
1497 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1498 {
1499 struct hci_cp_set_conn_encrypt *cp;
1500 struct hci_conn *conn;
1501
1502 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1503
1504 if (!status)
1505 return;
1506
1507 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1508 if (!cp)
1509 return;
1510
1511 hci_dev_lock(hdev);
1512
1513 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1514 if (conn) {
1515 if (conn->state == BT_CONFIG) {
1516 hci_proto_connect_cfm(conn, status);
1517 hci_conn_drop(conn);
1518 }
1519 }
1520
1521 hci_dev_unlock(hdev);
1522 }
1523
1524 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1525 struct hci_conn *conn)
1526 {
1527 if (conn->state != BT_CONFIG || !conn->out)
1528 return 0;
1529
1530 if (conn->pending_sec_level == BT_SECURITY_SDP)
1531 return 0;
1532
1533 /* Only request authentication for SSP connections or non-SSP
1534 * devices with sec_level MEDIUM or HIGH or if MITM protection
1535 * is requested.
1536 */
1537 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1538 conn->pending_sec_level != BT_SECURITY_FIPS &&
1539 conn->pending_sec_level != BT_SECURITY_HIGH &&
1540 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1541 return 0;
1542
1543 return 1;
1544 }
1545
1546 static int hci_resolve_name(struct hci_dev *hdev,
1547 struct inquiry_entry *e)
1548 {
1549 struct hci_cp_remote_name_req cp;
1550
1551 memset(&cp, 0, sizeof(cp));
1552
1553 bacpy(&cp.bdaddr, &e->data.bdaddr);
1554 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1555 cp.pscan_mode = e->data.pscan_mode;
1556 cp.clock_offset = e->data.clock_offset;
1557
1558 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1559 }
1560
1561 static bool hci_resolve_next_name(struct hci_dev *hdev)
1562 {
1563 struct discovery_state *discov = &hdev->discovery;
1564 struct inquiry_entry *e;
1565
1566 if (list_empty(&discov->resolve))
1567 return false;
1568
1569 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1570 if (!e)
1571 return false;
1572
1573 if (hci_resolve_name(hdev, e) == 0) {
1574 e->name_state = NAME_PENDING;
1575 return true;
1576 }
1577
1578 return false;
1579 }
1580
/* Deliver a remote name lookup result (name may be NULL on failure)
 * and drive the discovery name-resolution state machine forward.
 *
 * Notifies mgmt about a newly connected device when needed, updates the
 * matching pending inquiry entry and either starts resolving the next
 * name or marks discovery as stopped.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* First mgmt notification for this connection */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		/* NULL name means the remote name request failed */
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1623
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only a failed status is acted on here: report the failed lookup to
 * the name resolution logic and, for outgoing connections that still
 * need it, request authentication right away.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report the failed lookup (NULL name) to the mgmt layer */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1664
1665 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1666 {
1667 struct hci_cp_read_remote_features *cp;
1668 struct hci_conn *conn;
1669
1670 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1671
1672 if (!status)
1673 return;
1674
1675 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1676 if (!cp)
1677 return;
1678
1679 hci_dev_lock(hdev);
1680
1681 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1682 if (conn) {
1683 if (conn->state == BT_CONFIG) {
1684 hci_proto_connect_cfm(conn, status);
1685 hci_conn_drop(conn);
1686 }
1687 }
1688
1689 hci_dev_unlock(hdev);
1690 }
1691
1692 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1693 {
1694 struct hci_cp_read_remote_ext_features *cp;
1695 struct hci_conn *conn;
1696
1697 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1698
1699 if (!status)
1700 return;
1701
1702 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1703 if (!cp)
1704 return;
1705
1706 hci_dev_lock(hdev);
1707
1708 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1709 if (conn) {
1710 if (conn->state == BT_CONFIG) {
1711 hci_proto_connect_cfm(conn, status);
1712 hci_conn_drop(conn);
1713 }
1714 }
1715
1716 hci_dev_unlock(hdev);
1717 }
1718
1719 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1720 {
1721 struct hci_cp_setup_sync_conn *cp;
1722 struct hci_conn *acl, *sco;
1723 __u16 handle;
1724
1725 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1726
1727 if (!status)
1728 return;
1729
1730 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1731 if (!cp)
1732 return;
1733
1734 handle = __le16_to_cpu(cp->handle);
1735
1736 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1737
1738 hci_dev_lock(hdev);
1739
1740 acl = hci_conn_hash_lookup_handle(hdev, handle);
1741 if (acl) {
1742 sco = acl->link;
1743 if (sco) {
1744 sco->state = BT_CLOSED;
1745
1746 hci_proto_connect_cfm(sco, status);
1747 hci_conn_del(sco);
1748 }
1749 }
1750
1751 hci_dev_unlock(hdev);
1752 }
1753
1754 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1755 {
1756 struct hci_cp_sniff_mode *cp;
1757 struct hci_conn *conn;
1758
1759 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1760
1761 if (!status)
1762 return;
1763
1764 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1765 if (!cp)
1766 return;
1767
1768 hci_dev_lock(hdev);
1769
1770 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1771 if (conn) {
1772 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1773
1774 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1775 hci_sco_setup(conn, status);
1776 }
1777
1778 hci_dev_unlock(hdev);
1779 }
1780
1781 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1782 {
1783 struct hci_cp_exit_sniff_mode *cp;
1784 struct hci_conn *conn;
1785
1786 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1787
1788 if (!status)
1789 return;
1790
1791 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1792 if (!cp)
1793 return;
1794
1795 hci_dev_lock(hdev);
1796
1797 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1798 if (conn) {
1799 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1800
1801 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1802 hci_sco_setup(conn, status);
1803 }
1804
1805 hci_dev_unlock(hdev);
1806 }
1807
1808 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1809 {
1810 struct hci_cp_disconnect *cp;
1811 struct hci_conn *conn;
1812
1813 if (!status)
1814 return;
1815
1816 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1817 if (!cp)
1818 return;
1819
1820 hci_dev_lock(hdev);
1821
1822 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1823 if (conn)
1824 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1825 conn->dst_type, status);
1826
1827 hci_dev_unlock(hdev);
1828 }
1829
/* Command Status handler for HCI_OP_CREATE_PHY_LINK (AMP).
 *
 * On failure the pending AMP connection object is removed; on success
 * the AMP assoc write to the remote controller is started.
 */
static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_create_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (status) {
		struct hci_conn *hcon;

		/* Physical link handles are 8 bit, looked up directly */
		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
		if (hcon)
			hci_conn_del(hcon);
	} else {
		amp_write_remote_assoc(hdev, cp->phy_handle);
	}

	hci_dev_unlock(hdev);
}
1854
1855 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1856 {
1857 struct hci_cp_accept_phy_link *cp;
1858
1859 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1860
1861 if (status)
1862 return;
1863
1864 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1865 if (!cp)
1866 return;
1867
1868 amp_write_remote_assoc(hdev, cp->phy_handle);
1869 }
1870
/* Command Status handler for HCI_OP_LE_CREATE_CONN.
 *
 * On success, records the initiator/responder address information on
 * the pending connection (needed later by SMP) and arms a connection
 * timeout for directed (non-white-list) connection attempts.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   HCI_LE_CONN_TIMEOUT);

unlock:
	hci_dev_unlock(hdev);
}
1921
/* Command Status handler for HCI_OP_LE_START_ENC.
 *
 * A failed status here means encryption of an established LE link
 * could not be started; disconnect with an authentication failure
 * rather than leaving the link unencrypted.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	/* Only act on links that are still up */
	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
1951
/* Inquiry Complete event: the controller finished its inquiry.
 *
 * Wakes up anyone waiting for HCI_INQUIRY to clear and, when mgmt is in
 * control, either starts resolving names of discovered devices or marks
 * discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Inquiry no longer blocks pending connection attempts */
	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		/* No names left to resolve - discovery is done */
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1992
/* Inquiry Result event: one or more devices found during inquiry.
 *
 * Each entry is added to the inquiry cache and forwarded to the mgmt
 * layer; results are dropped while periodic inquiry is active.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	/* First byte is the response count, entries follow */
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		/* Plain inquiry results carry no RSSI or SSP info */
		data.rssi = 0x00;
		data.ssp_mode = 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, 0, flags, NULL, 0, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
2029
/* Connection Complete event: a BR/EDR ACL or SCO connection attempt
 * finished (successfully or not).
 *
 * Finalizes the corresponding hci_conn object, kicks off remote feature
 * discovery for ACL links and notifies the upper layers; failed
 * attempts are reported and torn down.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may match a connection that was
		 * requested as eSCO; fix up its type in that case.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy connections without a stored
			 * link key get the longer pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2111
/* Connection Request event: a remote device wants to connect to us.
 *
 * Accepts the request (as ACL, or SCO/eSCO with default audio
 * parameters) when the link policy and blacklist allow it, possibly
 * deferring the decision to the upper protocol; otherwise rejects it.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layer adjust the accept mask and flags */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached device class if we know the device */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK ||
		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
			struct hci_cp_accept_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else if (!(flags & HCI_PROTO_DEFER)) {
			struct hci_cp_accept_sync_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			/* Default audio link parameters */
			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.max_latency    = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		} else {
			/* Decision deferred to the protocol layer */
			conn->state = BT_CONNECT2;
			hci_proto_connect_cfm(conn, 0);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
2193
2194 static u8 hci_to_mgmt_reason(u8 err)
2195 {
2196 switch (err) {
2197 case HCI_ERROR_CONNECTION_TIMEOUT:
2198 return MGMT_DEV_DISCONN_TIMEOUT;
2199 case HCI_ERROR_REMOTE_USER_TERM:
2200 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2201 case HCI_ERROR_REMOTE_POWER_OFF:
2202 return MGMT_DEV_DISCONN_REMOTE;
2203 case HCI_ERROR_LOCAL_HOST_TERM:
2204 return MGMT_DEV_DISCONN_LOCAL_HOST;
2205 default:
2206 return MGMT_DEV_DISCONN_UNKNOWN;
2207 }
2208 }
2209
/* Disconnection Complete event: a link has been torn down.
 *
 * Notifies mgmt, removes stored link keys when requested, re-queues
 * auto-connect parameters for background scanning, deletes the
 * connection object and re-enables advertising for LE links.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report a device disconnect if mgmt saw it connect */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK &&
	    test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
		hci_remove_link_key(hdev, &conn->dst);

	/* Re-arm auto-connection for this peer where configured */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* conn is freed below; remember the type for the check after */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2283
/* Authentication Complete event.
 *
 * Updates the connection's auth state, continues connection setup with
 * encryption for SSP links, and handles any pending encryption request
 * that was waiting for authentication to finish.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (non-SSP) devices cannot be re-authenticated on
		 * an existing link; keep the previous security level.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* SSP links continue setup by enabling encryption */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* A pending encryption request was waiting for authentication */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2349
/* Remote Name Request Complete event.
 *
 * Feeds the (possibly failed) name into the discovery resolution logic
 * and, for outgoing connections that still need it, requests
 * authentication.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	/* NULL name signals a failed lookup to the pending-name logic */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Request authentication unless one is already pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2388
/* Encryption Change event.
 *
 * Updates the connection's encryption-related flags, enforces Secure
 * Connections Only policy during setup, and notifies the upper layers
 * of the new encryption state.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt 0x02 on BR/EDR means AES-CCM; LE links
			 * always use AES-CCM when encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2454
2455 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2456 struct sk_buff *skb)
2457 {
2458 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2459 struct hci_conn *conn;
2460
2461 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2462
2463 hci_dev_lock(hdev);
2464
2465 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2466 if (conn) {
2467 if (!ev->status)
2468 set_bit(HCI_CONN_SECURE, &conn->flags);
2469
2470 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2471
2472 hci_key_change_cfm(conn, ev->status);
2473 }
2474
2475 hci_dev_unlock(hdev);
2476 }
2477
/* Handle HCI Read Remote Supported Features Complete event.
 *
 * Stores page 0 of the remote feature mask and continues connection
 * setup: either request extended features (when both sides are SSP
 * capable), request the remote name, or finish the connection.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		/* Page 0 of the remote features table */
		memcpy(conn->features[0], ev->features, 8);

	/* Setup steps below only apply while the link is being configured */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both controller and remote support SSP, fetch the extended
	 * features (page 1) before proceeding with setup.
	 */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before announcing the connection to
	 * the management layer, unless mgmt was already notified.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* No outgoing authentication required: setup is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2527
/* Handle HCI Command Complete event.
 *
 * Dispatches the per-opcode completion handler, cancels the command
 * timeout, completes any pending request waiting on this opcode and,
 * based on the controller's reported credit count (ncmd), restarts
 * the command queue.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* First byte of the return parameters is the command status */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	/* Strip the event header so the hci_cc_* handlers see only the
	 * command return parameters.
	 */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command (not NOP) completed: stop the command timeout */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* ncmd > 0 means the controller can accept further commands;
	 * refill the credit and resume the command queue unless a
	 * reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2807
/* Handle HCI Command Status event.
 *
 * Dispatches the per-opcode status handler, cancels the command
 * timeout, completes pending requests when appropriate and restarts
 * the command queue based on the controller's credit count.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	/* Strip the event header before dispatching */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command (not NOP) was acknowledged: stop the timeout */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Complete the pending request now if the command failed, or if
	 * the request is not waiting for a later event to finish it.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* Refill the command credit and resume the queue unless a reset
	 * is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2900
2901 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2902 {
2903 struct hci_ev_role_change *ev = (void *) skb->data;
2904 struct hci_conn *conn;
2905
2906 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2907
2908 hci_dev_lock(hdev);
2909
2910 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2911 if (conn) {
2912 if (!ev->status) {
2913 if (ev->role)
2914 clear_bit(HCI_CONN_MASTER, &conn->flags);
2915 else
2916 set_bit(HCI_CONN_MASTER, &conn->flags);
2917 }
2918
2919 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2920
2921 hci_role_switch_cfm(conn, ev->status, ev->role);
2922 }
2923
2924 hci_dev_unlock(hdev);
2925 }
2926
/* Handle HCI Number of Completed Packets event (packet-based flow
 * control).
 *
 * For each reported handle, credits the completed packet count back
 * to the connection and to the matching per-link-type controller
 * buffer counter, then kicks the TX work to send more data.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only valid in packet-based flow control mode;
	 * block-based controllers use Number of Completed Blocks.
	 */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate the event length against the declared handle count
	 * before walking the variable-length handle array.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Return the credits to the counter for this link type,
		 * clamped to the controller's advertised buffer count.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without dedicated LE buffers share
			 * the ACL buffer pool.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* Freed credits may allow queued packets to be transmitted */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2992
2993 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2994 __u16 handle)
2995 {
2996 struct hci_chan *chan;
2997
2998 switch (hdev->dev_type) {
2999 case HCI_BREDR:
3000 return hci_conn_hash_lookup_handle(hdev, handle);
3001 case HCI_AMP:
3002 chan = hci_chan_lookup_handle(hdev, handle);
3003 if (chan)
3004 return chan->conn;
3005 break;
3006 default:
3007 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3008 break;
3009 }
3010
3011 return NULL;
3012 }
3013
/* Handle HCI Number of Completed Data Blocks event (block-based flow
 * control).
 *
 * For each reported handle, credits the completed block count back to
 * the connection and to the controller's shared block counter, then
 * kicks the TX work to send more data.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only valid in block-based flow control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate the event length against the declared handle count
	 * before walking the variable-length handle array.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* AMP handles are channel handles, hence the special
		 * lookup helper instead of a plain connection lookup.
		 */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Clamp to the controller's advertised block count */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* Freed blocks may allow queued packets to be transmitted */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
3063
/* Handle HCI Mode Change event (active/sniff/hold).
 *
 * Records the new link mode, updates the power-save flag for mode
 * changes that were not initiated locally, and completes any pending
 * SCO setup that was waiting for the mode change.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;

		/* Only track the power-save state when the change was
		 * remote-initiated (no local mode-change pending).
		 */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		/* SCO setup may have been deferred until the ACL link
		 * left sniff mode.
		 */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
3091
/* Handle HCI PIN Code Request event.
 *
 * Rejects the request outright when the device is not pairable;
 * otherwise forwards it to user space through the management
 * interface, indicating whether a 16-digit (secure) PIN is required.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout while pairing is in progress */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		/* Not pairable: refuse the PIN code request */
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security requires a full 16-digit PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3128
/* Handle HCI Link Key Request event.
 *
 * Looks up a stored link key for the remote address and replies with
 * it, unless the key's strength is insufficient for the security
 * level being requested, in which case a negative reply forces fresh
 * pairing.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage is only maintained when mgmt is in use */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* An unauthenticated key cannot satisfy a request that
		 * demands MITM protection (auth_type bit 0).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Legacy combination keys from short PINs are too weak
		 * for high/FIPS security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	/* No usable key: negative reply triggers a new pairing */
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3187
/* Handle HCI Link Key Notification event.
 *
 * Stores the newly created link key, notifies user space via mgmt,
 * and applies retention policy: debug keys are discarded unless
 * explicitly kept, and non-persistent keys are flagged for flushing
 * on disconnect.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed-combination key keeps the original key type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	/* Keys are only stored when mgmt is in use */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del(&key->list);
		kfree(key);
	} else if (conn) {
		/* Non-persistent keys must be flushed when the link drops */
		if (persistent)
			clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
		else
			set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	}

unlock:
	hci_dev_unlock(hdev);
}
3241
3242 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3243 {
3244 struct hci_ev_clock_offset *ev = (void *) skb->data;
3245 struct hci_conn *conn;
3246
3247 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3248
3249 hci_dev_lock(hdev);
3250
3251 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3252 if (conn && !ev->status) {
3253 struct inquiry_entry *ie;
3254
3255 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3256 if (ie) {
3257 ie->data.clock_offset = ev->clock_offset;
3258 ie->timestamp = jiffies;
3259 }
3260 }
3261
3262 hci_dev_unlock(hdev);
3263 }
3264
3265 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3266 {
3267 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3268 struct hci_conn *conn;
3269
3270 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3271
3272 hci_dev_lock(hdev);
3273
3274 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3275 if (conn && !ev->status)
3276 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3277
3278 hci_dev_unlock(hdev);
3279 }
3280
3281 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3282 {
3283 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3284 struct inquiry_entry *ie;
3285
3286 BT_DBG("%s", hdev->name);
3287
3288 hci_dev_lock(hdev);
3289
3290 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3291 if (ie) {
3292 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3293 ie->timestamp = jiffies;
3294 }
3295
3296 hci_dev_unlock(hdev);
3297 }
3298
/* Handle HCI Inquiry Result with RSSI event.
 *
 * The event comes in two wire formats (with or without a pscan_mode
 * field); the per-response size distinguishes them. Each response is
 * added to the inquiry cache and reported as a found device.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	/* First byte of the event is the number of responses */
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not forwarded */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Detect the variant by per-response record size */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			/* This variant carries no pscan_mode field */
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3362
/* Handle HCI Read Remote Extended Features Complete event.
 *
 * Stores the requested feature page, derives SSP/SC host-support
 * flags from page 1, and continues connection setup (name request,
 * mgmt notification, connect confirmation).
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Store the page only if it fits in the features table */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host's SSP/SC support bits */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	/* Setup steps below only apply while the link is being configured */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name before announcing the connection to
	 * the management layer, unless mgmt was already notified.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* No outgoing authentication required: setup is complete */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3428
/* Handle HCI Synchronous Connection Complete event (SCO/eSCO).
 *
 * Finalizes a successful SCO/eSCO link, or — for a set of known
 * eSCO-negotiation failure codes on outgoing links — retries setup
 * with a reduced packet-type mask before declaring the link closed.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An eSCO request may have been downgraded to SCO by the
		 * remote; fall back to the pending eSCO connection.
		 */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	/* These negotiation failures are worth retrying on outgoing
	 * links with a restricted packet-type selection.
	 */
	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			/* Retry issued: keep the connection alive and wait
			 * for the next completion event.
			 */
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3486
3487 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3488 {
3489 size_t parsed = 0;
3490
3491 while (parsed < eir_len) {
3492 u8 field_len = eir[0];
3493
3494 if (field_len == 0)
3495 return parsed;
3496
3497 parsed += field_len + 1;
3498 eir += field_len + 1;
3499 }
3500
3501 return eir_len;
3502 }
3503
/* Handle HCI Extended Inquiry Result event.
 *
 * Each response includes EIR data; the device is added to the
 * inquiry cache (name resolution is skipped if the EIR already
 * contains the complete name) and reported via mgmt together with
 * the trimmed EIR payload.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	/* First byte of the event is the number of responses */
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not forwarded */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		/* Extended inquiry results imply SSP support */
		data.ssp_mode = 0x01;

		/* If the complete name is already present in the EIR
		 * there is no need for a separate name request.
		 */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
3553
/* Handle HCI Encryption Key Refresh Complete event.
 *
 * For LE links, promotes the pending security level on success and
 * notifies upper layers; a failure on an established link is treated
 * as an authentication failure and the link is dropped.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	/* The pending-encryption request has been answered either way */
	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Keep the link alive for the standard disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3603
3604 static u8 hci_get_auth_req(struct hci_conn *conn)
3605 {
3606 /* If remote requests no-bonding follow that lead */
3607 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3608 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3609 return conn->remote_auth | (conn->auth_type & 0x01);
3610
3611 /* If both remote and local have enough IO capabilities, require
3612 * MITM protection
3613 */
3614 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3615 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3616 return conn->remote_auth | 0x01;
3617
3618 /* No MITM protection possible so ignore remote requirement */
3619 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3620 }
3621
/* Handle the HCI IO Capability Request event.
 *
 * Replies with our IO capability and authentication requirements when
 * pairing is permitted, otherwise sends a negative reply rejecting the
 * pairing attempt.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of the pairing;
	 * the reference is dropped when pairing completes (see
	 * hci_simple_pair_complete_evt).
	 */
	hci_conn_hold(conn);

	/* Without the management interface nobody can answer the
	 * request, so leave it to time out.
	 */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Accept pairing when we are pairable or when the remote only
	 * asks for no-bonding (dedicated/general bonding bit masked off).
	 */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			cp.authentication = conn->auth_type;

			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 * conn->auth_type is not updated here since
			 * that might cause the user confirmation to be
			 * rejected in case the remote doesn't have the
			 * IO capabilities for MITM.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    cp.authentication != HCI_AT_NO_BONDING)
				cp.authentication |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
			cp.authentication = conn->auth_type;
		}

		/* Advertise OOB data presence when we have it stored and
		 * either we initiated or the remote indicated OOB support.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3690
3691 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3692 {
3693 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3694 struct hci_conn *conn;
3695
3696 BT_DBG("%s", hdev->name);
3697
3698 hci_dev_lock(hdev);
3699
3700 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3701 if (!conn)
3702 goto unlock;
3703
3704 conn->remote_cap = ev->capability;
3705 conn->remote_auth = ev->authentication;
3706 if (ev->oob_data)
3707 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3708
3709 unlock:
3710 hci_dev_unlock(hdev);
3711 }
3712
/* Handle the HCI User Confirmation Request event.
 *
 * Decides, based on local and remote MITM requirements and IO
 * capabilities, whether to reject, auto-accept (possibly delayed) or
 * forward the numeric-comparison confirmation to user space.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation request
	 */
	if (loc_mitm && conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM in which case we do auto-accept.
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optionally delay the auto-accept so a user gets a
		 * chance to see the passkey on both displays.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	/* Let user space confirm (or merely authorize, when
	 * confirm_hint == 1) the pairing.
	 */
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3782
3783 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3784 struct sk_buff *skb)
3785 {
3786 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3787
3788 BT_DBG("%s", hdev->name);
3789
3790 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3791 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3792 }
3793
3794 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3795 struct sk_buff *skb)
3796 {
3797 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3798 struct hci_conn *conn;
3799
3800 BT_DBG("%s", hdev->name);
3801
3802 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3803 if (!conn)
3804 return;
3805
3806 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3807 conn->passkey_entered = 0;
3808
3809 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3810 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3811 conn->dst_type, conn->passkey_notify,
3812 conn->passkey_entered);
3813 }
3814
3815 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3816 {
3817 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3818 struct hci_conn *conn;
3819
3820 BT_DBG("%s", hdev->name);
3821
3822 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3823 if (!conn)
3824 return;
3825
3826 switch (ev->type) {
3827 case HCI_KEYPRESS_STARTED:
3828 conn->passkey_entered = 0;
3829 return;
3830
3831 case HCI_KEYPRESS_ENTERED:
3832 conn->passkey_entered++;
3833 break;
3834
3835 case HCI_KEYPRESS_ERASED:
3836 conn->passkey_entered--;
3837 break;
3838
3839 case HCI_KEYPRESS_CLEARED:
3840 conn->passkey_entered = 0;
3841 break;
3842
3843 case HCI_KEYPRESS_COMPLETED:
3844 return;
3845 }
3846
3847 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3848 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3849 conn->dst_type, conn->passkey_notify,
3850 conn->passkey_entered);
3851 }
3852
3853 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3854 struct sk_buff *skb)
3855 {
3856 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3857 struct hci_conn *conn;
3858
3859 BT_DBG("%s", hdev->name);
3860
3861 hci_dev_lock(hdev);
3862
3863 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3864 if (!conn)
3865 goto unlock;
3866
3867 /* To avoid duplicate auth_failed events to user space we check
3868 * the HCI_CONN_AUTH_PEND flag which will be set if we
3869 * initiated the authentication. A traditional auth_complete
3870 * event gets always produced as initiator and is also mapped to
3871 * the mgmt_auth_failed event */
3872 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3873 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3874 ev->status);
3875
3876 hci_conn_drop(conn);
3877
3878 unlock:
3879 hci_dev_unlock(hdev);
3880 }
3881
3882 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3883 struct sk_buff *skb)
3884 {
3885 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3886 struct inquiry_entry *ie;
3887 struct hci_conn *conn;
3888
3889 BT_DBG("%s", hdev->name);
3890
3891 hci_dev_lock(hdev);
3892
3893 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3894 if (conn)
3895 memcpy(conn->features[1], ev->features, 8);
3896
3897 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3898 if (ie)
3899 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3900
3901 hci_dev_unlock(hdev);
3902 }
3903
/* Handle the HCI Remote OOB Data Request event.
 *
 * Replies with stored out-of-band pairing data for the remote device:
 * the extended (P-192 + P-256) variant when Secure Connections is
 * enabled, the legacy P-192-only variant otherwise, or a negative reply
 * when no OOB data is stored.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* OOB data is only managed through the management interface */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
			/* Secure Connections: send both the P-192 and
			 * P-256 hash/randomizer pairs.
			 */
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.randomizer192, data->randomizer192,
			       sizeof(cp.randomizer192));
			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
			memcpy(cp.randomizer256, data->randomizer256,
			       sizeof(cp.randomizer256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			/* Legacy pairing: only the P-192 values apply */
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.randomizer, data->randomizer192,
			       sizeof(cp.randomizer));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		/* No stored OOB data for this device */
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3954
/* Handle the HCI Physical Link Complete event (AMP).
 *
 * On success, marks the AMP connection as established, inherits the
 * peer address from the associated BR/EDR link and notifies the AMP
 * manager; on failure the connection object is deleted.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* The AMP link is paired with the BR/EDR connection that carries
	 * the L2CAP signalling for the AMP manager.
	 */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* Rebalance the reference while arming the disconnect timeout */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}
3993
/* Handle the HCI Logical Link Complete event (AMP).
 *
 * Creates the HCI channel for the new logical link and, when a BR/EDR
 * L2CAP channel is waiting for the AMP move, confirms the logical link
 * to L2CAP.
 *
 * NOTE(review): unlike most sibling handlers, this one takes no
 * hci_dev_lock around the connection/channel manipulation — confirm
 * whether that is intentional.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP links use block-based flow control, so the MTU
		 * follows the controller's block size.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4031
4032 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4033 struct sk_buff *skb)
4034 {
4035 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4036 struct hci_chan *hchan;
4037
4038 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4039 le16_to_cpu(ev->handle), ev->status);
4040
4041 if (ev->status)
4042 return;
4043
4044 hci_dev_lock(hdev);
4045
4046 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4047 if (!hchan)
4048 goto unlock;
4049
4050 amp_destroy_logical_link(hchan, ev->reason);
4051
4052 unlock:
4053 hci_dev_unlock(hdev);
4054 }
4055
4056 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4057 struct sk_buff *skb)
4058 {
4059 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4060 struct hci_conn *hcon;
4061
4062 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4063
4064 if (ev->status)
4065 return;
4066
4067 hci_dev_lock(hdev);
4068
4069 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4070 if (hcon) {
4071 hcon->state = BT_CLOSED;
4072 hci_conn_del(hcon);
4073 }
4074
4075 hci_dev_unlock(hdev);
4076 }
4077
/* Handle the HCI LE Connection Complete event.
 *
 * Matches (or creates) the hci_conn for the new LE link, records the
 * initiator/responder addresses used during connection establishment,
 * resolves RPAs back to identity addresses, applies blacklist policy
 * and finally moves the connection into BT_CONNECTED state.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* An outgoing attempt leaves a BT_CONNECT conn behind; reuse it */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			set_bit(HCI_CONN_MASTER, &conn->flags);
		}

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The pending attempt succeeded; stop its timeout */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_blacklist_lookup(hdev, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	/* Notify user space exactly once per connection */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* A pending auto-connect action for this device is now done;
	 * remove it and re-evaluate background scanning.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		hci_update_background_scan(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
4207
4208 static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4209 struct sk_buff *skb)
4210 {
4211 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4212 struct hci_conn *conn;
4213
4214 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4215
4216 if (ev->status)
4217 return;
4218
4219 hci_dev_lock(hdev);
4220
4221 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4222 if (conn) {
4223 conn->le_conn_interval = le16_to_cpu(ev->interval);
4224 conn->le_conn_latency = le16_to_cpu(ev->latency);
4225 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4226 }
4227
4228 hci_dev_unlock(hdev);
4229 }
4230
/* This function requires the caller holds hdev->lock.
 *
 * If the given address is on the pending-connection list, attempt the
 * LE connection now that the device has been seen advertising.
 *
 * Returns true when the address was pending (whether or not the
 * connection attempt could actually be started), false otherwise.
 */
static bool check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
				  u8 addr_type)
{
	struct hci_conn *conn;

	if (!hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, addr_type))
		return false;

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_AT_NO_BONDING);
	if (!IS_ERR(conn))
		return true;

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect_le() returns -EBUSY it means there is
		 * already an LE connection attempt going on. Since
		 * controllers don't support more than one connection
		 * attempt at the time, we don't consider this an error
		 * case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
	}

	return true;
}
4259
/* Process a single LE advertising report.
 *
 * During passive scanning only pending connections and explicitly
 * requested devices produce any action; during active scanning the
 * report is either stored to be merged with an expected scan response
 * or emitted immediately as a device-found event.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	bool match;
	u32 flags;

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		struct hci_conn_params *param;
		struct smp_irk *irk;

		/* Check if we need to convert to identity address */
		irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
		if (irk) {
			bdaddr = &irk->bdaddr;
			bdaddr_type = irk->addr_type;
		}

		/* Ignore if the device is blocked */
		if (hci_blacklist_lookup(hdev, bdaddr, bdaddr_type))
			return;

		/* Connectable advertising may satisfy a pending
		 * auto-connection; if it does, we're done with it.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_DIRECT_IND) {
			if (check_pending_le_conn(hdev, bdaddr, bdaddr_type))
				return;
		}

		/* Directed advertising carries no report-worthy data */
		if (type == LE_ADV_DIRECT_IND)
			return;

		/* Only devices explicitly marked for reporting get a
		 * device-found event during passive scanning.
		 */
		param = hci_pend_le_action_lookup(&hdev->pend_le_reports,
						  bdaddr, bdaddr_type);
		if (!param)
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4392
4393 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4394 {
4395 u8 num_reports = skb->data[0];
4396 void *ptr = &skb->data[1];
4397
4398 hci_dev_lock(hdev);
4399
4400 while (num_reports--) {
4401 struct hci_ev_le_advertising_info *ev = ptr;
4402 s8 rssi;
4403
4404 rssi = ev->data[ev->length];
4405 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4406 ev->bdaddr_type, rssi, ev->data, ev->length);
4407
4408 ptr += sizeof(*ev) + ev->length + 1;
4409 }
4410
4411 hci_dev_unlock(hdev);
4412 }
4413
/* Handle the HCI LE Long Term Key Request event.
 *
 * Looks up the LTK matching the event's EDiv/Rand values and replies
 * with it, updating the connection's pending security level; sends a
 * negative reply when no matching key (or connection) exists.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* The security level the link will reach once encryption with
	 * this key completes depends on whether the key itself was
	 * created with MITM protection.
	 */
	if (ltk->authenticated)
		conn->pending_sec_level = BT_SECURITY_HIGH;
	else
		conn->pending_sec_level = BT_SECURITY_MEDIUM;

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		/* One-shot key: mark the connection and discard it so
		 * it can never be reused.
		 */
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del(&ltk->list);
		kfree(ltk);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4469
4470 static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4471 u8 reason)
4472 {
4473 struct hci_cp_le_conn_param_req_neg_reply cp;
4474
4475 cp.handle = cpu_to_le16(handle);
4476 cp.reason = reason;
4477
4478 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4479 &cp);
4480 }
4481
/* Handle the HCI LE Remote Connection Parameter Request event.
 *
 * Validates the requested parameters, optionally stores them for the
 * device and notifies user space (when we are master), and then accepts
 * the request; invalid requests or unknown handles get a negative
 * reply.
 */
static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
	struct hci_cp_le_conn_param_req_reply cp;
	struct hci_conn *hcon;
	u16 handle, min, max, latency, timeout;

	handle = le16_to_cpu(ev->handle);
	min = le16_to_cpu(ev->interval_min);
	max = le16_to_cpu(ev->interval_max);
	latency = le16_to_cpu(ev->latency);
	timeout = le16_to_cpu(ev->timeout);

	hcon = hci_conn_hash_lookup_handle(hdev, handle);
	if (!hcon || hcon->state != BT_CONNECTED)
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_UNKNOWN_CONN_ID);

	/* Reject parameter combinations outside the spec-defined ranges */
	if (hci_check_conn_params(min, max, latency, timeout))
		return send_conn_param_neg_reply(hdev, handle,
						 HCI_ERROR_INVALID_LL_PARAMS);

	/* Only the master stores the parameters and informs user space;
	 * store_hint tells user space whether they were persisted.
	 */
	if (test_bit(HCI_CONN_MASTER, &hcon->flags)) {
		struct hci_conn_params *params;
		u8 store_hint;

		hci_dev_lock(hdev);

		params = hci_conn_params_lookup(hdev, &hcon->dst,
						hcon->dst_type);
		if (params) {
			params->conn_min_interval = min;
			params->conn_max_interval = max;
			params->conn_latency = latency;
			params->supervision_timeout = timeout;
			store_hint = 0x01;
		} else{
			store_hint = 0x00;
		}

		hci_dev_unlock(hdev);

		mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency, timeout);
	}

	/* Accept the request as-is; the event values are already in
	 * little-endian wire format so no conversion is needed.
	 */
	cp.handle = ev->handle;
	cp.interval_min = ev->interval_min;
	cp.interval_max = ev->interval_max;
	cp.latency = ev->latency;
	cp.timeout = ev->timeout;
	cp.min_ce_len = 0;
	cp.max_ce_len = 0;

	hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
}
4539
4540 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4541 {
4542 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4543
4544 skb_pull(skb, sizeof(*le_ev));
4545
4546 switch (le_ev->subevent) {
4547 case HCI_EV_LE_CONN_COMPLETE:
4548 hci_le_conn_complete_evt(hdev, skb);
4549 break;
4550
4551 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4552 hci_le_conn_update_complete_evt(hdev, skb);
4553 break;
4554
4555 case HCI_EV_LE_ADVERTISING_REPORT:
4556 hci_le_adv_report_evt(hdev, skb);
4557 break;
4558
4559 case HCI_EV_LE_LTK_REQ:
4560 hci_le_ltk_request_evt(hdev, skb);
4561 break;
4562
4563 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4564 hci_le_remote_conn_param_req_evt(hdev, skb);
4565 break;
4566
4567 default:
4568 break;
4569 }
4570 }
4571
4572 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4573 {
4574 struct hci_ev_channel_selected *ev = (void *) skb->data;
4575 struct hci_conn *hcon;
4576
4577 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4578
4579 skb_pull(skb, sizeof(*ev));
4580
4581 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4582 if (!hcon)
4583 return;
4584
4585 amp_read_loc_assoc_final_data(hdev, hcon);
4586 }
4587
4588 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
4589 {
4590 struct hci_event_hdr *hdr = (void *) skb->data;
4591 __u8 event = hdr->evt;
4592
4593 hci_dev_lock(hdev);
4594
4595 /* Received events are (currently) only needed when a request is
4596 * ongoing so avoid unnecessary memory allocation.
4597 */
4598 if (hdev->req_status == HCI_REQ_PEND) {
4599 kfree_skb(hdev->recv_evt);
4600 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
4601 }
4602
4603 hci_dev_unlock(hdev);
4604
4605 skb_pull(skb, HCI_EVENT_HDR_SIZE);
4606
4607 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
4608 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
4609 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
4610
4611 hci_req_cmd_complete(hdev, opcode, 0);
4612 }
4613
4614 switch (event) {
4615 case HCI_EV_INQUIRY_COMPLETE:
4616 hci_inquiry_complete_evt(hdev, skb);
4617 break;
4618
4619 case HCI_EV_INQUIRY_RESULT:
4620 hci_inquiry_result_evt(hdev, skb);
4621 break;
4622
4623 case HCI_EV_CONN_COMPLETE:
4624 hci_conn_complete_evt(hdev, skb);
4625 break;
4626
4627 case HCI_EV_CONN_REQUEST:
4628 hci_conn_request_evt(hdev, skb);
4629 break;
4630
4631 case HCI_EV_DISCONN_COMPLETE:
4632 hci_disconn_complete_evt(hdev, skb);
4633 break;
4634
4635 case HCI_EV_AUTH_COMPLETE:
4636 hci_auth_complete_evt(hdev, skb);
4637 break;
4638
4639 case HCI_EV_REMOTE_NAME:
4640 hci_remote_name_evt(hdev, skb);
4641 break;
4642
4643 case HCI_EV_ENCRYPT_CHANGE:
4644 hci_encrypt_change_evt(hdev, skb);
4645 break;
4646
4647 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
4648 hci_change_link_key_complete_evt(hdev, skb);
4649 break;
4650
4651 case HCI_EV_REMOTE_FEATURES:
4652 hci_remote_features_evt(hdev, skb);
4653 break;
4654
4655 case HCI_EV_CMD_COMPLETE:
4656 hci_cmd_complete_evt(hdev, skb);
4657 break;
4658
4659 case HCI_EV_CMD_STATUS:
4660 hci_cmd_status_evt(hdev, skb);
4661 break;
4662
4663 case HCI_EV_ROLE_CHANGE:
4664 hci_role_change_evt(hdev, skb);
4665 break;
4666
4667 case HCI_EV_NUM_COMP_PKTS:
4668 hci_num_comp_pkts_evt(hdev, skb);
4669 break;
4670
4671 case HCI_EV_MODE_CHANGE:
4672 hci_mode_change_evt(hdev, skb);
4673 break;
4674
4675 case HCI_EV_PIN_CODE_REQ:
4676 hci_pin_code_request_evt(hdev, skb);
4677 break;
4678
4679 case HCI_EV_LINK_KEY_REQ:
4680 hci_link_key_request_evt(hdev, skb);
4681 break;
4682
4683 case HCI_EV_LINK_KEY_NOTIFY:
4684 hci_link_key_notify_evt(hdev, skb);
4685 break;
4686
4687 case HCI_EV_CLOCK_OFFSET:
4688 hci_clock_offset_evt(hdev, skb);
4689 break;
4690
4691 case HCI_EV_PKT_TYPE_CHANGE:
4692 hci_pkt_type_change_evt(hdev, skb);
4693 break;
4694
4695 case HCI_EV_PSCAN_REP_MODE:
4696 hci_pscan_rep_mode_evt(hdev, skb);
4697 break;
4698
4699 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
4700 hci_inquiry_result_with_rssi_evt(hdev, skb);
4701 break;
4702
4703 case HCI_EV_REMOTE_EXT_FEATURES:
4704 hci_remote_ext_features_evt(hdev, skb);
4705 break;
4706
4707 case HCI_EV_SYNC_CONN_COMPLETE:
4708 hci_sync_conn_complete_evt(hdev, skb);
4709 break;
4710
4711 case HCI_EV_EXTENDED_INQUIRY_RESULT:
4712 hci_extended_inquiry_result_evt(hdev, skb);
4713 break;
4714
4715 case HCI_EV_KEY_REFRESH_COMPLETE:
4716 hci_key_refresh_complete_evt(hdev, skb);
4717 break;
4718
4719 case HCI_EV_IO_CAPA_REQUEST:
4720 hci_io_capa_request_evt(hdev, skb);
4721 break;
4722
4723 case HCI_EV_IO_CAPA_REPLY:
4724 hci_io_capa_reply_evt(hdev, skb);
4725 break;
4726
4727 case HCI_EV_USER_CONFIRM_REQUEST:
4728 hci_user_confirm_request_evt(hdev, skb);
4729 break;
4730
4731 case HCI_EV_USER_PASSKEY_REQUEST:
4732 hci_user_passkey_request_evt(hdev, skb);
4733 break;
4734
4735 case HCI_EV_USER_PASSKEY_NOTIFY:
4736 hci_user_passkey_notify_evt(hdev, skb);
4737 break;
4738
4739 case HCI_EV_KEYPRESS_NOTIFY:
4740 hci_keypress_notify_evt(hdev, skb);
4741 break;
4742
4743 case HCI_EV_SIMPLE_PAIR_COMPLETE:
4744 hci_simple_pair_complete_evt(hdev, skb);
4745 break;
4746
4747 case HCI_EV_REMOTE_HOST_FEATURES:
4748 hci_remote_host_features_evt(hdev, skb);
4749 break;
4750
4751 case HCI_EV_LE_META:
4752 hci_le_meta_evt(hdev, skb);
4753 break;
4754
4755 case HCI_EV_CHANNEL_SELECTED:
4756 hci_chan_selected_evt(hdev, skb);
4757 break;
4758
4759 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
4760 hci_remote_oob_data_request_evt(hdev, skb);
4761 break;
4762
4763 case HCI_EV_PHY_LINK_COMPLETE:
4764 hci_phy_link_complete_evt(hdev, skb);
4765 break;
4766
4767 case HCI_EV_LOGICAL_LINK_COMPLETE:
4768 hci_loglink_complete_evt(hdev, skb);
4769 break;
4770
4771 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
4772 hci_disconn_loglink_complete_evt(hdev, skb);
4773 break;
4774
4775 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
4776 hci_disconn_phylink_complete_evt(hdev, skb);
4777 break;
4778
4779 case HCI_EV_NUM_COMP_BLOCKS:
4780 hci_num_comp_blocks_evt(hdev, skb);
4781 break;
4782
4783 default:
4784 BT_DBG("%s event 0x%2.2x", hdev->name, event);
4785 break;
4786 }
4787
4788 kfree_skb(skb);
4789 hdev->stat.evt_rx++;
4790 }