Bluetooth: Add definitions for new link key types
net/bluetooth/hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "a2mp.h"
34 #include "amp.h"
35
36 /* Handle HCI Event packets */
37
38 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
39 {
40 __u8 status = *((__u8 *) skb->data);
41
42 BT_DBG("%s status 0x%2.2x", hdev->name, status);
43
44 if (status)
45 return;
46
47 clear_bit(HCI_INQUIRY, &hdev->flags);
48 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
49 wake_up_bit(&hdev->flags, HCI_INQUIRY);
50
51 hci_conn_check_pending(hdev);
52 }
53
54 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
55 {
56 __u8 status = *((__u8 *) skb->data);
57
58 BT_DBG("%s status 0x%2.2x", hdev->name, status);
59
60 if (status)
61 return;
62
63 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
64 }
65
66 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
67 {
68 __u8 status = *((__u8 *) skb->data);
69
70 BT_DBG("%s status 0x%2.2x", hdev->name, status);
71
72 if (status)
73 return;
74
75 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
76
77 hci_conn_check_pending(hdev);
78 }
79
80 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
81 struct sk_buff *skb)
82 {
83 BT_DBG("%s", hdev->name);
84 }
85
86 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
87 {
88 struct hci_rp_role_discovery *rp = (void *) skb->data;
89 struct hci_conn *conn;
90
91 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
92
93 if (rp->status)
94 return;
95
96 hci_dev_lock(hdev);
97
98 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
99 if (conn) {
100 if (rp->role)
101 conn->link_mode &= ~HCI_LM_MASTER;
102 else
103 conn->link_mode |= HCI_LM_MASTER;
104 }
105
106 hci_dev_unlock(hdev);
107 }
108
109 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
110 {
111 struct hci_rp_read_link_policy *rp = (void *) skb->data;
112 struct hci_conn *conn;
113
114 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
115
116 if (rp->status)
117 return;
118
119 hci_dev_lock(hdev);
120
121 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
122 if (conn)
123 conn->link_policy = __le16_to_cpu(rp->policy);
124
125 hci_dev_unlock(hdev);
126 }
127
128 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
129 {
130 struct hci_rp_write_link_policy *rp = (void *) skb->data;
131 struct hci_conn *conn;
132 void *sent;
133
134 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
135
136 if (rp->status)
137 return;
138
139 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
140 if (!sent)
141 return;
142
143 hci_dev_lock(hdev);
144
145 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
146 if (conn)
147 conn->link_policy = get_unaligned_le16(sent + 2);
148
149 hci_dev_unlock(hdev);
150 }
151
152 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
153 struct sk_buff *skb)
154 {
155 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
156
157 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
158
159 if (rp->status)
160 return;
161
162 hdev->link_policy = __le16_to_cpu(rp->policy);
163 }
164
165 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
166 struct sk_buff *skb)
167 {
168 __u8 status = *((__u8 *) skb->data);
169 void *sent;
170
171 BT_DBG("%s status 0x%2.2x", hdev->name, status);
172
173 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
174 if (!sent)
175 return;
176
177 if (!status)
178 hdev->link_policy = get_unaligned_le16(sent);
179 }
180
181 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
182 {
183 __u8 status = *((__u8 *) skb->data);
184
185 BT_DBG("%s status 0x%2.2x", hdev->name, status);
186
187 clear_bit(HCI_RESET, &hdev->flags);
188
189 /* Reset all non-persistent flags */
190 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
191
192 hdev->discovery.state = DISCOVERY_STOPPED;
193 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
194 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
195
196 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
197 hdev->adv_data_len = 0;
198
199 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
200 hdev->scan_rsp_data_len = 0;
201
202 hdev->ssp_debug_mode = 0;
203 }
204
205 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
206 {
207 __u8 status = *((__u8 *) skb->data);
208 void *sent;
209
210 BT_DBG("%s status 0x%2.2x", hdev->name, status);
211
212 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
213 if (!sent)
214 return;
215
216 hci_dev_lock(hdev);
217
218 if (test_bit(HCI_MGMT, &hdev->dev_flags))
219 mgmt_set_local_name_complete(hdev, sent, status);
220 else if (!status)
221 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
222
223 hci_dev_unlock(hdev);
224 }
225
226 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
227 {
228 struct hci_rp_read_local_name *rp = (void *) skb->data;
229
230 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
231
232 if (rp->status)
233 return;
234
235 if (test_bit(HCI_SETUP, &hdev->dev_flags))
236 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
237 }
238
239 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
240 {
241 __u8 status = *((__u8 *) skb->data);
242 void *sent;
243
244 BT_DBG("%s status 0x%2.2x", hdev->name, status);
245
246 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
247 if (!sent)
248 return;
249
250 if (!status) {
251 __u8 param = *((__u8 *) sent);
252
253 if (param == AUTH_ENABLED)
254 set_bit(HCI_AUTH, &hdev->flags);
255 else
256 clear_bit(HCI_AUTH, &hdev->flags);
257 }
258
259 if (test_bit(HCI_MGMT, &hdev->dev_flags))
260 mgmt_auth_enable_complete(hdev, status);
261 }
262
263 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
264 {
265 __u8 status = *((__u8 *) skb->data);
266 void *sent;
267
268 BT_DBG("%s status 0x%2.2x", hdev->name, status);
269
270 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
271 if (!sent)
272 return;
273
274 if (!status) {
275 __u8 param = *((__u8 *) sent);
276
277 if (param)
278 set_bit(HCI_ENCRYPT, &hdev->flags);
279 else
280 clear_bit(HCI_ENCRYPT, &hdev->flags);
281 }
282 }
283
284 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
285 {
286 __u8 param, status = *((__u8 *) skb->data);
287 int old_pscan, old_iscan;
288 void *sent;
289
290 BT_DBG("%s status 0x%2.2x", hdev->name, status);
291
292 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
293 if (!sent)
294 return;
295
296 param = *((__u8 *) sent);
297
298 hci_dev_lock(hdev);
299
300 if (status) {
301 mgmt_write_scan_failed(hdev, param, status);
302 hdev->discov_timeout = 0;
303 goto done;
304 }
305
306 /* We need to ensure that we set this back on if someone changed
307 * the scan mode through a raw HCI socket.
308 */
309 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
310
311 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
312 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
313
314 if (param & SCAN_INQUIRY) {
315 set_bit(HCI_ISCAN, &hdev->flags);
316 if (!old_iscan)
317 mgmt_discoverable(hdev, 1);
318 } else if (old_iscan)
319 mgmt_discoverable(hdev, 0);
320
321 if (param & SCAN_PAGE) {
322 set_bit(HCI_PSCAN, &hdev->flags);
323 if (!old_pscan)
324 mgmt_connectable(hdev, 1);
325 } else if (old_pscan)
326 mgmt_connectable(hdev, 0);
327
328 done:
329 hci_dev_unlock(hdev);
330 }
331
332 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
333 {
334 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
335
336 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
337
338 if (rp->status)
339 return;
340
341 memcpy(hdev->dev_class, rp->dev_class, 3);
342
343 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
344 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
345 }
346
347 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
348 {
349 __u8 status = *((__u8 *) skb->data);
350 void *sent;
351
352 BT_DBG("%s status 0x%2.2x", hdev->name, status);
353
354 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
355 if (!sent)
356 return;
357
358 hci_dev_lock(hdev);
359
360 if (status == 0)
361 memcpy(hdev->dev_class, sent, 3);
362
363 if (test_bit(HCI_MGMT, &hdev->dev_flags))
364 mgmt_set_class_of_dev_complete(hdev, sent, status);
365
366 hci_dev_unlock(hdev);
367 }
368
369 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
370 {
371 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
372 __u16 setting;
373
374 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
375
376 if (rp->status)
377 return;
378
379 setting = __le16_to_cpu(rp->voice_setting);
380
381 if (hdev->voice_setting == setting)
382 return;
383
384 hdev->voice_setting = setting;
385
386 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
387
388 if (hdev->notify)
389 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
390 }
391
392 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
393 struct sk_buff *skb)
394 {
395 __u8 status = *((__u8 *) skb->data);
396 __u16 setting;
397 void *sent;
398
399 BT_DBG("%s status 0x%2.2x", hdev->name, status);
400
401 if (status)
402 return;
403
404 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
405 if (!sent)
406 return;
407
408 setting = get_unaligned_le16(sent);
409
410 if (hdev->voice_setting == setting)
411 return;
412
413 hdev->voice_setting = setting;
414
415 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
416
417 if (hdev->notify)
418 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
419 }
420
421 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
422 struct sk_buff *skb)
423 {
424 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
425
426 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
427
428 if (rp->status)
429 return;
430
431 hdev->num_iac = rp->num_iac;
432
433 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
434 }
435
436 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
437 {
438 __u8 status = *((__u8 *) skb->data);
439 struct hci_cp_write_ssp_mode *sent;
440
441 BT_DBG("%s status 0x%2.2x", hdev->name, status);
442
443 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
444 if (!sent)
445 return;
446
447 if (!status) {
448 if (sent->mode)
449 hdev->features[1][0] |= LMP_HOST_SSP;
450 else
451 hdev->features[1][0] &= ~LMP_HOST_SSP;
452 }
453
454 if (test_bit(HCI_MGMT, &hdev->dev_flags))
455 mgmt_ssp_enable_complete(hdev, sent->mode, status);
456 else if (!status) {
457 if (sent->mode)
458 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
459 else
460 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
461 }
462 }
463
464 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
465 {
466 struct hci_rp_read_local_version *rp = (void *) skb->data;
467
468 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
469
470 if (rp->status)
471 return;
472
473 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
474 hdev->hci_ver = rp->hci_ver;
475 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
476 hdev->lmp_ver = rp->lmp_ver;
477 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
478 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
479 }
480 }
481
482 static void hci_cc_read_local_commands(struct hci_dev *hdev,
483 struct sk_buff *skb)
484 {
485 struct hci_rp_read_local_commands *rp = (void *) skb->data;
486
487 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
488
489 if (rp->status)
490 return;
491
492 if (test_bit(HCI_SETUP, &hdev->dev_flags))
493 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
494 }
495
496 static void hci_cc_read_local_features(struct hci_dev *hdev,
497 struct sk_buff *skb)
498 {
499 struct hci_rp_read_local_features *rp = (void *) skb->data;
500
501 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
502
503 if (rp->status)
504 return;
505
506 memcpy(hdev->features, rp->features, 8);
507
508 /* Adjust default settings according to features
509 * supported by device. */
510
511 if (hdev->features[0][0] & LMP_3SLOT)
512 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
513
514 if (hdev->features[0][0] & LMP_5SLOT)
515 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
516
517 if (hdev->features[0][1] & LMP_HV2) {
518 hdev->pkt_type |= (HCI_HV2);
519 hdev->esco_type |= (ESCO_HV2);
520 }
521
522 if (hdev->features[0][1] & LMP_HV3) {
523 hdev->pkt_type |= (HCI_HV3);
524 hdev->esco_type |= (ESCO_HV3);
525 }
526
527 if (lmp_esco_capable(hdev))
528 hdev->esco_type |= (ESCO_EV3);
529
530 if (hdev->features[0][4] & LMP_EV4)
531 hdev->esco_type |= (ESCO_EV4);
532
533 if (hdev->features[0][4] & LMP_EV5)
534 hdev->esco_type |= (ESCO_EV5);
535
536 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
537 hdev->esco_type |= (ESCO_2EV3);
538
539 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
540 hdev->esco_type |= (ESCO_3EV3);
541
542 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
543 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
544 }
545
546 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
547 struct sk_buff *skb)
548 {
549 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
550
551 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
552
553 if (rp->status)
554 return;
555
556 if (hdev->max_page < rp->max_page)
557 hdev->max_page = rp->max_page;
558
559 if (rp->page < HCI_MAX_PAGES)
560 memcpy(hdev->features[rp->page], rp->features, 8);
561 }
562
563 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
564 struct sk_buff *skb)
565 {
566 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
567
568 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
569
570 if (!rp->status)
571 hdev->flow_ctl_mode = rp->mode;
572 }
573
574 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
575 {
576 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
577
578 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
579
580 if (rp->status)
581 return;
582
583 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
584 hdev->sco_mtu = rp->sco_mtu;
585 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
586 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
587
588 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
589 hdev->sco_mtu = 64;
590 hdev->sco_pkts = 8;
591 }
592
593 hdev->acl_cnt = hdev->acl_pkts;
594 hdev->sco_cnt = hdev->sco_pkts;
595
596 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
597 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
598 }
599
600 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
601 {
602 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
603
604 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
605
606 if (!rp->status)
607 bacpy(&hdev->bdaddr, &rp->bdaddr);
608 }
609
610 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
611 struct sk_buff *skb)
612 {
613 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
614
615 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
616
617 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
618 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
619 hdev->page_scan_window = __le16_to_cpu(rp->window);
620 }
621 }
622
623 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
624 struct sk_buff *skb)
625 {
626 u8 status = *((u8 *) skb->data);
627 struct hci_cp_write_page_scan_activity *sent;
628
629 BT_DBG("%s status 0x%2.2x", hdev->name, status);
630
631 if (status)
632 return;
633
634 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
635 if (!sent)
636 return;
637
638 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
639 hdev->page_scan_window = __le16_to_cpu(sent->window);
640 }
641
642 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
643 struct sk_buff *skb)
644 {
645 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
646
647 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
648
649 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
650 hdev->page_scan_type = rp->type;
651 }
652
653 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
654 struct sk_buff *skb)
655 {
656 u8 status = *((u8 *) skb->data);
657 u8 *type;
658
659 BT_DBG("%s status 0x%2.2x", hdev->name, status);
660
661 if (status)
662 return;
663
664 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
665 if (type)
666 hdev->page_scan_type = *type;
667 }
668
669 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
670 struct sk_buff *skb)
671 {
672 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
673
674 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
675
676 if (rp->status)
677 return;
678
679 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
680 hdev->block_len = __le16_to_cpu(rp->block_len);
681 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
682
683 hdev->block_cnt = hdev->num_blocks;
684
685 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
686 hdev->block_cnt, hdev->block_len);
687 }
688
689 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
690 struct sk_buff *skb)
691 {
692 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
693
694 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
695
696 if (rp->status)
697 goto a2mp_rsp;
698
699 hdev->amp_status = rp->amp_status;
700 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
701 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
702 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
703 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
704 hdev->amp_type = rp->amp_type;
705 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
706 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
707 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
708 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
709
710 a2mp_rsp:
711 a2mp_send_getinfo_rsp(hdev);
712 }
713
714 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
715 struct sk_buff *skb)
716 {
717 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
718 struct amp_assoc *assoc = &hdev->loc_assoc;
719 size_t rem_len, frag_len;
720
721 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
722
723 if (rp->status)
724 goto a2mp_rsp;
725
726 frag_len = skb->len - sizeof(*rp);
727 rem_len = __le16_to_cpu(rp->rem_len);
728
729 if (rem_len > frag_len) {
730 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
731
732 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
733 assoc->offset += frag_len;
734
735 /* Read other fragments */
736 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
737
738 return;
739 }
740
741 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
742 assoc->len = assoc->offset + rem_len;
743 assoc->offset = 0;
744
745 a2mp_rsp:
746 /* Send A2MP Rsp when all fragments are received */
747 a2mp_send_getampassoc_rsp(hdev, rp->status);
748 a2mp_send_create_phy_link_req(hdev, rp->status);
749 }
750
751 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
752 struct sk_buff *skb)
753 {
754 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
755
756 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
757
758 if (!rp->status)
759 hdev->inq_tx_power = rp->tx_power;
760 }
761
762 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
763 {
764 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
765 struct hci_cp_pin_code_reply *cp;
766 struct hci_conn *conn;
767
768 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
769
770 hci_dev_lock(hdev);
771
772 if (test_bit(HCI_MGMT, &hdev->dev_flags))
773 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
774
775 if (rp->status)
776 goto unlock;
777
778 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
779 if (!cp)
780 goto unlock;
781
782 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
783 if (conn)
784 conn->pin_length = cp->pin_len;
785
786 unlock:
787 hci_dev_unlock(hdev);
788 }
789
790 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
791 {
792 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
793
794 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
795
796 hci_dev_lock(hdev);
797
798 if (test_bit(HCI_MGMT, &hdev->dev_flags))
799 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
800 rp->status);
801
802 hci_dev_unlock(hdev);
803 }
804
805 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
806 struct sk_buff *skb)
807 {
808 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
809
810 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
811
812 if (rp->status)
813 return;
814
815 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
816 hdev->le_pkts = rp->le_max_pkt;
817
818 hdev->le_cnt = hdev->le_pkts;
819
820 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
821 }
822
823 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
824 struct sk_buff *skb)
825 {
826 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
827
828 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
829
830 if (!rp->status)
831 memcpy(hdev->le_features, rp->features, 8);
832 }
833
834 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
835 struct sk_buff *skb)
836 {
837 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
838
839 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
840
841 if (!rp->status)
842 hdev->adv_tx_power = rp->tx_power;
843 }
844
845 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
846 {
847 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
848
849 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
850
851 hci_dev_lock(hdev);
852
853 if (test_bit(HCI_MGMT, &hdev->dev_flags))
854 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
855 rp->status);
856
857 hci_dev_unlock(hdev);
858 }
859
860 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
861 struct sk_buff *skb)
862 {
863 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
864
865 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
866
867 hci_dev_lock(hdev);
868
869 if (test_bit(HCI_MGMT, &hdev->dev_flags))
870 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
871 ACL_LINK, 0, rp->status);
872
873 hci_dev_unlock(hdev);
874 }
875
876 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
877 {
878 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
879
880 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
881
882 hci_dev_lock(hdev);
883
884 if (test_bit(HCI_MGMT, &hdev->dev_flags))
885 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
886 0, rp->status);
887
888 hci_dev_unlock(hdev);
889 }
890
891 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
892 struct sk_buff *skb)
893 {
894 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
895
896 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
897
898 hci_dev_lock(hdev);
899
900 if (test_bit(HCI_MGMT, &hdev->dev_flags))
901 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
902 ACL_LINK, 0, rp->status);
903
904 hci_dev_unlock(hdev);
905 }
906
907 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
908 struct sk_buff *skb)
909 {
910 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
911
912 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
913
914 hci_dev_lock(hdev);
915 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
916 rp->randomizer, rp->status);
917 hci_dev_unlock(hdev);
918 }
919
920 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
921 {
922 __u8 *sent, status = *((__u8 *) skb->data);
923
924 BT_DBG("%s status 0x%2.2x", hdev->name, status);
925
926 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
927 if (!sent)
928 return;
929
930 hci_dev_lock(hdev);
931
932 if (!status) {
933 if (*sent)
934 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
935 else
936 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
937 }
938
939 hci_dev_unlock(hdev);
940 }
941
942 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
943 struct sk_buff *skb)
944 {
945 struct hci_cp_le_set_scan_enable *cp;
946 __u8 status = *((__u8 *) skb->data);
947
948 BT_DBG("%s status 0x%2.2x", hdev->name, status);
949
950 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
951 if (!cp)
952 return;
953
954 if (status)
955 return;
956
957 switch (cp->enable) {
958 case LE_SCAN_ENABLE:
959 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
960 break;
961
962 case LE_SCAN_DISABLE:
963 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
964 break;
965
966 default:
967 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
968 break;
969 }
970 }
971
972 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
973 struct sk_buff *skb)
974 {
975 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
976
977 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
978
979 if (!rp->status)
980 hdev->le_white_list_size = rp->size;
981 }
982
983 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
984 struct sk_buff *skb)
985 {
986 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
987
988 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
989
990 if (!rp->status)
991 memcpy(hdev->le_states, rp->le_states, 8);
992 }
993
994 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
995 struct sk_buff *skb)
996 {
997 struct hci_cp_write_le_host_supported *sent;
998 __u8 status = *((__u8 *) skb->data);
999
1000 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1001
1002 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1003 if (!sent)
1004 return;
1005
1006 if (!status) {
1007 if (sent->le) {
1008 hdev->features[1][0] |= LMP_HOST_LE;
1009 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1010 } else {
1011 hdev->features[1][0] &= ~LMP_HOST_LE;
1012 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1013 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1014 }
1015
1016 if (sent->simul)
1017 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1018 else
1019 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1020 }
1021 }
1022
1023 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1024 struct sk_buff *skb)
1025 {
1026 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1027
1028 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1029 hdev->name, rp->status, rp->phy_handle);
1030
1031 if (rp->status)
1032 return;
1033
1034 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1035 }
1036
1037 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1038 {
1039 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1040
1041 if (status) {
1042 hci_conn_check_pending(hdev);
1043 return;
1044 }
1045
1046 set_bit(HCI_INQUIRY, &hdev->flags);
1047 }
1048
1049 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1050 {
1051 struct hci_cp_create_conn *cp;
1052 struct hci_conn *conn;
1053
1054 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1055
1056 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1057 if (!cp)
1058 return;
1059
1060 hci_dev_lock(hdev);
1061
1062 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1063
1064 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1065
1066 if (status) {
1067 if (conn && conn->state == BT_CONNECT) {
1068 if (status != 0x0c || conn->attempt > 2) {
1069 conn->state = BT_CLOSED;
1070 hci_proto_connect_cfm(conn, status);
1071 hci_conn_del(conn);
1072 } else
1073 conn->state = BT_CONNECT2;
1074 }
1075 } else {
1076 if (!conn) {
1077 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1078 if (conn) {
1079 conn->out = true;
1080 conn->link_mode |= HCI_LM_MASTER;
1081 } else
1082 BT_ERR("No memory for new connection");
1083 }
1084 }
1085
1086 hci_dev_unlock(hdev);
1087 }
1088
1089 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1090 {
1091 struct hci_cp_add_sco *cp;
1092 struct hci_conn *acl, *sco;
1093 __u16 handle;
1094
1095 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1096
1097 if (!status)
1098 return;
1099
1100 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1101 if (!cp)
1102 return;
1103
1104 handle = __le16_to_cpu(cp->handle);
1105
1106 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1107
1108 hci_dev_lock(hdev);
1109
1110 acl = hci_conn_hash_lookup_handle(hdev, handle);
1111 if (acl) {
1112 sco = acl->link;
1113 if (sco) {
1114 sco->state = BT_CLOSED;
1115
1116 hci_proto_connect_cfm(sco, status);
1117 hci_conn_del(sco);
1118 }
1119 }
1120
1121 hci_dev_unlock(hdev);
1122 }
1123
1124 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1125 {
1126 struct hci_cp_auth_requested *cp;
1127 struct hci_conn *conn;
1128
1129 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1130
1131 if (!status)
1132 return;
1133
1134 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1135 if (!cp)
1136 return;
1137
1138 hci_dev_lock(hdev);
1139
1140 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1141 if (conn) {
1142 if (conn->state == BT_CONFIG) {
1143 hci_proto_connect_cfm(conn, status);
1144 hci_conn_drop(conn);
1145 }
1146 }
1147
1148 hci_dev_unlock(hdev);
1149 }
1150
1151 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1152 {
1153 struct hci_cp_set_conn_encrypt *cp;
1154 struct hci_conn *conn;
1155
1156 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1157
1158 if (!status)
1159 return;
1160
1161 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1162 if (!cp)
1163 return;
1164
1165 hci_dev_lock(hdev);
1166
1167 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1168 if (conn) {
1169 if (conn->state == BT_CONFIG) {
1170 hci_proto_connect_cfm(conn, status);
1171 hci_conn_drop(conn);
1172 }
1173 }
1174
1175 hci_dev_unlock(hdev);
1176 }
1177
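/* Check whether an outgoing connection that is still in BT_CONFIG needs
 * authentication before it can be reported as connected. Returns 1 if an
 * Authentication Request should be issued, 0 otherwise.
 */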
1178 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1179 struct hci_conn *conn)
1180 {
1181 if (conn->state != BT_CONFIG || !conn->out)
1182 return 0;
1183
1184 if (conn->pending_sec_level == BT_SECURITY_SDP)
1185 return 0;
1186
1187 /* Only request authentication for SSP connections or non-SSP
1188 * devices with sec_level MEDIUM or HIGH or if MITM protection
1189 * is requested.
1190 */
1191 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1192 conn->pending_sec_level != BT_SECURITY_HIGH &&
1193 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1194 return 0;
1195
1196 return 1;
1197 }
1198
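/* Send a Remote Name Request for the given inquiry cache entry. */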
1199 static int hci_resolve_name(struct hci_dev *hdev,
1200 struct inquiry_entry *e)
1201 {
1202 struct hci_cp_remote_name_req cp;
1203
1204 memset(&cp, 0, sizeof(cp));
1205
1206 bacpy(&cp.bdaddr, &e->data.bdaddr);
1207 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1208 cp.pscan_mode = e->data.pscan_mode;
1209 cp.clock_offset = e->data.clock_offset;
1210
1211 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1212 }
1213
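/* Start name resolution for the next inquiry cache entry that still needs
 * it. Returns true if a Remote Name Request was sent.
 */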
1214 static bool hci_resolve_next_name(struct hci_dev *hdev)
1215 {
1216 struct discovery_state *discov = &hdev->discovery;
1217 struct inquiry_entry *e;
1218
1219 if (list_empty(&discov->resolve))
1220 return false;
1221
1222 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1223 if (!e)
1224 return false;
1225
1226 if (hci_resolve_name(hdev, e) == 0) {
1227 e->name_state = NAME_PENDING;
1228 return true;
1229 }
1230
1231 return false;
1232 }
1233
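/* Report a resolved remote name (if any) over mgmt and decide whether to
 * resolve the next pending name or finish the discovery procedure.
 */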
1234 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1235 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1236 {
1237 struct discovery_state *discov = &hdev->discovery;
1238 struct inquiry_entry *e;
1239
1240 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1241 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1242 name_len, conn->dev_class);
1243
1244 if (discov->state == DISCOVERY_STOPPED)
1245 return;
1246
1247 if (discov->state == DISCOVERY_STOPPING)
1248 goto discov_complete;
1249
1250 if (discov->state != DISCOVERY_RESOLVING)
1251 return;
1252
1253 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1254 	/* If the device was not found in the list of devices whose names are
1255 	 * pending, there is no need to continue resolving the next name, as it
1256 	 * will be done upon receiving another Remote Name Request Complete
1257 	 * Event. */
1258 if (!e)
1259 return;
1260
1261 list_del(&e->list);
1262 if (name) {
1263 e->name_state = NAME_KNOWN;
1264 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1265 e->data.rssi, name, name_len);
1266 } else {
1267 e->name_state = NAME_NOT_KNOWN;
1268 }
1269
1270 if (hci_resolve_next_name(hdev))
1271 return;
1272
1273 discov_complete:
1274 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1275 }
1276
1277 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1278 {
1279 struct hci_cp_remote_name_req *cp;
1280 struct hci_conn *conn;
1281
1282 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1283
1284 	/* If successful, wait for the name req complete event before
1285 	 * checking for the need to do authentication. */
1286 if (!status)
1287 return;
1288
1289 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1290 if (!cp)
1291 return;
1292
1293 hci_dev_lock(hdev);
1294
1295 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1296
1297 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1298 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1299
1300 if (!conn)
1301 goto unlock;
1302
1303 if (!hci_outgoing_auth_needed(hdev, conn))
1304 goto unlock;
1305
1306 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1307 struct hci_cp_auth_requested auth_cp;
1308
1309 auth_cp.handle = __cpu_to_le16(conn->handle);
1310 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1311 sizeof(auth_cp), &auth_cp);
1312 }
1313
1314 unlock:
1315 hci_dev_unlock(hdev);
1316 }
1317
1318 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1319 {
1320 struct hci_cp_read_remote_features *cp;
1321 struct hci_conn *conn;
1322
1323 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1324
1325 if (!status)
1326 return;
1327
1328 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1329 if (!cp)
1330 return;
1331
1332 hci_dev_lock(hdev);
1333
1334 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1335 if (conn) {
1336 if (conn->state == BT_CONFIG) {
1337 hci_proto_connect_cfm(conn, status);
1338 hci_conn_drop(conn);
1339 }
1340 }
1341
1342 hci_dev_unlock(hdev);
1343 }
1344
1345 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1346 {
1347 struct hci_cp_read_remote_ext_features *cp;
1348 struct hci_conn *conn;
1349
1350 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1351
1352 if (!status)
1353 return;
1354
1355 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1356 if (!cp)
1357 return;
1358
1359 hci_dev_lock(hdev);
1360
1361 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1362 if (conn) {
1363 if (conn->state == BT_CONFIG) {
1364 hci_proto_connect_cfm(conn, status);
1365 hci_conn_drop(conn);
1366 }
1367 }
1368
1369 hci_dev_unlock(hdev);
1370 }
1371
1372 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1373 {
1374 struct hci_cp_setup_sync_conn *cp;
1375 struct hci_conn *acl, *sco;
1376 __u16 handle;
1377
1378 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1379
1380 if (!status)
1381 return;
1382
1383 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1384 if (!cp)
1385 return;
1386
1387 handle = __le16_to_cpu(cp->handle);
1388
1389 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1390
1391 hci_dev_lock(hdev);
1392
1393 acl = hci_conn_hash_lookup_handle(hdev, handle);
1394 if (acl) {
1395 sco = acl->link;
1396 if (sco) {
1397 sco->state = BT_CLOSED;
1398
1399 hci_proto_connect_cfm(sco, status);
1400 hci_conn_del(sco);
1401 }
1402 }
1403
1404 hci_dev_unlock(hdev);
1405 }
1406
1407 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1408 {
1409 struct hci_cp_sniff_mode *cp;
1410 struct hci_conn *conn;
1411
1412 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1413
1414 if (!status)
1415 return;
1416
1417 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1418 if (!cp)
1419 return;
1420
1421 hci_dev_lock(hdev);
1422
1423 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1424 if (conn) {
1425 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1426
1427 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1428 hci_sco_setup(conn, status);
1429 }
1430
1431 hci_dev_unlock(hdev);
1432 }
1433
1434 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1435 {
1436 struct hci_cp_exit_sniff_mode *cp;
1437 struct hci_conn *conn;
1438
1439 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1440
1441 if (!status)
1442 return;
1443
1444 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1445 if (!cp)
1446 return;
1447
1448 hci_dev_lock(hdev);
1449
1450 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1451 if (conn) {
1452 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1453
1454 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1455 hci_sco_setup(conn, status);
1456 }
1457
1458 hci_dev_unlock(hdev);
1459 }
1460
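/* Command Status for HCI Disconnect: on failure, report the failed
 * disconnect attempt over the mgmt interface.
 */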
1461 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1462 {
1463 struct hci_cp_disconnect *cp;
1464 struct hci_conn *conn;
1465
1466 if (!status)
1467 return;
1468
1469 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1470 if (!cp)
1471 return;
1472
1473 hci_dev_lock(hdev);
1474
1475 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1476 if (conn)
1477 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1478 conn->dst_type, status);
1479
1480 hci_dev_unlock(hdev);
1481 }
1482
1483 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1484 {
1485 struct hci_cp_create_phy_link *cp;
1486
1487 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1488
1489 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1490 if (!cp)
1491 return;
1492
1493 hci_dev_lock(hdev);
1494
1495 if (status) {
1496 struct hci_conn *hcon;
1497
1498 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1499 if (hcon)
1500 hci_conn_del(hcon);
1501 } else {
1502 amp_write_remote_assoc(hdev, cp->phy_handle);
1503 }
1504
1505 hci_dev_unlock(hdev);
1506 }
1507
1508 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1509 {
1510 struct hci_cp_accept_phy_link *cp;
1511
1512 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1513
1514 if (status)
1515 return;
1516
1517 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1518 if (!cp)
1519 return;
1520
1521 amp_write_remote_assoc(hdev, cp->phy_handle);
1522 }
1523
1524 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1525 {
1526 __u8 status = *((__u8 *) skb->data);
1527 struct discovery_state *discov = &hdev->discovery;
1528 struct inquiry_entry *e;
1529
1530 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1531
1532 hci_conn_check_pending(hdev);
1533
1534 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1535 return;
1536
1537 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
1538 wake_up_bit(&hdev->flags, HCI_INQUIRY);
1539
1540 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1541 return;
1542
1543 hci_dev_lock(hdev);
1544
1545 if (discov->state != DISCOVERY_FINDING)
1546 goto unlock;
1547
1548 if (list_empty(&discov->resolve)) {
1549 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1550 goto unlock;
1551 }
1552
1553 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1554 if (e && hci_resolve_name(hdev, e) == 0) {
1555 e->name_state = NAME_PENDING;
1556 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1557 } else {
1558 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1559 }
1560
1561 unlock:
1562 hci_dev_unlock(hdev);
1563 }
1564
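/* Standard Inquiry Result: add each response to the inquiry cache and
 * report it as a found device over mgmt.
 */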
1565 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1566 {
1567 struct inquiry_data data;
1568 struct inquiry_info *info = (void *) (skb->data + 1);
1569 int num_rsp = *((__u8 *) skb->data);
1570
1571 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1572
1573 if (!num_rsp)
1574 return;
1575
1576 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1577 return;
1578
1579 hci_dev_lock(hdev);
1580
1581 for (; num_rsp; num_rsp--, info++) {
1582 bool name_known, ssp;
1583
1584 bacpy(&data.bdaddr, &info->bdaddr);
1585 data.pscan_rep_mode = info->pscan_rep_mode;
1586 data.pscan_period_mode = info->pscan_period_mode;
1587 data.pscan_mode = info->pscan_mode;
1588 memcpy(data.dev_class, info->dev_class, 3);
1589 data.clock_offset = info->clock_offset;
1590 data.rssi = 0x00;
1591 data.ssp_mode = 0x00;
1592
1593 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1594 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1595 info->dev_class, 0, !name_known, ssp, NULL,
1596 0);
1597 }
1598
1599 hci_dev_unlock(hdev);
1600 }
1601
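/* Connection Complete: finalize the hci_conn state, request the remote
 * features for ACL links and notify the upper layers.
 */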
1602 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1603 {
1604 struct hci_ev_conn_complete *ev = (void *) skb->data;
1605 struct hci_conn *conn;
1606
1607 BT_DBG("%s", hdev->name);
1608
1609 hci_dev_lock(hdev);
1610
1611 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1612 if (!conn) {
1613 if (ev->link_type != SCO_LINK)
1614 goto unlock;
1615
1616 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1617 if (!conn)
1618 goto unlock;
1619
1620 conn->type = SCO_LINK;
1621 }
1622
1623 if (!ev->status) {
1624 conn->handle = __le16_to_cpu(ev->handle);
1625
1626 if (conn->type == ACL_LINK) {
1627 conn->state = BT_CONFIG;
1628 hci_conn_hold(conn);
1629
1630 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1631 !hci_find_link_key(hdev, &ev->bdaddr))
1632 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1633 else
1634 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1635 } else
1636 conn->state = BT_CONNECTED;
1637
1638 hci_conn_add_sysfs(conn);
1639
1640 if (test_bit(HCI_AUTH, &hdev->flags))
1641 conn->link_mode |= HCI_LM_AUTH;
1642
1643 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1644 conn->link_mode |= HCI_LM_ENCRYPT;
1645
1646 /* Get remote features */
1647 if (conn->type == ACL_LINK) {
1648 struct hci_cp_read_remote_features cp;
1649 cp.handle = ev->handle;
1650 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1651 sizeof(cp), &cp);
1652 }
1653
1654 /* Set packet type for incoming connection */
1655 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1656 struct hci_cp_change_conn_ptype cp;
1657 cp.handle = ev->handle;
1658 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1659 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1660 &cp);
1661 }
1662 } else {
1663 conn->state = BT_CLOSED;
1664 if (conn->type == ACL_LINK)
1665 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1666 conn->dst_type, ev->status);
1667 }
1668
1669 if (conn->type == ACL_LINK)
1670 hci_sco_setup(conn, ev->status);
1671
1672 if (ev->status) {
1673 hci_proto_connect_cfm(conn, ev->status);
1674 hci_conn_del(conn);
1675 } else if (ev->link_type != ACL_LINK)
1676 hci_proto_connect_cfm(conn, ev->status);
1677
1678 unlock:
1679 hci_dev_unlock(hdev);
1680
1681 hci_conn_check_pending(hdev);
1682 }
1683
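/* Incoming connection request: accept it if the link mode and protocols
 * allow and the peer is not blacklisted, otherwise reject it.
 */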
1684 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1685 {
1686 struct hci_ev_conn_request *ev = (void *) skb->data;
1687 int mask = hdev->link_mode;
1688 __u8 flags = 0;
1689
1690 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
1691 ev->link_type);
1692
1693 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
1694 &flags);
1695
1696 if ((mask & HCI_LM_ACCEPT) &&
1697 !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
1698 /* Connection accepted */
1699 struct inquiry_entry *ie;
1700 struct hci_conn *conn;
1701
1702 hci_dev_lock(hdev);
1703
1704 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1705 if (ie)
1706 memcpy(ie->data.dev_class, ev->dev_class, 3);
1707
1708 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1709 &ev->bdaddr);
1710 if (!conn) {
1711 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1712 if (!conn) {
1713 BT_ERR("No memory for new connection");
1714 hci_dev_unlock(hdev);
1715 return;
1716 }
1717 }
1718
1719 memcpy(conn->dev_class, ev->dev_class, 3);
1720
1721 hci_dev_unlock(hdev);
1722
1723 if (ev->link_type == ACL_LINK ||
1724 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
1725 struct hci_cp_accept_conn_req cp;
1726 conn->state = BT_CONNECT;
1727
1728 bacpy(&cp.bdaddr, &ev->bdaddr);
1729
1730 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1731 cp.role = 0x00; /* Become master */
1732 else
1733 cp.role = 0x01; /* Remain slave */
1734
1735 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1736 &cp);
1737 } else if (!(flags & HCI_PROTO_DEFER)) {
1738 struct hci_cp_accept_sync_conn_req cp;
1739 conn->state = BT_CONNECT;
1740
1741 bacpy(&cp.bdaddr, &ev->bdaddr);
1742 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1743
1744 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1745 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1746 cp.max_latency = __constant_cpu_to_le16(0xffff);
1747 cp.content_format = cpu_to_le16(hdev->voice_setting);
1748 cp.retrans_effort = 0xff;
1749
1750 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1751 sizeof(cp), &cp);
1752 } else {
1753 conn->state = BT_CONNECT2;
1754 hci_proto_connect_cfm(conn, 0);
1755 }
1756 } else {
1757 /* Connection rejected */
1758 struct hci_cp_reject_conn_req cp;
1759
1760 bacpy(&cp.bdaddr, &ev->bdaddr);
1761 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1762 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1763 }
1764 }
1765
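/* Translate an HCI disconnect error code into the disconnect reason
 * reported over the mgmt interface.
 */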
1766 static u8 hci_to_mgmt_reason(u8 err)
1767 {
1768 switch (err) {
1769 case HCI_ERROR_CONNECTION_TIMEOUT:
1770 return MGMT_DEV_DISCONN_TIMEOUT;
1771 case HCI_ERROR_REMOTE_USER_TERM:
1772 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1773 case HCI_ERROR_REMOTE_POWER_OFF:
1774 return MGMT_DEV_DISCONN_REMOTE;
1775 case HCI_ERROR_LOCAL_HOST_TERM:
1776 return MGMT_DEV_DISCONN_LOCAL_HOST;
1777 default:
1778 return MGMT_DEV_DISCONN_UNKNOWN;
1779 }
1780 }
1781
1782 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1783 {
1784 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1785 u8 reason = hci_to_mgmt_reason(ev->reason);
1786 struct hci_conn *conn;
1787 u8 type;
1788
1789 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1790
1791 hci_dev_lock(hdev);
1792
1793 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1794 if (!conn)
1795 goto unlock;
1796
1797 if (ev->status) {
1798 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1799 conn->dst_type, ev->status);
1800 goto unlock;
1801 }
1802
1803 conn->state = BT_CLOSED;
1804
1805 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1806 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1807 conn->dst_type, reason);
1808
1809 if (conn->type == ACL_LINK && conn->flush_key)
1810 hci_remove_link_key(hdev, &conn->dst);
1811
1812 type = conn->type;
1813
1814 hci_proto_disconn_cfm(conn, ev->reason);
1815 hci_conn_del(conn);
1816
1817 /* Re-enable advertising if necessary, since it might
1818 * have been disabled by the connection. From the
1819 * HCI_LE_Set_Advertise_Enable command description in
1820 * the core specification (v4.0):
1821 * "The Controller shall continue advertising until the Host
1822 * issues an LE_Set_Advertise_Enable command with
1823 * Advertising_Enable set to 0x00 (Advertising is disabled)
1824 * or until a connection is created or until the Advertising
1825 * is timed out due to Directed Advertising."
1826 */
1827 if (type == LE_LINK)
1828 mgmt_reenable_advertising(hdev);
1829
1830 unlock:
1831 hci_dev_unlock(hdev);
1832 }
1833
1834 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1835 {
1836 struct hci_ev_auth_complete *ev = (void *) skb->data;
1837 struct hci_conn *conn;
1838
1839 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1840
1841 hci_dev_lock(hdev);
1842
1843 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1844 if (!conn)
1845 goto unlock;
1846
1847 if (!ev->status) {
1848 if (!hci_conn_ssp_enabled(conn) &&
1849 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1850 BT_INFO("re-auth of legacy device is not possible.");
1851 } else {
1852 conn->link_mode |= HCI_LM_AUTH;
1853 conn->sec_level = conn->pending_sec_level;
1854 }
1855 } else {
1856 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1857 ev->status);
1858 }
1859
1860 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1861 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1862
1863 if (conn->state == BT_CONFIG) {
1864 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1865 struct hci_cp_set_conn_encrypt cp;
1866 cp.handle = ev->handle;
1867 cp.encrypt = 0x01;
1868 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1869 &cp);
1870 } else {
1871 conn->state = BT_CONNECTED;
1872 hci_proto_connect_cfm(conn, ev->status);
1873 hci_conn_drop(conn);
1874 }
1875 } else {
1876 hci_auth_cfm(conn, ev->status);
1877
1878 hci_conn_hold(conn);
1879 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1880 hci_conn_drop(conn);
1881 }
1882
1883 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1884 if (!ev->status) {
1885 struct hci_cp_set_conn_encrypt cp;
1886 cp.handle = ev->handle;
1887 cp.encrypt = 0x01;
1888 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1889 &cp);
1890 } else {
1891 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1892 hci_encrypt_cfm(conn, ev->status, 0x00);
1893 }
1894 }
1895
1896 unlock:
1897 hci_dev_unlock(hdev);
1898 }
1899
1900 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1901 {
1902 struct hci_ev_remote_name *ev = (void *) skb->data;
1903 struct hci_conn *conn;
1904
1905 BT_DBG("%s", hdev->name);
1906
1907 hci_conn_check_pending(hdev);
1908
1909 hci_dev_lock(hdev);
1910
1911 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1912
1913 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1914 goto check_auth;
1915
1916 if (ev->status == 0)
1917 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1918 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1919 else
1920 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
1921
1922 check_auth:
1923 if (!conn)
1924 goto unlock;
1925
1926 if (!hci_outgoing_auth_needed(hdev, conn))
1927 goto unlock;
1928
1929 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1930 struct hci_cp_auth_requested cp;
1931 cp.handle = __cpu_to_le16(conn->handle);
1932 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1933 }
1934
1935 unlock:
1936 hci_dev_unlock(hdev);
1937 }
1938
1939 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1940 {
1941 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1942 struct hci_conn *conn;
1943
1944 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1945
1946 hci_dev_lock(hdev);
1947
1948 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1949 if (conn) {
1950 if (!ev->status) {
1951 if (ev->encrypt) {
1952 /* Encryption implies authentication */
1953 conn->link_mode |= HCI_LM_AUTH;
1954 conn->link_mode |= HCI_LM_ENCRYPT;
1955 conn->sec_level = conn->pending_sec_level;
1956 } else
1957 conn->link_mode &= ~HCI_LM_ENCRYPT;
1958 }
1959
1960 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1961
1962 if (ev->status && conn->state == BT_CONNECTED) {
1963 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
1964 hci_conn_drop(conn);
1965 goto unlock;
1966 }
1967
1968 if (conn->state == BT_CONFIG) {
1969 if (!ev->status)
1970 conn->state = BT_CONNECTED;
1971
1972 hci_proto_connect_cfm(conn, ev->status);
1973 hci_conn_drop(conn);
1974 } else
1975 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
1976 }
1977
1978 unlock:
1979 hci_dev_unlock(hdev);
1980 }
1981
1982 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
1983 struct sk_buff *skb)
1984 {
1985 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1986 struct hci_conn *conn;
1987
1988 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1989
1990 hci_dev_lock(hdev);
1991
1992 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1993 if (conn) {
1994 if (!ev->status)
1995 conn->link_mode |= HCI_LM_SECURE;
1996
1997 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1998
1999 hci_key_change_cfm(conn, ev->status);
2000 }
2001
2002 hci_dev_unlock(hdev);
2003 }
2004
2005 static void hci_remote_features_evt(struct hci_dev *hdev,
2006 struct sk_buff *skb)
2007 {
2008 struct hci_ev_remote_features *ev = (void *) skb->data;
2009 struct hci_conn *conn;
2010
2011 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2012
2013 hci_dev_lock(hdev);
2014
2015 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2016 if (!conn)
2017 goto unlock;
2018
2019 if (!ev->status)
2020 memcpy(conn->features[0], ev->features, 8);
2021
2022 if (conn->state != BT_CONFIG)
2023 goto unlock;
2024
2025 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2026 struct hci_cp_read_remote_ext_features cp;
2027 cp.handle = ev->handle;
2028 cp.page = 0x01;
2029 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2030 sizeof(cp), &cp);
2031 goto unlock;
2032 }
2033
2034 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2035 struct hci_cp_remote_name_req cp;
2036 memset(&cp, 0, sizeof(cp));
2037 bacpy(&cp.bdaddr, &conn->dst);
2038 cp.pscan_rep_mode = 0x02;
2039 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2040 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2041 mgmt_device_connected(hdev, &conn->dst, conn->type,
2042 conn->dst_type, 0, NULL, 0,
2043 conn->dev_class);
2044
2045 if (!hci_outgoing_auth_needed(hdev, conn)) {
2046 conn->state = BT_CONNECTED;
2047 hci_proto_connect_cfm(conn, ev->status);
2048 hci_conn_drop(conn);
2049 }
2050
2051 unlock:
2052 hci_dev_unlock(hdev);
2053 }
2054
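/* Dispatch a Command Complete event to the matching hci_cc_* handler
 * based on the completed command's opcode.
 */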
2055 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2056 {
2057 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2058 u8 status = skb->data[sizeof(*ev)];
2059 __u16 opcode;
2060
2061 skb_pull(skb, sizeof(*ev));
2062
2063 opcode = __le16_to_cpu(ev->opcode);
2064
2065 switch (opcode) {
2066 case HCI_OP_INQUIRY_CANCEL:
2067 hci_cc_inquiry_cancel(hdev, skb);
2068 break;
2069
2070 case HCI_OP_PERIODIC_INQ:
2071 hci_cc_periodic_inq(hdev, skb);
2072 break;
2073
2074 case HCI_OP_EXIT_PERIODIC_INQ:
2075 hci_cc_exit_periodic_inq(hdev, skb);
2076 break;
2077
2078 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2079 hci_cc_remote_name_req_cancel(hdev, skb);
2080 break;
2081
2082 case HCI_OP_ROLE_DISCOVERY:
2083 hci_cc_role_discovery(hdev, skb);
2084 break;
2085
2086 case HCI_OP_READ_LINK_POLICY:
2087 hci_cc_read_link_policy(hdev, skb);
2088 break;
2089
2090 case HCI_OP_WRITE_LINK_POLICY:
2091 hci_cc_write_link_policy(hdev, skb);
2092 break;
2093
2094 case HCI_OP_READ_DEF_LINK_POLICY:
2095 hci_cc_read_def_link_policy(hdev, skb);
2096 break;
2097
2098 case HCI_OP_WRITE_DEF_LINK_POLICY:
2099 hci_cc_write_def_link_policy(hdev, skb);
2100 break;
2101
2102 case HCI_OP_RESET:
2103 hci_cc_reset(hdev, skb);
2104 break;
2105
2106 case HCI_OP_WRITE_LOCAL_NAME:
2107 hci_cc_write_local_name(hdev, skb);
2108 break;
2109
2110 case HCI_OP_READ_LOCAL_NAME:
2111 hci_cc_read_local_name(hdev, skb);
2112 break;
2113
2114 case HCI_OP_WRITE_AUTH_ENABLE:
2115 hci_cc_write_auth_enable(hdev, skb);
2116 break;
2117
2118 case HCI_OP_WRITE_ENCRYPT_MODE:
2119 hci_cc_write_encrypt_mode(hdev, skb);
2120 break;
2121
2122 case HCI_OP_WRITE_SCAN_ENABLE:
2123 hci_cc_write_scan_enable(hdev, skb);
2124 break;
2125
2126 case HCI_OP_READ_CLASS_OF_DEV:
2127 hci_cc_read_class_of_dev(hdev, skb);
2128 break;
2129
2130 case HCI_OP_WRITE_CLASS_OF_DEV:
2131 hci_cc_write_class_of_dev(hdev, skb);
2132 break;
2133
2134 case HCI_OP_READ_VOICE_SETTING:
2135 hci_cc_read_voice_setting(hdev, skb);
2136 break;
2137
2138 case HCI_OP_WRITE_VOICE_SETTING:
2139 hci_cc_write_voice_setting(hdev, skb);
2140 break;
2141
2142 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2143 hci_cc_read_num_supported_iac(hdev, skb);
2144 break;
2145
2146 case HCI_OP_WRITE_SSP_MODE:
2147 hci_cc_write_ssp_mode(hdev, skb);
2148 break;
2149
2150 case HCI_OP_READ_LOCAL_VERSION:
2151 hci_cc_read_local_version(hdev, skb);
2152 break;
2153
2154 case HCI_OP_READ_LOCAL_COMMANDS:
2155 hci_cc_read_local_commands(hdev, skb);
2156 break;
2157
2158 case HCI_OP_READ_LOCAL_FEATURES:
2159 hci_cc_read_local_features(hdev, skb);
2160 break;
2161
2162 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2163 hci_cc_read_local_ext_features(hdev, skb);
2164 break;
2165
2166 case HCI_OP_READ_BUFFER_SIZE:
2167 hci_cc_read_buffer_size(hdev, skb);
2168 break;
2169
2170 case HCI_OP_READ_BD_ADDR:
2171 hci_cc_read_bd_addr(hdev, skb);
2172 break;
2173
2174 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2175 hci_cc_read_page_scan_activity(hdev, skb);
2176 break;
2177
2178 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2179 hci_cc_write_page_scan_activity(hdev, skb);
2180 break;
2181
2182 case HCI_OP_READ_PAGE_SCAN_TYPE:
2183 hci_cc_read_page_scan_type(hdev, skb);
2184 break;
2185
2186 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2187 hci_cc_write_page_scan_type(hdev, skb);
2188 break;
2189
2190 case HCI_OP_READ_DATA_BLOCK_SIZE:
2191 hci_cc_read_data_block_size(hdev, skb);
2192 break;
2193
2194 case HCI_OP_READ_FLOW_CONTROL_MODE:
2195 hci_cc_read_flow_control_mode(hdev, skb);
2196 break;
2197
2198 case HCI_OP_READ_LOCAL_AMP_INFO:
2199 hci_cc_read_local_amp_info(hdev, skb);
2200 break;
2201
2202 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2203 hci_cc_read_local_amp_assoc(hdev, skb);
2204 break;
2205
2206 case HCI_OP_READ_INQ_RSP_TX_POWER:
2207 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2208 break;
2209
2210 case HCI_OP_PIN_CODE_REPLY:
2211 hci_cc_pin_code_reply(hdev, skb);
2212 break;
2213
2214 case HCI_OP_PIN_CODE_NEG_REPLY:
2215 hci_cc_pin_code_neg_reply(hdev, skb);
2216 break;
2217
2218 case HCI_OP_READ_LOCAL_OOB_DATA:
2219 hci_cc_read_local_oob_data_reply(hdev, skb);
2220 break;
2221
2222 case HCI_OP_LE_READ_BUFFER_SIZE:
2223 hci_cc_le_read_buffer_size(hdev, skb);
2224 break;
2225
2226 case HCI_OP_LE_READ_LOCAL_FEATURES:
2227 hci_cc_le_read_local_features(hdev, skb);
2228 break;
2229
2230 case HCI_OP_LE_READ_ADV_TX_POWER:
2231 hci_cc_le_read_adv_tx_power(hdev, skb);
2232 break;
2233
2234 case HCI_OP_USER_CONFIRM_REPLY:
2235 hci_cc_user_confirm_reply(hdev, skb);
2236 break;
2237
2238 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2239 hci_cc_user_confirm_neg_reply(hdev, skb);
2240 break;
2241
2242 case HCI_OP_USER_PASSKEY_REPLY:
2243 hci_cc_user_passkey_reply(hdev, skb);
2244 break;
2245
2246 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2247 hci_cc_user_passkey_neg_reply(hdev, skb);
2248 break;
2249
2250 case HCI_OP_LE_SET_ADV_ENABLE:
2251 hci_cc_le_set_adv_enable(hdev, skb);
2252 break;
2253
2254 case HCI_OP_LE_SET_SCAN_ENABLE:
2255 hci_cc_le_set_scan_enable(hdev, skb);
2256 break;
2257
2258 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2259 hci_cc_le_read_white_list_size(hdev, skb);
2260 break;
2261
2262 case HCI_OP_LE_READ_SUPPORTED_STATES:
2263 hci_cc_le_read_supported_states(hdev, skb);
2264 break;
2265
2266 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2267 hci_cc_write_le_host_supported(hdev, skb);
2268 break;
2269
2270 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2271 hci_cc_write_remote_amp_assoc(hdev, skb);
2272 break;
2273
2274 default:
2275 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2276 break;
2277 }
2278
2279 if (opcode != HCI_OP_NOP)
2280 del_timer(&hdev->cmd_timer);
2281
2282 hci_req_cmd_complete(hdev, opcode, status);
2283
2284 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2285 atomic_set(&hdev->cmd_cnt, 1);
2286 if (!skb_queue_empty(&hdev->cmd_q))
2287 queue_work(hdev->workqueue, &hdev->cmd_work);
2288 }
2289 }
2290
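/* Command Status event: the command was accepted (or failed early)
 * but completes asynchronously. Only the hci_cs_* status handlers
 * run here; the final result arrives in a later event. */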
2291 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2292 {
2293 struct hci_ev_cmd_status *ev = (void *) skb->data;
2294 __u16 opcode;
2295
2296 skb_pull(skb, sizeof(*ev));
2297
2298 opcode = __le16_to_cpu(ev->opcode);
2299
2300 switch (opcode) {
2301 case HCI_OP_INQUIRY:
2302 hci_cs_inquiry(hdev, ev->status);
2303 break;
2304
2305 case HCI_OP_CREATE_CONN:
2306 hci_cs_create_conn(hdev, ev->status);
2307 break;
2308
2309 case HCI_OP_ADD_SCO:
2310 hci_cs_add_sco(hdev, ev->status);
2311 break;
2312
2313 case HCI_OP_AUTH_REQUESTED:
2314 hci_cs_auth_requested(hdev, ev->status);
2315 break;
2316
2317 case HCI_OP_SET_CONN_ENCRYPT:
2318 hci_cs_set_conn_encrypt(hdev, ev->status);
2319 break;
2320
2321 case HCI_OP_REMOTE_NAME_REQ:
2322 hci_cs_remote_name_req(hdev, ev->status);
2323 break;
2324
2325 case HCI_OP_READ_REMOTE_FEATURES:
2326 hci_cs_read_remote_features(hdev, ev->status);
2327 break;
2328
2329 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2330 hci_cs_read_remote_ext_features(hdev, ev->status);
2331 break;
2332
2333 case HCI_OP_SETUP_SYNC_CONN:
2334 hci_cs_setup_sync_conn(hdev, ev->status);
2335 break;
2336
2337 case HCI_OP_SNIFF_MODE:
2338 hci_cs_sniff_mode(hdev, ev->status);
2339 break;
2340
2341 case HCI_OP_EXIT_SNIFF_MODE:
2342 hci_cs_exit_sniff_mode(hdev, ev->status);
2343 break;
2344
2345 case HCI_OP_DISCONNECT:
2346 hci_cs_disconnect(hdev, ev->status);
2347 break;
2348
2349 case HCI_OP_CREATE_PHY_LINK:
2350 hci_cs_create_phylink(hdev, ev->status);
2351 break;
2352
2353 case HCI_OP_ACCEPT_PHY_LINK:
2354 hci_cs_accept_phylink(hdev, ev->status);
2355 break;
2356
2357 default:
2358 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2359 break;
2360 }
2361
2362 if (opcode != HCI_OP_NOP)
2363 del_timer(&hdev->cmd_timer);
2364
2365 if (ev->status ||
2366 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
2367 hci_req_cmd_complete(hdev, opcode, ev->status);
2368
2369 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2370 atomic_set(&hdev->cmd_cnt, 1);
2371 if (!skb_queue_empty(&hdev->cmd_q))
2372 queue_work(hdev->workqueue, &hdev->cmd_work);
2373 }
2374 }
2375
2376 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2377 {
2378 struct hci_ev_role_change *ev = (void *) skb->data;
2379 struct hci_conn *conn;
2380
2381 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2382
2383 hci_dev_lock(hdev);
2384
2385 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2386 if (conn) {
2387 if (!ev->status) {
2388 if (ev->role)
2389 conn->link_mode &= ~HCI_LM_MASTER;
2390 else
2391 conn->link_mode |= HCI_LM_MASTER;
2392 }
2393
2394 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2395
2396 hci_role_switch_cfm(conn, ev->status, ev->role);
2397 }
2398
2399 hci_dev_unlock(hdev);
2400 }
2401
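/* Number of Completed Packets: packet-based flow control. Each
 * reported handle returns transmit credits for its connection; the
 * per-type counters are clamped to the negotiated buffer sizes
 * before the TX work is rescheduled. */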
2402 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2403 {
2404 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2405 int i;
2406
2407 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2408 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2409 return;
2410 }
2411
2412 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2413 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2414 BT_DBG("%s bad parameters", hdev->name);
2415 return;
2416 }
2417
2418 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2419
2420 for (i = 0; i < ev->num_hndl; i++) {
2421 struct hci_comp_pkts_info *info = &ev->handles[i];
2422 struct hci_conn *conn;
2423 __u16 handle, count;
2424
2425 handle = __le16_to_cpu(info->handle);
2426 count = __le16_to_cpu(info->count);
2427
2428 conn = hci_conn_hash_lookup_handle(hdev, handle);
2429 if (!conn)
2430 continue;
2431
2432 conn->sent -= count;
2433
2434 switch (conn->type) {
2435 case ACL_LINK:
2436 hdev->acl_cnt += count;
2437 if (hdev->acl_cnt > hdev->acl_pkts)
2438 hdev->acl_cnt = hdev->acl_pkts;
2439 break;
2440
2441 case LE_LINK:
2442 if (hdev->le_pkts) {
2443 hdev->le_cnt += count;
2444 if (hdev->le_cnt > hdev->le_pkts)
2445 hdev->le_cnt = hdev->le_pkts;
2446 } else {
2447 hdev->acl_cnt += count;
2448 if (hdev->acl_cnt > hdev->acl_pkts)
2449 hdev->acl_cnt = hdev->acl_pkts;
2450 }
2451 break;
2452
2453 case SCO_LINK:
2454 hdev->sco_cnt += count;
2455 if (hdev->sco_cnt > hdev->sco_pkts)
2456 hdev->sco_cnt = hdev->sco_pkts;
2457 break;
2458
2459 default:
2460 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2461 break;
2462 }
2463 }
2464
2465 queue_work(hdev->workqueue, &hdev->tx_work);
2466 }
2467
2468 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2469 __u16 handle)
2470 {
2471 struct hci_chan *chan;
2472
2473 switch (hdev->dev_type) {
2474 case HCI_BREDR:
2475 return hci_conn_hash_lookup_handle(hdev, handle);
2476 case HCI_AMP:
2477 chan = hci_chan_lookup_handle(hdev, handle);
2478 if (chan)
2479 return chan->conn;
2480 break;
2481 default:
2482 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2483 break;
2484 }
2485
2486 return NULL;
2487 }
2488
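/* Number of Completed Data Blocks: block-based flow control, used
 * when the controller (e.g. an AMP) accounts for buffer usage in
 * data blocks rather than packets. */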
2489 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2490 {
2491 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2492 int i;
2493
2494 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2495 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2496 return;
2497 }
2498
2499 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2500 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2501 BT_DBG("%s bad parameters", hdev->name);
2502 return;
2503 }
2504
2505 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2506 ev->num_hndl);
2507
2508 for (i = 0; i < ev->num_hndl; i++) {
2509 struct hci_comp_blocks_info *info = &ev->handles[i];
2510 struct hci_conn *conn = NULL;
2511 __u16 handle, block_count;
2512
2513 handle = __le16_to_cpu(info->handle);
2514 block_count = __le16_to_cpu(info->blocks);
2515
2516 conn = __hci_conn_lookup_handle(hdev, handle);
2517 if (!conn)
2518 continue;
2519
2520 conn->sent -= block_count;
2521
2522 switch (conn->type) {
2523 case ACL_LINK:
2524 case AMP_LINK:
2525 hdev->block_cnt += block_count;
2526 if (hdev->block_cnt > hdev->num_blocks)
2527 hdev->block_cnt = hdev->num_blocks;
2528 break;
2529
2530 default:
2531 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2532 break;
2533 }
2534 }
2535
2536 queue_work(hdev->workqueue, &hdev->tx_work);
2537 }
2538
2539 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2540 {
2541 struct hci_ev_mode_change *ev = (void *) skb->data;
2542 struct hci_conn *conn;
2543
2544 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2545
2546 hci_dev_lock(hdev);
2547
2548 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2549 if (conn) {
2550 conn->mode = ev->mode;
2551
2552 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2553 &conn->flags)) {
2554 if (conn->mode == HCI_CM_ACTIVE)
2555 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2556 else
2557 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2558 }
2559
2560 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2561 hci_sco_setup(conn, ev->status);
2562 }
2563
2564 hci_dev_unlock(hdev);
2565 }
2566
2567 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2568 {
2569 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2570 struct hci_conn *conn;
2571
2572 BT_DBG("%s", hdev->name);
2573
2574 hci_dev_lock(hdev);
2575
2576 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2577 if (!conn)
2578 goto unlock;
2579
2580 if (conn->state == BT_CONNECTED) {
2581 hci_conn_hold(conn);
2582 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2583 hci_conn_drop(conn);
2584 }
2585
2586 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2587 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2588 sizeof(ev->bdaddr), &ev->bdaddr);
2589 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2590 u8 secure;
2591
2592 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2593 secure = 1;
2594 else
2595 secure = 0;
2596
2597 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2598 }
2599
2600 unlock:
2601 hci_dev_unlock(hdev);
2602 }
2603
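/* Link Key Request: the controller asks for a stored link key.
 * Reply with the key only if it passes the security checks below
 * (debug keys, unauthenticated keys, short PINs for high security);
 * otherwise send a negative reply. */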
2604 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2605 {
2606 struct hci_ev_link_key_req *ev = (void *) skb->data;
2607 struct hci_cp_link_key_reply cp;
2608 struct hci_conn *conn;
2609 struct link_key *key;
2610
2611 BT_DBG("%s", hdev->name);
2612
2613 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2614 return;
2615
2616 hci_dev_lock(hdev);
2617
2618 key = hci_find_link_key(hdev, &ev->bdaddr);
2619 if (!key) {
2620 BT_DBG("%s link key not found for %pMR", hdev->name,
2621 &ev->bdaddr);
2622 goto not_found;
2623 }
2624
2625 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
2626 &ev->bdaddr);
2627
2628 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2629 key->type == HCI_LK_DEBUG_COMBINATION) {
2630 BT_DBG("%s ignoring debug key", hdev->name);
2631 goto not_found;
2632 }
2633
2634 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2635 if (conn) {
2636 if (key->type == HCI_LK_UNAUTH_COMBINATION_P192 &&
2637 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2638 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2639 goto not_found;
2640 }
2641
2642 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2643 conn->pending_sec_level == BT_SECURITY_HIGH) {
2644 BT_DBG("%s ignoring key unauthenticated for high security",
2645 hdev->name);
2646 goto not_found;
2647 }
2648
2649 conn->key_type = key->type;
2650 conn->pin_length = key->pin_len;
2651 }
2652
2653 bacpy(&cp.bdaddr, &ev->bdaddr);
2654 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2655
2656 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2657
2658 hci_dev_unlock(hdev);
2659
2660 return;
2661
2662 not_found:
2663 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2664 hci_dev_unlock(hdev);
2665 }
2666
2667 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2668 {
2669 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2670 struct hci_conn *conn;
2671 u8 pin_len = 0;
2672
2673 BT_DBG("%s", hdev->name);
2674
2675 hci_dev_lock(hdev);
2676
2677 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2678 if (conn) {
2679 hci_conn_hold(conn);
2680 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2681 pin_len = conn->pin_length;
2682
2683 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2684 conn->key_type = ev->key_type;
2685
2686 hci_conn_drop(conn);
2687 }
2688
2689 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2690 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2691 ev->key_type, pin_len);
2692
2693 hci_dev_unlock(hdev);
2694 }
2695
2696 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2697 {
2698 struct hci_ev_clock_offset *ev = (void *) skb->data;
2699 struct hci_conn *conn;
2700
2701 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2702
2703 hci_dev_lock(hdev);
2704
2705 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2706 if (conn && !ev->status) {
2707 struct inquiry_entry *ie;
2708
2709 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2710 if (ie) {
2711 ie->data.clock_offset = ev->clock_offset;
2712 ie->timestamp = jiffies;
2713 }
2714 }
2715
2716 hci_dev_unlock(hdev);
2717 }
2718
2719 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2720 {
2721 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2722 struct hci_conn *conn;
2723
2724 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2725
2726 hci_dev_lock(hdev);
2727
2728 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2729 if (conn && !ev->status)
2730 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2731
2732 hci_dev_unlock(hdev);
2733 }
2734
2735 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2736 {
2737 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2738 struct inquiry_entry *ie;
2739
2740 BT_DBG("%s", hdev->name);
2741
2742 hci_dev_lock(hdev);
2743
2744 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2745 if (ie) {
2746 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2747 ie->timestamp = jiffies;
2748 }
2749
2750 hci_dev_unlock(hdev);
2751 }
2752
2753 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2754 struct sk_buff *skb)
2755 {
2756 struct inquiry_data data;
2757 int num_rsp = *((__u8 *) skb->data);
2758 bool name_known, ssp;
2759
2760 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2761
2762 if (!num_rsp)
2763 return;
2764
2765 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2766 return;
2767
2768 hci_dev_lock(hdev);
2769
2770 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2771 struct inquiry_info_with_rssi_and_pscan_mode *info;
2772 info = (void *) (skb->data + 1);
2773
2774 for (; num_rsp; num_rsp--, info++) {
2775 bacpy(&data.bdaddr, &info->bdaddr);
2776 data.pscan_rep_mode = info->pscan_rep_mode;
2777 data.pscan_period_mode = info->pscan_period_mode;
2778 data.pscan_mode = info->pscan_mode;
2779 memcpy(data.dev_class, info->dev_class, 3);
2780 data.clock_offset = info->clock_offset;
2781 data.rssi = info->rssi;
2782 data.ssp_mode = 0x00;
2783
2784 name_known = hci_inquiry_cache_update(hdev, &data,
2785 false, &ssp);
2786 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2787 info->dev_class, info->rssi,
2788 !name_known, ssp, NULL, 0);
2789 }
2790 } else {
2791 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2792
2793 for (; num_rsp; num_rsp--, info++) {
2794 bacpy(&data.bdaddr, &info->bdaddr);
2795 data.pscan_rep_mode = info->pscan_rep_mode;
2796 data.pscan_period_mode = info->pscan_period_mode;
2797 data.pscan_mode = 0x00;
2798 memcpy(data.dev_class, info->dev_class, 3);
2799 data.clock_offset = info->clock_offset;
2800 data.rssi = info->rssi;
2801 data.ssp_mode = 0x00;
2802 name_known = hci_inquiry_cache_update(hdev, &data,
2803 false, &ssp);
2804 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2805 info->dev_class, info->rssi,
2806 !name_known, ssp, NULL, 0);
2807 }
2808 }
2809
2810 hci_dev_unlock(hdev);
2811 }
2812
2813 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2814 struct sk_buff *skb)
2815 {
2816 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2817 struct hci_conn *conn;
2818
2819 BT_DBG("%s", hdev->name);
2820
2821 hci_dev_lock(hdev);
2822
2823 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2824 if (!conn)
2825 goto unlock;
2826
2827 if (ev->page < HCI_MAX_PAGES)
2828 memcpy(conn->features[ev->page], ev->features, 8);
2829
2830 if (!ev->status && ev->page == 0x01) {
2831 struct inquiry_entry *ie;
2832
2833 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2834 if (ie)
2835 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2836
2837 if (ev->features[0] & LMP_HOST_SSP) {
2838 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2839 } else {
2840 /* It is mandatory by the Bluetooth specification that
2841 * Extended Inquiry Results are only used when Secure
2842 * Simple Pairing is enabled, but some devices violate
2843 * this.
2844 *
2845 * To make these devices work, the internal SSP
2846 * enabled flag needs to be cleared if the remote host
2847 * features do not indicate SSP support */
2848 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2849 }
2850 }
2851
2852 if (conn->state != BT_CONFIG)
2853 goto unlock;
2854
2855 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2856 struct hci_cp_remote_name_req cp;
2857 memset(&cp, 0, sizeof(cp));
2858 bacpy(&cp.bdaddr, &conn->dst);
2859 cp.pscan_rep_mode = 0x02;
2860 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2861 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2862 mgmt_device_connected(hdev, &conn->dst, conn->type,
2863 conn->dst_type, 0, NULL, 0,
2864 conn->dev_class);
2865
2866 if (!hci_outgoing_auth_needed(hdev, conn)) {
2867 conn->state = BT_CONNECTED;
2868 hci_proto_connect_cfm(conn, ev->status);
2869 hci_conn_drop(conn);
2870 }
2871
2872 unlock:
2873 hci_dev_unlock(hdev);
2874 }
2875
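/* Synchronous (SCO/eSCO) Connection Complete. A handful of error
 * codes trigger a retry of the setup with an adjusted packet type
 * mask before the connection is finally given up on. */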
2876 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2877 struct sk_buff *skb)
2878 {
2879 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2880 struct hci_conn *conn;
2881
2882 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2883
2884 hci_dev_lock(hdev);
2885
2886 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2887 if (!conn) {
2888 if (ev->link_type == ESCO_LINK)
2889 goto unlock;
2890
2891 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2892 if (!conn)
2893 goto unlock;
2894
2895 conn->type = SCO_LINK;
2896 }
2897
2898 switch (ev->status) {
2899 case 0x00:
2900 conn->handle = __le16_to_cpu(ev->handle);
2901 conn->state = BT_CONNECTED;
2902
2903 hci_conn_add_sysfs(conn);
2904 break;
2905
2906 case 0x0d: /* Connection Rejected due to Limited Resources */
2907 case 0x11: /* Unsupported Feature or Parameter Value */
2908 case 0x1c: /* SCO interval rejected */
2909 case 0x1a: /* Unsupported Remote Feature */
2910 case 0x1f: /* Unspecified error */
2911 if (conn->out) {
2912 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2913 (hdev->esco_type & EDR_ESCO_MASK);
2914 if (hci_setup_sync(conn, conn->link->handle))
2915 goto unlock;
2916 }
2917 /* fall through */
2918
2919 default:
2920 conn->state = BT_CLOSED;
2921 break;
2922 }
2923
2924 hci_proto_connect_cfm(conn, ev->status);
2925 if (ev->status)
2926 hci_conn_del(conn);
2927
2928 unlock:
2929 hci_dev_unlock(hdev);
2930 }
2931
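/* EIR data is a sequence of length-prefixed fields; a zero-length
 * field terminates the significant part. Return the number of
 * significant octets. */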
2932 static inline size_t eir_get_length(u8 *eir, size_t eir_len)
2933 {
2934 size_t parsed = 0;
2935
2936 while (parsed < eir_len) {
2937 u8 field_len = eir[0];
2938
2939 if (field_len == 0)
2940 return parsed;
2941
2942 parsed += field_len + 1;
2943 eir += field_len + 1;
2944 }
2945
2946 return eir_len;
2947 }
2948
2949 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
2950 struct sk_buff *skb)
2951 {
2952 struct inquiry_data data;
2953 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2954 int num_rsp = *((__u8 *) skb->data);
2955 size_t eir_len;
2956
2957 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2958
2959 if (!num_rsp)
2960 return;
2961
2962 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2963 return;
2964
2965 hci_dev_lock(hdev);
2966
2967 for (; num_rsp; num_rsp--, info++) {
2968 bool name_known, ssp;
2969
2970 bacpy(&data.bdaddr, &info->bdaddr);
2971 data.pscan_rep_mode = info->pscan_rep_mode;
2972 data.pscan_period_mode = info->pscan_period_mode;
2973 data.pscan_mode = 0x00;
2974 memcpy(data.dev_class, info->dev_class, 3);
2975 data.clock_offset = info->clock_offset;
2976 data.rssi = info->rssi;
2977 data.ssp_mode = 0x01;
2978
2979 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2980 name_known = eir_has_data_type(info->data,
2981 sizeof(info->data),
2982 EIR_NAME_COMPLETE);
2983 else
2984 name_known = true;
2985
2986 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
2987 &ssp);
2988 eir_len = eir_get_length(info->data, sizeof(info->data));
2989 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2990 info->dev_class, info->rssi, !name_known,
2991 ssp, info->data, eir_len);
2992 }
2993
2994 hci_dev_unlock(hdev);
2995 }
2996
2997 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
2998 struct sk_buff *skb)
2999 {
3000 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3001 struct hci_conn *conn;
3002
3003 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3004 __le16_to_cpu(ev->handle));
3005
3006 hci_dev_lock(hdev);
3007
3008 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3009 if (!conn)
3010 goto unlock;
3011
3012 if (!ev->status)
3013 conn->sec_level = conn->pending_sec_level;
3014
3015 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3016
3017 if (ev->status && conn->state == BT_CONNECTED) {
3018 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3019 hci_conn_drop(conn);
3020 goto unlock;
3021 }
3022
3023 if (conn->state == BT_CONFIG) {
3024 if (!ev->status)
3025 conn->state = BT_CONNECTED;
3026
3027 hci_proto_connect_cfm(conn, ev->status);
3028 hci_conn_drop(conn);
3029 } else {
3030 hci_auth_cfm(conn, ev->status);
3031
3032 hci_conn_hold(conn);
3033 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3034 hci_conn_drop(conn);
3035 }
3036
3037 unlock:
3038 hci_dev_unlock(hdev);
3039 }
3040
3041 static u8 hci_get_auth_req(struct hci_conn *conn)
3042 {
3043 /* If remote requests dedicated bonding, follow that lead */
3044 if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
3045 conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
3046 /* If both remote and local IO capabilities allow MITM
3047 * protection then require it, otherwise don't */
3048 if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
3049 conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
3050 return HCI_AT_DEDICATED_BONDING;
3051 else
3052 return HCI_AT_DEDICATED_BONDING_MITM;
3053 }
3054
3055 /* If remote requests no-bonding, follow that lead */
3056 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3057 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3058 return conn->remote_auth | (conn->auth_type & 0x01);
3059
3060 return conn->auth_type;
3061 }
3062
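/* IO Capability Request: reply with our IO capability, OOB data
 * presence and the authentication requirement derived by
 * hci_get_auth_req(), or reject pairing with a negative reply. */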
3063 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3064 {
3065 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3066 struct hci_conn *conn;
3067
3068 BT_DBG("%s", hdev->name);
3069
3070 hci_dev_lock(hdev);
3071
3072 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3073 if (!conn)
3074 goto unlock;
3075
3076 hci_conn_hold(conn);
3077
3078 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3079 goto unlock;
3080
3081 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3082 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3083 struct hci_cp_io_capability_reply cp;
3084
3085 bacpy(&cp.bdaddr, &ev->bdaddr);
3086 /* Change the IO capability from KeyboardDisplay to
3087 * DisplayYesNo, since KeyboardDisplay is not supported by the BT spec. */
3088 cp.capability = (conn->io_capability == 0x04) ?
3089 HCI_IO_DISPLAY_YESNO : conn->io_capability;
3090 conn->auth_type = hci_get_auth_req(conn);
3091 cp.authentication = conn->auth_type;
3092
3093 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3094 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3095 cp.oob_data = 0x01;
3096 else
3097 cp.oob_data = 0x00;
3098
3099 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3100 sizeof(cp), &cp);
3101 } else {
3102 struct hci_cp_io_capability_neg_reply cp;
3103
3104 bacpy(&cp.bdaddr, &ev->bdaddr);
3105 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3106
3107 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3108 sizeof(cp), &cp);
3109 }
3110
3111 unlock:
3112 hci_dev_unlock(hdev);
3113 }
3114
3115 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3116 {
3117 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3118 struct hci_conn *conn;
3119
3120 BT_DBG("%s", hdev->name);
3121
3122 hci_dev_lock(hdev);
3123
3124 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3125 if (!conn)
3126 goto unlock;
3127
3128 conn->remote_cap = ev->capability;
3129 conn->remote_auth = ev->authentication;
3130 if (ev->oob_data)
3131 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3132
3133 unlock:
3134 hci_dev_unlock(hdev);
3135 }
3136
3137 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3138 struct sk_buff *skb)
3139 {
3140 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3141 int loc_mitm, rem_mitm, confirm_hint = 0;
3142 struct hci_conn *conn;
3143
3144 BT_DBG("%s", hdev->name);
3145
3146 hci_dev_lock(hdev);
3147
3148 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3149 goto unlock;
3150
3151 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3152 if (!conn)
3153 goto unlock;
3154
3155 loc_mitm = (conn->auth_type & 0x01);
3156 rem_mitm = (conn->remote_auth & 0x01);
3157
3158 /* If we require MITM but the remote device can't provide that
3159 * (it has NoInputNoOutput) then reject the confirmation
3160 * request. The only exception is when we're dedicated bonding
3161 * initiators (connect_cfm_cb set) since then we always have the MITM
3162 * bit set. */
3163 if (!conn->connect_cfm_cb && loc_mitm &&
3164 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3165 BT_DBG("Rejecting request: remote device can't provide MITM");
3166 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3167 sizeof(ev->bdaddr), &ev->bdaddr);
3168 goto unlock;
3169 }
3170
3171 /* If no side requires MITM protection, auto-accept */
3172 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
3173 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
3174
3175 /* If we're not the initiator, request authorization to
3176 * proceed from user space (mgmt_user_confirm with
3177 * confirm_hint set to 1). */
3178 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3179 BT_DBG("Confirming auto-accept as acceptor");
3180 confirm_hint = 1;
3181 goto confirm;
3182 }
3183
3184 BT_DBG("Auto-accept of user confirmation with %ums delay",
3185 hdev->auto_accept_delay);
3186
3187 if (hdev->auto_accept_delay > 0) {
3188 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3189 queue_delayed_work(conn->hdev->workqueue,
3190 &conn->auto_accept_work, delay);
3191 goto unlock;
3192 }
3193
3194 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3195 sizeof(ev->bdaddr), &ev->bdaddr);
3196 goto unlock;
3197 }
3198
3199 confirm:
3200 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3201 confirm_hint);
3202
3203 unlock:
3204 hci_dev_unlock(hdev);
3205 }
3206
3207 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3208 struct sk_buff *skb)
3209 {
3210 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3211
3212 BT_DBG("%s", hdev->name);
3213
3214 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3215 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3216 }
3217
3218 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3219 struct sk_buff *skb)
3220 {
3221 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3222 struct hci_conn *conn;
3223
3224 BT_DBG("%s", hdev->name);
3225
3226 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3227 if (!conn)
3228 return;
3229
3230 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3231 conn->passkey_entered = 0;
3232
3233 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3234 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3235 conn->dst_type, conn->passkey_notify,
3236 conn->passkey_entered);
3237 }
3238
3239 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3240 {
3241 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3242 struct hci_conn *conn;
3243
3244 BT_DBG("%s", hdev->name);
3245
3246 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3247 if (!conn)
3248 return;
3249
3250 switch (ev->type) {
3251 case HCI_KEYPRESS_STARTED:
3252 conn->passkey_entered = 0;
3253 return;
3254
3255 case HCI_KEYPRESS_ENTERED:
3256 conn->passkey_entered++;
3257 break;
3258
3259 case HCI_KEYPRESS_ERASED:
3260 conn->passkey_entered--;
3261 break;
3262
3263 case HCI_KEYPRESS_CLEARED:
3264 conn->passkey_entered = 0;
3265 break;
3266
3267 case HCI_KEYPRESS_COMPLETED:
3268 return;
3269 }
3270
3271 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3272 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3273 conn->dst_type, conn->passkey_notify,
3274 conn->passkey_entered);
3275 }
3276
3277 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3278 struct sk_buff *skb)
3279 {
3280 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3281 struct hci_conn *conn;
3282
3283 BT_DBG("%s", hdev->name);
3284
3285 hci_dev_lock(hdev);
3286
3287 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3288 if (!conn)
3289 goto unlock;
3290
3291 /* To avoid duplicate auth_failed events to user space we check
3292 * the HCI_CONN_AUTH_PEND flag, which will be set if we
3293 * initiated the authentication. A traditional auth_complete
3294 * event is always produced as initiator and is also mapped to
3295 * the mgmt_auth_failed event */
3296 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3297 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3298 ev->status);
3299
3300 hci_conn_drop(conn);
3301
3302 unlock:
3303 hci_dev_unlock(hdev);
3304 }
3305
3306 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3307 struct sk_buff *skb)
3308 {
3309 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3310 struct inquiry_entry *ie;
3311 struct hci_conn *conn;
3312
3313 BT_DBG("%s", hdev->name);
3314
3315 hci_dev_lock(hdev);
3316
3317 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3318 if (conn)
3319 memcpy(conn->features[1], ev->features, 8);
3320
3321 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3322 if (ie)
3323 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3324
3325 hci_dev_unlock(hdev);
3326 }
3327
3328 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3329 struct sk_buff *skb)
3330 {
3331 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3332 struct oob_data *data;
3333
3334 BT_DBG("%s", hdev->name);
3335
3336 hci_dev_lock(hdev);
3337
3338 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3339 goto unlock;
3340
3341 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3342 if (data) {
3343 struct hci_cp_remote_oob_data_reply cp;
3344
3345 bacpy(&cp.bdaddr, &ev->bdaddr);
3346 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3347 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3348
3349 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3350 &cp);
3351 } else {
3352 struct hci_cp_remote_oob_data_neg_reply cp;
3353
3354 bacpy(&cp.bdaddr, &ev->bdaddr);
3355 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3356 &cp);
3357 }
3358
3359 unlock:
3360 hci_dev_unlock(hdev);
3361 }
3362
3363 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3364 struct sk_buff *skb)
3365 {
3366 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3367 struct hci_conn *hcon, *bredr_hcon;
3368
3369 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3370 ev->status);
3371
3372 hci_dev_lock(hdev);
3373
3374 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3375 if (!hcon) {
3376 hci_dev_unlock(hdev);
3377 return;
3378 }
3379
3380 if (ev->status) {
3381 hci_conn_del(hcon);
3382 hci_dev_unlock(hdev);
3383 return;
3384 }
3385
3386 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3387
3388 hcon->state = BT_CONNECTED;
3389 bacpy(&hcon->dst, &bredr_hcon->dst);
3390
3391 hci_conn_hold(hcon);
3392 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3393 hci_conn_drop(hcon);
3394
3395 hci_conn_add_sysfs(hcon);
3396
3397 amp_physical_cfm(bredr_hcon, hcon);
3398
3399 hci_dev_unlock(hdev);
3400 }
3401
3402 static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3403 {
3404 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3405 struct hci_conn *hcon;
3406 struct hci_chan *hchan;
3407 struct amp_mgr *mgr;
3408
3409 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3410 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3411 ev->status);
3412
3413 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3414 if (!hcon)
3415 return;
3416
3417 /* Create AMP hchan */
3418 hchan = hci_chan_create(hcon);
3419 if (!hchan)
3420 return;
3421
3422 hchan->handle = le16_to_cpu(ev->handle);
3423
3424 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3425
3426 mgr = hcon->amp_mgr;
3427 if (mgr && mgr->bredr_chan) {
3428 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3429
3430 l2cap_chan_lock(bredr_chan);
3431
3432 bredr_chan->conn->mtu = hdev->block_mtu;
3433 l2cap_logical_cfm(bredr_chan, hchan, 0);
3434 hci_conn_hold(hcon);
3435
3436 l2cap_chan_unlock(bredr_chan);
3437 }
3438 }
3439
3440 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3441 struct sk_buff *skb)
3442 {
3443 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3444 struct hci_chan *hchan;
3445
3446 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3447 le16_to_cpu(ev->handle), ev->status);
3448
3449 if (ev->status)
3450 return;
3451
3452 hci_dev_lock(hdev);
3453
3454 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3455 if (!hchan)
3456 goto unlock;
3457
3458 amp_destroy_logical_link(hchan, ev->reason);
3459
3460 unlock:
3461 hci_dev_unlock(hdev);
3462 }
3463
3464 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3465 struct sk_buff *skb)
3466 {
3467 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3468 struct hci_conn *hcon;
3469
3470 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3471
3472 if (ev->status)
3473 return;
3474
3475 hci_dev_lock(hdev);
3476
3477 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3478 if (hcon) {
3479 hcon->state = BT_CLOSED;
3480 hci_conn_del(hcon);
3481 }
3482
3483 hci_dev_unlock(hdev);
3484 }
3485
3486 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3487 {
3488 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3489 struct hci_conn *conn;
3490
3491 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3492
3493 hci_dev_lock(hdev);
3494
3495 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3496 if (!conn) {
3497 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3498 if (!conn) {
3499 BT_ERR("No memory for new connection");
3500 goto unlock;
3501 }
3502
3503 conn->dst_type = ev->bdaddr_type;
3504
3505 /* The advertising parameters for own address type
3506 * define which source address and source address
3507 * type this connection has.
3508 */
3509 if (bacmp(&conn->src, BDADDR_ANY)) {
3510 conn->src_type = ADDR_LE_DEV_PUBLIC;
3511 } else {
3512 bacpy(&conn->src, &hdev->static_addr);
3513 conn->src_type = ADDR_LE_DEV_RANDOM;
3514 }
3515
3516 if (ev->role == LE_CONN_ROLE_MASTER) {
3517 conn->out = true;
3518 conn->link_mode |= HCI_LM_MASTER;
3519 }
3520 }
3521
3522 if (ev->status) {
3523 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3524 conn->dst_type, ev->status);
3525 hci_proto_connect_cfm(conn, ev->status);
3526 conn->state = BT_CLOSED;
3527 hci_conn_del(conn);
3528 goto unlock;
3529 }
3530
3531 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3532 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3533 conn->dst_type, 0, NULL, 0, NULL);
3534
3535 conn->sec_level = BT_SECURITY_LOW;
3536 conn->handle = __le16_to_cpu(ev->handle);
3537 conn->state = BT_CONNECTED;
3538
3539 if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
3540 set_bit(HCI_CONN_6LOWPAN, &conn->flags);
3541
3542 hci_conn_add_sysfs(conn);
3543
3544 hci_proto_connect_cfm(conn, ev->status);
3545
3546 unlock:
3547 hci_dev_unlock(hdev);
3548 }
3549
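/* LE Advertising Report: each report consists of the event header,
 * length-prefixed advertising data and a trailing RSSI octet. */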
3550 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3551 {
3552 u8 num_reports = skb->data[0];
3553 void *ptr = &skb->data[1];
3554 s8 rssi;
3555
3556 while (num_reports--) {
3557 struct hci_ev_le_advertising_info *ev = ptr;
3558
3559 rssi = ev->data[ev->length];
3560 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3561 NULL, rssi, 0, 1, ev->data, ev->length);
3562
3563 ptr += sizeof(*ev) + ev->length + 1;
3564 }
3565 }
3566
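/* LE Long Term Key Request: look the key up by EDIV/Rand and hand
 * it to the controller. STKs are short lived and removed once they
 * have been used. */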
3567 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3568 {
3569 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3570 struct hci_cp_le_ltk_reply cp;
3571 struct hci_cp_le_ltk_neg_reply neg;
3572 struct hci_conn *conn;
3573 struct smp_ltk *ltk;
3574
3575 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3576
3577 hci_dev_lock(hdev);
3578
3579 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3580 if (conn == NULL)
3581 goto not_found;
3582
3583 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3584 if (ltk == NULL)
3585 goto not_found;
3586
3587 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3588 cp.handle = cpu_to_le16(conn->handle);
3589
3590 if (ltk->authenticated)
3591 conn->pending_sec_level = BT_SECURITY_HIGH;
3592 else
3593 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3594
3595 conn->enc_key_size = ltk->enc_size;
3596
3597 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3598
3599 if (ltk->type & HCI_SMP_STK) {
3600 list_del(&ltk->list);
3601 kfree(ltk);
3602 }
3603
3604 hci_dev_unlock(hdev);
3605
3606 return;
3607
3608 not_found:
3609 neg.handle = ev->handle;
3610 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3611 hci_dev_unlock(hdev);
3612 }
3613
3614 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3615 {
3616 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3617
3618 skb_pull(skb, sizeof(*le_ev));
3619
3620 switch (le_ev->subevent) {
3621 case HCI_EV_LE_CONN_COMPLETE:
3622 hci_le_conn_complete_evt(hdev, skb);
3623 break;
3624
3625 case HCI_EV_LE_ADVERTISING_REPORT:
3626 hci_le_adv_report_evt(hdev, skb);
3627 break;
3628
3629 case HCI_EV_LE_LTK_REQ:
3630 hci_le_ltk_request_evt(hdev, skb);
3631 break;
3632
3633 default:
3634 break;
3635 }
3636 }
3637
3638 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3639 {
3640 struct hci_ev_channel_selected *ev = (void *) skb->data;
3641 struct hci_conn *hcon;
3642
3643 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3644
3645 skb_pull(skb, sizeof(*ev));
3646
3647 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3648 if (!hcon)
3649 return;
3650
3651 amp_read_loc_assoc_final_data(hdev, hcon);
3652 }
3653
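/* Main HCI event demultiplexer: strips the event header and
 * dispatches the skb to the matching handler above. */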
3654 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3655 {
3656 struct hci_event_hdr *hdr = (void *) skb->data;
3657 __u8 event = hdr->evt;
3658
3659 hci_dev_lock(hdev);
3660
3661 /* Received events are (currently) only needed when a request is
3662 * ongoing, so avoid unnecessary memory allocation.
3663 */
3664 if (hdev->req_status == HCI_REQ_PEND) {
3665 kfree_skb(hdev->recv_evt);
3666 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
3667 }
3668
3669 hci_dev_unlock(hdev);
3670
3671 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3672
3673 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
3674 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
3675 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
3676
3677 hci_req_cmd_complete(hdev, opcode, 0);
3678 }
3679
3680 switch (event) {
3681 case HCI_EV_INQUIRY_COMPLETE:
3682 hci_inquiry_complete_evt(hdev, skb);
3683 break;
3684
3685 case HCI_EV_INQUIRY_RESULT:
3686 hci_inquiry_result_evt(hdev, skb);
3687 break;
3688
3689 case HCI_EV_CONN_COMPLETE:
3690 hci_conn_complete_evt(hdev, skb);
3691 break;
3692
3693 case HCI_EV_CONN_REQUEST:
3694 hci_conn_request_evt(hdev, skb);
3695 break;
3696
3697 case HCI_EV_DISCONN_COMPLETE:
3698 hci_disconn_complete_evt(hdev, skb);
3699 break;
3700
3701 case HCI_EV_AUTH_COMPLETE:
3702 hci_auth_complete_evt(hdev, skb);
3703 break;
3704
3705 case HCI_EV_REMOTE_NAME:
3706 hci_remote_name_evt(hdev, skb);
3707 break;
3708
3709 case HCI_EV_ENCRYPT_CHANGE:
3710 hci_encrypt_change_evt(hdev, skb);
3711 break;
3712
3713 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3714 hci_change_link_key_complete_evt(hdev, skb);
3715 break;
3716
3717 case HCI_EV_REMOTE_FEATURES:
3718 hci_remote_features_evt(hdev, skb);
3719 break;
3720
3721 case HCI_EV_CMD_COMPLETE:
3722 hci_cmd_complete_evt(hdev, skb);
3723 break;
3724
3725 case HCI_EV_CMD_STATUS:
3726 hci_cmd_status_evt(hdev, skb);
3727 break;
3728
3729 case HCI_EV_ROLE_CHANGE:
3730 hci_role_change_evt(hdev, skb);
3731 break;
3732
3733 case HCI_EV_NUM_COMP_PKTS:
3734 hci_num_comp_pkts_evt(hdev, skb);
3735 break;
3736
3737 case HCI_EV_MODE_CHANGE:
3738 hci_mode_change_evt(hdev, skb);
3739 break;
3740
3741 case HCI_EV_PIN_CODE_REQ:
3742 hci_pin_code_request_evt(hdev, skb);
3743 break;
3744
3745 case HCI_EV_LINK_KEY_REQ:
3746 hci_link_key_request_evt(hdev, skb);
3747 break;
3748
3749 case HCI_EV_LINK_KEY_NOTIFY:
3750 hci_link_key_notify_evt(hdev, skb);
3751 break;
3752
3753 case HCI_EV_CLOCK_OFFSET:
3754 hci_clock_offset_evt(hdev, skb);
3755 break;
3756
3757 case HCI_EV_PKT_TYPE_CHANGE:
3758 hci_pkt_type_change_evt(hdev, skb);
3759 break;
3760
3761 case HCI_EV_PSCAN_REP_MODE:
3762 hci_pscan_rep_mode_evt(hdev, skb);
3763 break;
3764
3765 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3766 hci_inquiry_result_with_rssi_evt(hdev, skb);
3767 break;
3768
3769 case HCI_EV_REMOTE_EXT_FEATURES:
3770 hci_remote_ext_features_evt(hdev, skb);
3771 break;
3772
3773 case HCI_EV_SYNC_CONN_COMPLETE:
3774 hci_sync_conn_complete_evt(hdev, skb);
3775 break;
3776
3777 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3778 hci_extended_inquiry_result_evt(hdev, skb);
3779 break;
3780
3781 case HCI_EV_KEY_REFRESH_COMPLETE:
3782 hci_key_refresh_complete_evt(hdev, skb);
3783 break;
3784
3785 case HCI_EV_IO_CAPA_REQUEST:
3786 hci_io_capa_request_evt(hdev, skb);
3787 break;
3788
3789 case HCI_EV_IO_CAPA_REPLY:
3790 hci_io_capa_reply_evt(hdev, skb);
3791 break;
3792
3793 case HCI_EV_USER_CONFIRM_REQUEST:
3794 hci_user_confirm_request_evt(hdev, skb);
3795 break;
3796
3797 case HCI_EV_USER_PASSKEY_REQUEST:
3798 hci_user_passkey_request_evt(hdev, skb);
3799 break;
3800
3801 case HCI_EV_USER_PASSKEY_NOTIFY:
3802 hci_user_passkey_notify_evt(hdev, skb);
3803 break;
3804
3805 case HCI_EV_KEYPRESS_NOTIFY:
3806 hci_keypress_notify_evt(hdev, skb);
3807 break;
3808
3809 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3810 hci_simple_pair_complete_evt(hdev, skb);
3811 break;
3812
3813 case HCI_EV_REMOTE_HOST_FEATURES:
3814 hci_remote_host_features_evt(hdev, skb);
3815 break;
3816
3817 case HCI_EV_LE_META:
3818 hci_le_meta_evt(hdev, skb);
3819 break;
3820
3821 case HCI_EV_CHANNEL_SELECTED:
3822 hci_chan_selected_evt(hdev, skb);
3823 break;
3824
3825 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3826 hci_remote_oob_data_request_evt(hdev, skb);
3827 break;
3828
3829 case HCI_EV_PHY_LINK_COMPLETE:
3830 hci_phy_link_complete_evt(hdev, skb);
3831 break;
3832
3833 case HCI_EV_LOGICAL_LINK_COMPLETE:
3834 hci_loglink_complete_evt(hdev, skb);
3835 break;
3836
3837 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
3838 hci_disconn_loglink_complete_evt(hdev, skb);
3839 break;
3840
3841 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
3842 hci_disconn_phylink_complete_evt(hdev, skb);
3843 break;
3844
3845 case HCI_EV_NUM_COMP_BLOCKS:
3846 hci_num_comp_blocks_evt(hdev, skb);
3847 break;
3848
3849 default:
3850 BT_DBG("%s event 0x%2.2x", hdev->name, event);
3851 break;
3852 }
3853
3854 kfree_skb(skb);
3855 hdev->stat.evt_rx++;
3856 }