Bluetooth: Convert uses of __constant_<foo> to <foo>
[deliverable/linux.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32
33 #include "a2mp.h"
34 #include "amp.h"
35
36 /* Handle HCI Event packets */
37
38 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
39 {
40 __u8 status = *((__u8 *) skb->data);
41
42 BT_DBG("%s status 0x%2.2x", hdev->name, status);
43
44 if (status)
45 return;
46
47 clear_bit(HCI_INQUIRY, &hdev->flags);
48 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
49 wake_up_bit(&hdev->flags, HCI_INQUIRY);
50
51 hci_conn_check_pending(hdev);
52 }
53
54 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
55 {
56 __u8 status = *((__u8 *) skb->data);
57
58 BT_DBG("%s status 0x%2.2x", hdev->name, status);
59
60 if (status)
61 return;
62
63 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
64 }
65
66 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
67 {
68 __u8 status = *((__u8 *) skb->data);
69
70 BT_DBG("%s status 0x%2.2x", hdev->name, status);
71
72 if (status)
73 return;
74
75 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
76
77 hci_conn_check_pending(hdev);
78 }
79
80 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
81 struct sk_buff *skb)
82 {
83 BT_DBG("%s", hdev->name);
84 }
85
86 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
87 {
88 struct hci_rp_role_discovery *rp = (void *) skb->data;
89 struct hci_conn *conn;
90
91 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
92
93 if (rp->status)
94 return;
95
96 hci_dev_lock(hdev);
97
98 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
99 if (conn) {
100 if (rp->role)
101 conn->link_mode &= ~HCI_LM_MASTER;
102 else
103 conn->link_mode |= HCI_LM_MASTER;
104 }
105
106 hci_dev_unlock(hdev);
107 }
108
109 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
110 {
111 struct hci_rp_read_link_policy *rp = (void *) skb->data;
112 struct hci_conn *conn;
113
114 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
115
116 if (rp->status)
117 return;
118
119 hci_dev_lock(hdev);
120
121 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
122 if (conn)
123 conn->link_policy = __le16_to_cpu(rp->policy);
124
125 hci_dev_unlock(hdev);
126 }
127
128 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
129 {
130 struct hci_rp_write_link_policy *rp = (void *) skb->data;
131 struct hci_conn *conn;
132 void *sent;
133
134 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
135
136 if (rp->status)
137 return;
138
139 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
140 if (!sent)
141 return;
142
143 hci_dev_lock(hdev);
144
145 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
146 if (conn)
147 conn->link_policy = get_unaligned_le16(sent + 2);
148
149 hci_dev_unlock(hdev);
150 }
151
152 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
153 struct sk_buff *skb)
154 {
155 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
156
157 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
158
159 if (rp->status)
160 return;
161
162 hdev->link_policy = __le16_to_cpu(rp->policy);
163 }
164
165 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
166 struct sk_buff *skb)
167 {
168 __u8 status = *((__u8 *) skb->data);
169 void *sent;
170
171 BT_DBG("%s status 0x%2.2x", hdev->name, status);
172
173 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
174 if (!sent)
175 return;
176
177 if (!status)
178 hdev->link_policy = get_unaligned_le16(sent);
179 }
180
181 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
182 {
183 __u8 status = *((__u8 *) skb->data);
184
185 BT_DBG("%s status 0x%2.2x", hdev->name, status);
186
187 clear_bit(HCI_RESET, &hdev->flags);
188
189 /* Reset all non-persistent flags */
190 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
191
192 hdev->discovery.state = DISCOVERY_STOPPED;
193 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
194 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
195
196 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
197 hdev->adv_data_len = 0;
198
199 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
200 hdev->scan_rsp_data_len = 0;
201
202 hdev->ssp_debug_mode = 0;
203 }
204
205 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
206 {
207 __u8 status = *((__u8 *) skb->data);
208 void *sent;
209
210 BT_DBG("%s status 0x%2.2x", hdev->name, status);
211
212 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
213 if (!sent)
214 return;
215
216 hci_dev_lock(hdev);
217
218 if (test_bit(HCI_MGMT, &hdev->dev_flags))
219 mgmt_set_local_name_complete(hdev, sent, status);
220 else if (!status)
221 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
222
223 hci_dev_unlock(hdev);
224 }
225
226 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
227 {
228 struct hci_rp_read_local_name *rp = (void *) skb->data;
229
230 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
231
232 if (rp->status)
233 return;
234
235 if (test_bit(HCI_SETUP, &hdev->dev_flags))
236 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
237 }
238
239 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
240 {
241 __u8 status = *((__u8 *) skb->data);
242 void *sent;
243
244 BT_DBG("%s status 0x%2.2x", hdev->name, status);
245
246 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
247 if (!sent)
248 return;
249
250 if (!status) {
251 __u8 param = *((__u8 *) sent);
252
253 if (param == AUTH_ENABLED)
254 set_bit(HCI_AUTH, &hdev->flags);
255 else
256 clear_bit(HCI_AUTH, &hdev->flags);
257 }
258
259 if (test_bit(HCI_MGMT, &hdev->dev_flags))
260 mgmt_auth_enable_complete(hdev, status);
261 }
262
263 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
264 {
265 __u8 status = *((__u8 *) skb->data);
266 void *sent;
267
268 BT_DBG("%s status 0x%2.2x", hdev->name, status);
269
270 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
271 if (!sent)
272 return;
273
274 if (!status) {
275 __u8 param = *((__u8 *) sent);
276
277 if (param)
278 set_bit(HCI_ENCRYPT, &hdev->flags);
279 else
280 clear_bit(HCI_ENCRYPT, &hdev->flags);
281 }
282 }
283
284 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
285 {
286 __u8 param, status = *((__u8 *) skb->data);
287 int old_pscan, old_iscan;
288 void *sent;
289
290 BT_DBG("%s status 0x%2.2x", hdev->name, status);
291
292 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
293 if (!sent)
294 return;
295
296 param = *((__u8 *) sent);
297
298 hci_dev_lock(hdev);
299
300 if (status) {
301 mgmt_write_scan_failed(hdev, param, status);
302 hdev->discov_timeout = 0;
303 goto done;
304 }
305
306 /* We need to ensure that we set this back on if someone changed
307 * the scan mode through a raw HCI socket.
308 */
309 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
310
311 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
312 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
313
314 if (param & SCAN_INQUIRY) {
315 set_bit(HCI_ISCAN, &hdev->flags);
316 if (!old_iscan)
317 mgmt_discoverable(hdev, 1);
318 } else if (old_iscan)
319 mgmt_discoverable(hdev, 0);
320
321 if (param & SCAN_PAGE) {
322 set_bit(HCI_PSCAN, &hdev->flags);
323 if (!old_pscan)
324 mgmt_connectable(hdev, 1);
325 } else if (old_pscan)
326 mgmt_connectable(hdev, 0);
327
328 done:
329 hci_dev_unlock(hdev);
330 }
331
332 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
333 {
334 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
335
336 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
337
338 if (rp->status)
339 return;
340
341 memcpy(hdev->dev_class, rp->dev_class, 3);
342
343 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
344 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
345 }
346
347 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
348 {
349 __u8 status = *((__u8 *) skb->data);
350 void *sent;
351
352 BT_DBG("%s status 0x%2.2x", hdev->name, status);
353
354 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
355 if (!sent)
356 return;
357
358 hci_dev_lock(hdev);
359
360 if (status == 0)
361 memcpy(hdev->dev_class, sent, 3);
362
363 if (test_bit(HCI_MGMT, &hdev->dev_flags))
364 mgmt_set_class_of_dev_complete(hdev, sent, status);
365
366 hci_dev_unlock(hdev);
367 }
368
369 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
370 {
371 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
372 __u16 setting;
373
374 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
375
376 if (rp->status)
377 return;
378
379 setting = __le16_to_cpu(rp->voice_setting);
380
381 if (hdev->voice_setting == setting)
382 return;
383
384 hdev->voice_setting = setting;
385
386 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
387
388 if (hdev->notify)
389 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
390 }
391
392 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
393 struct sk_buff *skb)
394 {
395 __u8 status = *((__u8 *) skb->data);
396 __u16 setting;
397 void *sent;
398
399 BT_DBG("%s status 0x%2.2x", hdev->name, status);
400
401 if (status)
402 return;
403
404 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
405 if (!sent)
406 return;
407
408 setting = get_unaligned_le16(sent);
409
410 if (hdev->voice_setting == setting)
411 return;
412
413 hdev->voice_setting = setting;
414
415 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
416
417 if (hdev->notify)
418 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
419 }
420
421 static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
422 struct sk_buff *skb)
423 {
424 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
425
426 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
427
428 if (rp->status)
429 return;
430
431 hdev->num_iac = rp->num_iac;
432
433 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
434 }
435
436 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
437 {
438 __u8 status = *((__u8 *) skb->data);
439 struct hci_cp_write_ssp_mode *sent;
440
441 BT_DBG("%s status 0x%2.2x", hdev->name, status);
442
443 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
444 if (!sent)
445 return;
446
447 if (!status) {
448 if (sent->mode)
449 hdev->features[1][0] |= LMP_HOST_SSP;
450 else
451 hdev->features[1][0] &= ~LMP_HOST_SSP;
452 }
453
454 if (test_bit(HCI_MGMT, &hdev->dev_flags))
455 mgmt_ssp_enable_complete(hdev, sent->mode, status);
456 else if (!status) {
457 if (sent->mode)
458 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
459 else
460 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
461 }
462 }
463
464 static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
465 {
466 u8 status = *((u8 *) skb->data);
467 struct hci_cp_write_sc_support *sent;
468
469 BT_DBG("%s status 0x%2.2x", hdev->name, status);
470
471 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
472 if (!sent)
473 return;
474
475 if (!status) {
476 if (sent->support)
477 hdev->features[1][0] |= LMP_HOST_SC;
478 else
479 hdev->features[1][0] &= ~LMP_HOST_SC;
480 }
481
482 if (test_bit(HCI_MGMT, &hdev->dev_flags))
483 mgmt_sc_enable_complete(hdev, sent->support, status);
484 else if (!status) {
485 if (sent->support)
486 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
487 else
488 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
489 }
490 }
491
492 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
493 {
494 struct hci_rp_read_local_version *rp = (void *) skb->data;
495
496 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
497
498 if (rp->status)
499 return;
500
501 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
502 hdev->hci_ver = rp->hci_ver;
503 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
504 hdev->lmp_ver = rp->lmp_ver;
505 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
506 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
507 }
508 }
509
510 static void hci_cc_read_local_commands(struct hci_dev *hdev,
511 struct sk_buff *skb)
512 {
513 struct hci_rp_read_local_commands *rp = (void *) skb->data;
514
515 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
516
517 if (rp->status)
518 return;
519
520 if (test_bit(HCI_SETUP, &hdev->dev_flags))
521 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
522 }
523
524 static void hci_cc_read_local_features(struct hci_dev *hdev,
525 struct sk_buff *skb)
526 {
527 struct hci_rp_read_local_features *rp = (void *) skb->data;
528
529 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
530
531 if (rp->status)
532 return;
533
534 memcpy(hdev->features, rp->features, 8);
535
536 /* Adjust default settings according to features
537 * supported by device. */
538
539 if (hdev->features[0][0] & LMP_3SLOT)
540 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
541
542 if (hdev->features[0][0] & LMP_5SLOT)
543 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
544
545 if (hdev->features[0][1] & LMP_HV2) {
546 hdev->pkt_type |= (HCI_HV2);
547 hdev->esco_type |= (ESCO_HV2);
548 }
549
550 if (hdev->features[0][1] & LMP_HV3) {
551 hdev->pkt_type |= (HCI_HV3);
552 hdev->esco_type |= (ESCO_HV3);
553 }
554
555 if (lmp_esco_capable(hdev))
556 hdev->esco_type |= (ESCO_EV3);
557
558 if (hdev->features[0][4] & LMP_EV4)
559 hdev->esco_type |= (ESCO_EV4);
560
561 if (hdev->features[0][4] & LMP_EV5)
562 hdev->esco_type |= (ESCO_EV5);
563
564 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
565 hdev->esco_type |= (ESCO_2EV3);
566
567 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
568 hdev->esco_type |= (ESCO_3EV3);
569
570 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
571 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
572 }
573
574 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
575 struct sk_buff *skb)
576 {
577 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
578
579 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
580
581 if (rp->status)
582 return;
583
584 if (hdev->max_page < rp->max_page)
585 hdev->max_page = rp->max_page;
586
587 if (rp->page < HCI_MAX_PAGES)
588 memcpy(hdev->features[rp->page], rp->features, 8);
589 }
590
591 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
592 struct sk_buff *skb)
593 {
594 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
595
596 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
597
598 if (!rp->status)
599 hdev->flow_ctl_mode = rp->mode;
600 }
601
602 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
603 {
604 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
605
606 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
607
608 if (rp->status)
609 return;
610
611 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
612 hdev->sco_mtu = rp->sco_mtu;
613 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
614 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
615
616 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
617 hdev->sco_mtu = 64;
618 hdev->sco_pkts = 8;
619 }
620
621 hdev->acl_cnt = hdev->acl_pkts;
622 hdev->sco_cnt = hdev->sco_pkts;
623
624 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
625 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
626 }
627
628 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
629 {
630 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
631
632 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
633
634 if (!rp->status)
635 bacpy(&hdev->bdaddr, &rp->bdaddr);
636 }
637
638 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
639 struct sk_buff *skb)
640 {
641 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
642
643 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
644
645 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
646 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
647 hdev->page_scan_window = __le16_to_cpu(rp->window);
648 }
649 }
650
651 static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
652 struct sk_buff *skb)
653 {
654 u8 status = *((u8 *) skb->data);
655 struct hci_cp_write_page_scan_activity *sent;
656
657 BT_DBG("%s status 0x%2.2x", hdev->name, status);
658
659 if (status)
660 return;
661
662 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
663 if (!sent)
664 return;
665
666 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
667 hdev->page_scan_window = __le16_to_cpu(sent->window);
668 }
669
670 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
671 struct sk_buff *skb)
672 {
673 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
674
675 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
676
677 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
678 hdev->page_scan_type = rp->type;
679 }
680
681 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
682 struct sk_buff *skb)
683 {
684 u8 status = *((u8 *) skb->data);
685 u8 *type;
686
687 BT_DBG("%s status 0x%2.2x", hdev->name, status);
688
689 if (status)
690 return;
691
692 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
693 if (type)
694 hdev->page_scan_type = *type;
695 }
696
697 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
698 struct sk_buff *skb)
699 {
700 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
701
702 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
703
704 if (rp->status)
705 return;
706
707 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
708 hdev->block_len = __le16_to_cpu(rp->block_len);
709 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
710
711 hdev->block_cnt = hdev->num_blocks;
712
713 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
714 hdev->block_cnt, hdev->block_len);
715 }
716
717 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
718 struct sk_buff *skb)
719 {
720 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
721
722 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
723
724 if (rp->status)
725 goto a2mp_rsp;
726
727 hdev->amp_status = rp->amp_status;
728 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
729 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
730 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
731 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
732 hdev->amp_type = rp->amp_type;
733 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
734 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
735 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
736 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
737
738 a2mp_rsp:
739 a2mp_send_getinfo_rsp(hdev);
740 }
741
742 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
743 struct sk_buff *skb)
744 {
745 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
746 struct amp_assoc *assoc = &hdev->loc_assoc;
747 size_t rem_len, frag_len;
748
749 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
750
751 if (rp->status)
752 goto a2mp_rsp;
753
754 frag_len = skb->len - sizeof(*rp);
755 rem_len = __le16_to_cpu(rp->rem_len);
756
757 if (rem_len > frag_len) {
758 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
759
760 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
761 assoc->offset += frag_len;
762
763 /* Read other fragments */
764 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
765
766 return;
767 }
768
769 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
770 assoc->len = assoc->offset + rem_len;
771 assoc->offset = 0;
772
773 a2mp_rsp:
774 /* Send A2MP Rsp when all fragments are received */
775 a2mp_send_getampassoc_rsp(hdev, rp->status);
776 a2mp_send_create_phy_link_req(hdev, rp->status);
777 }
778
779 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
780 struct sk_buff *skb)
781 {
782 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
783
784 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
785
786 if (!rp->status)
787 hdev->inq_tx_power = rp->tx_power;
788 }
789
790 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
791 {
792 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
793 struct hci_cp_pin_code_reply *cp;
794 struct hci_conn *conn;
795
796 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
797
798 hci_dev_lock(hdev);
799
800 if (test_bit(HCI_MGMT, &hdev->dev_flags))
801 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
802
803 if (rp->status)
804 goto unlock;
805
806 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
807 if (!cp)
808 goto unlock;
809
810 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
811 if (conn)
812 conn->pin_length = cp->pin_len;
813
814 unlock:
815 hci_dev_unlock(hdev);
816 }
817
818 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
819 {
820 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
821
822 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
823
824 hci_dev_lock(hdev);
825
826 if (test_bit(HCI_MGMT, &hdev->dev_flags))
827 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
828 rp->status);
829
830 hci_dev_unlock(hdev);
831 }
832
833 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
834 struct sk_buff *skb)
835 {
836 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
837
838 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
839
840 if (rp->status)
841 return;
842
843 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
844 hdev->le_pkts = rp->le_max_pkt;
845
846 hdev->le_cnt = hdev->le_pkts;
847
848 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
849 }
850
851 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
852 struct sk_buff *skb)
853 {
854 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
855
856 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
857
858 if (!rp->status)
859 memcpy(hdev->le_features, rp->features, 8);
860 }
861
862 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
863 struct sk_buff *skb)
864 {
865 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
866
867 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
868
869 if (!rp->status)
870 hdev->adv_tx_power = rp->tx_power;
871 }
872
873 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
874 {
875 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
876
877 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
878
879 hci_dev_lock(hdev);
880
881 if (test_bit(HCI_MGMT, &hdev->dev_flags))
882 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
883 rp->status);
884
885 hci_dev_unlock(hdev);
886 }
887
888 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
889 struct sk_buff *skb)
890 {
891 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
892
893 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
894
895 hci_dev_lock(hdev);
896
897 if (test_bit(HCI_MGMT, &hdev->dev_flags))
898 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
899 ACL_LINK, 0, rp->status);
900
901 hci_dev_unlock(hdev);
902 }
903
904 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
905 {
906 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
907
908 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
909
910 hci_dev_lock(hdev);
911
912 if (test_bit(HCI_MGMT, &hdev->dev_flags))
913 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
914 0, rp->status);
915
916 hci_dev_unlock(hdev);
917 }
918
919 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
920 struct sk_buff *skb)
921 {
922 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
923
924 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
925
926 hci_dev_lock(hdev);
927
928 if (test_bit(HCI_MGMT, &hdev->dev_flags))
929 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
930 ACL_LINK, 0, rp->status);
931
932 hci_dev_unlock(hdev);
933 }
934
935 static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
936 struct sk_buff *skb)
937 {
938 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
939
940 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
941
942 hci_dev_lock(hdev);
943 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
944 NULL, NULL, rp->status);
945 hci_dev_unlock(hdev);
946 }
947
948 static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
949 struct sk_buff *skb)
950 {
951 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
952
953 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
954
955 hci_dev_lock(hdev);
956 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
957 rp->hash256, rp->randomizer256,
958 rp->status);
959 hci_dev_unlock(hdev);
960 }
961
962
963 static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
964 {
965 __u8 status = *((__u8 *) skb->data);
966 bdaddr_t *sent;
967
968 BT_DBG("%s status 0x%2.2x", hdev->name, status);
969
970 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
971 if (!sent)
972 return;
973
974 hci_dev_lock(hdev);
975
976 if (!status)
977 bacpy(&hdev->random_addr, sent);
978
979 hci_dev_unlock(hdev);
980 }
981
982 static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
983 {
984 __u8 *sent, status = *((__u8 *) skb->data);
985
986 BT_DBG("%s status 0x%2.2x", hdev->name, status);
987
988 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
989 if (!sent)
990 return;
991
992 hci_dev_lock(hdev);
993
994 if (!status)
995 mgmt_advertising(hdev, *sent);
996
997 hci_dev_unlock(hdev);
998 }
999
1000 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1001 struct sk_buff *skb)
1002 {
1003 struct hci_cp_le_set_scan_enable *cp;
1004 __u8 status = *((__u8 *) skb->data);
1005
1006 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1007
1008 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1009 if (!cp)
1010 return;
1011
1012 if (status)
1013 return;
1014
1015 switch (cp->enable) {
1016 case LE_SCAN_ENABLE:
1017 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1018 break;
1019
1020 case LE_SCAN_DISABLE:
1021 /* Cancel this timer so that we don't try to disable scanning
1022 * when it's already disabled.
1023 */
1024 cancel_delayed_work(&hdev->le_scan_disable);
1025
1026 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1027 /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
1028 * interrupted scanning due to a connect request. Mark
1029 * therefore discovery as stopped.
1030 */
1031 if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
1032 &hdev->dev_flags))
1033 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1034 break;
1035
1036 default:
1037 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1038 break;
1039 }
1040 }
1041
1042 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1043 struct sk_buff *skb)
1044 {
1045 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1046
1047 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1048
1049 if (!rp->status)
1050 hdev->le_white_list_size = rp->size;
1051 }
1052
1053 static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1054 struct sk_buff *skb)
1055 {
1056 __u8 status = *((__u8 *) skb->data);
1057
1058 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1059
1060 if (!status)
1061 hci_white_list_clear(hdev);
1062 }
1063
1064 static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
1065 struct sk_buff *skb)
1066 {
1067 struct hci_cp_le_add_to_white_list *sent;
1068 __u8 status = *((__u8 *) skb->data);
1069
1070 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1071
1072 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
1073 if (!sent)
1074 return;
1075
1076 if (!status)
1077 hci_white_list_add(hdev, &sent->bdaddr, sent->bdaddr_type);
1078 }
1079
1080 static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
1081 struct sk_buff *skb)
1082 {
1083 struct hci_cp_le_del_from_white_list *sent;
1084 __u8 status = *((__u8 *) skb->data);
1085
1086 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1087
1088 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
1089 if (!sent)
1090 return;
1091
1092 if (!status)
1093 hci_white_list_del(hdev, &sent->bdaddr, sent->bdaddr_type);
1094 }
1095
1096 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1097 struct sk_buff *skb)
1098 {
1099 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1100
1101 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1102
1103 if (!rp->status)
1104 memcpy(hdev->le_states, rp->le_states, 8);
1105 }
1106
1107 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1108 struct sk_buff *skb)
1109 {
1110 struct hci_cp_write_le_host_supported *sent;
1111 __u8 status = *((__u8 *) skb->data);
1112
1113 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1114
1115 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1116 if (!sent)
1117 return;
1118
1119 if (!status) {
1120 if (sent->le) {
1121 hdev->features[1][0] |= LMP_HOST_LE;
1122 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1123 } else {
1124 hdev->features[1][0] &= ~LMP_HOST_LE;
1125 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1126 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1127 }
1128
1129 if (sent->simul)
1130 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1131 else
1132 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1133 }
1134 }
1135
1136 static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
1137 {
1138 struct hci_cp_le_set_adv_param *cp;
1139 u8 status = *((u8 *) skb->data);
1140
1141 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1142
1143 if (status)
1144 return;
1145
1146 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
1147 if (!cp)
1148 return;
1149
1150 hci_dev_lock(hdev);
1151 hdev->adv_addr_type = cp->own_address_type;
1152 hci_dev_unlock(hdev);
1153 }
1154
1155 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1156 struct sk_buff *skb)
1157 {
1158 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1159
1160 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1161 hdev->name, rp->status, rp->phy_handle);
1162
1163 if (rp->status)
1164 return;
1165
1166 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1167 }
1168
1169 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1170 {
1171 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1172
1173 if (status) {
1174 hci_conn_check_pending(hdev);
1175 return;
1176 }
1177
1178 set_bit(HCI_INQUIRY, &hdev->flags);
1179 }
1180
/* Handle the command-status event for HCI_OP_CREATE_CONN.
 *
 * On failure either retry (transient status 0x0c) or tear down the
 * pending ACL connection; on success make sure a connection object
 * exists for the outgoing link.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Parameters of the command we sent; bail out if it wasn't ours. */
	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c (Command Disallowed) is treated as
			 * transient: keep the connection around for up to
			 * two attempts before giving up.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		/* Command accepted: create the outgoing connection object
		 * if none exists yet for this address.
		 */
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1220
1221 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1222 {
1223 struct hci_cp_add_sco *cp;
1224 struct hci_conn *acl, *sco;
1225 __u16 handle;
1226
1227 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1228
1229 if (!status)
1230 return;
1231
1232 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1233 if (!cp)
1234 return;
1235
1236 handle = __le16_to_cpu(cp->handle);
1237
1238 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1239
1240 hci_dev_lock(hdev);
1241
1242 acl = hci_conn_hash_lookup_handle(hdev, handle);
1243 if (acl) {
1244 sco = acl->link;
1245 if (sco) {
1246 sco->state = BT_CLOSED;
1247
1248 hci_proto_connect_cfm(sco, status);
1249 hci_conn_del(sco);
1250 }
1251 }
1252
1253 hci_dev_unlock(hdev);
1254 }
1255
1256 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1257 {
1258 struct hci_cp_auth_requested *cp;
1259 struct hci_conn *conn;
1260
1261 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1262
1263 if (!status)
1264 return;
1265
1266 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1267 if (!cp)
1268 return;
1269
1270 hci_dev_lock(hdev);
1271
1272 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1273 if (conn) {
1274 if (conn->state == BT_CONFIG) {
1275 hci_proto_connect_cfm(conn, status);
1276 hci_conn_drop(conn);
1277 }
1278 }
1279
1280 hci_dev_unlock(hdev);
1281 }
1282
1283 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1284 {
1285 struct hci_cp_set_conn_encrypt *cp;
1286 struct hci_conn *conn;
1287
1288 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1289
1290 if (!status)
1291 return;
1292
1293 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1294 if (!cp)
1295 return;
1296
1297 hci_dev_lock(hdev);
1298
1299 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1300 if (conn) {
1301 if (conn->state == BT_CONFIG) {
1302 hci_proto_connect_cfm(conn, status);
1303 hci_conn_drop(conn);
1304 }
1305 }
1306
1307 hci_dev_unlock(hdev);
1308 }
1309
1310 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1311 struct hci_conn *conn)
1312 {
1313 if (conn->state != BT_CONFIG || !conn->out)
1314 return 0;
1315
1316 if (conn->pending_sec_level == BT_SECURITY_SDP)
1317 return 0;
1318
1319 /* Only request authentication for SSP connections or non-SSP
1320 * devices with sec_level MEDIUM or HIGH or if MITM protection
1321 * is requested.
1322 */
1323 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1324 conn->pending_sec_level != BT_SECURITY_HIGH &&
1325 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1326 return 0;
1327
1328 return 1;
1329 }
1330
1331 static int hci_resolve_name(struct hci_dev *hdev,
1332 struct inquiry_entry *e)
1333 {
1334 struct hci_cp_remote_name_req cp;
1335
1336 memset(&cp, 0, sizeof(cp));
1337
1338 bacpy(&cp.bdaddr, &e->data.bdaddr);
1339 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1340 cp.pscan_mode = e->data.pscan_mode;
1341 cp.clock_offset = e->data.clock_offset;
1342
1343 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1344 }
1345
1346 static bool hci_resolve_next_name(struct hci_dev *hdev)
1347 {
1348 struct discovery_state *discov = &hdev->discovery;
1349 struct inquiry_entry *e;
1350
1351 if (list_empty(&discov->resolve))
1352 return false;
1353
1354 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1355 if (!e)
1356 return false;
1357
1358 if (hci_resolve_name(hdev, e) == 0) {
1359 e->name_state = NAME_PENDING;
1360 return true;
1361 }
1362
1363 return false;
1364 }
1365
/* Deliver a remote name result (name == NULL on failure) to mgmt and
 * advance the discovery state machine: resolve the next pending name
 * or mark discovery as stopped.  @conn may be NULL.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* First connected-event for this link: report it together with
	 * whatever name information we have.
	 */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of
	 * which are pending, there is no need to continue resolving a next
	 * name as it will be done upon receiving another Remote Name Request
	 * Complete Event.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* Try the next entry; if one was started, discovery continues. */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1408
/* Handle the command-status event for HCI_OP_REMOTE_NAME_REQ.
 *
 * On failure, report the unresolved name to mgmt and, for an outgoing
 * connection in configuration, start the authentication that was
 * deferred until after the name request.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* NULL name signals a failed resolution to the mgmt layer. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Only one authentication request may be outstanding per link. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1449
1450 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1451 {
1452 struct hci_cp_read_remote_features *cp;
1453 struct hci_conn *conn;
1454
1455 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1456
1457 if (!status)
1458 return;
1459
1460 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1461 if (!cp)
1462 return;
1463
1464 hci_dev_lock(hdev);
1465
1466 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1467 if (conn) {
1468 if (conn->state == BT_CONFIG) {
1469 hci_proto_connect_cfm(conn, status);
1470 hci_conn_drop(conn);
1471 }
1472 }
1473
1474 hci_dev_unlock(hdev);
1475 }
1476
1477 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1478 {
1479 struct hci_cp_read_remote_ext_features *cp;
1480 struct hci_conn *conn;
1481
1482 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1483
1484 if (!status)
1485 return;
1486
1487 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1488 if (!cp)
1489 return;
1490
1491 hci_dev_lock(hdev);
1492
1493 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1494 if (conn) {
1495 if (conn->state == BT_CONFIG) {
1496 hci_proto_connect_cfm(conn, status);
1497 hci_conn_drop(conn);
1498 }
1499 }
1500
1501 hci_dev_unlock(hdev);
1502 }
1503
1504 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1505 {
1506 struct hci_cp_setup_sync_conn *cp;
1507 struct hci_conn *acl, *sco;
1508 __u16 handle;
1509
1510 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1511
1512 if (!status)
1513 return;
1514
1515 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1516 if (!cp)
1517 return;
1518
1519 handle = __le16_to_cpu(cp->handle);
1520
1521 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1522
1523 hci_dev_lock(hdev);
1524
1525 acl = hci_conn_hash_lookup_handle(hdev, handle);
1526 if (acl) {
1527 sco = acl->link;
1528 if (sco) {
1529 sco->state = BT_CLOSED;
1530
1531 hci_proto_connect_cfm(sco, status);
1532 hci_conn_del(sco);
1533 }
1534 }
1535
1536 hci_dev_unlock(hdev);
1537 }
1538
1539 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1540 {
1541 struct hci_cp_sniff_mode *cp;
1542 struct hci_conn *conn;
1543
1544 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1545
1546 if (!status)
1547 return;
1548
1549 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1550 if (!cp)
1551 return;
1552
1553 hci_dev_lock(hdev);
1554
1555 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1556 if (conn) {
1557 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1558
1559 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1560 hci_sco_setup(conn, status);
1561 }
1562
1563 hci_dev_unlock(hdev);
1564 }
1565
1566 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1567 {
1568 struct hci_cp_exit_sniff_mode *cp;
1569 struct hci_conn *conn;
1570
1571 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1572
1573 if (!status)
1574 return;
1575
1576 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1577 if (!cp)
1578 return;
1579
1580 hci_dev_lock(hdev);
1581
1582 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1583 if (conn) {
1584 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1585
1586 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1587 hci_sco_setup(conn, status);
1588 }
1589
1590 hci_dev_unlock(hdev);
1591 }
1592
1593 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1594 {
1595 struct hci_cp_disconnect *cp;
1596 struct hci_conn *conn;
1597
1598 if (!status)
1599 return;
1600
1601 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1602 if (!cp)
1603 return;
1604
1605 hci_dev_lock(hdev);
1606
1607 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1608 if (conn)
1609 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1610 conn->dst_type, status);
1611
1612 hci_dev_unlock(hdev);
1613 }
1614
1615 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1616 {
1617 struct hci_cp_create_phy_link *cp;
1618
1619 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1620
1621 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1622 if (!cp)
1623 return;
1624
1625 hci_dev_lock(hdev);
1626
1627 if (status) {
1628 struct hci_conn *hcon;
1629
1630 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1631 if (hcon)
1632 hci_conn_del(hcon);
1633 } else {
1634 amp_write_remote_assoc(hdev, cp->phy_handle);
1635 }
1636
1637 hci_dev_unlock(hdev);
1638 }
1639
1640 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1641 {
1642 struct hci_cp_accept_phy_link *cp;
1643
1644 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1645
1646 if (status)
1647 return;
1648
1649 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1650 if (!cp)
1651 return;
1652
1653 amp_write_remote_assoc(hdev, cp->phy_handle);
1654 }
1655
/* Handle the command-status event for HCI_OP_LE_CREATE_CONN.
 *
 * On success, record the initiator/responder address information that
 * SMP needs, and arm a connection timeout for attempts that do not go
 * through the white list.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   HCI_LE_CONN_TIMEOUT);

unlock:
	hci_dev_unlock(hdev);
}
1706
/* Inquiry Complete event: clear HCI_INQUIRY, wake anyone waiting on
 * that bit and, when mgmt discovery is active, either move on to name
 * resolution or mark discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	/* The discovery state machine only applies with mgmt in use. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	/* No names left to resolve: discovery is done. */
	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1747
1748 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1749 {
1750 struct inquiry_data data;
1751 struct inquiry_info *info = (void *) (skb->data + 1);
1752 int num_rsp = *((__u8 *) skb->data);
1753
1754 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1755
1756 if (!num_rsp)
1757 return;
1758
1759 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1760 return;
1761
1762 hci_dev_lock(hdev);
1763
1764 for (; num_rsp; num_rsp--, info++) {
1765 bool name_known, ssp;
1766
1767 bacpy(&data.bdaddr, &info->bdaddr);
1768 data.pscan_rep_mode = info->pscan_rep_mode;
1769 data.pscan_period_mode = info->pscan_period_mode;
1770 data.pscan_mode = info->pscan_mode;
1771 memcpy(data.dev_class, info->dev_class, 3);
1772 data.clock_offset = info->clock_offset;
1773 data.rssi = 0x00;
1774 data.ssp_mode = 0x00;
1775
1776 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1777 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1778 info->dev_class, 0, !name_known, ssp, NULL,
1779 0);
1780 }
1781
1782 hci_dev_unlock(hdev);
1783 }
1784
/* Connection Complete event: finalize or tear down a BR/EDR ACL, SCO
 * or eSCO connection attempt.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may answer an eSCO setup attempt: retry
		 * the lookup as ESCO_LINK and downgrade the link type.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy (non-SSP) links without a stored
			 * link key get the longer pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		/* Inherit device-wide auth/encrypt policy into the link. */
		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		/* Successful ACL links are confirmed later (see the
		 * remote-features handling below in this file).
		 */
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
1866
/* Connection Request event: accept or reject an incoming BR/EDR
 * connection depending on the link policy, blacklist and what the
 * protocol layer indicated via hci_proto_connect_ind().
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layer veto or defer the connection. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached class of device if we know the peer. */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK ||
		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
			struct hci_cp_accept_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else if (!(flags & HCI_PROTO_DEFER)) {
			/* Synchronous (eSCO capable) accept with fixed
			 * bandwidth/latency parameters.
			 */
			struct hci_cp_accept_sync_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
			cp.max_latency    = cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		} else {
			/* Deferred: let the protocol layer decide later. */
			conn->state = BT_CONNECT2;
			hci_proto_connect_cfm(conn, 0);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
1948
1949 static u8 hci_to_mgmt_reason(u8 err)
1950 {
1951 switch (err) {
1952 case HCI_ERROR_CONNECTION_TIMEOUT:
1953 return MGMT_DEV_DISCONN_TIMEOUT;
1954 case HCI_ERROR_REMOTE_USER_TERM:
1955 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1956 case HCI_ERROR_REMOTE_POWER_OFF:
1957 return MGMT_DEV_DISCONN_REMOTE;
1958 case HCI_ERROR_LOCAL_HOST_TERM:
1959 return MGMT_DEV_DISCONN_LOCAL_HOST;
1960 default:
1961 return MGMT_DEV_DISCONN_UNKNOWN;
1962 }
1963 }
1964
/* Disconnection Complete event: tear down the connection object,
 * notify mgmt, honour auto-connect parameters and re-enable
 * advertising where required.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report a device disconnect if mgmt saw it connect. */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK && conn->flush_key)
		hci_remove_link_key(hdev, &conn->dst);

	/* Requeue the device for background connection when its stored
	 * parameters ask for it.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_ALWAYS:
			hci_pend_le_conn_add(hdev, &conn->dst, conn->dst_type);
			break;

		default:
			break;
		}
	}

	/* Remember the link type before the connection object is freed. */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2035
/* Authentication Complete event: update the link security state and
 * continue with encryption or the protocol-layer confirmation.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Re-authenticating a legacy (non-SSP) link does not raise
		 * its security level; only log it.
		 */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* Successful SSP authentication during setup is followed
		 * by turning on encryption before confirming the link.
		 */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* An encryption request was queued behind this authentication. */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2101
/* Remote Name Request Complete event: pass the result to the pending
 * name handling and, for an outgoing connection in configuration,
 * kick off the deferred authentication.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	/* NULL name signals a failed resolution to the mgmt layer. */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Only one authentication request may be outstanding per link. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2140
/* Encryption Change event: track the link's encryption state and
 * security level, then confirm to the protocol layer.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			conn->link_mode |= HCI_LM_AUTH;
			conn->link_mode |= HCI_LM_ENCRYPT;
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				conn->link_mode |= HCI_LM_FIPS;

			/* encrypt == 0x02 on ACL, or any LE encryption,
			 * means AES-CCM is in use.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			conn->link_mode &= ~HCI_LM_ENCRYPT;
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed encryption change on an established link is fatal:
	 * disconnect with an authentication failure.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2194
2195 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2196 struct sk_buff *skb)
2197 {
2198 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2199 struct hci_conn *conn;
2200
2201 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2202
2203 hci_dev_lock(hdev);
2204
2205 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2206 if (conn) {
2207 if (!ev->status)
2208 conn->link_mode |= HCI_LM_SECURE;
2209
2210 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2211
2212 hci_key_change_cfm(conn, ev->status);
2213 }
2214
2215 hci_dev_unlock(hdev);
2216 }
2217
/* Read Remote Supported Features complete event: store the feature
 * page and continue connection setup (extended features, remote name,
 * authentication or final confirmation).
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* The setup chain below only applies during configuration. */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* If both sides support SSP, fetch extended feature page 1 next
	 * and finish setup from that event instead.
	 */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Resolve the remote name before reporting the connection, or
	 * report it right away if it is already mgmt-connected.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2267
2268 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2269 {
2270 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2271 u8 status = skb->data[sizeof(*ev)];
2272 __u16 opcode;
2273
2274 skb_pull(skb, sizeof(*ev));
2275
2276 opcode = __le16_to_cpu(ev->opcode);
2277
2278 switch (opcode) {
2279 case HCI_OP_INQUIRY_CANCEL:
2280 hci_cc_inquiry_cancel(hdev, skb);
2281 break;
2282
2283 case HCI_OP_PERIODIC_INQ:
2284 hci_cc_periodic_inq(hdev, skb);
2285 break;
2286
2287 case HCI_OP_EXIT_PERIODIC_INQ:
2288 hci_cc_exit_periodic_inq(hdev, skb);
2289 break;
2290
2291 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2292 hci_cc_remote_name_req_cancel(hdev, skb);
2293 break;
2294
2295 case HCI_OP_ROLE_DISCOVERY:
2296 hci_cc_role_discovery(hdev, skb);
2297 break;
2298
2299 case HCI_OP_READ_LINK_POLICY:
2300 hci_cc_read_link_policy(hdev, skb);
2301 break;
2302
2303 case HCI_OP_WRITE_LINK_POLICY:
2304 hci_cc_write_link_policy(hdev, skb);
2305 break;
2306
2307 case HCI_OP_READ_DEF_LINK_POLICY:
2308 hci_cc_read_def_link_policy(hdev, skb);
2309 break;
2310
2311 case HCI_OP_WRITE_DEF_LINK_POLICY:
2312 hci_cc_write_def_link_policy(hdev, skb);
2313 break;
2314
2315 case HCI_OP_RESET:
2316 hci_cc_reset(hdev, skb);
2317 break;
2318
2319 case HCI_OP_WRITE_LOCAL_NAME:
2320 hci_cc_write_local_name(hdev, skb);
2321 break;
2322
2323 case HCI_OP_READ_LOCAL_NAME:
2324 hci_cc_read_local_name(hdev, skb);
2325 break;
2326
2327 case HCI_OP_WRITE_AUTH_ENABLE:
2328 hci_cc_write_auth_enable(hdev, skb);
2329 break;
2330
2331 case HCI_OP_WRITE_ENCRYPT_MODE:
2332 hci_cc_write_encrypt_mode(hdev, skb);
2333 break;
2334
2335 case HCI_OP_WRITE_SCAN_ENABLE:
2336 hci_cc_write_scan_enable(hdev, skb);
2337 break;
2338
2339 case HCI_OP_READ_CLASS_OF_DEV:
2340 hci_cc_read_class_of_dev(hdev, skb);
2341 break;
2342
2343 case HCI_OP_WRITE_CLASS_OF_DEV:
2344 hci_cc_write_class_of_dev(hdev, skb);
2345 break;
2346
2347 case HCI_OP_READ_VOICE_SETTING:
2348 hci_cc_read_voice_setting(hdev, skb);
2349 break;
2350
2351 case HCI_OP_WRITE_VOICE_SETTING:
2352 hci_cc_write_voice_setting(hdev, skb);
2353 break;
2354
2355 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2356 hci_cc_read_num_supported_iac(hdev, skb);
2357 break;
2358
2359 case HCI_OP_WRITE_SSP_MODE:
2360 hci_cc_write_ssp_mode(hdev, skb);
2361 break;
2362
2363 case HCI_OP_WRITE_SC_SUPPORT:
2364 hci_cc_write_sc_support(hdev, skb);
2365 break;
2366
2367 case HCI_OP_READ_LOCAL_VERSION:
2368 hci_cc_read_local_version(hdev, skb);
2369 break;
2370
2371 case HCI_OP_READ_LOCAL_COMMANDS:
2372 hci_cc_read_local_commands(hdev, skb);
2373 break;
2374
2375 case HCI_OP_READ_LOCAL_FEATURES:
2376 hci_cc_read_local_features(hdev, skb);
2377 break;
2378
2379 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2380 hci_cc_read_local_ext_features(hdev, skb);
2381 break;
2382
2383 case HCI_OP_READ_BUFFER_SIZE:
2384 hci_cc_read_buffer_size(hdev, skb);
2385 break;
2386
2387 case HCI_OP_READ_BD_ADDR:
2388 hci_cc_read_bd_addr(hdev, skb);
2389 break;
2390
2391 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2392 hci_cc_read_page_scan_activity(hdev, skb);
2393 break;
2394
2395 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2396 hci_cc_write_page_scan_activity(hdev, skb);
2397 break;
2398
2399 case HCI_OP_READ_PAGE_SCAN_TYPE:
2400 hci_cc_read_page_scan_type(hdev, skb);
2401 break;
2402
2403 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2404 hci_cc_write_page_scan_type(hdev, skb);
2405 break;
2406
2407 case HCI_OP_READ_DATA_BLOCK_SIZE:
2408 hci_cc_read_data_block_size(hdev, skb);
2409 break;
2410
2411 case HCI_OP_READ_FLOW_CONTROL_MODE:
2412 hci_cc_read_flow_control_mode(hdev, skb);
2413 break;
2414
2415 case HCI_OP_READ_LOCAL_AMP_INFO:
2416 hci_cc_read_local_amp_info(hdev, skb);
2417 break;
2418
2419 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2420 hci_cc_read_local_amp_assoc(hdev, skb);
2421 break;
2422
2423 case HCI_OP_READ_INQ_RSP_TX_POWER:
2424 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2425 break;
2426
2427 case HCI_OP_PIN_CODE_REPLY:
2428 hci_cc_pin_code_reply(hdev, skb);
2429 break;
2430
2431 case HCI_OP_PIN_CODE_NEG_REPLY:
2432 hci_cc_pin_code_neg_reply(hdev, skb);
2433 break;
2434
2435 case HCI_OP_READ_LOCAL_OOB_DATA:
2436 hci_cc_read_local_oob_data(hdev, skb);
2437 break;
2438
2439 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2440 hci_cc_read_local_oob_ext_data(hdev, skb);
2441 break;
2442
2443 case HCI_OP_LE_READ_BUFFER_SIZE:
2444 hci_cc_le_read_buffer_size(hdev, skb);
2445 break;
2446
2447 case HCI_OP_LE_READ_LOCAL_FEATURES:
2448 hci_cc_le_read_local_features(hdev, skb);
2449 break;
2450
2451 case HCI_OP_LE_READ_ADV_TX_POWER:
2452 hci_cc_le_read_adv_tx_power(hdev, skb);
2453 break;
2454
2455 case HCI_OP_USER_CONFIRM_REPLY:
2456 hci_cc_user_confirm_reply(hdev, skb);
2457 break;
2458
2459 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2460 hci_cc_user_confirm_neg_reply(hdev, skb);
2461 break;
2462
2463 case HCI_OP_USER_PASSKEY_REPLY:
2464 hci_cc_user_passkey_reply(hdev, skb);
2465 break;
2466
2467 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2468 hci_cc_user_passkey_neg_reply(hdev, skb);
2469 break;
2470
2471 case HCI_OP_LE_SET_RANDOM_ADDR:
2472 hci_cc_le_set_random_addr(hdev, skb);
2473 break;
2474
2475 case HCI_OP_LE_SET_ADV_ENABLE:
2476 hci_cc_le_set_adv_enable(hdev, skb);
2477 break;
2478
2479 case HCI_OP_LE_SET_SCAN_ENABLE:
2480 hci_cc_le_set_scan_enable(hdev, skb);
2481 break;
2482
2483 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2484 hci_cc_le_read_white_list_size(hdev, skb);
2485 break;
2486
2487 case HCI_OP_LE_CLEAR_WHITE_LIST:
2488 hci_cc_le_clear_white_list(hdev, skb);
2489 break;
2490
2491 case HCI_OP_LE_ADD_TO_WHITE_LIST:
2492 hci_cc_le_add_to_white_list(hdev, skb);
2493 break;
2494
2495 case HCI_OP_LE_DEL_FROM_WHITE_LIST:
2496 hci_cc_le_del_from_white_list(hdev, skb);
2497 break;
2498
2499 case HCI_OP_LE_READ_SUPPORTED_STATES:
2500 hci_cc_le_read_supported_states(hdev, skb);
2501 break;
2502
2503 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2504 hci_cc_write_le_host_supported(hdev, skb);
2505 break;
2506
2507 case HCI_OP_LE_SET_ADV_PARAM:
2508 hci_cc_set_adv_param(hdev, skb);
2509 break;
2510
2511 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2512 hci_cc_write_remote_amp_assoc(hdev, skb);
2513 break;
2514
2515 default:
2516 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2517 break;
2518 }
2519
2520 if (opcode != HCI_OP_NOP)
2521 del_timer(&hdev->cmd_timer);
2522
2523 hci_req_cmd_complete(hdev, opcode, status);
2524
2525 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2526 atomic_set(&hdev->cmd_cnt, 1);
2527 if (!skb_queue_empty(&hdev->cmd_q))
2528 queue_work(hdev->workqueue, &hdev->cmd_work);
2529 }
2530 }
2531
/* Handle an HCI Command Status event: dispatch to the matching
 * per-opcode command-status handler, cancel the command timeout and,
 * when the controller has signalled room for more commands, restart
 * command queue processing.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* Cancel the command timeout for any real command. */
	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Complete the pending HCI request either when the command
	 * failed, or when no further event is expected for it.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* Controller indicated it can accept more commands; restart
	 * queue processing unless a reset is in progress.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2620
2621 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2622 {
2623 struct hci_ev_role_change *ev = (void *) skb->data;
2624 struct hci_conn *conn;
2625
2626 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2627
2628 hci_dev_lock(hdev);
2629
2630 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2631 if (conn) {
2632 if (!ev->status) {
2633 if (ev->role)
2634 conn->link_mode &= ~HCI_LM_MASTER;
2635 else
2636 conn->link_mode |= HCI_LM_MASTER;
2637 }
2638
2639 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2640
2641 hci_role_switch_cfm(conn, ev->status, ev->role);
2642 }
2643
2644 hci_dev_unlock(hdev);
2645 }
2646
/* Handle a Number of Completed Packets event: for each reported
 * connection handle, return the completed packets to the matching
 * per-link-type quota and restart TX processing.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only valid with packet-based flow control. */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the packet really carries num_hndl entries. */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		/* Credit the packets back, clamping each counter to its
		 * controller-reported maximum.
		 */
		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* LE links share the ACL quota when the controller
			 * reports no dedicated LE buffers.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2712
2713 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2714 __u16 handle)
2715 {
2716 struct hci_chan *chan;
2717
2718 switch (hdev->dev_type) {
2719 case HCI_BREDR:
2720 return hci_conn_hash_lookup_handle(hdev, handle);
2721 case HCI_AMP:
2722 chan = hci_chan_lookup_handle(hdev, handle);
2723 if (chan)
2724 return chan->conn;
2725 break;
2726 default:
2727 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2728 break;
2729 }
2730
2731 return NULL;
2732 }
2733
/* Handle a Number of Completed Data Blocks event: return the completed
 * blocks to the shared block quota for each reported handle and restart
 * TX processing. Used with block-based (AMP-style) flow control.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only valid with block-based flow control. */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the packet really carries num_hndl entries. */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Device-type aware lookup: AMP handles identify channels. */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* Credit blocks back, clamped to the controller's
			 * advertised total.
			 */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2783
2784 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2785 {
2786 struct hci_ev_mode_change *ev = (void *) skb->data;
2787 struct hci_conn *conn;
2788
2789 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2790
2791 hci_dev_lock(hdev);
2792
2793 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2794 if (conn) {
2795 conn->mode = ev->mode;
2796
2797 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2798 &conn->flags)) {
2799 if (conn->mode == HCI_CM_ACTIVE)
2800 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2801 else
2802 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2803 }
2804
2805 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2806 hci_sco_setup(conn, ev->status);
2807 }
2808
2809 hci_dev_unlock(hdev);
2810 }
2811
/* Handle an HCI PIN Code Request event: auto-reject when the device is
 * not pairable, otherwise forward the request to user space via mgmt.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout while pairing is in progress. */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* Tell user space whether a 16 digit PIN is required
		 * (high security requested for this connection).
		 */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
2848
/* Handle an HCI Link Key Request event: look up a stored link key for
 * the peer and reply with it, unless security policy forbids its use,
 * in which case a negative reply is sent instead.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	/* Debug keys are only usable when debug keys are explicitly
	 * enabled on this device.
	 */
	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* Refuse unauthenticated keys when MITM protection is
		 * required for this connection (auth_type bit 0).
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Legacy combination keys from short PINs don't qualify
		 * for high security.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
2912
/* Handle an HCI Link Key Notification event: update the connection's
 * key information and persist the new key when mgmt is in use.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed combination key keeps the connection's
		 * original key type.
		 */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	/* NOTE: conn may be NULL here if no matching connection was
	 * found; the key is still stored against the address.
	 */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				 ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
2941
2942 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2943 {
2944 struct hci_ev_clock_offset *ev = (void *) skb->data;
2945 struct hci_conn *conn;
2946
2947 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2948
2949 hci_dev_lock(hdev);
2950
2951 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2952 if (conn && !ev->status) {
2953 struct inquiry_entry *ie;
2954
2955 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2956 if (ie) {
2957 ie->data.clock_offset = ev->clock_offset;
2958 ie->timestamp = jiffies;
2959 }
2960 }
2961
2962 hci_dev_unlock(hdev);
2963 }
2964
2965 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2966 {
2967 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2968 struct hci_conn *conn;
2969
2970 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2971
2972 hci_dev_lock(hdev);
2973
2974 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2975 if (conn && !ev->status)
2976 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2977
2978 hci_dev_unlock(hdev);
2979 }
2980
2981 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2982 {
2983 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2984 struct inquiry_entry *ie;
2985
2986 BT_DBG("%s", hdev->name);
2987
2988 hci_dev_lock(hdev);
2989
2990 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2991 if (ie) {
2992 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2993 ie->timestamp = jiffies;
2994 }
2995
2996 hci_dev_unlock(hdev);
2997 }
2998
/* Handle an Inquiry Result with RSSI event. Two wire formats exist
 * (with and without a page scan mode field); they are distinguished by
 * the per-response record size. Each response is added to the inquiry
 * cache and reported to user space via mgmt.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known, ssp;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results from a periodic inquiry are not reported. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Record size tells the two formats apart. */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3058
/* Handle a Read Remote Extended Features Complete event: store the
 * reported feature page, derive the remote host's SSP/SC support from
 * page 1 and, while the connection is still being configured, continue
 * with name resolution or complete the connection setup.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host's feature bits (SSP, SC). */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name next, or report the connection to
	 * mgmt if that has already happened.
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* Connection setup is complete unless authentication is still
	 * required for an outgoing connection.
	 */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3124
/* Handle a Synchronous Connection Complete event: finalize the SCO or
 * eSCO connection, retrying negotiation with a downgraded packet type
 * for a set of known-recoverable failure codes.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* An eSCO attempt may have been downgraded to SCO by the
		 * controller; fall back to looking up the eSCO connection
		 * and retype it.
		 */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		/* For outgoing connections retry with a less demanding
		 * packet type selection before giving up.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3181
/* Return the number of significant bytes in an EIR buffer. EIR data is
 * a sequence of length-prefixed fields; a zero length octet terminates
 * the significant part. If no terminator is found within eir_len, the
 * whole buffer is considered significant.
 */
static inline size_t eir_get_length(u8 *eir, size_t eir_len)
{
	size_t offset = 0;

	while (offset < eir_len) {
		u8 field_len = eir[offset];

		if (field_len == 0)
			return offset;

		/* Skip the length octet plus the field payload. */
		offset += field_len + 1;
	}

	return eir_len;
}
3198
/* Handle an Extended Inquiry Result event: add each response to the
 * inquiry cache and report it to user space together with its EIR data.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results from a periodic inquiry are not reported. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;

		/* With mgmt active a name request can be skipped if the
		 * EIR data already contains the complete name.
		 */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
						      &ssp);
		/* Only forward the significant part of the EIR data. */
		eir_len = eir_get_length(info->data, sizeof(info->data));
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi, !name_known,
				  ssp, info->data, eir_len);
	}

	hci_dev_unlock(hdev);
}
3246
/* Handle an Encryption Key Refresh Complete event: promote the pending
 * security level on success, or tear down the link on failure.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed key refresh on an established link means encryption
	 * could not be maintained; disconnect with auth failure.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3290
3291 static u8 hci_get_auth_req(struct hci_conn *conn)
3292 {
3293 /* If remote requests dedicated bonding follow that lead */
3294 if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
3295 conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
3296 /* If both remote and local IO capabilities allow MITM
3297 * protection then require it, otherwise don't */
3298 if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
3299 conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
3300 return HCI_AT_DEDICATED_BONDING;
3301 else
3302 return HCI_AT_DEDICATED_BONDING_MITM;
3303 }
3304
3305 /* If remote requests no-bonding follow that lead */
3306 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3307 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3308 return conn->remote_auth | (conn->auth_type & 0x01);
3309
3310 return conn->auth_type;
3311 }
3312
/* Handle an IO Capability Request event: reply with our IO capability
 * and authentication requirement when pairing is acceptable, otherwise
 * send a negative reply rejecting the pairing attempt.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Accept when we are pairable, or when the remote side only
	 * asks for no-bonding (with or without MITM).
	 */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		/* Advertise OOB data availability when we have remote
		 * OOB data and either initiated the connection or the
		 * remote side indicated it has our OOB data.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3364
3365 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3366 {
3367 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3368 struct hci_conn *conn;
3369
3370 BT_DBG("%s", hdev->name);
3371
3372 hci_dev_lock(hdev);
3373
3374 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3375 if (!conn)
3376 goto unlock;
3377
3378 conn->remote_cap = ev->capability;
3379 conn->remote_auth = ev->authentication;
3380 if (ev->oob_data)
3381 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3382
3383 unlock:
3384 hci_dev_unlock(hdev);
3385 }
3386
/* Handle a User Confirmation Request event: auto-accept or auto-reject
 * based on both sides' MITM requirements and IO capabilities, or hand
 * the decision to user space via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the authentication requirement is the MITM flag. */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* A configured delay defers the accept via delayed work,
		 * giving user space a chance to intervene.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
				  confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3456
3457 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3458 struct sk_buff *skb)
3459 {
3460 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3461
3462 BT_DBG("%s", hdev->name);
3463
3464 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3465 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3466 }
3467
3468 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3469 struct sk_buff *skb)
3470 {
3471 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3472 struct hci_conn *conn;
3473
3474 BT_DBG("%s", hdev->name);
3475
3476 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3477 if (!conn)
3478 return;
3479
3480 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3481 conn->passkey_entered = 0;
3482
3483 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3484 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3485 conn->dst_type, conn->passkey_notify,
3486 conn->passkey_entered);
3487 }
3488
3489 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3490 {
3491 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3492 struct hci_conn *conn;
3493
3494 BT_DBG("%s", hdev->name);
3495
3496 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3497 if (!conn)
3498 return;
3499
3500 switch (ev->type) {
3501 case HCI_KEYPRESS_STARTED:
3502 conn->passkey_entered = 0;
3503 return;
3504
3505 case HCI_KEYPRESS_ENTERED:
3506 conn->passkey_entered++;
3507 break;
3508
3509 case HCI_KEYPRESS_ERASED:
3510 conn->passkey_entered--;
3511 break;
3512
3513 case HCI_KEYPRESS_CLEARED:
3514 conn->passkey_entered = 0;
3515 break;
3516
3517 case HCI_KEYPRESS_COMPLETED:
3518 return;
3519 }
3520
3521 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3522 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3523 conn->dst_type, conn->passkey_notify,
3524 conn->passkey_entered);
3525 }
3526
/* Handle a Simple Pairing Complete event: report pairing failures to
 * user space (avoiding duplicates with the auth complete path) and drop
 * the reference taken when pairing started.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
3555
3556 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3557 struct sk_buff *skb)
3558 {
3559 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3560 struct inquiry_entry *ie;
3561 struct hci_conn *conn;
3562
3563 BT_DBG("%s", hdev->name);
3564
3565 hci_dev_lock(hdev);
3566
3567 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3568 if (conn)
3569 memcpy(conn->features[1], ev->features, 8);
3570
3571 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3572 if (ie)
3573 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3574
3575 hci_dev_unlock(hdev);
3576 }
3577
3578 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3579 struct sk_buff *skb)
3580 {
3581 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3582 struct oob_data *data;
3583
3584 BT_DBG("%s", hdev->name);
3585
3586 hci_dev_lock(hdev);
3587
3588 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3589 goto unlock;
3590
3591 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3592 if (data) {
3593 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
3594 struct hci_cp_remote_oob_ext_data_reply cp;
3595
3596 bacpy(&cp.bdaddr, &ev->bdaddr);
3597 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
3598 memcpy(cp.randomizer192, data->randomizer192,
3599 sizeof(cp.randomizer192));
3600 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
3601 memcpy(cp.randomizer256, data->randomizer256,
3602 sizeof(cp.randomizer256));
3603
3604 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
3605 sizeof(cp), &cp);
3606 } else {
3607 struct hci_cp_remote_oob_data_reply cp;
3608
3609 bacpy(&cp.bdaddr, &ev->bdaddr);
3610 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
3611 memcpy(cp.randomizer, data->randomizer192,
3612 sizeof(cp.randomizer));
3613
3614 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
3615 sizeof(cp), &cp);
3616 }
3617 } else {
3618 struct hci_cp_remote_oob_data_neg_reply cp;
3619
3620 bacpy(&cp.bdaddr, &ev->bdaddr);
3621 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
3622 sizeof(cp), &cp);
3623 }
3624
3625 unlock:
3626 hci_dev_unlock(hdev);
3627 }
3628
3629 static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3630 struct sk_buff *skb)
3631 {
3632 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3633 struct hci_conn *hcon, *bredr_hcon;
3634
3635 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3636 ev->status);
3637
3638 hci_dev_lock(hdev);
3639
3640 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3641 if (!hcon) {
3642 hci_dev_unlock(hdev);
3643 return;
3644 }
3645
3646 if (ev->status) {
3647 hci_conn_del(hcon);
3648 hci_dev_unlock(hdev);
3649 return;
3650 }
3651
3652 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3653
3654 hcon->state = BT_CONNECTED;
3655 bacpy(&hcon->dst, &bredr_hcon->dst);
3656
3657 hci_conn_hold(hcon);
3658 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3659 hci_conn_drop(hcon);
3660
3661 hci_conn_add_sysfs(hcon);
3662
3663 amp_physical_cfm(bredr_hcon, hcon);
3664
3665 hci_dev_unlock(hdev);
3666 }
3667
/* Logical Link Complete event: an AMP logical link has been established
 * on top of an existing physical link. Create the hci_chan representing
 * it and, if an L2CAP channel is waiting on the AMP manager, confirm the
 * logical link to L2CAP.
 *
 * NOTE(review): unlike most handlers in this file no hdev->lock is taken
 * here - presumably the amp_mgr/l2cap_chan locking is considered
 * sufficient; confirm against the AMP code paths.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	/* The physical link must already be known */
	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* AMP traffic uses the controller's block MTU */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
3705
3706 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3707 struct sk_buff *skb)
3708 {
3709 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3710 struct hci_chan *hchan;
3711
3712 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3713 le16_to_cpu(ev->handle), ev->status);
3714
3715 if (ev->status)
3716 return;
3717
3718 hci_dev_lock(hdev);
3719
3720 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3721 if (!hchan)
3722 goto unlock;
3723
3724 amp_destroy_logical_link(hchan, ev->reason);
3725
3726 unlock:
3727 hci_dev_unlock(hdev);
3728 }
3729
3730 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3731 struct sk_buff *skb)
3732 {
3733 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3734 struct hci_conn *hcon;
3735
3736 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3737
3738 if (ev->status)
3739 return;
3740
3741 hci_dev_lock(hdev);
3742
3743 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3744 if (hcon) {
3745 hcon->state = BT_CLOSED;
3746 hci_conn_del(hcon);
3747 }
3748
3749 hci_dev_unlock(hdev);
3750 }
3751
/* LE Connection Complete event: finish setting up (or fail) an LE
 * connection. Either an outgoing hci_conn in BT_CONNECT state already
 * exists, or one is created here for controller-initiated connections
 * (e.g. slave role or white-list based). The initiator/responder
 * address bookkeeping below feeds SMP address resolution, so the order
 * of the assignments matters.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct smp_irk *irk;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* An outgoing attempt leaves a hci_conn in BT_CONNECT state; if
	 * none is found this connection was not initiated through
	 * hci_connect_le() and a fresh object is needed.
	 */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* The advertising parameters for own address type
		 * define which source address and source address
		 * type this connections has.
		 */
		if (bacmp(&conn->src, BDADDR_ANY)) {
			conn->src_type = ADDR_LE_DEV_PUBLIC;
		} else {
			bacpy(&conn->src, &hdev->static_addr);
			conn->src_type = ADDR_LE_DEV_RANDOM;
		}

		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		} else {
			/* Set the responder (our side) address type based on
			 * the advertising address type.
			 */
			conn->resp_addr_type = hdev->adv_addr_type;
			if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
				bacpy(&conn->resp_addr, &hdev->random_addr);
			else
				bacpy(&conn->resp_addr, &hdev->bdaddr);

			conn->init_addr_type = ev->bdaddr_type;
			bacpy(&conn->init_addr, &ev->bdaddr);
		}
	} else {
		/* Existing outgoing attempt completed: stop its timeout */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	/* Ensure that the hci_conn contains the identity address type
	 * regardless of which address the connection was made with.
	 */
	hci_copy_identity_address(hdev, &conn->src, &conn->src_type);

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	/* Notify userspace only once per connection */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
		set_bit(HCI_CONN_6LOWPAN, &conn->flags);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* The device is connected now, so it no longer counts as a
	 * pending LE connection.
	 */
	hci_pend_le_conn_del(hdev, &conn->dst, conn->dst_type);

unlock:
	hci_dev_unlock(hdev);
}
3869
3870 /* This function requires the caller holds hdev->lock */
3871 static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
3872 u8 addr_type)
3873 {
3874 struct hci_conn *conn;
3875 struct smp_irk *irk;
3876
3877 /* If this is a resolvable address, we should resolve it and then
3878 * update address and address type variables.
3879 */
3880 irk = hci_get_irk(hdev, addr, addr_type);
3881 if (irk) {
3882 addr = &irk->bdaddr;
3883 addr_type = irk->addr_type;
3884 }
3885
3886 if (!hci_pend_le_conn_lookup(hdev, addr, addr_type))
3887 return;
3888
3889 conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
3890 HCI_AT_NO_BONDING);
3891 if (!IS_ERR(conn))
3892 return;
3893
3894 switch (PTR_ERR(conn)) {
3895 case -EBUSY:
3896 /* If hci_connect() returns -EBUSY it means there is already
3897 * an LE connection attempt going on. Since controllers don't
3898 * support more than one connection attempt at the time, we
3899 * don't consider this an error case.
3900 */
3901 break;
3902 default:
3903 BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
3904 }
3905 }
3906
3907 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3908 {
3909 u8 num_reports = skb->data[0];
3910 void *ptr = &skb->data[1];
3911 s8 rssi;
3912
3913 hci_dev_lock(hdev);
3914
3915 while (num_reports--) {
3916 struct hci_ev_le_advertising_info *ev = ptr;
3917
3918 if (ev->evt_type == LE_ADV_IND ||
3919 ev->evt_type == LE_ADV_DIRECT_IND)
3920 check_pending_le_conn(hdev, &ev->bdaddr,
3921 ev->bdaddr_type);
3922
3923 rssi = ev->data[ev->length];
3924 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3925 NULL, rssi, 0, 1, ev->data, ev->length);
3926
3927 ptr += sizeof(*ev) + ev->length + 1;
3928 }
3929
3930 hci_dev_unlock(hdev);
3931 }
3932
/* LE Long Term Key Request event: the remote master started encryption
 * and the controller asks for the LTK matching the given EDiv/Rand.
 * Reply with the stored key (and update the connection's security
 * bookkeeping) or send a negative reply if no key is known.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	/* Key lookup is keyed on EDiv/Rand and the connection role */
	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->out);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* The resulting security level depends on whether the key was
	 * created by an authenticated (MITM protected) pairing.
	 */
	if (ltk->authenticated)
		conn->pending_sec_level = BT_SECURITY_HIGH;
	else
		conn->pending_sec_level = BT_SECURITY_MEDIUM;

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == HCI_SMP_STK_SLAVE) {
		list_del(&ltk->list);
		kfree(ltk);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* ev->handle is already little-endian, so it is passed through
	 * to the negative reply without conversion.
	 */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
3985
3986 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3987 {
3988 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3989
3990 skb_pull(skb, sizeof(*le_ev));
3991
3992 switch (le_ev->subevent) {
3993 case HCI_EV_LE_CONN_COMPLETE:
3994 hci_le_conn_complete_evt(hdev, skb);
3995 break;
3996
3997 case HCI_EV_LE_ADVERTISING_REPORT:
3998 hci_le_adv_report_evt(hdev, skb);
3999 break;
4000
4001 case HCI_EV_LE_LTK_REQ:
4002 hci_le_ltk_request_evt(hdev, skb);
4003 break;
4004
4005 default:
4006 break;
4007 }
4008 }
4009
4010 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4011 {
4012 struct hci_ev_channel_selected *ev = (void *) skb->data;
4013 struct hci_conn *hcon;
4014
4015 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4016
4017 skb_pull(skb, sizeof(*ev));
4018
4019 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4020 if (!hcon)
4021 return;
4022
4023 amp_read_loc_assoc_final_data(hdev, hcon);
4024 }
4025
/* Main HCI event dispatcher: called for every event packet received
 * from the controller. Optionally clones the skb for a pending
 * synchronous request, completes the matching sent command if this
 * event was the one it was waiting for, then dispatches to the
 * per-event handler. Consumes the skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hdev->req_status == HCI_REQ_PEND) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the last sent command was flagged as being completed by
	 * this particular event, mark the request complete now.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		/* Unknown events are logged and dropped */
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
This page took 0.135098 seconds and 5 git commands to generate.