Bluetooth: Fix dereferencing conn variable before NULL check
[deliverable/linux.git] / net / bluetooth / hci_event.c
... / ...
CommitLineData
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI event handling. */
26
27#include <asm/unaligned.h>
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/mgmt.h>
32
33#include "a2mp.h"
34#include "amp.h"
35#include "smp.h"
36
37/* Handle HCI Event packets */
38
/* Command Complete handler for HCI_OP_INQUIRY_CANCEL.
 *
 * On success, clear the inquiry flag, wake any waiters blocked on it,
 * mark discovery as stopped and resume pending connection attempts.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_INQUIRY, &hdev->flags);
	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);

	/* An inquiry may have blocked outgoing connection attempts */
	hci_conn_check_pending(hdev);
}
58
59static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60{
61 __u8 status = *((__u8 *) skb->data);
62
63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
64
65 if (status)
66 return;
67
68 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69}
70
/* Command Complete handler for HCI_OP_EXIT_PERIODIC_INQ.
 *
 * On success, clear the periodic-inquiry flag and resume any connection
 * attempts that were held back while inquiry was running.
 */
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);

	hci_conn_check_pending(hdev);
}
84
/* Command Complete handler for HCI_OP_REMOTE_NAME_REQ_CANCEL.
 * Nothing to update; only log the event for debugging.
 */
static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
90
/* Command Complete handler for HCI_OP_ROLE_DISCOVERY.
 *
 * On success, store the reported role (master/slave) on the connection
 * identified by the returned handle, if that connection still exists.
 */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_role_discovery *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->role = rp->role;

	hci_dev_unlock(hdev);
}
109
/* Command Complete handler for HCI_OP_READ_LINK_POLICY.
 *
 * On success, cache the reported link policy on the connection matching
 * the returned handle, if that connection still exists.
 */
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->link_policy = __le16_to_cpu(rp->policy);

	hci_dev_unlock(hdev);
}
128
/* Command Complete handler for HCI_OP_WRITE_LINK_POLICY.
 *
 * The response only carries status and handle; the policy value that was
 * actually written has to be recovered from the command we sent.
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_write_link_policy *rp = (void *) skb->data;
	struct hci_conn *conn;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		/* policy field follows the 2-byte handle in the command */
		conn->link_policy = get_unaligned_le16(sent + 2);

	hci_dev_unlock(hdev);
}
152
153static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
154 struct sk_buff *skb)
155{
156 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
157
158 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
159
160 if (rp->status)
161 return;
162
163 hdev->link_policy = __le16_to_cpu(rp->policy);
164}
165
/* Command Complete handler for HCI_OP_WRITE_DEF_LINK_POLICY.
 *
 * The written value is not echoed back, so recover it from the command
 * we originally sent and mirror it into hdev->link_policy.
 */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
	if (!sent)
		return;

	hdev->link_policy = get_unaligned_le16(sent);
}
183
/* Command Complete handler for HCI_OP_RESET.
 *
 * Bring the local state back in line with a freshly reset controller:
 * drop all non-persistent flags and restore defaults for discovery,
 * TX power, advertising/scan-response data and LE scan type.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	clear_bit(HCI_RESET, &hdev->flags);

	/* Reset all non-persistent flags */
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	hdev->discovery.state = DISCOVERY_STOPPED;
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
	hdev->adv_data_len = 0;

	memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
	hdev->scan_rsp_data_len = 0;

	hdev->le_scan_type = LE_SCAN_PASSIVE;

	hdev->ssp_debug_mode = 0;
}
209
/* Command Complete handler for HCI_OP_WRITE_LOCAL_NAME.
 *
 * When the management interface is in use, completion (including
 * failure) is reported there; otherwise the name from the sent command
 * is copied into hdev on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_local_name_complete(hdev, sent, status);
	else if (!status)
		memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

	hci_dev_unlock(hdev);
}
230
231static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
232{
233 struct hci_rp_read_local_name *rp = (void *) skb->data;
234
235 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
236
237 if (rp->status)
238 return;
239
240 if (test_bit(HCI_SETUP, &hdev->dev_flags))
241 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
242}
243
/* Command Complete handler for HCI_OP_WRITE_AUTH_ENABLE.
 *
 * On success, mirror the requested authentication setting into the
 * HCI_AUTH flag; always notify the management interface (it also needs
 * to see failures).
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
	if (!sent)
		return;

	if (!status) {
		__u8 param = *((__u8 *) sent);

		if (param == AUTH_ENABLED)
			set_bit(HCI_AUTH, &hdev->flags);
		else
			clear_bit(HCI_AUTH, &hdev->flags);
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_auth_enable_complete(hdev, status);
}
267
268static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
269{
270 __u8 status = *((__u8 *) skb->data);
271 __u8 param;
272 void *sent;
273
274 BT_DBG("%s status 0x%2.2x", hdev->name, status);
275
276 if (status)
277 return;
278
279 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
280 if (!sent)
281 return;
282
283 param = *((__u8 *) sent);
284
285 if (param)
286 set_bit(HCI_ENCRYPT, &hdev->flags);
287 else
288 clear_bit(HCI_ENCRYPT, &hdev->flags);
289}
290
/* Command Complete handler for HCI_OP_WRITE_SCAN_ENABLE.
 *
 * On success, mirror the requested scan mode into the ISCAN/PSCAN
 * flags. On failure, forget any pending discoverable timeout since the
 * mode change never took effect.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u8 param;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
	if (!sent)
		return;

	param = *((__u8 *) sent);

	hci_dev_lock(hdev);

	if (status) {
		hdev->discov_timeout = 0;
		goto done;
	}

	if (param & SCAN_INQUIRY)
		set_bit(HCI_ISCAN, &hdev->flags);
	else
		clear_bit(HCI_ISCAN, &hdev->flags);

	if (param & SCAN_PAGE)
		set_bit(HCI_PSCAN, &hdev->flags);
	else
		clear_bit(HCI_PSCAN, &hdev->flags);

done:
	hci_dev_unlock(hdev);
}
325
/* Command Complete handler for HCI_OP_READ_CLASS_OF_DEV: cache the
 * 3-byte class of device reported by the controller.
 */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->dev_class, rp->dev_class, 3);

	BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
	       hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
340
/* Command Complete handler for HCI_OP_WRITE_CLASS_OF_DEV.
 *
 * On success, copy the class we sent into hdev; the management
 * interface is always notified so it can also observe failures.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	if (status == 0)
		memcpy(hdev->dev_class, sent, 3);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_set_class_of_dev_complete(hdev, sent, status);

	hci_dev_unlock(hdev);
}
362
/* Command Complete handler for HCI_OP_READ_VOICE_SETTING.
 *
 * Cache the voice setting and, when it actually changed, notify the
 * driver via the optional hdev->notify callback (used e.g. for SCO
 * routing).
 */
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_voice_setting *rp = (void *) skb->data;
	__u16 setting;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	setting = __le16_to_cpu(rp->voice_setting);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
385
/* Command Complete handler for HCI_OP_WRITE_VOICE_SETTING.
 *
 * The written value is recovered from the sent command; when it differs
 * from the cached one, update it and notify the driver callback.
 */
static void hci_cc_write_voice_setting(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	__u16 setting;
	void *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
	if (!sent)
		return;

	setting = get_unaligned_le16(sent);

	if (hdev->voice_setting == setting)
		return;

	hdev->voice_setting = setting;

	BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
414
/* Command Complete handler for HCI_OP_READ_NUM_SUPPORTED_IAC: cache how
 * many Inquiry Access Codes the controller supports simultaneously.
 */
static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->num_iac = rp->num_iac;

	BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
}
429
/* Command Complete handler for HCI_OP_WRITE_SSP_MODE.
 *
 * On success, mirror the Secure Simple Pairing host-support bit into
 * the feature mask. The mgmt interface is always notified; without
 * mgmt, the HCI_SSP_ENABLED flag is updated directly on success.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct hci_cp_write_ssp_mode *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
	if (!sent)
		return;

	if (!status) {
		if (sent->mode)
			hdev->features[1][0] |= LMP_HOST_SSP;
		else
			hdev->features[1][0] &= ~LMP_HOST_SSP;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_ssp_enable_complete(hdev, sent->mode, status);
	else if (!status) {
		if (sent->mode)
			set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
	}
}
457
/* Command Complete handler for HCI_OP_WRITE_SC_SUPPORT.
 *
 * Mirrors hci_cc_write_ssp_mode but for Secure Connections: update the
 * host feature bit on success, notify mgmt (or toggle HCI_SC_ENABLED
 * directly when mgmt is not in use).
 */
static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_sc_support *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
	if (!sent)
		return;

	if (!status) {
		if (sent->support)
			hdev->features[1][0] |= LMP_HOST_SC;
		else
			hdev->features[1][0] &= ~LMP_HOST_SC;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_sc_enable_complete(hdev, sent->support, status);
	else if (!status) {
		if (sent->support)
			set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
		else
			clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
	}
}
485
/* Command Complete handler for HCI_OP_READ_LOCAL_VERSION.
 *
 * Version information is only captured during initial setup so that a
 * later (unexpected) response cannot overwrite established values.
 */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_local_version *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hdev->hci_ver = rp->hci_ver;
		hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
		hdev->lmp_ver = rp->lmp_ver;
		hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
		hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
	}
}
503
504static void hci_cc_read_local_commands(struct hci_dev *hdev,
505 struct sk_buff *skb)
506{
507 struct hci_rp_read_local_commands *rp = (void *) skb->data;
508
509 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
510
511 if (rp->status)
512 return;
513
514 if (test_bit(HCI_SETUP, &hdev->dev_flags))
515 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
516}
517
/* Command Complete handler for HCI_OP_READ_LOCAL_FEATURES.
 *
 * Cache the LMP feature page 0 and derive the usable ACL packet types
 * and (e)SCO link types from the advertised feature bits.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	memcpy(hdev->features, rp->features, 8);

	/* Adjust default settings according to features
	 * supported by device. */

	if (hdev->features[0][0] & LMP_3SLOT)
		hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

	if (hdev->features[0][0] & LMP_5SLOT)
		hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

	if (hdev->features[0][1] & LMP_HV2) {
		hdev->pkt_type |= (HCI_HV2);
		hdev->esco_type |= (ESCO_HV2);
	}

	if (hdev->features[0][1] & LMP_HV3) {
		hdev->pkt_type |= (HCI_HV3);
		hdev->esco_type |= (ESCO_HV3);
	}

	if (lmp_esco_capable(hdev))
		hdev->esco_type |= (ESCO_EV3);

	if (hdev->features[0][4] & LMP_EV4)
		hdev->esco_type |= (ESCO_EV4);

	if (hdev->features[0][4] & LMP_EV5)
		hdev->esco_type |= (ESCO_EV5);

	if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
		hdev->esco_type |= (ESCO_2EV3);

	if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
		hdev->esco_type |= (ESCO_3EV3);

	if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
		hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
}
567
/* Command Complete handler for HCI_OP_READ_LOCAL_EXT_FEATURES.
 *
 * Track the highest feature page the controller reports and store the
 * returned page (bounds-checked against HCI_MAX_PAGES).
 */
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (hdev->max_page < rp->max_page)
		hdev->max_page = rp->max_page;

	if (rp->page < HCI_MAX_PAGES)
		memcpy(hdev->features[rp->page], rp->features, 8);
}
584
585static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
586 struct sk_buff *skb)
587{
588 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
589
590 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
591
592 if (rp->status)
593 return;
594
595 hdev->flow_ctl_mode = rp->mode;
596}
597
/* Command Complete handler for HCI_OP_READ_BUFFER_SIZE.
 *
 * Record ACL/SCO MTUs and packet counts and initialise the available
 * packet counters. Some controllers report bogus SCO values, handled
 * via HCI_QUIRK_FIXUP_BUFFER_SIZE.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
	hdev->sco_mtu = rp->sco_mtu;
	hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
	hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

	if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
		hdev->sco_mtu = 64;
		hdev->sco_pkts = 8;
	}

	hdev->acl_cnt = hdev->acl_pkts;
	hdev->sco_cnt = hdev->sco_pkts;

	BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
	       hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
623
/* Command Complete handler for HCI_OP_READ_BD_ADDR.
 *
 * Capture the controller address during init; during setup also keep a
 * separate copy (setup_addr) of the address seen at setup time.
 */
static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_bd_addr *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags))
		bacpy(&hdev->bdaddr, &rp->bdaddr);

	if (test_bit(HCI_SETUP, &hdev->dev_flags))
		bacpy(&hdev->setup_addr, &rp->bdaddr);
}
639
/* Command Complete handler for HCI_OP_READ_PAGE_SCAN_ACTIVITY: during
 * init, cache the controller's page scan interval and window.
 */
static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	if (test_bit(HCI_INIT, &hdev->flags)) {
		hdev->page_scan_interval = __le16_to_cpu(rp->interval);
		hdev->page_scan_window = __le16_to_cpu(rp->window);
	}
}
655
/* Command Complete handler for HCI_OP_WRITE_PAGE_SCAN_ACTIVITY.
 *
 * On success, mirror the interval/window values from the sent command
 * into hdev.
 */
static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	struct hci_cp_write_page_scan_activity *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
	if (!sent)
		return;

	hdev->page_scan_interval = __le16_to_cpu(sent->interval);
	hdev->page_scan_window = __le16_to_cpu(sent->window);
}
674
675static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
676 struct sk_buff *skb)
677{
678 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
679
680 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
681
682 if (rp->status)
683 return;
684
685 if (test_bit(HCI_INIT, &hdev->flags))
686 hdev->page_scan_type = rp->type;
687}
688
/* Command Complete handler for HCI_OP_WRITE_PAGE_SCAN_TYPE.
 *
 * The value written is recovered from the sent command and mirrored
 * into hdev on success.
 */
static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	u8 status = *((u8 *) skb->data);
	u8 *type;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
	if (type)
		hdev->page_scan_type = *type;
}
704
/* Command Complete handler for HCI_OP_READ_DATA_BLOCK_SIZE.
 *
 * Used with block-based flow control: record the maximum ACL length,
 * block length and block count, and initialise the free-block counter.
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_data_block_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
	hdev->block_len = __le16_to_cpu(rp->block_len);
	hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

	hdev->block_cnt = hdev->num_blocks;

	BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
	       hdev->block_cnt, hdev->block_len);
}
724
/* Command Complete handler for HCI_OP_READ_CLOCK.
 *
 * Depending on the "which" parameter of the sent command, the result is
 * either the local clock (stored on hdev) or a piconet clock plus
 * accuracy (stored on the matching connection).
 */
static void hci_cc_read_clock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_clock *rp = (void *) skb->data;
	struct hci_cp_read_clock *cp;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	/* Guard against truncated responses before touching rp fields */
	if (skb->len < sizeof(*rp))
		return;

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK);
	if (!cp)
		goto unlock;

	if (cp->which == 0x00) {
		/* 0x00 = local clock */
		hdev->clock = le32_to_cpu(rp->clock);
		goto unlock;
	}

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn) {
		conn->clock = le32_to_cpu(rp->clock);
		conn->clock_accuracy = le16_to_cpu(rp->accuracy);
	}

unlock:
	hci_dev_unlock(hdev);
}
759
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_INFO.
 *
 * Cache the AMP controller capabilities on success; in all cases an
 * A2MP Get Info response is sent so the peer is answered even on
 * failure.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	hdev->amp_status = rp->amp_status;
	hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
	hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
	hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
	hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
	hdev->amp_type = rp->amp_type;
	hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
	hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
	hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
	hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
	a2mp_send_getinfo_rsp(hdev);
}
784
/* Command Complete handler for HCI_OP_READ_LOCAL_AMP_ASSOC.
 *
 * The AMP assoc structure can arrive in several fragments: while the
 * remaining length exceeds the current fragment, accumulate it and
 * request the next fragment. Once complete (or on error), send the
 * A2MP responses.
 */
static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
	struct amp_assoc *assoc = &hdev->loc_assoc;
	size_t rem_len, frag_len;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		goto a2mp_rsp;

	frag_len = skb->len - sizeof(*rp);
	rem_len = __le16_to_cpu(rp->rem_len);

	if (rem_len > frag_len) {
		BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);

		memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
		assoc->offset += frag_len;

		/* Read other fragments */
		amp_read_loc_assoc_frag(hdev, rp->phy_handle);

		return;
	}

	/* Last fragment: finalize length and reset offset for reuse */
	memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
	assoc->len = assoc->offset + rem_len;
	assoc->offset = 0;

a2mp_rsp:
	/* Send A2MP Rsp when all fragments are received */
	a2mp_send_getampassoc_rsp(hdev, rp->status);
	a2mp_send_create_phy_link_req(hdev, rp->status);
}
821
822static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
823 struct sk_buff *skb)
824{
825 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
826
827 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
828
829 if (rp->status)
830 return;
831
832 hdev->inq_tx_power = rp->tx_power;
833}
834
/* Command Complete handler for HCI_OP_PIN_CODE_REPLY.
 *
 * Notify mgmt of the outcome; on success, record the PIN length from
 * the sent command on the matching ACL connection (used later for key
 * type decisions).
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_reply *rp = (void *) skb->data;
	struct hci_cp_pin_code_reply *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

	if (rp->status)
		goto unlock;

	cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
	if (conn)
		conn->pin_length = cp->pin_len;

unlock:
	hci_dev_unlock(hdev);
}
862
/* Command Complete handler for HCI_OP_PIN_CODE_NEG_REPLY: forward the
 * outcome to the management interface.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
						 rp->status);

	hci_dev_unlock(hdev);
}
877
/* Command Complete handler for HCI_OP_LE_READ_BUFFER_SIZE.
 *
 * Cache the LE ACL MTU and packet count and initialise the available
 * LE packet counter.
 */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
	hdev->le_pkts = rp->le_max_pkt;

	hdev->le_cnt = hdev->le_pkts;

	BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}
895
896static void hci_cc_le_read_local_features(struct hci_dev *hdev,
897 struct sk_buff *skb)
898{
899 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
900
901 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
902
903 if (rp->status)
904 return;
905
906 memcpy(hdev->le_features, rp->features, 8);
907}
908
909static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
910 struct sk_buff *skb)
911{
912 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
913
914 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
915
916 if (rp->status)
917 return;
918
919 hdev->adv_tx_power = rp->tx_power;
920}
921
/* Command Complete handler for HCI_OP_USER_CONFIRM_REPLY: forward the
 * outcome to the management interface.
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
						 rp->status);

	hci_dev_unlock(hdev);
}
936
/* Command Complete handler for HCI_OP_USER_CONFIRM_NEG_REPLY: forward
 * the outcome to the management interface.
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
952
/* Command Complete handler for HCI_OP_USER_PASSKEY_REPLY: forward the
 * outcome to the management interface.
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
967
/* Command Complete handler for HCI_OP_USER_PASSKEY_NEG_REPLY: forward
 * the outcome to the management interface.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
983
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_DATA.
 *
 * Forward the legacy (P-192 only) OOB hash and randomizer to mgmt;
 * the P-256 values are passed as NULL since this variant has none.
 */
static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
					  NULL, NULL, rp->status);
	hci_dev_unlock(hdev);
}
996
/* Command Complete handler for HCI_OP_READ_LOCAL_OOB_EXT_DATA.
 *
 * Extended variant carrying both P-192 and P-256 OOB values; forward
 * all of them to the management interface.
 */
static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
					  rp->hash256, rp->randomizer256,
					  rp->status);
	hci_dev_unlock(hdev);
}
1010
1011
/* Command Complete handler for HCI_OP_LE_SET_RANDOM_ADDR.
 *
 * On success, remember the random address we programmed so later code
 * knows which own-address the controller is using.
 */
static void hci_cc_le_set_random_addr(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	bdaddr_t *sent;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	bacpy(&hdev->random_addr, sent);

	hci_dev_unlock(hdev);
}
1032
/* Command Complete handler for HCI_OP_LE_SET_ADV_ENABLE.
 *
 * Track the HCI_LE_ADV flag according to what we actually enabled.
 * When advertising was turned on while an LE connection attempt is in
 * BT_CONNECT state, arm the connection timeout so a peripheral-role
 * connect attempt cannot hang forever.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 *sent, status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	/* If we're doing connection initiation as peripheral, set a
	 * timeout in case something goes wrong.
	 */
	if (*sent) {
		struct hci_conn *conn;

		set_bit(HCI_LE_ADV, &hdev->dev_flags);

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (conn)
			queue_delayed_work(hdev->workqueue,
					   &conn->le_conn_timeout,
					   conn->conn_timeout);
	} else {
		clear_bit(HCI_LE_ADV, &hdev->dev_flags);
	}

	hci_dev_unlock(hdev);
}
1067
/* Command Complete handler for HCI_OP_LE_SET_SCAN_PARAM.
 *
 * On success, remember the scan type (active/passive) that was set;
 * other handlers use it to decide how to treat advertising reports.
 */
static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_param *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	hdev->le_scan_type = cp->type;

	hci_dev_unlock(hdev);
}
1088
1089static bool has_pending_adv_report(struct hci_dev *hdev)
1090{
1091 struct discovery_state *d = &hdev->discovery;
1092
1093 return bacmp(&d->last_adv_addr, BDADDR_ANY);
1094}
1095
1096static void clear_pending_adv_report(struct hci_dev *hdev)
1097{
1098 struct discovery_state *d = &hdev->discovery;
1099
1100 bacpy(&d->last_adv_addr, BDADDR_ANY);
1101 d->last_adv_data_len = 0;
1102}
1103
1104static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr,
1105 u8 bdaddr_type, s8 rssi, u32 flags,
1106 u8 *data, u8 len)
1107{
1108 struct discovery_state *d = &hdev->discovery;
1109
1110 bacpy(&d->last_adv_addr, bdaddr);
1111 d->last_adv_addr_type = bdaddr_type;
1112 d->last_adv_rssi = rssi;
1113 d->last_adv_flags = flags;
1114 memcpy(d->last_adv_data, data, len);
1115 d->last_adv_data_len = len;
1116}
1117
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * On scan enable: set HCI_LE_SCAN and, for active scans, drop any stale
 * buffered advertising report. On scan disable: flush a pending report
 * to mgmt, cancel the auto-disable timer and update discovery state or
 * re-enable advertising as appropriate.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCAN_ENABLE:
		set_bit(HCI_LE_SCAN, &hdev->dev_flags);
		if (hdev->le_scan_type == LE_SCAN_ACTIVE)
			clear_pending_adv_report(hdev);
		break;

	case LE_SCAN_DISABLE:
		/* We do this here instead of when setting DISCOVERY_STOPPED
		 * since the latter would potentially require waiting for
		 * inquiry to stop too.
		 */
		if (has_pending_adv_report(hdev)) {
			struct discovery_state *d = &hdev->discovery;

			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);
		}

		/* Cancel this timer so that we don't try to disable scanning
		 * when it's already disabled.
		 */
		cancel_delayed_work(&hdev->le_scan_disable);

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* The HCI_LE_SCAN_INTERRUPTED flag indicates that we
		 * interrupted scanning due to a connect request. Mark
		 * therefore discovery as stopped. If this was not
		 * because of a connect request advertising might have
		 * been disabled because of active scanning, so
		 * re-enable it again if necessary.
		 */
		if (test_and_clear_bit(HCI_LE_SCAN_INTERRUPTED,
				       &hdev->dev_flags))
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else if (!test_bit(HCI_LE_ADV, &hdev->dev_flags) &&
			 hdev->discovery.state == DISCOVERY_FINDING)
			mgmt_reenable_advertising(hdev);

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1183
/* Command Complete handler for HCI_OP_LE_READ_WHITE_LIST_SIZE: cache
 * the controller's white list capacity.
 */
static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);

	if (rp->status)
		return;

	hdev->le_white_list_size = rp->size;
}
1196
1197static void hci_cc_le_clear_white_list(struct hci_dev *hdev,
1198 struct sk_buff *skb)
1199{
1200 __u8 status = *((__u8 *) skb->data);
1201
1202 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1203
1204 if (status)
1205 return;
1206
1207 hci_bdaddr_list_clear(&hdev->le_white_list);
1208}
1209
/* Command Complete handler for HCI_OP_LE_ADD_TO_WHITE_LIST.
 *
 * On success, mirror the added entry (from the sent command) into the
 * host-side copy of the white list.
 */
static void hci_cc_le_add_to_white_list(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_cp_le_add_to_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_add(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
1228
/* Command Complete handler for HCI_OP_LE_DEL_FROM_WHITE_LIST.
 *
 * On success, remove the address from the host's copy of the LE white
 * list so it stays in sync with the controller.
 */
static void hci_cc_le_del_from_white_list(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_cp_le_del_from_white_list *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	/* The address to delete comes from the originally sent command */
	sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_WHITE_LIST);
	if (!sent)
		return;

	hci_bdaddr_list_del(&hdev->le_white_list, &sent->bdaddr,
			    sent->bdaddr_type);
}
1247
1248static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1249 struct sk_buff *skb)
1250{
1251 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1252
1253 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1254
1255 if (rp->status)
1256 return;
1257
1258 memcpy(hdev->le_states, rp->le_states, 8);
1259}
1260
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * On success, update the cached host feature bits and device flags to
 * match the LE and simultaneous LE+BR/EDR support values that were
 * sent in the command.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (sent->le) {
		hdev->features[1][0] |= LMP_HOST_LE;
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
	} else {
		hdev->features[1][0] &= ~LMP_HOST_LE;
		clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
		/* With LE disabled, advertising cannot stay enabled */
		clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
	}

	if (sent->simul)
		hdev->features[1][0] |= LMP_HOST_LE_BREDR;
	else
		hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
}
1290
/* Command Complete handler for HCI_OP_LE_SET_ADV_PARAM.
 *
 * On success, remember the own-address type that was configured for
 * advertising in hdev->adv_addr_type.
 */
static void hci_cc_set_adv_param(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_le_set_adv_param *cp;
	u8 status = *((u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM);
	if (!cp)
		return;

	hci_dev_lock(hdev);
	hdev->adv_addr_type = cp->own_address_type;
	hci_dev_unlock(hdev);
}
1309
1310static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1311 struct sk_buff *skb)
1312{
1313 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1314
1315 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1316 hdev->name, rp->status, rp->phy_handle);
1317
1318 if (rp->status)
1319 return;
1320
1321 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1322}
1323
/* Command Complete handler for HCI_OP_READ_RSSI.
 *
 * On success, store the reported RSSI on the connection identified by
 * the handle in the response.
 */
static void hci_cc_read_rssi(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_read_rssi *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_dev_lock(hdev);

	/* The connection may already be gone; only update if it exists */
	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (conn)
		conn->rssi = rp->rssi;

	hci_dev_unlock(hdev);
}
1342
/* Command Complete handler for HCI_OP_READ_TX_POWER.
 *
 * On success, store the reported value on the matching connection.
 * The "type" parameter of the original command selects which value
 * was requested: 0x00 updates conn->tx_power (current level), 0x01
 * updates conn->max_tx_power (maximum level).
 */
static void hci_cc_read_tx_power(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_cp_read_tx_power *sent;
	struct hci_rp_read_tx_power *rp = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	/* The sent command is needed to know which level was requested */
	sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER);
	if (!sent)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
	if (!conn)
		goto unlock;

	switch (sent->type) {
	case 0x00:
		conn->tx_power = rp->tx_power;
		break;
	case 0x01:
		conn->max_tx_power = rp->tx_power;
		break;
	}

unlock:
	hci_dev_unlock(hdev);
}
1376
1377static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1378{
1379 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1380
1381 if (status) {
1382 hci_conn_check_pending(hdev);
1383 return;
1384 }
1385
1386 set_bit(HCI_INQUIRY, &hdev->flags);
1387}
1388
/* Command Status handler for HCI_OP_CREATE_CONN.
 *
 * On failure, tear down (or arrange a retry of) the pending connection
 * object.  On success, make sure a connection object exists for the
 * peer so the later Connection Complete event has something to attach
 * to.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Status 0x0c is retried via BT_CONNECT2 while
			 * conn->attempt <= 2; any other failure closes
			 * the connection immediately.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr,
					    HCI_ROLE_MASTER);
			if (!conn)
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1426
/* Command Status handler for HCI_OP_ADD_SCO.
 *
 * Only failures are handled here: the SCO connection hanging off the
 * addressed ACL link is closed and the failed attempt reported
 * upwards.  Nothing is done on success.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	/* The command addresses the ACL link; the SCO conn is acl->link */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1461
/* Command Status handler for HCI_OP_AUTH_REQUESTED.
 *
 * Only failures are handled: if the connection is still being
 * configured, report the failed connection attempt and drop the
 * reference taken for it.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_auth_requested *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1488
/* Command Status handler for HCI_OP_SET_CONN_ENCRYPT.
 *
 * Only failures are handled: if the connection is still being
 * configured, report the failed connection attempt and drop the
 * reference taken for it.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_set_conn_encrypt *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1515
1516static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1517 struct hci_conn *conn)
1518{
1519 if (conn->state != BT_CONFIG || !conn->out)
1520 return 0;
1521
1522 if (conn->pending_sec_level == BT_SECURITY_SDP)
1523 return 0;
1524
1525 /* Only request authentication for SSP connections or non-SSP
1526 * devices with sec_level MEDIUM or HIGH or if MITM protection
1527 * is requested.
1528 */
1529 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1530 conn->pending_sec_level != BT_SECURITY_FIPS &&
1531 conn->pending_sec_level != BT_SECURITY_HIGH &&
1532 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1533 return 0;
1534
1535 return 1;
1536}
1537
1538static int hci_resolve_name(struct hci_dev *hdev,
1539 struct inquiry_entry *e)
1540{
1541 struct hci_cp_remote_name_req cp;
1542
1543 memset(&cp, 0, sizeof(cp));
1544
1545 bacpy(&cp.bdaddr, &e->data.bdaddr);
1546 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1547 cp.pscan_mode = e->data.pscan_mode;
1548 cp.clock_offset = e->data.clock_offset;
1549
1550 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1551}
1552
1553static bool hci_resolve_next_name(struct hci_dev *hdev)
1554{
1555 struct discovery_state *discov = &hdev->discovery;
1556 struct inquiry_entry *e;
1557
1558 if (list_empty(&discov->resolve))
1559 return false;
1560
1561 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1562 if (!e)
1563 return false;
1564
1565 if (hci_resolve_name(hdev, e) == 0) {
1566 e->name_state = NAME_PENDING;
1567 return true;
1568 }
1569
1570 return false;
1571}
1572
/* Handle completion (or failure, name == NULL) of a remote name lookup
 * during discovery.
 *
 * If a connection exists and has not yet been reported to mgmt, report
 * it as connected first.  Then, while name resolving is in progress,
 * mark the matching inquiry cache entry resolved and start the next
 * pending lookup, declaring discovery stopped when none remain.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in the list of devices with pending
	 * name lookups, there is no need to continue resolving a next name
	 * as it will be done upon receiving another Remote Name Request
	 * Complete Event.
	 */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1615
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only failures are handled: update the discovery name-resolving state
 * machine and, if the failed lookup was part of an outgoing connection
 * setup, fall back to requesting authentication directly.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* NULL name tells the discovery code the lookup failed */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1658
/* Command Status handler for HCI_OP_READ_REMOTE_FEATURES.
 *
 * Only failures are handled: if the connection is still being
 * configured, report the failed connection attempt and drop the
 * reference taken for it.
 */
static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1685
/* Command Status handler for HCI_OP_READ_REMOTE_EXT_FEATURES.
 *
 * Only failures are handled: if the connection is still being
 * configured, report the failed connection attempt and drop the
 * reference taken for it.
 */
static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_read_remote_ext_features *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		if (conn->state == BT_CONFIG) {
			hci_proto_connect_cfm(conn, status);
			hci_conn_drop(conn);
		}
	}

	hci_dev_unlock(hdev);
}
1712
/* Command Status handler for HCI_OP_SETUP_SYNC_CONN.
 *
 * Only failures are handled here: the synchronous (SCO/eSCO)
 * connection hanging off the addressed ACL link is closed and the
 * failed attempt reported upwards.  Nothing is done on success.
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	/* The command addresses the ACL link; the SCO conn is acl->link */
	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1747
/* Command Status handler for HCI_OP_SNIFF_MODE.
 *
 * Only failures are handled: clear the pending mode-change flag and,
 * if a SCO setup was waiting on the mode change, continue it with the
 * error status.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1774
/* Command Status handler for HCI_OP_EXIT_SNIFF_MODE.
 *
 * Only failures are handled: clear the pending mode-change flag and,
 * if a SCO setup was waiting on the mode change, continue it with the
 * error status.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1801
1802static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1803{
1804 struct hci_cp_disconnect *cp;
1805 struct hci_conn *conn;
1806
1807 if (!status)
1808 return;
1809
1810 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1811 if (!cp)
1812 return;
1813
1814 hci_dev_lock(hdev);
1815
1816 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1817 if (conn)
1818 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1819 conn->dst_type, status);
1820
1821 hci_dev_unlock(hdev);
1822}
1823
/* Command Status handler for HCI_OP_CREATE_PHY_LINK (AMP).
 *
 * On failure, delete the connection object created for the physical
 * link.  On success, continue the setup by writing the remote AMP
 * assoc data.
 */
static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_create_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	if (status) {
		struct hci_conn *hcon;

		hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
		if (hcon)
			hci_conn_del(hcon);
	} else {
		amp_write_remote_assoc(hdev, cp->phy_handle);
	}

	hci_dev_unlock(hdev);
}
1848
1849static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1850{
1851 struct hci_cp_accept_phy_link *cp;
1852
1853 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1854
1855 if (status)
1856 return;
1857
1858 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1859 if (!cp)
1860 return;
1861
1862 amp_write_remote_assoc(hdev, cp->phy_handle);
1863}
1864
/* Command Status handler for HCI_OP_LE_CREATE_CONN.
 *
 * On success, record the initiator/responder address information on
 * the pending connection (needed later by SMP) and arm a connection
 * timeout for direct (non white list) connection attempts.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* All connection failure handling is taken care of by the
	 * hci_le_conn_failed function which is triggered by the HCI
	 * request completion callbacks used for connecting.
	 */
	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
	if (!conn)
		goto unlock;

	/* Store the initiator and responder address information which
	 * is needed for SMP. These values will not change during the
	 * lifetime of the connection.
	 */
	conn->init_addr_type = cp->own_address_type;
	if (cp->own_address_type == ADDR_LE_DEV_RANDOM)
		bacpy(&conn->init_addr, &hdev->random_addr);
	else
		bacpy(&conn->init_addr, &hdev->bdaddr);

	conn->resp_addr_type = cp->peer_addr_type;
	bacpy(&conn->resp_addr, &cp->peer_addr);

	/* We don't want the connection attempt to stick around
	 * indefinitely since LE doesn't have a page timeout concept
	 * like BR/EDR. Set a timer for any connection that doesn't use
	 * the white list for connecting.
	 */
	if (cp->filter_policy == HCI_LE_USE_PEER_ADDR)
		queue_delayed_work(conn->hdev->workqueue,
				   &conn->le_conn_timeout,
				   conn->conn_timeout);

unlock:
	hci_dev_unlock(hdev);
}
1915
/* Command Status handler for HCI_OP_LE_START_ENC.
 *
 * Only failures are handled: if the link is still up, encryption could
 * not be started, so disconnect with an authentication failure.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_le_start_enc *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	hci_dev_lock(hdev);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC);
	if (!cp)
		goto unlock;

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (!conn)
		goto unlock;

	if (conn->state != BT_CONNECTED)
		goto unlock;

	hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
1945
/* Handle the HCI Inquiry Complete event.
 *
 * Clear the inquiry-in-progress flag (waking any waiters) and, when
 * mgmt-driven discovery is active, either move on to resolving the
 * names of discovered devices or declare discovery stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving the first pending name; if that fails, give up
	 * on discovery entirely.
	 */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1986
/* Handle the HCI Inquiry Result event.
 *
 * For each response in the event, update the inquiry cache and report
 * the discovered device to mgmt.  Results are ignored while periodic
 * inquiry is active.
 */
static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct inquiry_data data;
	struct inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = info->pscan_mode;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		/* Basic inquiry results carry no RSSI or SSP info */
		data.rssi = 0x00;
		data.ssp_mode = 0x00;

		flags = hci_inquiry_cache_update(hdev, &data, false);

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, 0, flags, NULL, 0, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
2023
/* Handle the HCI Connection Complete event.
 *
 * Locate (or, for SCO, re-type) the pending connection object and
 * either finish bringing it up — store the handle, set flags, kick off
 * remote feature discovery and packet type setup — or tear it down
 * with the reported error status.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* A SCO completion may answer an eSCO attempt; retry the
		 * lookup as ESCO_LINK and downgrade the type on match.
		 */
		if (ev->link_type != SCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Without SSP and without a stored link key, an
			 * incoming link is likely to pair; use the longer
			 * pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			set_bit(HCI_CONN_AUTH, &conn->flags);

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);

			hci_update_page_scan(hdev, NULL);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &conn->dst, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
2107
2108static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr)
2109{
2110 struct hci_cp_reject_conn_req cp;
2111
2112 bacpy(&cp.bdaddr, bdaddr);
2113 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
2114 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
2115}
2116
/* Handle the HCI Connection Request event.
 *
 * Apply the accept policy (protocol verdict, blacklist, connectable
 * state and whitelist), then either accept the connection — answering
 * immediately for ACL and non-deferred SCO — or hand a deferred setup
 * to the protocol layer.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	struct inquiry_entry *ie;
	struct hci_conn *conn;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if (!(mask & HCI_LM_ACCEPT)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	if (hci_bdaddr_list_lookup(&hdev->blacklist, &ev->bdaddr,
				   BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* When not connectable, only whitelisted peers may connect */
	if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
	    !hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
				    BDADDR_BREDR)) {
		hci_reject_conn(hdev, &ev->bdaddr);
		return;
	}

	/* Connection accepted */

	hci_dev_lock(hdev);

	/* Keep the inquiry cache's class of device up to date */
	ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
	if (ie)
		memcpy(ie->data.dev_class, ev->dev_class, 3);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
				       &ev->bdaddr);
	if (!conn) {
		conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
				    HCI_ROLE_SLAVE);
		if (!conn) {
			BT_ERR("No memory for new connection");
			hci_dev_unlock(hdev);
			return;
		}
	}

	memcpy(conn->dev_class, ev->dev_class, 3);

	hci_dev_unlock(hdev);

	if (ev->link_type == ACL_LINK ||
	    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
		struct hci_cp_accept_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);

		if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
			cp.role = 0x00; /* Become master */
		else
			cp.role = 0x01; /* Remain slave */

		hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp);
	} else if (!(flags & HCI_PROTO_DEFER)) {
		struct hci_cp_accept_sync_conn_req cp;
		conn->state = BT_CONNECT;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.pkt_type = cpu_to_le16(conn->pkt_type);

		cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
		cp.max_latency    = cpu_to_le16(0xffff);
		cp.content_format = cpu_to_le16(hdev->voice_setting);
		cp.retrans_effort = 0xff;

		hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp),
			     &cp);
	} else {
		/* Deferred setup: let the protocol layer decide later */
		conn->state = BT_CONNECT2;
		hci_proto_connect_cfm(conn, 0);
	}
}
2206
2207static u8 hci_to_mgmt_reason(u8 err)
2208{
2209 switch (err) {
2210 case HCI_ERROR_CONNECTION_TIMEOUT:
2211 return MGMT_DEV_DISCONN_TIMEOUT;
2212 case HCI_ERROR_REMOTE_USER_TERM:
2213 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2214 case HCI_ERROR_REMOTE_POWER_OFF:
2215 return MGMT_DEV_DISCONN_REMOTE;
2216 case HCI_ERROR_LOCAL_HOST_TERM:
2217 return MGMT_DEV_DISCONN_LOCAL_HOST;
2218 default:
2219 return MGMT_DEV_DISCONN_UNKNOWN;
2220 }
2221}
2222
/* Handle the HCI Disconnection Complete event.
 *
 * On success, notify mgmt of the disconnect, do ACL-specific cleanup
 * (link key removal, page scan update), re-arm auto-connect for
 * matching LE connection parameters, delete the connection and, for
 * LE, re-enable advertising if needed.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	u8 reason = hci_to_mgmt_reason(ev->reason);
	struct hci_conn_params *params;
	struct hci_conn *conn;
	bool mgmt_connected;
	u8 type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status) {
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, ev->status);
		goto unlock;
	}

	conn->state = BT_CLOSED;

	/* Only report a mgmt disconnect if the connect was reported */
	mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags);
	mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type,
				 reason, mgmt_connected);

	if (conn->type == ACL_LINK) {
		if (test_bit(HCI_CONN_FLUSH_KEY, &conn->flags))
			hci_remove_link_key(hdev, &conn->dst);

		hci_update_page_scan(hdev, NULL);
	}

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		switch (params->auto_connect) {
		case HCI_AUTO_CONN_LINK_LOSS:
			/* Reconnect on link loss only for actual link loss */
			if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT)
				break;
			/* Fall through */

		case HCI_AUTO_CONN_DIRECT:
		case HCI_AUTO_CONN_ALWAYS:
			list_del_init(&params->action);
			list_add(&params->action, &hdev->pend_le_conns);
			hci_update_background_scan(hdev);
			break;

		default:
			break;
		}
	}

	/* Remember the link type before the conn object is freed */
	type = conn->type;

	hci_proto_disconn_cfm(conn, ev->reason);
	hci_conn_del(conn);

	/* Re-enable advertising if necessary, since it might
	 * have been disabled by the connection. From the
	 * HCI_LE_Set_Advertise_Enable command description in
	 * the core specification (v4.0):
	 * "The Controller shall continue advertising until the Host
	 * issues an LE_Set_Advertise_Enable command with
	 * Advertising_Enable set to 0x00 (Advertising is disabled)
	 * or until a connection is created or until the Advertising
	 * is timed out due to Directed Advertising."
	 */
	if (type == LE_LINK)
		mgmt_reenable_advertising(hdev);

unlock:
	hci_dev_unlock(hdev);
}
2300
/* Handle the HCI Authentication Complete event.
 *
 * Update the connection's authentication state, report failures to
 * mgmt, finish connection setup for links still in BT_CONFIG (starting
 * encryption for SSP links), and start any encryption change that was
 * pending on authentication.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			set_bit(HCI_CONN_AUTH, &conn->flags);
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP links must be encrypted before reporting
			 * the connection as established.
			 */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2366
/* Handle the HCI Remote Name Request Complete event.
 *
 * Feed the result into the discovery name-resolving state machine
 * (when mgmt is active) and, if the lookup was part of an outgoing
 * connection setup, continue with an authentication request.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	/* A NULL name tells the discovery code the lookup failed */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2408
/* Handle the HCI Encryption Change event.
 *
 * Update the connection's encryption-related flags, disconnect on
 * encryption failure of an established link, enforce the Secure
 * Connections Only policy for links still in BT_CONFIG, and notify
 * the upper layers.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (ev->encrypt) {
			/* Encryption implies authentication */
			set_bit(HCI_CONN_AUTH, &conn->flags);
			set_bit(HCI_CONN_ENCRYPT, &conn->flags);
			conn->sec_level = conn->pending_sec_level;

			/* P-256 authentication key implies FIPS */
			if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256)
				set_bit(HCI_CONN_FIPS, &conn->flags);

			/* encrypt 0x02 means AES-CCM on BR/EDR; LE links
			 * always use AES-CCM when encrypted.
			 */
			if ((conn->type == ACL_LINK && ev->encrypt == 0x02) ||
			    conn->type == LE_LINK)
				set_bit(HCI_CONN_AES_CCM, &conn->flags);
		} else {
			clear_bit(HCI_CONN_ENCRYPT, &conn->flags);
			clear_bit(HCI_CONN_AES_CCM, &conn->flags);
		}
	}

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		/* In Secure Connections Only mode, do not allow any
		 * connections that are not encrypted with AES-CCM
		 * using a P-256 authenticated combination key.
		 */
		if (test_bit(HCI_SC_ONLY, &hdev->dev_flags) &&
		    (!test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		     conn->key_type != HCI_LK_AUTH_COMBINATION_P256)) {
			hci_proto_connect_cfm(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else
		hci_encrypt_cfm(conn, ev->status, ev->encrypt);

unlock:
	hci_dev_unlock(hdev);
}
2474
2475static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2476 struct sk_buff *skb)
2477{
2478 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2479 struct hci_conn *conn;
2480
2481 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2482
2483 hci_dev_lock(hdev);
2484
2485 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2486 if (conn) {
2487 if (!ev->status)
2488 set_bit(HCI_CONN_SECURE, &conn->flags);
2489
2490 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2491
2492 hci_key_change_cfm(conn, ev->status);
2493 }
2494
2495 hci_dev_unlock(hdev);
2496}
2497
/* Handle HCI Read Remote Supported Features Complete event.
 *
 * Caches page 0 of the remote feature mask and drives the next step of
 * connection setup: read extended features when both sides support SSP,
 * otherwise resolve the remote name or finish the BT_CONFIG stage.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	/* The remaining steps only apply while connection setup is in
	 * progress.
	 */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* No authentication needed: setup is complete. */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2547
/* Handle HCI Command Complete event.
 *
 * Dispatches to the per-opcode completion handler, cancels the command
 * timeout and, if the controller signalled that it can accept further
 * commands (ncmd), restarts the command queue work.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* The command's status byte immediately follows the event header. */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_READ_NUM_SUPPORTED_IAC:
		hci_cc_read_num_supported_iac(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SC_SUPPORT:
		hci_cc_write_sc_support(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_CLOCK:
		hci_cc_read_clock(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
		hci_cc_read_local_oob_ext_data(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_RANDOM_ADDR:
		hci_cc_le_set_random_addr(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_CLEAR_WHITE_LIST:
		hci_cc_le_clear_white_list(hdev, skb);
		break;

	case HCI_OP_LE_ADD_TO_WHITE_LIST:
		hci_cc_le_add_to_white_list(hdev, skb);
		break;

	case HCI_OP_LE_DEL_FROM_WHITE_LIST:
		hci_cc_le_del_from_white_list(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_PARAM:
		hci_cc_set_adv_param(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_RSSI:
		hci_cc_read_rssi(hdev, skb);
		break;

	case HCI_OP_READ_TX_POWER:
		hci_cc_read_tx_power(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* Completion of any real command means the controller is alive,
	 * so the command timeout can be cancelled.
	 */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* ncmd > 0: the controller can accept another command.  Kick the
	 * command work if anything is queued, unless a reset is underway.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2827
/* Handle HCI Command Status event.
 *
 * Dispatches to the per-opcode status handler, cancels the command
 * timeout and restarts the command queue work when the controller can
 * accept further commands.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A status for any real command means the controller is alive. */
	if (opcode != HCI_OP_NOP)
		cancel_delayed_work(&hdev->cmd_timer);

	/* Only complete the request here on failure, or when the pending
	 * command did not register a follow-up event to wait for.
	 */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* ncmd > 0: the controller can accept another command.  Kick the
	 * command work if anything is queued, unless a reset is underway.
	 */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2920
2921static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2922{
2923 struct hci_ev_role_change *ev = (void *) skb->data;
2924 struct hci_conn *conn;
2925
2926 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2927
2928 hci_dev_lock(hdev);
2929
2930 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2931 if (conn) {
2932 if (!ev->status)
2933 conn->role = ev->role;
2934
2935 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2936
2937 hci_role_switch_cfm(conn, ev->status, ev->role);
2938 }
2939
2940 hci_dev_unlock(hdev);
2941}
2942
/* Handle Number Of Completed Packets event.
 *
 * Credits completed packets back to the per-link-type quotas
 * (acl_cnt/le_cnt/sco_cnt, capped at the controller limits) and
 * restarts the TX work so queued traffic can proceed.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only valid for packet-based flow control. */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Reject truncated events: the header check must come first so
	 * ev->num_hndl is only read once the header is known to fit.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL quota.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3008
3009static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
3010 __u16 handle)
3011{
3012 struct hci_chan *chan;
3013
3014 switch (hdev->dev_type) {
3015 case HCI_BREDR:
3016 return hci_conn_hash_lookup_handle(hdev, handle);
3017 case HCI_AMP:
3018 chan = hci_chan_lookup_handle(hdev, handle);
3019 if (chan)
3020 return chan->conn;
3021 break;
3022 default:
3023 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
3024 break;
3025 }
3026
3027 return NULL;
3028}
3029
/* Handle Number Of Completed Data Blocks event.
 *
 * Block-based counterpart of hci_num_comp_pkts_evt(): credits completed
 * blocks back to the shared block quota and restarts the TX work.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only valid for block-based flow control. */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Header check first so ev->num_hndl is only read once the
	 * header is known to fit.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* AMP handles name channels, not connections, so the
		 * type-aware lookup is used here.
		 */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
3079
3080static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3081{
3082 struct hci_ev_mode_change *ev = (void *) skb->data;
3083 struct hci_conn *conn;
3084
3085 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3086
3087 hci_dev_lock(hdev);
3088
3089 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3090 if (conn) {
3091 conn->mode = ev->mode;
3092
3093 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
3094 &conn->flags)) {
3095 if (conn->mode == HCI_CM_ACTIVE)
3096 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3097 else
3098 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
3099 }
3100
3101 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
3102 hci_sco_setup(conn, ev->status);
3103 }
3104
3105 hci_dev_unlock(hdev);
3106}
3107
/* Handle HCI PIN Code Request event.
 *
 * Rejects the request when the device is not bondable and we did not
 * initiate authentication; otherwise forwards it to the management
 * interface so user space can supply a PIN.
 *
 * NOTE: conn must be NULL-checked before any dereference — a lookup
 * failure here is possible and dereferencing first was a real bug in
 * this function's history.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout while pairing is in progress. */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_BONDABLE, &hdev->dev_flags) &&
	    !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) {
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	} else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security requires a 16-digit PIN. */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
3145
/* Handle HCI Link Key Request event.
 *
 * Looks up a stored link key for the peer and replies with it, unless
 * the key's strength is insufficient for the pending security level, in
 * which case a negative reply forces fresh pairing.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage is only handled via the management interface. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* An unauthenticated key cannot satisfy a request for
		 * MITM-protected authentication.
		 */
		if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
		     key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* Short-PIN combination keys are too weak for high/FIPS
		 * security levels.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    (conn->pending_sec_level == BT_SECURITY_HIGH ||
		     conn->pending_sec_level == BT_SECURITY_FIPS)) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
3204
/* Handle HCI Link Key Notification event.
 *
 * Stores the new link key (when mgmt is active), notifies user space,
 * and applies the debug-key and key-persistence policies.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	struct link_key *key;
	bool persistent;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* conn may still be NULL here; presumably hci_add_link_key()
	 * accepts that — TODO confirm against its definition.
	 */
	key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key,
			       ev->key_type, pin_len, &persistent);
	if (!key)
		goto unlock;

	mgmt_new_link_key(hdev, key, persistent);

	/* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag
	 * is set. If it's not set simply remove the key from the kernel
	 * list (we've still notified user space about it but with
	 * store_hint being 0).
	 */
	if (key->type == HCI_LK_DEBUG_COMBINATION &&
	    !test_bit(HCI_KEEP_DEBUG_KEYS, &hdev->dev_flags)) {
		list_del(&key->list);
		kfree(key);
	} else if (conn) {
		if (persistent)
			clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
		else
			set_bit(HCI_CONN_FLUSH_KEY, &conn->flags);
	}

unlock:
	hci_dev_unlock(hdev);
}
3258
3259static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3260{
3261 struct hci_ev_clock_offset *ev = (void *) skb->data;
3262 struct hci_conn *conn;
3263
3264 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3265
3266 hci_dev_lock(hdev);
3267
3268 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3269 if (conn && !ev->status) {
3270 struct inquiry_entry *ie;
3271
3272 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3273 if (ie) {
3274 ie->data.clock_offset = ev->clock_offset;
3275 ie->timestamp = jiffies;
3276 }
3277 }
3278
3279 hci_dev_unlock(hdev);
3280}
3281
3282static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3283{
3284 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3285 struct hci_conn *conn;
3286
3287 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3288
3289 hci_dev_lock(hdev);
3290
3291 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3292 if (conn && !ev->status)
3293 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3294
3295 hci_dev_unlock(hdev);
3296}
3297
3298static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3299{
3300 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3301 struct inquiry_entry *ie;
3302
3303 BT_DBG("%s", hdev->name);
3304
3305 hci_dev_lock(hdev);
3306
3307 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3308 if (ie) {
3309 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3310 ie->timestamp = jiffies;
3311 }
3312
3313 hci_dev_unlock(hdev);
3314}
3315
/* Handle Inquiry Result with RSSI event.
 *
 * The event comes in two wire formats (with or without a pscan_mode
 * field); the per-response record size discriminates between them.
 * Each response updates the inquiry cache and is reported to mgmt.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not reported. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Record size mismatch means the variant carrying pscan_mode. */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			u32 flags;

			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			flags = hci_inquiry_cache_update(hdev, &data, false);

			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  flags, NULL, 0, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3379
/* Handle Read Remote Extended Features Complete event.
 *
 * Caches the requested remote feature page, derives the peer's SSP/SC
 * host support from page 1, and continues connection setup for links
 * still in BT_CONFIG.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host's SSP/SC support bits. */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}

		if (ev->features[0] & LMP_HOST_SC)
			set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* No authentication needed: setup is complete. */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3445
/* Handle Synchronous Connection Complete event.
 *
 * Completes an (e)SCO connection.  For a subset of rejection reasons
 * (while we are the initiator) the connection is retried with a
 * downgraded packet type before being given up on.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* The controller may have fallen back from eSCO to SCO;
		 * retry the lookup under the eSCO link type.
		 */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x10:	/* Connection Accept Timeout */
	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
	case 0x20:	/* Unsupported LMP Parameter value */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			/* Retry succeeded: wait for the next completion. */
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3503
3504static inline size_t eir_get_length(u8 *eir, size_t eir_len)
3505{
3506 size_t parsed = 0;
3507
3508 while (parsed < eir_len) {
3509 u8 field_len = eir[0];
3510
3511 if (field_len == 0)
3512 return parsed;
3513
3514 parsed += field_len + 1;
3515 eir += field_len + 1;
3516 }
3517
3518 return eir_len;
3519}
3520
/* Handle Extended Inquiry Result event.
 *
 * Updates the inquiry cache with each response and forwards it to mgmt
 * together with the significant part of the EIR data.
 *
 * NOTE(review): num_rsp is taken from the packet but is not validated
 * against skb->len before iterating — looks like a malformed event
 * could walk past the buffer; confirm whether an upper layer bounds it.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not reported. */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		u32 flags;
		bool name_known;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		data.ssp_mode		= 0x01;

		/* With mgmt active, a name request is only needed when
		 * the EIR data does not already carry the full name.
		 */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		flags = hci_inquiry_cache_update(hdev, &data, name_known);

		eir_len = eir_get_length(info->data, sizeof(info->data));

		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi,
				  flags, info->data, eir_len, NULL, 0);
	}

	hci_dev_unlock(hdev);
}
3570
/* Handle Encryption Key Refresh Complete event.
 *
 * For LE links this finishes the pending security procedure; a failed
 * refresh on an established link tears the connection down.  BR/EDR
 * links are handled via the auth_complete event instead.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* For BR/EDR the necessary steps are taken through the
	 * auth_complete event.
	 */
	if (conn->type != LE_LINK)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3620
3621static u8 hci_get_auth_req(struct hci_conn *conn)
3622{
3623 /* If remote requests no-bonding follow that lead */
3624 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3625 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3626 return conn->remote_auth | (conn->auth_type & 0x01);
3627
3628 /* If both remote and local have enough IO capabilities, require
3629 * MITM protection
3630 */
3631 if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT &&
3632 conn->io_capability != HCI_IO_NO_INPUT_OUTPUT)
3633 return conn->remote_auth | 0x01;
3634
3635 /* No MITM protection possible so ignore remote requirement */
3636 return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01);
3637}
3638
/* Handle HCI IO Capability Request event: answer with an IO Capability
 * Reply carrying our capability, authentication requirements and OOB
 * availability, or reject the pairing with a negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep a reference on the connection for the pairing procedure */
	hci_conn_hold(conn);

	/* Without mgmt there is no pairing policy to apply here */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Allow pairing if we're pairable, the initiators of the
	 * pairing or if the remote is not requesting bonding.
	 */
	if (test_bit(HCI_BONDABLE, &hdev->dev_flags) ||
	    test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;

		/* If we are initiators, there is no remote information yet */
		if (conn->remote_auth == 0xff) {
			/* Request MITM protection if our IO caps allow it
			 * except for the no-bonding case.
			 */
			if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
			    conn->auth_type != HCI_AT_NO_BONDING)
				conn->auth_type |= 0x01;
		} else {
			conn->auth_type = hci_get_auth_req(conn);
		}

		/* If we're not bondable, force one of the non-bondable
		 * authentication requirement values.
		 */
		if (!test_bit(HCI_BONDABLE, &hdev->dev_flags))
			conn->auth_type &= HCI_AT_NO_BONDING_MITM;

		cp.authentication = conn->auth_type;

		/* Advertise OOB data only when we initiated the connection
		 * or the remote indicated it has OOB data as well.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3712
3713static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3714{
3715 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3716 struct hci_conn *conn;
3717
3718 BT_DBG("%s", hdev->name);
3719
3720 hci_dev_lock(hdev);
3721
3722 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3723 if (!conn)
3724 goto unlock;
3725
3726 conn->remote_cap = ev->capability;
3727 conn->remote_auth = ev->authentication;
3728 if (ev->oob_data)
3729 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3730
3731unlock:
3732 hci_dev_unlock(hdev);
3733}
3734
/* Handle HCI User Confirmation Request event.
 *
 * Decides between rejecting the request (MITM required but remote has
 * NoInputNoOutput), auto-accepting (immediately or after a configured
 * delay), or forwarding the numeric-comparison request to user space
 * via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirement encodes the MITM requirement */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. We check the security level here since it doesn't
	 * necessarily match conn->auth_type.
	 */
	if (conn->pending_sec_level > BT_SECURITY_MEDIUM &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). The exception is if neither
		 * side had MITM or if the local IO capability is
		 * NoInputNoOutput, in which case we do auto-accept
		 */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
		    conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
		    (loc_mitm || rem_mitm)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Deferred auto-accept via the connection's delayed work */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			queue_delayed_work(conn->hdev->workqueue,
					   &conn->auto_accept_work, delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0,
				  le32_to_cpu(ev->passkey), confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3809
3810static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3811 struct sk_buff *skb)
3812{
3813 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3814
3815 BT_DBG("%s", hdev->name);
3816
3817 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3818 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3819}
3820
3821static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3822 struct sk_buff *skb)
3823{
3824 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3825 struct hci_conn *conn;
3826
3827 BT_DBG("%s", hdev->name);
3828
3829 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3830 if (!conn)
3831 return;
3832
3833 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3834 conn->passkey_entered = 0;
3835
3836 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3837 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3838 conn->dst_type, conn->passkey_notify,
3839 conn->passkey_entered);
3840}
3841
3842static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3843{
3844 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3845 struct hci_conn *conn;
3846
3847 BT_DBG("%s", hdev->name);
3848
3849 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3850 if (!conn)
3851 return;
3852
3853 switch (ev->type) {
3854 case HCI_KEYPRESS_STARTED:
3855 conn->passkey_entered = 0;
3856 return;
3857
3858 case HCI_KEYPRESS_ENTERED:
3859 conn->passkey_entered++;
3860 break;
3861
3862 case HCI_KEYPRESS_ERASED:
3863 conn->passkey_entered--;
3864 break;
3865
3866 case HCI_KEYPRESS_CLEARED:
3867 conn->passkey_entered = 0;
3868 break;
3869
3870 case HCI_KEYPRESS_COMPLETED:
3871 return;
3872 }
3873
3874 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3875 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3876 conn->dst_type, conn->passkey_notify,
3877 conn->passkey_entered);
3878}
3879
/* Handle HCI Simple Pairing Complete event: reset the cached remote
 * authentication requirement, report failures to mgmt (unless an
 * auth_complete event will do so), and release the reference taken
 * when pairing started.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Reset the authentication requirement to unknown */
	conn->remote_auth = 0xff;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	/* Drop the reference taken in hci_io_capa_request_evt() */
	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
3911
3912static void hci_remote_host_features_evt(struct hci_dev *hdev,
3913 struct sk_buff *skb)
3914{
3915 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3916 struct inquiry_entry *ie;
3917 struct hci_conn *conn;
3918
3919 BT_DBG("%s", hdev->name);
3920
3921 hci_dev_lock(hdev);
3922
3923 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3924 if (conn)
3925 memcpy(conn->features[1], ev->features, 8);
3926
3927 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3928 if (ie)
3929 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3930
3931 hci_dev_unlock(hdev);
3932}
3933
/* Handle HCI Remote OOB Data Request event: reply with the stored OOB
 * hash/randomizer values for the device (extended variant when Secure
 * Connections is enabled), or send a negative reply if none are known.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* OOB data is only collected via mgmt; nothing to answer with */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
			/* Secure Connections: send both the P-192 and
			 * P-256 values in the extended reply.
			 */
			struct hci_cp_remote_oob_ext_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
			memcpy(cp.randomizer192, data->randomizer192,
			       sizeof(cp.randomizer192));
			memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
			memcpy(cp.randomizer256, data->randomizer256,
			       sizeof(cp.randomizer256));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
				     sizeof(cp), &cp);
		} else {
			/* Legacy pairing: P-192 values only */
			struct hci_cp_remote_oob_data_reply cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			memcpy(cp.hash, data->hash192, sizeof(cp.hash));
			memcpy(cp.randomizer, data->randomizer192,
			       sizeof(cp.randomizer));

			hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
				     sizeof(cp), &cp);
		}
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3984
3985static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3986 struct sk_buff *skb)
3987{
3988 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3989 struct hci_conn *hcon, *bredr_hcon;
3990
3991 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3992 ev->status);
3993
3994 hci_dev_lock(hdev);
3995
3996 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3997 if (!hcon) {
3998 hci_dev_unlock(hdev);
3999 return;
4000 }
4001
4002 if (ev->status) {
4003 hci_conn_del(hcon);
4004 hci_dev_unlock(hdev);
4005 return;
4006 }
4007
4008 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
4009
4010 hcon->state = BT_CONNECTED;
4011 bacpy(&hcon->dst, &bredr_hcon->dst);
4012
4013 hci_conn_hold(hcon);
4014 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
4015 hci_conn_drop(hcon);
4016
4017 hci_conn_add_sysfs(hcon);
4018
4019 amp_physical_cfm(bredr_hcon, hcon);
4020
4021 hci_dev_unlock(hdev);
4022}
4023
/* Handle HCI Logical Link Complete event (AMP): create the hci_chan
 * for the new logical link and, if a BR/EDR L2CAP channel is waiting
 * on the AMP manager, confirm the logical link to L2CAP.
 *
 * NOTE(review): unlike the other handlers in this file, no
 * hci_dev_lock() is taken around the connection/channel manipulation
 * here — confirm whether that is intentional.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* The AMP link uses the controller's block MTU */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
4061
4062static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
4063 struct sk_buff *skb)
4064{
4065 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
4066 struct hci_chan *hchan;
4067
4068 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
4069 le16_to_cpu(ev->handle), ev->status);
4070
4071 if (ev->status)
4072 return;
4073
4074 hci_dev_lock(hdev);
4075
4076 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
4077 if (!hchan)
4078 goto unlock;
4079
4080 amp_destroy_logical_link(hchan, ev->reason);
4081
4082unlock:
4083 hci_dev_unlock(hdev);
4084}
4085
4086static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
4087 struct sk_buff *skb)
4088{
4089 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
4090 struct hci_conn *hcon;
4091
4092 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4093
4094 if (ev->status)
4095 return;
4096
4097 hci_dev_lock(hdev);
4098
4099 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4100 if (hcon) {
4101 hcon->state = BT_CLOSED;
4102 hci_conn_del(hcon);
4103 }
4104
4105 hci_dev_unlock(hdev);
4106}
4107
/* Handle LE Connection Complete event.
 *
 * Finds or creates the hci_conn for the new LE link, fills in the
 * initiator/responder address information, resolves the peer's RPA to
 * an identity address where possible, and on success marks the
 * connection established and notifies mgmt and the upper layers.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	u8 addr_type;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* All controllers implicitly stop advertising in the event of a
	 * connection, so ensure that the state bit is cleared.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr, ev->role);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* If we didn't have a hci_conn object previously
		 * but we're in master role this must be something
		 * initiated using a white list. Since white list based
		 * connections are not "first class citizens" we don't
		 * have full tracking of them. Therefore, we go ahead
		 * with a "best effort" approach of determining the
		 * initiator address based on the HCI_PRIVACY flag.
		 */
		if (conn->out) {
			conn->resp_addr_type = ev->bdaddr_type;
			bacpy(&conn->resp_addr, &ev->bdaddr);
			if (test_bit(HCI_PRIVACY, &hdev->dev_flags)) {
				conn->init_addr_type = ADDR_LE_DEV_RANDOM;
				bacpy(&conn->init_addr, &hdev->rpa);
			} else {
				hci_copy_identity_address(hdev,
							  &conn->init_addr,
							  &conn->init_addr_type);
			}
		}
	} else {
		/* The connection attempt we were tracking completed;
		 * stop the connection-attempt timeout.
		 */
		cancel_delayed_work(&conn->le_conn_timeout);
	}

	if (!conn->out) {
		/* Set the responder (our side) address type based on
		 * the advertising address type.
		 */
		conn->resp_addr_type = hdev->adv_addr_type;
		if (hdev->adv_addr_type == ADDR_LE_DEV_RANDOM)
			bacpy(&conn->resp_addr, &hdev->random_addr);
		else
			bacpy(&conn->resp_addr, &hdev->bdaddr);

		conn->init_addr_type = ev->bdaddr_type;
		bacpy(&conn->init_addr, &ev->bdaddr);

		/* For incoming connections, set the default minimum
		 * and maximum connection interval. They will be used
		 * to check if the parameters are in range and if not
		 * trigger the connection update procedure.
		 */
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* Lookup the identity address from the stored connection
	 * address and address type.
	 *
	 * When establishing connections to an identity address, the
	 * connection procedure will store the resolvable random
	 * address first. Now if it can be converted back into the
	 * identity address, start using the identity address from
	 * now on.
	 */
	irk = hci_get_irk(hdev, &conn->dst, conn->dst_type);
	if (irk) {
		bacpy(&conn->dst, &irk->bdaddr);
		conn->dst_type = irk->addr_type;
	}

	if (ev->status) {
		hci_le_conn_failed(conn, ev->status);
		goto unlock;
	}

	if (conn->dst_type == ADDR_LE_DEV_PUBLIC)
		addr_type = BDADDR_LE_PUBLIC;
	else
		addr_type = BDADDR_LE_RANDOM;

	/* Drop the connection if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &conn->dst, addr_type)) {
		hci_conn_drop(conn);
		goto unlock;
	}

	/* Notify mgmt only once per connection */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	/* Cache the connection parameters reported by the controller */
	conn->le_conn_interval = le16_to_cpu(ev->interval);
	conn->le_conn_latency = le16_to_cpu(ev->latency);
	conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

	/* If this connection was triggered by a pending auto-connect
	 * entry, release the reference that entry was holding.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params) {
		list_del_init(&params->action);
		if (params->conn) {
			hci_conn_drop(params->conn);
			hci_conn_put(params->conn);
			params->conn = NULL;
		}
	}

unlock:
	hci_update_background_scan(hdev);
	hci_dev_unlock(hdev);
}
4243
4244static void hci_le_conn_update_complete_evt(struct hci_dev *hdev,
4245 struct sk_buff *skb)
4246{
4247 struct hci_ev_le_conn_update_complete *ev = (void *) skb->data;
4248 struct hci_conn *conn;
4249
4250 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
4251
4252 if (ev->status)
4253 return;
4254
4255 hci_dev_lock(hdev);
4256
4257 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
4258 if (conn) {
4259 conn->le_conn_interval = le16_to_cpu(ev->interval);
4260 conn->le_conn_latency = le16_to_cpu(ev->latency);
4261 conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout);
4262 }
4263
4264 hci_dev_unlock(hdev);
4265}
4266
/* This function requires the caller holds hdev->lock */
/* Given an advertising report, initiate a pending auto-connection to
 * the advertiser if one has been requested via pend_le_conns.
 */
static void check_pending_le_conn(struct hci_dev *hdev, bdaddr_t *addr,
				  u8 addr_type, u8 adv_type)
{
	struct hci_conn *conn;
	struct hci_conn_params *params;

	/* If the event is not connectable don't proceed further */
	if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND)
		return;

	/* Ignore if the device is blocked */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, addr, addr_type))
		return;

	/* Most controller will fail if we try to create new connections
	 * while we have an existing one in slave role.
	 */
	if (hdev->conn_hash.le_num_slave > 0)
		return;

	/* If we're not connectable only connect devices that we have in
	 * our pend_le_conns list.
	 */
	params = hci_pend_le_action_lookup(&hdev->pend_le_conns,
					   addr, addr_type);
	if (!params)
		return;

	switch (params->auto_connect) {
	case HCI_AUTO_CONN_DIRECT:
		/* Only devices advertising with ADV_DIRECT_IND are
		 * triggering a connection attempt. This is allowing
		 * incoming connections from slave devices.
		 */
		if (adv_type != LE_ADV_DIRECT_IND)
			return;
		break;
	case HCI_AUTO_CONN_ALWAYS:
		/* Devices advertising with ADV_IND or ADV_DIRECT_IND
		 * are triggering a connection attempt. This means
		 * that incoming connectioms from slave device are
		 * accepted and also outgoing connections to slave
		 * devices are established when found.
		 */
		break;
	default:
		return;
	}

	conn = hci_connect_le(hdev, addr, addr_type, BT_SECURITY_LOW,
			      HCI_LE_AUTOCONN_TIMEOUT, HCI_ROLE_MASTER);
	if (!IS_ERR(conn)) {
		/* Store the pointer since we don't really have any
		 * other owner of the object besides the params that
		 * triggered it. This way we can abort the connection if
		 * the parameters get removed and keep the reference
		 * count consistent once the connection is established.
		 */
		params->conn = hci_conn_get(conn);
		return;
	}

	switch (PTR_ERR(conn)) {
	case -EBUSY:
		/* If hci_connect() returns -EBUSY it means there is already
		 * an LE connection attempt going on. Since controllers don't
		 * support more than one connection attempt at the time, we
		 * don't consider this an error case.
		 */
		break;
	default:
		BT_DBG("Failed to connect: err %ld", PTR_ERR(conn));
	}
}
4342
/* Process a single LE advertising report.
 *
 * Resolves the advertiser's RPA to an identity address when an IRK is
 * known, triggers any pending auto-connect attempt, and generates mgmt
 * Device Found events — merging an ADV_IND/ADV_SCAN_IND report with
 * its subsequent SCAN_RSP where possible.
 */
static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,
			       u8 bdaddr_type, s8 rssi, u8 *data, u8 len)
{
	struct discovery_state *d = &hdev->discovery;
	struct smp_irk *irk;
	bool match;
	u32 flags;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	/* Check if we have been requested to connect to this device */
	check_pending_le_conn(hdev, bdaddr, bdaddr_type, type);

	/* Passive scanning shouldn't trigger any device found events,
	 * except for devices marked as CONN_REPORT for which we do send
	 * device found events.
	 */
	if (hdev->le_scan_type == LE_SCAN_PASSIVE) {
		if (type == LE_ADV_DIRECT_IND)
			return;

		if (!hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       bdaddr, bdaddr_type))
			return;

		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND)
			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
		else
			flags = 0;
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* When receiving non-connectable or scannable undirected
	 * advertising reports, this means that the remote device is
	 * not connectable and then clearly indicate this in the
	 * device found event.
	 *
	 * When receiving a scan response, then there is no way to
	 * know if the remote device is connectable or not. However
	 * since scan responses are merged with a previously seen
	 * advertising report, the flags field from that report
	 * will be used.
	 *
	 * In the really unlikely case that a controller get confused
	 * and just sends a scan response event, then it is marked as
	 * not connectable as well.
	 */
	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND ||
	    type == LE_ADV_SCAN_RSP)
		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE;
	else
		flags = 0;

	/* If there's nothing pending either store the data from this
	 * event or send an immediate device found event if the data
	 * should not be stored for later.
	 */
	if (!has_pending_adv_report(hdev)) {
		/* If the report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* Check if the pending report is for the same device as the new one */
	match = (!bacmp(bdaddr, &d->last_adv_addr) &&
		 bdaddr_type == d->last_adv_addr_type);

	/* If the pending data doesn't match this report or this isn't a
	 * scan response (e.g. we got a duplicate ADV_IND) then force
	 * sending of the pending data.
	 */
	if (type != LE_ADV_SCAN_RSP || !match) {
		/* Send out whatever is in the cache, but skip duplicates */
		if (!match)
			mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
					  d->last_adv_addr_type, NULL,
					  d->last_adv_rssi, d->last_adv_flags,
					  d->last_adv_data,
					  d->last_adv_data_len, NULL, 0);

		/* If the new report will trigger a SCAN_REQ store it for
		 * later merging.
		 */
		if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) {
			store_pending_adv_report(hdev, bdaddr, bdaddr_type,
						 rssi, flags, data, len);
			return;
		}

		/* The advertising reports cannot be merged, so clear
		 * the pending report and send out a device found event.
		 */
		clear_pending_adv_report(hdev);
		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL,
				  rssi, flags, data, len, NULL, 0);
		return;
	}

	/* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and
	 * the new event is a SCAN_RSP. We can therefore proceed with
	 * sending a merged device found event.
	 */
	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,
			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags,
			  d->last_adv_data, d->last_adv_data_len, data, len);
	clear_pending_adv_report(hdev);
}
4466
4467static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
4468{
4469 u8 num_reports = skb->data[0];
4470 void *ptr = &skb->data[1];
4471
4472 hci_dev_lock(hdev);
4473
4474 while (num_reports--) {
4475 struct hci_ev_le_advertising_info *ev = ptr;
4476 s8 rssi;
4477
4478 rssi = ev->data[ev->length];
4479 process_adv_report(hdev, ev->evt_type, &ev->bdaddr,
4480 ev->bdaddr_type, rssi, ev->data, ev->length);
4481
4482 ptr += sizeof(*ev) + ev->length + 1;
4483 }
4484
4485 hci_dev_unlock(hdev);
4486}
4487
/* Handle LE Long Term Key Request event: look up the LTK matching the
 * connection and the controller-supplied EDiv/Rand, reply with it and
 * update the pending security state, or send a negative reply if no
 * key is known.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->rand, conn->role);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* Security level becomes effective once encryption completes */
	if (ltk->authenticated)
		conn->pending_sec_level = BT_SECURITY_HIGH;
	else
		conn->pending_sec_level = BT_SECURITY_MEDIUM;

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a
	 * temporary key used to encrypt a connection following
	 * pairing. It is used during the Encrypted Session Setup to
	 * distribute the keys. Later, security can be re-established
	 * using a distributed LTK.
	 */
	if (ltk->type == SMP_STK) {
		set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
		list_del(&ltk->list);
		kfree(ltk);
	} else {
		clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
4543
4544static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle,
4545 u8 reason)
4546{
4547 struct hci_cp_le_conn_param_req_neg_reply cp;
4548
4549 cp.handle = cpu_to_le16(handle);
4550 cp.reason = reason;
4551
4552 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp),
4553 &cp);
4554}
4555
4556static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev,
4557 struct sk_buff *skb)
4558{
4559 struct hci_ev_le_remote_conn_param_req *ev = (void *) skb->data;
4560 struct hci_cp_le_conn_param_req_reply cp;
4561 struct hci_conn *hcon;
4562 u16 handle, min, max, latency, timeout;
4563
4564 handle = le16_to_cpu(ev->handle);
4565 min = le16_to_cpu(ev->interval_min);
4566 max = le16_to_cpu(ev->interval_max);
4567 latency = le16_to_cpu(ev->latency);
4568 timeout = le16_to_cpu(ev->timeout);
4569
4570 hcon = hci_conn_hash_lookup_handle(hdev, handle);
4571 if (!hcon || hcon->state != BT_CONNECTED)
4572 return send_conn_param_neg_reply(hdev, handle,
4573 HCI_ERROR_UNKNOWN_CONN_ID);
4574
4575 if (hci_check_conn_params(min, max, latency, timeout))
4576 return send_conn_param_neg_reply(hdev, handle,
4577 HCI_ERROR_INVALID_LL_PARAMS);
4578
4579 if (hcon->role == HCI_ROLE_MASTER) {
4580 struct hci_conn_params *params;
4581 u8 store_hint;
4582
4583 hci_dev_lock(hdev);
4584
4585 params = hci_conn_params_lookup(hdev, &hcon->dst,
4586 hcon->dst_type);
4587 if (params) {
4588 params->conn_min_interval = min;
4589 params->conn_max_interval = max;
4590 params->conn_latency = latency;
4591 params->supervision_timeout = timeout;
4592 store_hint = 0x01;
4593 } else{
4594 store_hint = 0x00;
4595 }
4596
4597 hci_dev_unlock(hdev);
4598
4599 mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type,
4600 store_hint, min, max, latency, timeout);
4601 }
4602
4603 cp.handle = ev->handle;
4604 cp.interval_min = ev->interval_min;
4605 cp.interval_max = ev->interval_max;
4606 cp.latency = ev->latency;
4607 cp.timeout = ev->timeout;
4608 cp.min_ce_len = 0;
4609 cp.max_ce_len = 0;
4610
4611 hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp);
4612}
4613
4614static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
4615{
4616 struct hci_ev_le_meta *le_ev = (void *) skb->data;
4617
4618 skb_pull(skb, sizeof(*le_ev));
4619
4620 switch (le_ev->subevent) {
4621 case HCI_EV_LE_CONN_COMPLETE:
4622 hci_le_conn_complete_evt(hdev, skb);
4623 break;
4624
4625 case HCI_EV_LE_CONN_UPDATE_COMPLETE:
4626 hci_le_conn_update_complete_evt(hdev, skb);
4627 break;
4628
4629 case HCI_EV_LE_ADVERTISING_REPORT:
4630 hci_le_adv_report_evt(hdev, skb);
4631 break;
4632
4633 case HCI_EV_LE_LTK_REQ:
4634 hci_le_ltk_request_evt(hdev, skb);
4635 break;
4636
4637 case HCI_EV_LE_REMOTE_CONN_PARAM_REQ:
4638 hci_le_remote_conn_param_req_evt(hdev, skb);
4639 break;
4640
4641 default:
4642 break;
4643 }
4644}
4645
4646static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
4647{
4648 struct hci_ev_channel_selected *ev = (void *) skb->data;
4649 struct hci_conn *hcon;
4650
4651 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
4652
4653 skb_pull(skb, sizeof(*ev));
4654
4655 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
4656 if (!hcon)
4657 return;
4658
4659 amp_read_loc_assoc_final_data(hdev, hcon);
4660}
4661
/* Main HCI event dispatcher: called for every HCI event packet received
 * from the controller.  Optionally snapshots the event for a pending
 * synchronous request, completes the matching sent command if this event
 * is the one it was waiting for, then strips the event header and hands
 * the payload to the per-event handler.  Consumes the skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hci_req_pending(hdev)) {
		/* Replace any previously stored event; clone so the original
		 * skb can still be consumed by the handler below.
		 */
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	/* Advance past the event header so handlers see their payload. */
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the sent command was waiting for this specific event (rather
	 * than the usual Command Complete/Status), complete it now, before
	 * the event is dispatched.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	/* Handlers borrow the skb; it is freed (and accounted) here. */
	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
This page took 0.059345 seconds and 5 git commands to generate.