Bluetooth: Remove use_debug_keys debugfs entry
net/bluetooth/hci_event.c
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI event handling. */
26
27#include <asm/unaligned.h>
28
29#include <net/bluetooth/bluetooth.h>
30#include <net/bluetooth/hci_core.h>
31#include <net/bluetooth/mgmt.h>
32
33#include "a2mp.h"
34#include "amp.h"
35
36/* Handle HCI Event packets */
37
38static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
39{
40 __u8 status = *((__u8 *) skb->data);
41
42 BT_DBG("%s status 0x%2.2x", hdev->name, status);
43
44 if (status)
45 return;
46
47 clear_bit(HCI_INQUIRY, &hdev->flags);
48 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
49 wake_up_bit(&hdev->flags, HCI_INQUIRY);
50
51 hci_conn_check_pending(hdev);
52}
53
54static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
55{
56 __u8 status = *((__u8 *) skb->data);
57
58 BT_DBG("%s status 0x%2.2x", hdev->name, status);
59
60 if (status)
61 return;
62
63 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
64}
65
66static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
67{
68 __u8 status = *((__u8 *) skb->data);
69
70 BT_DBG("%s status 0x%2.2x", hdev->name, status);
71
72 if (status)
73 return;
74
75 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
76
77 hci_conn_check_pending(hdev);
78}
79
80static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
81 struct sk_buff *skb)
82{
83 BT_DBG("%s", hdev->name);
84}
85
86static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
87{
88 struct hci_rp_role_discovery *rp = (void *) skb->data;
89 struct hci_conn *conn;
90
91 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
92
93 if (rp->status)
94 return;
95
96 hci_dev_lock(hdev);
97
98 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
99 if (conn) {
100 if (rp->role)
101 conn->link_mode &= ~HCI_LM_MASTER;
102 else
103 conn->link_mode |= HCI_LM_MASTER;
104 }
105
106 hci_dev_unlock(hdev);
107}
108
109static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
110{
111 struct hci_rp_read_link_policy *rp = (void *) skb->data;
112 struct hci_conn *conn;
113
114 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
115
116 if (rp->status)
117 return;
118
119 hci_dev_lock(hdev);
120
121 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
122 if (conn)
123 conn->link_policy = __le16_to_cpu(rp->policy);
124
125 hci_dev_unlock(hdev);
126}
127
128static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
129{
130 struct hci_rp_write_link_policy *rp = (void *) skb->data;
131 struct hci_conn *conn;
132 void *sent;
133
134 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
135
136 if (rp->status)
137 return;
138
139 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
140 if (!sent)
141 return;
142
143 hci_dev_lock(hdev);
144
145 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
146 if (conn)
147 conn->link_policy = get_unaligned_le16(sent + 2);
148
149 hci_dev_unlock(hdev);
150}
151
152static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
153 struct sk_buff *skb)
154{
155 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
156
157 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
158
159 if (rp->status)
160 return;
161
162 hdev->link_policy = __le16_to_cpu(rp->policy);
163}
164
165static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
166 struct sk_buff *skb)
167{
168 __u8 status = *((__u8 *) skb->data);
169 void *sent;
170
171 BT_DBG("%s status 0x%2.2x", hdev->name, status);
172
173 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
174 if (!sent)
175 return;
176
177 if (!status)
178 hdev->link_policy = get_unaligned_le16(sent);
179}
180
181static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
182{
183 __u8 status = *((__u8 *) skb->data);
184
185 BT_DBG("%s status 0x%2.2x", hdev->name, status);
186
187 clear_bit(HCI_RESET, &hdev->flags);
188
189 /* Reset all non-persistent flags */
190 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
191
192 hdev->discovery.state = DISCOVERY_STOPPED;
193 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
194 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
195
196 memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
197 hdev->adv_data_len = 0;
198
199 memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data));
200 hdev->scan_rsp_data_len = 0;
201
202 hdev->ssp_debug_mode = 0;
203}
204
205static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
206{
207 __u8 status = *((__u8 *) skb->data);
208 void *sent;
209
210 BT_DBG("%s status 0x%2.2x", hdev->name, status);
211
212 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
213 if (!sent)
214 return;
215
216 hci_dev_lock(hdev);
217
218 if (test_bit(HCI_MGMT, &hdev->dev_flags))
219 mgmt_set_local_name_complete(hdev, sent, status);
220 else if (!status)
221 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
222
223 hci_dev_unlock(hdev);
224}
225
226static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
227{
228 struct hci_rp_read_local_name *rp = (void *) skb->data;
229
230 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
231
232 if (rp->status)
233 return;
234
235 if (test_bit(HCI_SETUP, &hdev->dev_flags))
236 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
237}
238
239static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
240{
241 __u8 status = *((__u8 *) skb->data);
242 void *sent;
243
244 BT_DBG("%s status 0x%2.2x", hdev->name, status);
245
246 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
247 if (!sent)
248 return;
249
250 if (!status) {
251 __u8 param = *((__u8 *) sent);
252
253 if (param == AUTH_ENABLED)
254 set_bit(HCI_AUTH, &hdev->flags);
255 else
256 clear_bit(HCI_AUTH, &hdev->flags);
257 }
258
259 if (test_bit(HCI_MGMT, &hdev->dev_flags))
260 mgmt_auth_enable_complete(hdev, status);
261}
262
263static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
264{
265 __u8 status = *((__u8 *) skb->data);
266 void *sent;
267
268 BT_DBG("%s status 0x%2.2x", hdev->name, status);
269
270 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
271 if (!sent)
272 return;
273
274 if (!status) {
275 __u8 param = *((__u8 *) sent);
276
277 if (param)
278 set_bit(HCI_ENCRYPT, &hdev->flags);
279 else
280 clear_bit(HCI_ENCRYPT, &hdev->flags);
281 }
282}
283
284static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
285{
286 __u8 param, status = *((__u8 *) skb->data);
287 int old_pscan, old_iscan;
288 void *sent;
289
290 BT_DBG("%s status 0x%2.2x", hdev->name, status);
291
292 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
293 if (!sent)
294 return;
295
296 param = *((__u8 *) sent);
297
298 hci_dev_lock(hdev);
299
300 if (status) {
301 mgmt_write_scan_failed(hdev, param, status);
302 hdev->discov_timeout = 0;
303 goto done;
304 }
305
306 /* We need to ensure that we set this back on if someone changed
307 * the scan mode through a raw HCI socket.
308 */
309 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
310
311 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
312 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
313
314 if (param & SCAN_INQUIRY) {
315 set_bit(HCI_ISCAN, &hdev->flags);
316 if (!old_iscan)
317 mgmt_discoverable(hdev, 1);
318 } else if (old_iscan)
319 mgmt_discoverable(hdev, 0);
320
321 if (param & SCAN_PAGE) {
322 set_bit(HCI_PSCAN, &hdev->flags);
323 if (!old_pscan)
324 mgmt_connectable(hdev, 1);
325 } else if (old_pscan)
326 mgmt_connectable(hdev, 0);
327
328done:
329 hci_dev_unlock(hdev);
330}
331
332static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
333{
334 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
335
336 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
337
338 if (rp->status)
339 return;
340
341 memcpy(hdev->dev_class, rp->dev_class, 3);
342
343 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
344 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
345}
346
347static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
348{
349 __u8 status = *((__u8 *) skb->data);
350 void *sent;
351
352 BT_DBG("%s status 0x%2.2x", hdev->name, status);
353
354 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
355 if (!sent)
356 return;
357
358 hci_dev_lock(hdev);
359
360 if (status == 0)
361 memcpy(hdev->dev_class, sent, 3);
362
363 if (test_bit(HCI_MGMT, &hdev->dev_flags))
364 mgmt_set_class_of_dev_complete(hdev, sent, status);
365
366 hci_dev_unlock(hdev);
367}
368
369static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
370{
371 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
372 __u16 setting;
373
374 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
375
376 if (rp->status)
377 return;
378
379 setting = __le16_to_cpu(rp->voice_setting);
380
381 if (hdev->voice_setting == setting)
382 return;
383
384 hdev->voice_setting = setting;
385
386 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
387
388 if (hdev->notify)
389 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
390}
391
392static void hci_cc_write_voice_setting(struct hci_dev *hdev,
393 struct sk_buff *skb)
394{
395 __u8 status = *((__u8 *) skb->data);
396 __u16 setting;
397 void *sent;
398
399 BT_DBG("%s status 0x%2.2x", hdev->name, status);
400
401 if (status)
402 return;
403
404 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
405 if (!sent)
406 return;
407
408 setting = get_unaligned_le16(sent);
409
410 if (hdev->voice_setting == setting)
411 return;
412
413 hdev->voice_setting = setting;
414
415 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
416
417 if (hdev->notify)
418 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
419}
420
421static void hci_cc_read_num_supported_iac(struct hci_dev *hdev,
422 struct sk_buff *skb)
423{
424 struct hci_rp_read_num_supported_iac *rp = (void *) skb->data;
425
426 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
427
428 if (rp->status)
429 return;
430
431 hdev->num_iac = rp->num_iac;
432
433 BT_DBG("%s num iac %d", hdev->name, hdev->num_iac);
434}
435
436static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
437{
438 __u8 status = *((__u8 *) skb->data);
439 struct hci_cp_write_ssp_mode *sent;
440
441 BT_DBG("%s status 0x%2.2x", hdev->name, status);
442
443 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
444 if (!sent)
445 return;
446
447 if (!status) {
448 if (sent->mode)
449 hdev->features[1][0] |= LMP_HOST_SSP;
450 else
451 hdev->features[1][0] &= ~LMP_HOST_SSP;
452 }
453
454 if (test_bit(HCI_MGMT, &hdev->dev_flags))
455 mgmt_ssp_enable_complete(hdev, sent->mode, status);
456 else if (!status) {
457 if (sent->mode)
458 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
459 else
460 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
461 }
462}
463
464static void hci_cc_write_sc_support(struct hci_dev *hdev, struct sk_buff *skb)
465{
466 u8 status = *((u8 *) skb->data);
467 struct hci_cp_write_sc_support *sent;
468
469 BT_DBG("%s status 0x%2.2x", hdev->name, status);
470
471 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT);
472 if (!sent)
473 return;
474
475 if (!status) {
476 if (sent->support)
477 hdev->features[1][0] |= LMP_HOST_SC;
478 else
479 hdev->features[1][0] &= ~LMP_HOST_SC;
480 }
481
482 if (test_bit(HCI_MGMT, &hdev->dev_flags))
483 mgmt_sc_enable_complete(hdev, sent->support, status);
484 else if (!status) {
485 if (sent->support)
486 set_bit(HCI_SC_ENABLED, &hdev->dev_flags);
487 else
488 clear_bit(HCI_SC_ENABLED, &hdev->dev_flags);
489 }
490}
491
492static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
493{
494 struct hci_rp_read_local_version *rp = (void *) skb->data;
495
496 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
497
498 if (rp->status)
499 return;
500
501 if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
502 hdev->hci_ver = rp->hci_ver;
503 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
504 hdev->lmp_ver = rp->lmp_ver;
505 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
506 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
507 }
508}
509
510static void hci_cc_read_local_commands(struct hci_dev *hdev,
511 struct sk_buff *skb)
512{
513 struct hci_rp_read_local_commands *rp = (void *) skb->data;
514
515 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
516
517 if (rp->status)
518 return;
519
520 if (test_bit(HCI_SETUP, &hdev->dev_flags))
521 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
522}
523
524static void hci_cc_read_local_features(struct hci_dev *hdev,
525 struct sk_buff *skb)
526{
527 struct hci_rp_read_local_features *rp = (void *) skb->data;
528
529 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
530
531 if (rp->status)
532 return;
533
534 memcpy(hdev->features, rp->features, 8);
535
536 /* Adjust default settings according to features
537 * supported by device. */
538
539 if (hdev->features[0][0] & LMP_3SLOT)
540 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
541
542 if (hdev->features[0][0] & LMP_5SLOT)
543 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
544
545 if (hdev->features[0][1] & LMP_HV2) {
546 hdev->pkt_type |= (HCI_HV2);
547 hdev->esco_type |= (ESCO_HV2);
548 }
549
550 if (hdev->features[0][1] & LMP_HV3) {
551 hdev->pkt_type |= (HCI_HV3);
552 hdev->esco_type |= (ESCO_HV3);
553 }
554
555 if (lmp_esco_capable(hdev))
556 hdev->esco_type |= (ESCO_EV3);
557
558 if (hdev->features[0][4] & LMP_EV4)
559 hdev->esco_type |= (ESCO_EV4);
560
561 if (hdev->features[0][4] & LMP_EV5)
562 hdev->esco_type |= (ESCO_EV5);
563
564 if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
565 hdev->esco_type |= (ESCO_2EV3);
566
567 if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
568 hdev->esco_type |= (ESCO_3EV3);
569
570 if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
571 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
572}
573
574static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
575 struct sk_buff *skb)
576{
577 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
578
579 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
580
581 if (rp->status)
582 return;
583
584 if (hdev->max_page < rp->max_page)
585 hdev->max_page = rp->max_page;
586
587 if (rp->page < HCI_MAX_PAGES)
588 memcpy(hdev->features[rp->page], rp->features, 8);
589}
590
591static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
592 struct sk_buff *skb)
593{
594 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
595
596 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
597
598 if (!rp->status)
599 hdev->flow_ctl_mode = rp->mode;
600}
601
602static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
603{
604 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
605
606 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
607
608 if (rp->status)
609 return;
610
611 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
612 hdev->sco_mtu = rp->sco_mtu;
613 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
614 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
615
616 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
617 hdev->sco_mtu = 64;
618 hdev->sco_pkts = 8;
619 }
620
621 hdev->acl_cnt = hdev->acl_pkts;
622 hdev->sco_cnt = hdev->sco_pkts;
623
624 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
625 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
626}
627
628static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
629{
630 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
631
632 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
633
634 if (!rp->status)
635 bacpy(&hdev->bdaddr, &rp->bdaddr);
636}
637
638static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
639 struct sk_buff *skb)
640{
641 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
642
643 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
644
645 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
646 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
647 hdev->page_scan_window = __le16_to_cpu(rp->window);
648 }
649}
650
651static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
652 struct sk_buff *skb)
653{
654 u8 status = *((u8 *) skb->data);
655 struct hci_cp_write_page_scan_activity *sent;
656
657 BT_DBG("%s status 0x%2.2x", hdev->name, status);
658
659 if (status)
660 return;
661
662 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
663 if (!sent)
664 return;
665
666 hdev->page_scan_interval = __le16_to_cpu(sent->interval);
667 hdev->page_scan_window = __le16_to_cpu(sent->window);
668}
669
670static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
671 struct sk_buff *skb)
672{
673 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
674
675 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
676
677 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
678 hdev->page_scan_type = rp->type;
679}
680
681static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
682 struct sk_buff *skb)
683{
684 u8 status = *((u8 *) skb->data);
685 u8 *type;
686
687 BT_DBG("%s status 0x%2.2x", hdev->name, status);
688
689 if (status)
690 return;
691
692 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
693 if (type)
694 hdev->page_scan_type = *type;
695}
696
697static void hci_cc_read_data_block_size(struct hci_dev *hdev,
698 struct sk_buff *skb)
699{
700 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
701
702 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
703
704 if (rp->status)
705 return;
706
707 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
708 hdev->block_len = __le16_to_cpu(rp->block_len);
709 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
710
711 hdev->block_cnt = hdev->num_blocks;
712
713 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
714 hdev->block_cnt, hdev->block_len);
715}
716
717static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
718 struct sk_buff *skb)
719{
720 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
721
722 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
723
724 if (rp->status)
725 goto a2mp_rsp;
726
727 hdev->amp_status = rp->amp_status;
728 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
729 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
730 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
731 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
732 hdev->amp_type = rp->amp_type;
733 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
734 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
735 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
736 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
737
738a2mp_rsp:
739 a2mp_send_getinfo_rsp(hdev);
740}
741
742static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
743 struct sk_buff *skb)
744{
745 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
746 struct amp_assoc *assoc = &hdev->loc_assoc;
747 size_t rem_len, frag_len;
748
749 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
750
751 if (rp->status)
752 goto a2mp_rsp;
753
754 frag_len = skb->len - sizeof(*rp);
755 rem_len = __le16_to_cpu(rp->rem_len);
756
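 /* The AMP assoc data may arrive split across several fragments;
 * accumulate each fragment until the remaining length fits into
 * this response.
 */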
757 if (rem_len > frag_len) {
758 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
759
760 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
761 assoc->offset += frag_len;
762
763 /* Read other fragments */
764 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
765
766 return;
767 }
768
769 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
770 assoc->len = assoc->offset + rem_len;
771 assoc->offset = 0;
772
773a2mp_rsp:
774 /* Send A2MP Rsp when all fragments are received */
775 a2mp_send_getampassoc_rsp(hdev, rp->status);
776 a2mp_send_create_phy_link_req(hdev, rp->status);
777}
778
779static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
780 struct sk_buff *skb)
781{
782 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
783
784 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
785
786 if (!rp->status)
787 hdev->inq_tx_power = rp->tx_power;
788}
789
790static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
791{
792 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
793 struct hci_cp_pin_code_reply *cp;
794 struct hci_conn *conn;
795
796 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
797
798 hci_dev_lock(hdev);
799
800 if (test_bit(HCI_MGMT, &hdev->dev_flags))
801 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
802
803 if (rp->status)
804 goto unlock;
805
806 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
807 if (!cp)
808 goto unlock;
809
810 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
811 if (conn)
812 conn->pin_length = cp->pin_len;
813
814unlock:
815 hci_dev_unlock(hdev);
816}
817
818static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
819{
820 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
821
822 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
823
824 hci_dev_lock(hdev);
825
826 if (test_bit(HCI_MGMT, &hdev->dev_flags))
827 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
828 rp->status);
829
830 hci_dev_unlock(hdev);
831}
832
833static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
834 struct sk_buff *skb)
835{
836 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
837
838 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
839
840 if (rp->status)
841 return;
842
843 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
844 hdev->le_pkts = rp->le_max_pkt;
845
846 hdev->le_cnt = hdev->le_pkts;
847
848 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
849}
850
851static void hci_cc_le_read_local_features(struct hci_dev *hdev,
852 struct sk_buff *skb)
853{
854 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
855
856 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
857
858 if (!rp->status)
859 memcpy(hdev->le_features, rp->features, 8);
860}
861
862static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
863 struct sk_buff *skb)
864{
865 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
866
867 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
868
869 if (!rp->status)
870 hdev->adv_tx_power = rp->tx_power;
871}
872
873static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
874{
875 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
876
877 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
878
879 hci_dev_lock(hdev);
880
881 if (test_bit(HCI_MGMT, &hdev->dev_flags))
882 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
883 rp->status);
884
885 hci_dev_unlock(hdev);
886}
887
888static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
889 struct sk_buff *skb)
890{
891 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
892
893 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
894
895 hci_dev_lock(hdev);
896
897 if (test_bit(HCI_MGMT, &hdev->dev_flags))
898 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
899 ACL_LINK, 0, rp->status);
900
901 hci_dev_unlock(hdev);
902}
903
904static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
905{
906 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
907
908 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
909
910 hci_dev_lock(hdev);
911
912 if (test_bit(HCI_MGMT, &hdev->dev_flags))
913 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
914 0, rp->status);
915
916 hci_dev_unlock(hdev);
917}
918
919static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
920 struct sk_buff *skb)
921{
922 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
923
924 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
925
926 hci_dev_lock(hdev);
927
928 if (test_bit(HCI_MGMT, &hdev->dev_flags))
929 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
930 ACL_LINK, 0, rp->status);
931
932 hci_dev_unlock(hdev);
933}
934
935static void hci_cc_read_local_oob_data(struct hci_dev *hdev,
936 struct sk_buff *skb)
937{
938 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
939
940 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
941
942 hci_dev_lock(hdev);
943 mgmt_read_local_oob_data_complete(hdev, rp->hash, rp->randomizer,
944 NULL, NULL, rp->status);
945 hci_dev_unlock(hdev);
946}
947
948static void hci_cc_read_local_oob_ext_data(struct hci_dev *hdev,
949 struct sk_buff *skb)
950{
951 struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data;
952
953 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
954
955 hci_dev_lock(hdev);
956 mgmt_read_local_oob_data_complete(hdev, rp->hash192, rp->randomizer192,
957 rp->hash256, rp->randomizer256,
958 rp->status);
959 hci_dev_unlock(hdev);
960}
961
962static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
963{
964 __u8 *sent, status = *((__u8 *) skb->data);
965
966 BT_DBG("%s status 0x%2.2x", hdev->name, status);
967
968 sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
969 if (!sent)
970 return;
971
972 hci_dev_lock(hdev);
973
974 if (!status) {
975 if (*sent)
976 set_bit(HCI_ADVERTISING, &hdev->dev_flags);
977 else
978 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
979 }
980
981 hci_dev_unlock(hdev);
982}
983
984static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
985 struct sk_buff *skb)
986{
987 struct hci_cp_le_set_scan_enable *cp;
988 __u8 status = *((__u8 *) skb->data);
989
990 BT_DBG("%s status 0x%2.2x", hdev->name, status);
991
992 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
993 if (!cp)
994 return;
995
996 if (status)
997 return;
998
999 switch (cp->enable) {
1000 case LE_SCAN_ENABLE:
1001 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1002 break;
1003
1004 case LE_SCAN_DISABLE:
1005 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1006 break;
1007
1008 default:
1009 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1010 break;
1011 }
1012}
1013
1014static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
1015 struct sk_buff *skb)
1016{
1017 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
1018
1019 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
1020
1021 if (!rp->status)
1022 hdev->le_white_list_size = rp->size;
1023}
1024
1025static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
1026 struct sk_buff *skb)
1027{
1028 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
1029
1030 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1031
1032 if (!rp->status)
1033 memcpy(hdev->le_states, rp->le_states, 8);
1034}
1035
1036static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1037 struct sk_buff *skb)
1038{
1039 struct hci_cp_write_le_host_supported *sent;
1040 __u8 status = *((__u8 *) skb->data);
1041
1042 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1043
1044 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1045 if (!sent)
1046 return;
1047
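 /* Mirror the host-supported LE and simultaneous LE/BR-EDR settings
 * into the local host feature bits and the LE device flags.
 */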
1048 if (!status) {
1049 if (sent->le) {
1050 hdev->features[1][0] |= LMP_HOST_LE;
1051 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1052 } else {
1053 hdev->features[1][0] &= ~LMP_HOST_LE;
1054 clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
1055 clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
1056 }
1057
1058 if (sent->simul)
1059 hdev->features[1][0] |= LMP_HOST_LE_BREDR;
1060 else
1061 hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
1062 }
1063}
1064
1065static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1066 struct sk_buff *skb)
1067{
1068 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1069
1070 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1071 hdev->name, rp->status, rp->phy_handle);
1072
1073 if (rp->status)
1074 return;
1075
1076 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1077}
1078
1079static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1080{
1081 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1082
1083 if (status) {
1084 hci_conn_check_pending(hdev);
1085 return;
1086 }
1087
1088 set_bit(HCI_INQUIRY, &hdev->flags);
1089}
1090
1091static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1092{
1093 struct hci_cp_create_conn *cp;
1094 struct hci_conn *conn;
1095
1096 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1097
1098 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1099 if (!cp)
1100 return;
1101
1102 hci_dev_lock(hdev);
1103
1104 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1105
1106 BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);
1107
1108 if (status) {
1109 if (conn && conn->state == BT_CONNECT) {
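 /* On status 0x0c keep the connection in BT_CONNECT2 so the
 * attempt can be retried, unless too many attempts have
 * already been made.
 */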
1110 if (status != 0x0c || conn->attempt > 2) {
1111 conn->state = BT_CLOSED;
1112 hci_proto_connect_cfm(conn, status);
1113 hci_conn_del(conn);
1114 } else
1115 conn->state = BT_CONNECT2;
1116 }
1117 } else {
1118 if (!conn) {
1119 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1120 if (conn) {
1121 conn->out = true;
1122 conn->link_mode |= HCI_LM_MASTER;
1123 } else
1124 BT_ERR("No memory for new connection");
1125 }
1126 }
1127
1128 hci_dev_unlock(hdev);
1129}
1130
1131static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1132{
1133 struct hci_cp_add_sco *cp;
1134 struct hci_conn *acl, *sco;
1135 __u16 handle;
1136
1137 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1138
1139 if (!status)
1140 return;
1141
1142 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1143 if (!cp)
1144 return;
1145
1146 handle = __le16_to_cpu(cp->handle);
1147
1148 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1149
1150 hci_dev_lock(hdev);
1151
1152 acl = hci_conn_hash_lookup_handle(hdev, handle);
1153 if (acl) {
1154 sco = acl->link;
1155 if (sco) {
1156 sco->state = BT_CLOSED;
1157
1158 hci_proto_connect_cfm(sco, status);
1159 hci_conn_del(sco);
1160 }
1161 }
1162
1163 hci_dev_unlock(hdev);
1164}
1165
1166static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1167{
1168 struct hci_cp_auth_requested *cp;
1169 struct hci_conn *conn;
1170
1171 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1172
1173 if (!status)
1174 return;
1175
1176 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1177 if (!cp)
1178 return;
1179
1180 hci_dev_lock(hdev);
1181
1182 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1183 if (conn) {
1184 if (conn->state == BT_CONFIG) {
1185 hci_proto_connect_cfm(conn, status);
1186 hci_conn_drop(conn);
1187 }
1188 }
1189
1190 hci_dev_unlock(hdev);
1191}
1192
1193static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1194{
1195 struct hci_cp_set_conn_encrypt *cp;
1196 struct hci_conn *conn;
1197
1198 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1199
1200 if (!status)
1201 return;
1202
1203 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1204 if (!cp)
1205 return;
1206
1207 hci_dev_lock(hdev);
1208
1209 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1210 if (conn) {
1211 if (conn->state == BT_CONFIG) {
1212 hci_proto_connect_cfm(conn, status);
1213 hci_conn_drop(conn);
1214 }
1215 }
1216
1217 hci_dev_unlock(hdev);
1218}
1219
1220static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1221 struct hci_conn *conn)
1222{
1223 if (conn->state != BT_CONFIG || !conn->out)
1224 return 0;
1225
1226 if (conn->pending_sec_level == BT_SECURITY_SDP)
1227 return 0;
1228
1229 /* Only request authentication for SSP connections, for non-SSP
1230 * devices with sec_level MEDIUM or HIGH, or when MITM protection
1231 * is requested.
1232 */
1233 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1234 conn->pending_sec_level != BT_SECURITY_HIGH &&
1235 conn->pending_sec_level != BT_SECURITY_MEDIUM)
1236 return 0;
1237
1238 return 1;
1239}
1240
1241static int hci_resolve_name(struct hci_dev *hdev,
1242 struct inquiry_entry *e)
1243{
1244 struct hci_cp_remote_name_req cp;
1245
1246 memset(&cp, 0, sizeof(cp));
1247
1248 bacpy(&cp.bdaddr, &e->data.bdaddr);
1249 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1250 cp.pscan_mode = e->data.pscan_mode;
1251 cp.clock_offset = e->data.clock_offset;
1252
1253 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1254}
1255
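/* Pick the next inquiry cache entry that still needs name resolution
 * and send a Remote Name Request for it. Returns true if a request
 * was sent.
 */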
1256static bool hci_resolve_next_name(struct hci_dev *hdev)
1257{
1258 struct discovery_state *discov = &hdev->discovery;
1259 struct inquiry_entry *e;
1260
1261 if (list_empty(&discov->resolve))
1262 return false;
1263
1264 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1265 if (!e)
1266 return false;
1267
1268 if (hci_resolve_name(hdev, e) == 0) {
1269 e->name_state = NAME_PENDING;
1270 return true;
1271 }
1272
1273 return false;
1274}
1275
1276static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1277 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1278{
1279 struct discovery_state *discov = &hdev->discovery;
1280 struct inquiry_entry *e;
1281
1282 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1283 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1284 name_len, conn->dev_class);
1285
1286 if (discov->state == DISCOVERY_STOPPED)
1287 return;
1288
1289 if (discov->state == DISCOVERY_STOPPING)
1290 goto discov_complete;
1291
1292 if (discov->state != DISCOVERY_RESOLVING)
1293 return;
1294
1295 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1296 /* If the device was not found in the list of found devices whose names
1297 * are pending resolution, there is no need to continue resolving the
1298 * next name, as that will be done upon receiving another Remote Name
1299 * Request Complete event. */
1300 if (!e)
1301 return;
1302
1303 list_del(&e->list);
1304 if (name) {
1305 e->name_state = NAME_KNOWN;
1306 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1307 e->data.rssi, name, name_len);
1308 } else {
1309 e->name_state = NAME_NOT_KNOWN;
1310 }
1311
1312 if (hci_resolve_next_name(hdev))
1313 return;
1314
1315discov_complete:
1316 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1317}
1318
1319static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1320{
1321 struct hci_cp_remote_name_req *cp;
1322 struct hci_conn *conn;
1323
1324 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1325
1326 /* If successful, wait for the remote name request complete event
1327 * before checking whether authentication is needed. */
1328 if (!status)
1329 return;
1330
1331 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1332 if (!cp)
1333 return;
1334
1335 hci_dev_lock(hdev);
1336
1337 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1338
1339 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1340 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1341
1342 if (!conn)
1343 goto unlock;
1344
1345 if (!hci_outgoing_auth_needed(hdev, conn))
1346 goto unlock;
1347
1348 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1349 struct hci_cp_auth_requested auth_cp;
1350
1351 auth_cp.handle = __cpu_to_le16(conn->handle);
1352 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
1353 sizeof(auth_cp), &auth_cp);
1354 }
1355
1356unlock:
1357 hci_dev_unlock(hdev);
1358}
1359
1360static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1361{
1362 struct hci_cp_read_remote_features *cp;
1363 struct hci_conn *conn;
1364
1365 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1366
1367 if (!status)
1368 return;
1369
1370 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1371 if (!cp)
1372 return;
1373
1374 hci_dev_lock(hdev);
1375
1376 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1377 if (conn) {
1378 if (conn->state == BT_CONFIG) {
1379 hci_proto_connect_cfm(conn, status);
1380 hci_conn_drop(conn);
1381 }
1382 }
1383
1384 hci_dev_unlock(hdev);
1385}
1386
1387static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1388{
1389 struct hci_cp_read_remote_ext_features *cp;
1390 struct hci_conn *conn;
1391
1392 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1393
1394 if (!status)
1395 return;
1396
1397 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1398 if (!cp)
1399 return;
1400
1401 hci_dev_lock(hdev);
1402
1403 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1404 if (conn) {
1405 if (conn->state == BT_CONFIG) {
1406 hci_proto_connect_cfm(conn, status);
1407 hci_conn_drop(conn);
1408 }
1409 }
1410
1411 hci_dev_unlock(hdev);
1412}
1413
1414static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1415{
1416 struct hci_cp_setup_sync_conn *cp;
1417 struct hci_conn *acl, *sco;
1418 __u16 handle;
1419
1420 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1421
1422 if (!status)
1423 return;
1424
1425 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1426 if (!cp)
1427 return;
1428
1429 handle = __le16_to_cpu(cp->handle);
1430
1431 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1432
1433 hci_dev_lock(hdev);
1434
1435 acl = hci_conn_hash_lookup_handle(hdev, handle);
1436 if (acl) {
1437 sco = acl->link;
1438 if (sco) {
1439 sco->state = BT_CLOSED;
1440
1441 hci_proto_connect_cfm(sco, status);
1442 hci_conn_del(sco);
1443 }
1444 }
1445
1446 hci_dev_unlock(hdev);
1447}
1448
1449static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1450{
1451 struct hci_cp_sniff_mode *cp;
1452 struct hci_conn *conn;
1453
1454 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1455
1456 if (!status)
1457 return;
1458
1459 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1460 if (!cp)
1461 return;
1462
1463 hci_dev_lock(hdev);
1464
1465 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1466 if (conn) {
1467 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1468
1469 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1470 hci_sco_setup(conn, status);
1471 }
1472
1473 hci_dev_unlock(hdev);
1474}
1475
1476static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1477{
1478 struct hci_cp_exit_sniff_mode *cp;
1479 struct hci_conn *conn;
1480
1481 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1482
1483 if (!status)
1484 return;
1485
1486 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1487 if (!cp)
1488 return;
1489
1490 hci_dev_lock(hdev);
1491
1492 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1493 if (conn) {
1494 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1495
1496 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1497 hci_sco_setup(conn, status);
1498 }
1499
1500 hci_dev_unlock(hdev);
1501}
1502
1503static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1504{
1505 struct hci_cp_disconnect *cp;
1506 struct hci_conn *conn;
1507
1508 if (!status)
1509 return;
1510
1511 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1512 if (!cp)
1513 return;
1514
1515 hci_dev_lock(hdev);
1516
1517 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1518 if (conn)
1519 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1520 conn->dst_type, status);
1521
1522 hci_dev_unlock(hdev);
1523}
1524
1525static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1526{
1527 struct hci_cp_create_phy_link *cp;
1528
1529 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1530
1531 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1532 if (!cp)
1533 return;
1534
1535 hci_dev_lock(hdev);
1536
1537 if (status) {
1538 struct hci_conn *hcon;
1539
1540 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1541 if (hcon)
1542 hci_conn_del(hcon);
1543 } else {
1544 amp_write_remote_assoc(hdev, cp->phy_handle);
1545 }
1546
1547 hci_dev_unlock(hdev);
1548}
1549
1550static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1551{
1552 struct hci_cp_accept_phy_link *cp;
1553
1554 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1555
1556 if (status)
1557 return;
1558
1559 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1560 if (!cp)
1561 return;
1562
1563 amp_write_remote_assoc(hdev, cp->phy_handle);
1564}
1565
1566static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1567{
1568 __u8 status = *((__u8 *) skb->data);
1569 struct discovery_state *discov = &hdev->discovery;
1570 struct inquiry_entry *e;
1571
1572 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1573
1574 hci_conn_check_pending(hdev);
1575
1576 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1577 return;
1578
1579 smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
1580 wake_up_bit(&hdev->flags, HCI_INQUIRY);
1581
1582 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1583 return;
1584
1585 hci_dev_lock(hdev);
1586
1587 if (discov->state != DISCOVERY_FINDING)
1588 goto unlock;
1589
1590 if (list_empty(&discov->resolve)) {
1591 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1592 goto unlock;
1593 }
1594
1595 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1596 if (e && hci_resolve_name(hdev, e) == 0) {
1597 e->name_state = NAME_PENDING;
1598 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1599 } else {
1600 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1601 }
1602
1603unlock:
1604 hci_dev_unlock(hdev);
1605}
1606
1607static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1608{
1609 struct inquiry_data data;
1610 struct inquiry_info *info = (void *) (skb->data + 1);
1611 int num_rsp = *((__u8 *) skb->data);
1612
1613 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1614
1615 if (!num_rsp)
1616 return;
1617
1618 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1619 return;
1620
1621 hci_dev_lock(hdev);
1622
1623 for (; num_rsp; num_rsp--, info++) {
1624 bool name_known, ssp;
1625
1626 bacpy(&data.bdaddr, &info->bdaddr);
1627 data.pscan_rep_mode = info->pscan_rep_mode;
1628 data.pscan_period_mode = info->pscan_period_mode;
1629 data.pscan_mode = info->pscan_mode;
1630 memcpy(data.dev_class, info->dev_class, 3);
1631 data.clock_offset = info->clock_offset;
1632 data.rssi = 0x00;
1633 data.ssp_mode = 0x00;
1634
1635 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1636 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1637 info->dev_class, 0, !name_known, ssp, NULL,
1638 0);
1639 }
1640
1641 hci_dev_unlock(hdev);
1642}
1643
1644static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1645{
1646 struct hci_ev_conn_complete *ev = (void *) skb->data;
1647 struct hci_conn *conn;
1648
1649 BT_DBG("%s", hdev->name);
1650
1651 hci_dev_lock(hdev);
1652
1653 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1654 if (!conn) {
1655 if (ev->link_type != SCO_LINK)
1656 goto unlock;
1657
1658 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1659 if (!conn)
1660 goto unlock;
1661
1662 conn->type = SCO_LINK;
1663 }
1664
1665 if (!ev->status) {
1666 conn->handle = __le16_to_cpu(ev->handle);
1667
1668 if (conn->type == ACL_LINK) {
1669 conn->state = BT_CONFIG;
1670 hci_conn_hold(conn);
1671
1672 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1673 !hci_find_link_key(hdev, &ev->bdaddr))
1674 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1675 else
1676 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1677 } else
1678 conn->state = BT_CONNECTED;
1679
1680 hci_conn_add_sysfs(conn);
1681
1682 if (test_bit(HCI_AUTH, &hdev->flags))
1683 conn->link_mode |= HCI_LM_AUTH;
1684
1685 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1686 conn->link_mode |= HCI_LM_ENCRYPT;
1687
1688 /* Get remote features */
1689 if (conn->type == ACL_LINK) {
1690 struct hci_cp_read_remote_features cp;
1691 cp.handle = ev->handle;
1692 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1693 sizeof(cp), &cp);
1694 }
1695
1696 /* Set packet type for incoming connection */
1697 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1698 struct hci_cp_change_conn_ptype cp;
1699 cp.handle = ev->handle;
1700 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1701 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1702 &cp);
1703 }
1704 } else {
1705 conn->state = BT_CLOSED;
1706 if (conn->type == ACL_LINK)
1707 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1708 conn->dst_type, ev->status);
1709 }
1710
1711 if (conn->type == ACL_LINK)
1712 hci_sco_setup(conn, ev->status);
1713
1714 if (ev->status) {
1715 hci_proto_connect_cfm(conn, ev->status);
1716 hci_conn_del(conn);
1717 } else if (ev->link_type != ACL_LINK)
1718 hci_proto_connect_cfm(conn, ev->status);
1719
1720unlock:
1721 hci_dev_unlock(hdev);
1722
1723 hci_conn_check_pending(hdev);
1724}
1725
1726static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1727{
1728 struct hci_ev_conn_request *ev = (void *) skb->data;
1729 int mask = hdev->link_mode;
1730 __u8 flags = 0;
1731
1732 BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
1733 ev->link_type);
1734
1735 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
1736 &flags);
1737
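 /* Accept the connection only if allowed by the device's link mode or
 * by a registered protocol, and the remote address is not blacklisted.
 */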
1738 if ((mask & HCI_LM_ACCEPT) &&
1739 !hci_blacklist_lookup(hdev, &ev->bdaddr, BDADDR_BREDR)) {
1740 /* Connection accepted */
1741 struct inquiry_entry *ie;
1742 struct hci_conn *conn;
1743
1744 hci_dev_lock(hdev);
1745
1746 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1747 if (ie)
1748 memcpy(ie->data.dev_class, ev->dev_class, 3);
1749
1750 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1751 &ev->bdaddr);
1752 if (!conn) {
1753 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1754 if (!conn) {
1755 BT_ERR("No memory for new connection");
1756 hci_dev_unlock(hdev);
1757 return;
1758 }
1759 }
1760
1761 memcpy(conn->dev_class, ev->dev_class, 3);
1762
1763 hci_dev_unlock(hdev);
1764
1765 if (ev->link_type == ACL_LINK ||
1766 (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
1767 struct hci_cp_accept_conn_req cp;
1768 conn->state = BT_CONNECT;
1769
1770 bacpy(&cp.bdaddr, &ev->bdaddr);
1771
1772 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1773 cp.role = 0x00; /* Become master */
1774 else
1775 cp.role = 0x01; /* Remain slave */
1776
1777 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1778 &cp);
1779 } else if (!(flags & HCI_PROTO_DEFER)) {
1780 struct hci_cp_accept_sync_conn_req cp;
1781 conn->state = BT_CONNECT;
1782
1783 bacpy(&cp.bdaddr, &ev->bdaddr);
1784 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1785
1786 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1787 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1788 cp.max_latency = __constant_cpu_to_le16(0xffff);
1789 cp.content_format = cpu_to_le16(hdev->voice_setting);
1790 cp.retrans_effort = 0xff;
1791
1792 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1793 sizeof(cp), &cp);
1794 } else {
1795 conn->state = BT_CONNECT2;
1796 hci_proto_connect_cfm(conn, 0);
1797 }
1798 } else {
1799 /* Connection rejected */
1800 struct hci_cp_reject_conn_req cp;
1801
1802 bacpy(&cp.bdaddr, &ev->bdaddr);
1803 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1804 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1805 }
1806}
1807
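/* Map an HCI disconnect error code to the disconnect reason reported
 * through the mgmt interface.
 */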
1808static u8 hci_to_mgmt_reason(u8 err)
1809{
1810 switch (err) {
1811 case HCI_ERROR_CONNECTION_TIMEOUT:
1812 return MGMT_DEV_DISCONN_TIMEOUT;
1813 case HCI_ERROR_REMOTE_USER_TERM:
1814 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1815 case HCI_ERROR_REMOTE_POWER_OFF:
1816 return MGMT_DEV_DISCONN_REMOTE;
1817 case HCI_ERROR_LOCAL_HOST_TERM:
1818 return MGMT_DEV_DISCONN_LOCAL_HOST;
1819 default:
1820 return MGMT_DEV_DISCONN_UNKNOWN;
1821 }
1822}
1823
1824static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1825{
1826 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1827 u8 reason = hci_to_mgmt_reason(ev->reason);
1828 struct hci_conn *conn;
1829 u8 type;
1830
1831 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1832
1833 hci_dev_lock(hdev);
1834
1835 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1836 if (!conn)
1837 goto unlock;
1838
1839 if (ev->status) {
1840 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1841 conn->dst_type, ev->status);
1842 goto unlock;
1843 }
1844
1845 conn->state = BT_CLOSED;
1846
1847 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1848 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1849 conn->dst_type, reason);
1850
1851 if (conn->type == ACL_LINK && conn->flush_key)
1852 hci_remove_link_key(hdev, &conn->dst);
1853
1854 type = conn->type;
1855
1856 hci_proto_disconn_cfm(conn, ev->reason);
1857 hci_conn_del(conn);
1858
1859 /* Re-enable advertising if necessary, since it might
1860 * have been disabled by the connection. From the
1861 * HCI_LE_Set_Advertise_Enable command description in
1862 * the core specification (v4.0):
1863 * "The Controller shall continue advertising until the Host
1864 * issues an LE_Set_Advertise_Enable command with
1865 * Advertising_Enable set to 0x00 (Advertising is disabled)
1866 * or until a connection is created or until the Advertising
1867 * is timed out due to Directed Advertising."
1868 */
1869 if (type == LE_LINK)
1870 mgmt_reenable_advertising(hdev);
1871
1872unlock:
1873 hci_dev_unlock(hdev);
1874}
1875
1876static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1877{
1878 struct hci_ev_auth_complete *ev = (void *) skb->data;
1879 struct hci_conn *conn;
1880
1881 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1882
1883 hci_dev_lock(hdev);
1884
1885 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1886 if (!conn)
1887 goto unlock;
1888
1889 if (!ev->status) {
1890 if (!hci_conn_ssp_enabled(conn) &&
1891 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1892 BT_INFO("re-auth of legacy device is not possible.");
1893 } else {
1894 conn->link_mode |= HCI_LM_AUTH;
1895 conn->sec_level = conn->pending_sec_level;
1896 }
1897 } else {
1898 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1899 ev->status);
1900 }
1901
1902 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1903 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1904
1905 if (conn->state == BT_CONFIG) {
1906 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1907 struct hci_cp_set_conn_encrypt cp;
1908 cp.handle = ev->handle;
1909 cp.encrypt = 0x01;
1910 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1911 &cp);
1912 } else {
1913 conn->state = BT_CONNECTED;
1914 hci_proto_connect_cfm(conn, ev->status);
1915 hci_conn_drop(conn);
1916 }
1917 } else {
1918 hci_auth_cfm(conn, ev->status);
1919
1920 hci_conn_hold(conn);
1921 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1922 hci_conn_drop(conn);
1923 }
1924
1925 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1926 if (!ev->status) {
1927 struct hci_cp_set_conn_encrypt cp;
1928 cp.handle = ev->handle;
1929 cp.encrypt = 0x01;
1930 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1931 &cp);
1932 } else {
1933 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1934 hci_encrypt_cfm(conn, ev->status, 0x00);
1935 }
1936 }
1937
1938unlock:
1939 hci_dev_unlock(hdev);
1940}
1941
1942static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1943{
1944 struct hci_ev_remote_name *ev = (void *) skb->data;
1945 struct hci_conn *conn;
1946
1947 BT_DBG("%s", hdev->name);
1948
1949 hci_conn_check_pending(hdev);
1950
1951 hci_dev_lock(hdev);
1952
1953 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1954
1955 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1956 goto check_auth;
1957
1958 if (ev->status == 0)
1959 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1960 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1961 else
1962 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
1963
1964check_auth:
1965 if (!conn)
1966 goto unlock;
1967
1968 if (!hci_outgoing_auth_needed(hdev, conn))
1969 goto unlock;
1970
1971 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1972 struct hci_cp_auth_requested cp;
1973 cp.handle = __cpu_to_le16(conn->handle);
1974 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1975 }
1976
1977unlock:
1978 hci_dev_unlock(hdev);
1979}
1980
1981static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1982{
1983 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1984 struct hci_conn *conn;
1985
1986 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1987
1988 hci_dev_lock(hdev);
1989
1990 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1991 if (conn) {
1992 if (!ev->status) {
1993 if (ev->encrypt) {
1994 /* Encryption implies authentication */
1995 conn->link_mode |= HCI_LM_AUTH;
1996 conn->link_mode |= HCI_LM_ENCRYPT;
1997 conn->sec_level = conn->pending_sec_level;
1998 } else
1999 conn->link_mode &= ~HCI_LM_ENCRYPT;
2000 }
2001
2002 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2003
2004 if (ev->status && conn->state == BT_CONNECTED) {
2005 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
2006 hci_conn_drop(conn);
2007 goto unlock;
2008 }
2009
2010 if (conn->state == BT_CONFIG) {
2011 if (!ev->status)
2012 conn->state = BT_CONNECTED;
2013
2014 hci_proto_connect_cfm(conn, ev->status);
2015 hci_conn_drop(conn);
2016 } else
2017 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2018 }
2019
2020unlock:
2021 hci_dev_unlock(hdev);
2022}
2023
2024static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2025 struct sk_buff *skb)
2026{
2027 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2028 struct hci_conn *conn;
2029
2030 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2031
2032 hci_dev_lock(hdev);
2033
2034 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2035 if (conn) {
2036 if (!ev->status)
2037 conn->link_mode |= HCI_LM_SECURE;
2038
2039 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2040
2041 hci_key_change_cfm(conn, ev->status);
2042 }
2043
2044 hci_dev_unlock(hdev);
2045}
2046
2047static void hci_remote_features_evt(struct hci_dev *hdev,
2048 struct sk_buff *skb)
2049{
2050 struct hci_ev_remote_features *ev = (void *) skb->data;
2051 struct hci_conn *conn;
2052
2053 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2054
2055 hci_dev_lock(hdev);
2056
2057 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2058 if (!conn)
2059 goto unlock;
2060
2061 if (!ev->status)
2062 memcpy(conn->features[0], ev->features, 8);
2063
2064 if (conn->state != BT_CONFIG)
2065 goto unlock;
2066
2067 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2068 struct hci_cp_read_remote_ext_features cp;
2069 cp.handle = ev->handle;
2070 cp.page = 0x01;
2071 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2072 sizeof(cp), &cp);
2073 goto unlock;
2074 }
2075
2076 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2077 struct hci_cp_remote_name_req cp;
2078 memset(&cp, 0, sizeof(cp));
2079 bacpy(&cp.bdaddr, &conn->dst);
2080 cp.pscan_rep_mode = 0x02;
2081 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2082 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2083 mgmt_device_connected(hdev, &conn->dst, conn->type,
2084 conn->dst_type, 0, NULL, 0,
2085 conn->dev_class);
2086
2087 if (!hci_outgoing_auth_needed(hdev, conn)) {
2088 conn->state = BT_CONNECTED;
2089 hci_proto_connect_cfm(conn, ev->status);
2090 hci_conn_drop(conn);
2091 }
2092
2093unlock:
2094 hci_dev_unlock(hdev);
2095}
2096
2097static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2098{
2099 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2100 u8 status = skb->data[sizeof(*ev)];
2101 __u16 opcode;
2102
2103 skb_pull(skb, sizeof(*ev));
2104
2105 opcode = __le16_to_cpu(ev->opcode);
2106
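 /* Dispatch the Command Complete event to the handler matching the
 * opcode of the command that completed.
 */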
2107 switch (opcode) {
2108 case HCI_OP_INQUIRY_CANCEL:
2109 hci_cc_inquiry_cancel(hdev, skb);
2110 break;
2111
2112 case HCI_OP_PERIODIC_INQ:
2113 hci_cc_periodic_inq(hdev, skb);
2114 break;
2115
2116 case HCI_OP_EXIT_PERIODIC_INQ:
2117 hci_cc_exit_periodic_inq(hdev, skb);
2118 break;
2119
2120 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2121 hci_cc_remote_name_req_cancel(hdev, skb);
2122 break;
2123
2124 case HCI_OP_ROLE_DISCOVERY:
2125 hci_cc_role_discovery(hdev, skb);
2126 break;
2127
2128 case HCI_OP_READ_LINK_POLICY:
2129 hci_cc_read_link_policy(hdev, skb);
2130 break;
2131
2132 case HCI_OP_WRITE_LINK_POLICY:
2133 hci_cc_write_link_policy(hdev, skb);
2134 break;
2135
2136 case HCI_OP_READ_DEF_LINK_POLICY:
2137 hci_cc_read_def_link_policy(hdev, skb);
2138 break;
2139
2140 case HCI_OP_WRITE_DEF_LINK_POLICY:
2141 hci_cc_write_def_link_policy(hdev, skb);
2142 break;
2143
2144 case HCI_OP_RESET:
2145 hci_cc_reset(hdev, skb);
2146 break;
2147
2148 case HCI_OP_WRITE_LOCAL_NAME:
2149 hci_cc_write_local_name(hdev, skb);
2150 break;
2151
2152 case HCI_OP_READ_LOCAL_NAME:
2153 hci_cc_read_local_name(hdev, skb);
2154 break;
2155
2156 case HCI_OP_WRITE_AUTH_ENABLE:
2157 hci_cc_write_auth_enable(hdev, skb);
2158 break;
2159
2160 case HCI_OP_WRITE_ENCRYPT_MODE:
2161 hci_cc_write_encrypt_mode(hdev, skb);
2162 break;
2163
2164 case HCI_OP_WRITE_SCAN_ENABLE:
2165 hci_cc_write_scan_enable(hdev, skb);
2166 break;
2167
2168 case HCI_OP_READ_CLASS_OF_DEV:
2169 hci_cc_read_class_of_dev(hdev, skb);
2170 break;
2171
2172 case HCI_OP_WRITE_CLASS_OF_DEV:
2173 hci_cc_write_class_of_dev(hdev, skb);
2174 break;
2175
2176 case HCI_OP_READ_VOICE_SETTING:
2177 hci_cc_read_voice_setting(hdev, skb);
2178 break;
2179
2180 case HCI_OP_WRITE_VOICE_SETTING:
2181 hci_cc_write_voice_setting(hdev, skb);
2182 break;
2183
2184 case HCI_OP_READ_NUM_SUPPORTED_IAC:
2185 hci_cc_read_num_supported_iac(hdev, skb);
2186 break;
2187
2188 case HCI_OP_WRITE_SSP_MODE:
2189 hci_cc_write_ssp_mode(hdev, skb);
2190 break;
2191
2192 case HCI_OP_WRITE_SC_SUPPORT:
2193 hci_cc_write_sc_support(hdev, skb);
2194 break;
2195
2196 case HCI_OP_READ_LOCAL_VERSION:
2197 hci_cc_read_local_version(hdev, skb);
2198 break;
2199
2200 case HCI_OP_READ_LOCAL_COMMANDS:
2201 hci_cc_read_local_commands(hdev, skb);
2202 break;
2203
2204 case HCI_OP_READ_LOCAL_FEATURES:
2205 hci_cc_read_local_features(hdev, skb);
2206 break;
2207
2208 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2209 hci_cc_read_local_ext_features(hdev, skb);
2210 break;
2211
2212 case HCI_OP_READ_BUFFER_SIZE:
2213 hci_cc_read_buffer_size(hdev, skb);
2214 break;
2215
2216 case HCI_OP_READ_BD_ADDR:
2217 hci_cc_read_bd_addr(hdev, skb);
2218 break;
2219
2220 case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
2221 hci_cc_read_page_scan_activity(hdev, skb);
2222 break;
2223
2224 case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
2225 hci_cc_write_page_scan_activity(hdev, skb);
2226 break;
2227
2228 case HCI_OP_READ_PAGE_SCAN_TYPE:
2229 hci_cc_read_page_scan_type(hdev, skb);
2230 break;
2231
2232 case HCI_OP_WRITE_PAGE_SCAN_TYPE:
2233 hci_cc_write_page_scan_type(hdev, skb);
2234 break;
2235
2236 case HCI_OP_READ_DATA_BLOCK_SIZE:
2237 hci_cc_read_data_block_size(hdev, skb);
2238 break;
2239
2240 case HCI_OP_READ_FLOW_CONTROL_MODE:
2241 hci_cc_read_flow_control_mode(hdev, skb);
2242 break;
2243
2244 case HCI_OP_READ_LOCAL_AMP_INFO:
2245 hci_cc_read_local_amp_info(hdev, skb);
2246 break;
2247
2248 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2249 hci_cc_read_local_amp_assoc(hdev, skb);
2250 break;
2251
2252 case HCI_OP_READ_INQ_RSP_TX_POWER:
2253 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2254 break;
2255
2256 case HCI_OP_PIN_CODE_REPLY:
2257 hci_cc_pin_code_reply(hdev, skb);
2258 break;
2259
2260 case HCI_OP_PIN_CODE_NEG_REPLY:
2261 hci_cc_pin_code_neg_reply(hdev, skb);
2262 break;
2263
2264 case HCI_OP_READ_LOCAL_OOB_DATA:
2265 hci_cc_read_local_oob_data(hdev, skb);
2266 break;
2267
2268 case HCI_OP_READ_LOCAL_OOB_EXT_DATA:
2269 hci_cc_read_local_oob_ext_data(hdev, skb);
2270 break;
2271
2272 case HCI_OP_LE_READ_BUFFER_SIZE:
2273 hci_cc_le_read_buffer_size(hdev, skb);
2274 break;
2275
2276 case HCI_OP_LE_READ_LOCAL_FEATURES:
2277 hci_cc_le_read_local_features(hdev, skb);
2278 break;
2279
2280 case HCI_OP_LE_READ_ADV_TX_POWER:
2281 hci_cc_le_read_adv_tx_power(hdev, skb);
2282 break;
2283
2284 case HCI_OP_USER_CONFIRM_REPLY:
2285 hci_cc_user_confirm_reply(hdev, skb);
2286 break;
2287
2288 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2289 hci_cc_user_confirm_neg_reply(hdev, skb);
2290 break;
2291
2292 case HCI_OP_USER_PASSKEY_REPLY:
2293 hci_cc_user_passkey_reply(hdev, skb);
2294 break;
2295
2296 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2297 hci_cc_user_passkey_neg_reply(hdev, skb);
2298 break;
2299
2300 case HCI_OP_LE_SET_ADV_ENABLE:
2301 hci_cc_le_set_adv_enable(hdev, skb);
2302 break;
2303
2304 case HCI_OP_LE_SET_SCAN_ENABLE:
2305 hci_cc_le_set_scan_enable(hdev, skb);
2306 break;
2307
2308 case HCI_OP_LE_READ_WHITE_LIST_SIZE:
2309 hci_cc_le_read_white_list_size(hdev, skb);
2310 break;
2311
2312 case HCI_OP_LE_READ_SUPPORTED_STATES:
2313 hci_cc_le_read_supported_states(hdev, skb);
2314 break;
2315
2316 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2317 hci_cc_write_le_host_supported(hdev, skb);
2318 break;
2319
2320 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2321 hci_cc_write_remote_amp_assoc(hdev, skb);
2322 break;
2323
2324 default:
2325 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2326 break;
2327 }
2328
2329 if (opcode != HCI_OP_NOP)
2330 del_timer(&hdev->cmd_timer);
2331
2332 hci_req_cmd_complete(hdev, opcode, status);
2333
2334 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2335 atomic_set(&hdev->cmd_cnt, 1);
2336 if (!skb_queue_empty(&hdev->cmd_q))
2337 queue_work(hdev->workqueue, &hdev->cmd_work);
2338 }
2339}
2340
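/* Command Status event: dispatch to the matching hci_cs_* handler. A
 * pending request is only completed here if the command failed or
 * expects no follow-up event; otherwise the later event finishes it. */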
2341static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2342{
2343 struct hci_ev_cmd_status *ev = (void *) skb->data;
2344 __u16 opcode;
2345
2346 skb_pull(skb, sizeof(*ev));
2347
2348 opcode = __le16_to_cpu(ev->opcode);
2349
2350 switch (opcode) {
2351 case HCI_OP_INQUIRY:
2352 hci_cs_inquiry(hdev, ev->status);
2353 break;
2354
2355 case HCI_OP_CREATE_CONN:
2356 hci_cs_create_conn(hdev, ev->status);
2357 break;
2358
2359 case HCI_OP_ADD_SCO:
2360 hci_cs_add_sco(hdev, ev->status);
2361 break;
2362
2363 case HCI_OP_AUTH_REQUESTED:
2364 hci_cs_auth_requested(hdev, ev->status);
2365 break;
2366
2367 case HCI_OP_SET_CONN_ENCRYPT:
2368 hci_cs_set_conn_encrypt(hdev, ev->status);
2369 break;
2370
2371 case HCI_OP_REMOTE_NAME_REQ:
2372 hci_cs_remote_name_req(hdev, ev->status);
2373 break;
2374
2375 case HCI_OP_READ_REMOTE_FEATURES:
2376 hci_cs_read_remote_features(hdev, ev->status);
2377 break;
2378
2379 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2380 hci_cs_read_remote_ext_features(hdev, ev->status);
2381 break;
2382
2383 case HCI_OP_SETUP_SYNC_CONN:
2384 hci_cs_setup_sync_conn(hdev, ev->status);
2385 break;
2386
2387 case HCI_OP_SNIFF_MODE:
2388 hci_cs_sniff_mode(hdev, ev->status);
2389 break;
2390
2391 case HCI_OP_EXIT_SNIFF_MODE:
2392 hci_cs_exit_sniff_mode(hdev, ev->status);
2393 break;
2394
2395 case HCI_OP_DISCONNECT:
2396 hci_cs_disconnect(hdev, ev->status);
2397 break;
2398
2399 case HCI_OP_CREATE_PHY_LINK:
2400 hci_cs_create_phylink(hdev, ev->status);
2401 break;
2402
2403 case HCI_OP_ACCEPT_PHY_LINK:
2404 hci_cs_accept_phylink(hdev, ev->status);
2405 break;
2406
2407 default:
2408 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2409 break;
2410 }
2411
2412 if (opcode != HCI_OP_NOP)
2413 del_timer(&hdev->cmd_timer);
2414
2415 if (ev->status ||
2416 (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
2417 hci_req_cmd_complete(hdev, opcode, ev->status);
2418
2419 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2420 atomic_set(&hdev->cmd_cnt, 1);
2421 if (!skb_queue_empty(&hdev->cmd_q))
2422 queue_work(hdev->workqueue, &hdev->cmd_work);
2423 }
2424}
2425
2426static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2427{
2428 struct hci_ev_role_change *ev = (void *) skb->data;
2429 struct hci_conn *conn;
2430
2431 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2432
2433 hci_dev_lock(hdev);
2434
2435 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2436 if (conn) {
2437 if (!ev->status) {
2438 if (ev->role)
2439 conn->link_mode &= ~HCI_LM_MASTER;
2440 else
2441 conn->link_mode |= HCI_LM_MASTER;
2442 }
2443
2444 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2445
2446 hci_role_switch_cfm(conn, ev->status, ev->role);
2447 }
2448
2449 hci_dev_unlock(hdev);
2450}
2451
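/* Number of Completed Packets event (packet-based flow control):
 * return the completed packets to the ACL, SCO or LE credit pools (LE
 * shares the ACL pool when the controller has no dedicated LE buffers)
 * and kick the TX work. */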
2452static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2453{
2454 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2455 int i;
2456
2457 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2458 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2459 return;
2460 }
2461
2462 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2463 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2464 BT_DBG("%s bad parameters", hdev->name);
2465 return;
2466 }
2467
2468 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2469
2470 for (i = 0; i < ev->num_hndl; i++) {
2471 struct hci_comp_pkts_info *info = &ev->handles[i];
2472 struct hci_conn *conn;
2473 __u16 handle, count;
2474
2475 handle = __le16_to_cpu(info->handle);
2476 count = __le16_to_cpu(info->count);
2477
2478 conn = hci_conn_hash_lookup_handle(hdev, handle);
2479 if (!conn)
2480 continue;
2481
2482 conn->sent -= count;
2483
2484 switch (conn->type) {
2485 case ACL_LINK:
2486 hdev->acl_cnt += count;
2487 if (hdev->acl_cnt > hdev->acl_pkts)
2488 hdev->acl_cnt = hdev->acl_pkts;
2489 break;
2490
2491 case LE_LINK:
2492 if (hdev->le_pkts) {
2493 hdev->le_cnt += count;
2494 if (hdev->le_cnt > hdev->le_pkts)
2495 hdev->le_cnt = hdev->le_pkts;
2496 } else {
2497 hdev->acl_cnt += count;
2498 if (hdev->acl_cnt > hdev->acl_pkts)
2499 hdev->acl_cnt = hdev->acl_pkts;
2500 }
2501 break;
2502
2503 case SCO_LINK:
2504 hdev->sco_cnt += count;
2505 if (hdev->sco_cnt > hdev->sco_pkts)
2506 hdev->sco_cnt = hdev->sco_pkts;
2507 break;
2508
2509 default:
2510 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2511 break;
2512 }
2513 }
2514
2515 queue_work(hdev->workqueue, &hdev->tx_work);
2516}
2517
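/* On a BR/EDR controller the handle identifies a connection directly;
 * on an AMP controller it identifies a channel, so the connection is
 * resolved through the hci_chan. */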
2518static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2519 __u16 handle)
2520{
2521 struct hci_chan *chan;
2522
2523 switch (hdev->dev_type) {
2524 case HCI_BREDR:
2525 return hci_conn_hash_lookup_handle(hdev, handle);
2526 case HCI_AMP:
2527 chan = hci_chan_lookup_handle(hdev, handle);
2528 if (chan)
2529 return chan->conn;
2530 break;
2531 default:
2532 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2533 break;
2534 }
2535
2536 return NULL;
2537}
2538
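/* Number of Completed Data Blocks event (block-based flow control):
 * return the completed blocks to the shared block pool used by ACL and
 * AMP links and kick the TX work. */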
2539static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2540{
2541 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2542 int i;
2543
2544 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2545 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2546 return;
2547 }
2548
2549 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2550 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2551 BT_DBG("%s bad parameters", hdev->name);
2552 return;
2553 }
2554
2555 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2556 ev->num_hndl);
2557
2558 for (i = 0; i < ev->num_hndl; i++) {
2559 struct hci_comp_blocks_info *info = &ev->handles[i];
2560 struct hci_conn *conn = NULL;
2561 __u16 handle, block_count;
2562
2563 handle = __le16_to_cpu(info->handle);
2564 block_count = __le16_to_cpu(info->blocks);
2565
2566 conn = __hci_conn_lookup_handle(hdev, handle);
2567 if (!conn)
2568 continue;
2569
2570 conn->sent -= block_count;
2571
2572 switch (conn->type) {
2573 case ACL_LINK:
2574 case AMP_LINK:
2575 hdev->block_cnt += block_count;
2576 if (hdev->block_cnt > hdev->num_blocks)
2577 hdev->block_cnt = hdev->num_blocks;
2578 break;
2579
2580 default:
2581 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2582 break;
2583 }
2584 }
2585
2586 queue_work(hdev->workqueue, &hdev->tx_work);
2587}
2588
2589static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2590{
2591 struct hci_ev_mode_change *ev = (void *) skb->data;
2592 struct hci_conn *conn;
2593
2594 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2595
2596 hci_dev_lock(hdev);
2597
2598 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2599 if (conn) {
2600 conn->mode = ev->mode;
2601
2602 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2603 &conn->flags)) {
2604 if (conn->mode == HCI_CM_ACTIVE)
2605 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2606 else
2607 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2608 }
2609
2610 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2611 hci_sco_setup(conn, ev->status);
2612 }
2613
2614 hci_dev_unlock(hdev);
2615}
2616
2617static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2618{
2619 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2620 struct hci_conn *conn;
2621
2622 BT_DBG("%s", hdev->name);
2623
2624 hci_dev_lock(hdev);
2625
2626 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2627 if (!conn)
2628 goto unlock;
2629
2630 if (conn->state == BT_CONNECTED) {
2631 hci_conn_hold(conn);
2632 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2633 hci_conn_drop(conn);
2634 }
2635
2636 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2637 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2638 sizeof(ev->bdaddr), &ev->bdaddr);
2639 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2640 u8 secure;
2641
2642 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2643 secure = 1;
2644 else
2645 secure = 0;
2646
2647 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2648 }
2649
2650unlock:
2651 hci_dev_unlock(hdev);
2652}
2653
2654static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2655{
2656 struct hci_ev_link_key_req *ev = (void *) skb->data;
2657 struct hci_cp_link_key_reply cp;
2658 struct hci_conn *conn;
2659 struct link_key *key;
2660
2661 BT_DBG("%s", hdev->name);
2662
2663 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2664 return;
2665
2666 hci_dev_lock(hdev);
2667
2668 key = hci_find_link_key(hdev, &ev->bdaddr);
2669 if (!key) {
2670 BT_DBG("%s link key not found for %pMR", hdev->name,
2671 &ev->bdaddr);
2672 goto not_found;
2673 }
2674
2675 BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
2676 &ev->bdaddr);
2677
2678 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2679 key->type == HCI_LK_DEBUG_COMBINATION) {
2680 BT_DBG("%s ignoring debug key", hdev->name);
2681 goto not_found;
2682 }
2683
2684 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2685 if (conn) {
2686 if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 ||
2687 key->type == HCI_LK_UNAUTH_COMBINATION_P256) &&
2688 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2689 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2690 goto not_found;
2691 }
2692
2693 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2694 conn->pending_sec_level == BT_SECURITY_HIGH) {
2695 BT_DBG("%s ignoring key unauthenticated for high security",
2696 hdev->name);
2697 goto not_found;
2698 }
2699
2700 conn->key_type = key->type;
2701 conn->pin_length = key->pin_len;
2702 }
2703
2704 bacpy(&cp.bdaddr, &ev->bdaddr);
2705 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2706
2707 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2708
2709 hci_dev_unlock(hdev);
2710
2711 return;
2712
2713not_found:
2714 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2715 hci_dev_unlock(hdev);
2716}
2717
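/* Link Key Notification event: record the new key type on the
 * connection (unless it is a changed combination key) and, when the
 * management interface is in use, store the key itself. */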
2718static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2719{
2720 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2721 struct hci_conn *conn;
2722 u8 pin_len = 0;
2723
2724 BT_DBG("%s", hdev->name);
2725
2726 hci_dev_lock(hdev);
2727
2728 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2729 if (conn) {
2730 hci_conn_hold(conn);
2731 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2732 pin_len = conn->pin_length;
2733
2734 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2735 conn->key_type = ev->key_type;
2736
2737 hci_conn_drop(conn);
2738 }
2739
2740 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2741 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2742 ev->key_type, pin_len);
2743
2744 hci_dev_unlock(hdev);
2745}
2746
2747static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2748{
2749 struct hci_ev_clock_offset *ev = (void *) skb->data;
2750 struct hci_conn *conn;
2751
2752 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2753
2754 hci_dev_lock(hdev);
2755
2756 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2757 if (conn && !ev->status) {
2758 struct inquiry_entry *ie;
2759
2760 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2761 if (ie) {
2762 ie->data.clock_offset = ev->clock_offset;
2763 ie->timestamp = jiffies;
2764 }
2765 }
2766
2767 hci_dev_unlock(hdev);
2768}
2769
2770static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2771{
2772 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2773 struct hci_conn *conn;
2774
2775 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2776
2777 hci_dev_lock(hdev);
2778
2779 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2780 if (conn && !ev->status)
2781 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2782
2783 hci_dev_unlock(hdev);
2784}
2785
2786static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2787{
2788 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2789 struct inquiry_entry *ie;
2790
2791 BT_DBG("%s", hdev->name);
2792
2793 hci_dev_lock(hdev);
2794
2795 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2796 if (ie) {
2797 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2798 ie->timestamp = jiffies;
2799 }
2800
2801 hci_dev_unlock(hdev);
2802}
2803
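/* Inquiry Result with RSSI event: the records come in two layouts,
 * with or without the pscan_mode field, told apart by the per-record
 * size. Each response updates the inquiry cache and is reported to
 * mgmt. */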
2804static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2805 struct sk_buff *skb)
2806{
2807 struct inquiry_data data;
2808 int num_rsp = *((__u8 *) skb->data);
2809 bool name_known, ssp;
2810
2811 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2812
2813 if (!num_rsp)
2814 return;
2815
2816 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2817 return;
2818
2819 hci_dev_lock(hdev);
2820
2821 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2822 struct inquiry_info_with_rssi_and_pscan_mode *info;
2823 info = (void *) (skb->data + 1);
2824
2825 for (; num_rsp; num_rsp--, info++) {
2826 bacpy(&data.bdaddr, &info->bdaddr);
2827 data.pscan_rep_mode = info->pscan_rep_mode;
2828 data.pscan_period_mode = info->pscan_period_mode;
2829 data.pscan_mode = info->pscan_mode;
2830 memcpy(data.dev_class, info->dev_class, 3);
2831 data.clock_offset = info->clock_offset;
2832 data.rssi = info->rssi;
2833 data.ssp_mode = 0x00;
2834
2835 name_known = hci_inquiry_cache_update(hdev, &data,
2836 false, &ssp);
2837 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2838 info->dev_class, info->rssi,
2839 !name_known, ssp, NULL, 0);
2840 }
2841 } else {
2842 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2843
2844 for (; num_rsp; num_rsp--, info++) {
2845 bacpy(&data.bdaddr, &info->bdaddr);
2846 data.pscan_rep_mode = info->pscan_rep_mode;
2847 data.pscan_period_mode = info->pscan_period_mode;
2848 data.pscan_mode = 0x00;
2849 memcpy(data.dev_class, info->dev_class, 3);
2850 data.clock_offset = info->clock_offset;
2851 data.rssi = info->rssi;
2852 data.ssp_mode = 0x00;
2853 name_known = hci_inquiry_cache_update(hdev, &data,
2854 false, &ssp);
2855 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2856 info->dev_class, info->rssi,
2857 !name_known, ssp, NULL, 0);
2858 }
2859 }
2860
2861 hci_dev_unlock(hdev);
2862}
2863
2864static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2865 struct sk_buff *skb)
2866{
2867 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2868 struct hci_conn *conn;
2869
2870 BT_DBG("%s", hdev->name);
2871
2872 hci_dev_lock(hdev);
2873
2874 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2875 if (!conn)
2876 goto unlock;
2877
2878 if (ev->page < HCI_MAX_PAGES)
2879 memcpy(conn->features[ev->page], ev->features, 8);
2880
2881 if (!ev->status && ev->page == 0x01) {
2882 struct inquiry_entry *ie;
2883
2884 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2885 if (ie)
2886 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2887
2888 if (ev->features[0] & LMP_HOST_SSP) {
2889 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2890 } else {
2891			/* The Bluetooth specification mandates that Extended
2892			 * Inquiry Results are only used when Secure Simple
2893			 * Pairing is enabled, but some devices violate this
2894			 * requirement.
2895 *
2896 * To make these devices work, the internal SSP
2897 * enabled flag needs to be cleared if the remote host
2898 * features do not indicate SSP support */
2899 clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2900 }
2901
2902 if (ev->features[0] & LMP_HOST_SC)
2903 set_bit(HCI_CONN_SC_ENABLED, &conn->flags);
2904 }
2905
2906 if (conn->state != BT_CONFIG)
2907 goto unlock;
2908
2909 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2910 struct hci_cp_remote_name_req cp;
2911 memset(&cp, 0, sizeof(cp));
2912 bacpy(&cp.bdaddr, &conn->dst);
2913 cp.pscan_rep_mode = 0x02;
2914 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2915 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2916 mgmt_device_connected(hdev, &conn->dst, conn->type,
2917 conn->dst_type, 0, NULL, 0,
2918 conn->dev_class);
2919
2920 if (!hci_outgoing_auth_needed(hdev, conn)) {
2921 conn->state = BT_CONNECTED;
2922 hci_proto_connect_cfm(conn, ev->status);
2923 hci_conn_drop(conn);
2924 }
2925
2926unlock:
2927 hci_dev_unlock(hdev);
2928}
2929
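/* Synchronous Connection Complete event: when the controller reports a
 * SCO link for a connection that was set up as eSCO, reuse that
 * connection and downgrade its type. A few rejection/unsupported error
 * codes trigger a retry with a different packet type selection. */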
2930static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2931 struct sk_buff *skb)
2932{
2933 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2934 struct hci_conn *conn;
2935
2936 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2937
2938 hci_dev_lock(hdev);
2939
2940 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2941 if (!conn) {
2942 if (ev->link_type == ESCO_LINK)
2943 goto unlock;
2944
2945 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2946 if (!conn)
2947 goto unlock;
2948
2949 conn->type = SCO_LINK;
2950 }
2951
2952 switch (ev->status) {
2953 case 0x00:
2954 conn->handle = __le16_to_cpu(ev->handle);
2955 conn->state = BT_CONNECTED;
2956
2957 hci_conn_add_sysfs(conn);
2958 break;
2959
2960 case 0x0d: /* Connection Rejected due to Limited Resources */
2961 case 0x11: /* Unsupported Feature or Parameter Value */
2962 case 0x1c: /* SCO interval rejected */
2963 case 0x1a: /* Unsupported Remote Feature */
2964 case 0x1f: /* Unspecified error */
2965 if (conn->out) {
2966 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2967 (hdev->esco_type & EDR_ESCO_MASK);
2968 if (hci_setup_sync(conn, conn->link->handle))
2969 goto unlock;
2970 }
2971 /* fall through */
2972
2973 default:
2974 conn->state = BT_CLOSED;
2975 break;
2976 }
2977
2978 hci_proto_connect_cfm(conn, ev->status);
2979 if (ev->status)
2980 hci_conn_del(conn);
2981
2982unlock:
2983 hci_dev_unlock(hdev);
2984}
2985
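/* Return the number of significant bytes in an EIR block, i.e. the
 * offset of the first zero-length field; the remainder is padding. */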
2986static inline size_t eir_get_length(u8 *eir, size_t eir_len)
2987{
2988 size_t parsed = 0;
2989
2990 while (parsed < eir_len) {
2991 u8 field_len = eir[0];
2992
2993 if (field_len == 0)
2994 return parsed;
2995
2996 parsed += field_len + 1;
2997 eir += field_len + 1;
2998 }
2999
3000 return eir_len;
3001}
3002
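/* Extended Inquiry Result event: update the inquiry cache from each
 * record, use the EIR data to decide whether the remote name is
 * already known and forward the result to mgmt. */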
3003static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
3004 struct sk_buff *skb)
3005{
3006 struct inquiry_data data;
3007 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3008 int num_rsp = *((__u8 *) skb->data);
3009 size_t eir_len;
3010
3011 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3012
3013 if (!num_rsp)
3014 return;
3015
3016 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3017 return;
3018
3019 hci_dev_lock(hdev);
3020
3021 for (; num_rsp; num_rsp--, info++) {
3022 bool name_known, ssp;
3023
3024 bacpy(&data.bdaddr, &info->bdaddr);
3025 data.pscan_rep_mode = info->pscan_rep_mode;
3026 data.pscan_period_mode = info->pscan_period_mode;
3027 data.pscan_mode = 0x00;
3028 memcpy(data.dev_class, info->dev_class, 3);
3029 data.clock_offset = info->clock_offset;
3030 data.rssi = info->rssi;
3031 data.ssp_mode = 0x01;
3032
3033 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3034 name_known = eir_has_data_type(info->data,
3035 sizeof(info->data),
3036 EIR_NAME_COMPLETE);
3037 else
3038 name_known = true;
3039
3040 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3041 &ssp);
3042 eir_len = eir_get_length(info->data, sizeof(info->data));
3043 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3044 info->dev_class, info->rssi, !name_known,
3045 ssp, info->data, eir_len);
3046 }
3047
3048 hci_dev_unlock(hdev);
3049}
3050
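/* Encryption Key Refresh Complete event: on success promote the
 * connection to its pending security level; on failure disconnect with
 * an authentication failure reason. */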
3051static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3052 struct sk_buff *skb)
3053{
3054 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3055 struct hci_conn *conn;
3056
3057 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3058 __le16_to_cpu(ev->handle));
3059
3060 hci_dev_lock(hdev);
3061
3062 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3063 if (!conn)
3064 goto unlock;
3065
3066 if (!ev->status)
3067 conn->sec_level = conn->pending_sec_level;
3068
3069 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3070
3071 if (ev->status && conn->state == BT_CONNECTED) {
3072 hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
3073 hci_conn_drop(conn);
3074 goto unlock;
3075 }
3076
3077 if (conn->state == BT_CONFIG) {
3078 if (!ev->status)
3079 conn->state = BT_CONNECTED;
3080
3081 hci_proto_connect_cfm(conn, ev->status);
3082 hci_conn_drop(conn);
3083 } else {
3084 hci_auth_cfm(conn, ev->status);
3085
3086 hci_conn_hold(conn);
3087 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3088 hci_conn_drop(conn);
3089 }
3090
3091unlock:
3092 hci_dev_unlock(hdev);
3093}
3094
3095static u8 hci_get_auth_req(struct hci_conn *conn)
3096{
3097	/* If remote requests dedicated bonding, follow that lead */
3098 if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
3099 conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
3100 /* If both remote and local IO capabilities allow MITM
3101		 * protection, then require it; otherwise don't */
3102 if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
3103 conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
3104 return HCI_AT_DEDICATED_BONDING;
3105 else
3106 return HCI_AT_DEDICATED_BONDING_MITM;
3107 }
3108
3109	/* If remote requests no-bonding, follow that lead */
3110 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3111 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3112 return conn->remote_auth | (conn->auth_type & 0x01);
3113
3114 return conn->auth_type;
3115}
3116
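/* IO Capability Request event: when the management interface is in
 * use, reply with our IO capabilities if pairing is allowed (device is
 * pairable or the remote only asks for no-bonding), otherwise send a
 * negative reply with "pairing not allowed". */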
3117static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3118{
3119 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3120 struct hci_conn *conn;
3121
3122 BT_DBG("%s", hdev->name);
3123
3124 hci_dev_lock(hdev);
3125
3126 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3127 if (!conn)
3128 goto unlock;
3129
3130 hci_conn_hold(conn);
3131
3132 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3133 goto unlock;
3134
3135 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3136 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3137 struct hci_cp_io_capability_reply cp;
3138
3139 bacpy(&cp.bdaddr, &ev->bdaddr);
3140		/* Change the IO capability from KeyboardDisplay to DisplayYesNo,
3141		 * since KeyboardDisplay is not a valid value in this HCI reply. */
3142 cp.capability = (conn->io_capability == 0x04) ?
3143 HCI_IO_DISPLAY_YESNO : conn->io_capability;
3144 conn->auth_type = hci_get_auth_req(conn);
3145 cp.authentication = conn->auth_type;
3146
3147 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3148 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3149 cp.oob_data = 0x01;
3150 else
3151 cp.oob_data = 0x00;
3152
3153 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3154 sizeof(cp), &cp);
3155 } else {
3156 struct hci_cp_io_capability_neg_reply cp;
3157
3158 bacpy(&cp.bdaddr, &ev->bdaddr);
3159 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3160
3161 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3162 sizeof(cp), &cp);
3163 }
3164
3165unlock:
3166 hci_dev_unlock(hdev);
3167}
3168
3169static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3170{
3171 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3172 struct hci_conn *conn;
3173
3174 BT_DBG("%s", hdev->name);
3175
3176 hci_dev_lock(hdev);
3177
3178 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3179 if (!conn)
3180 goto unlock;
3181
3182 conn->remote_cap = ev->capability;
3183 conn->remote_auth = ev->authentication;
3184 if (ev->oob_data)
3185 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3186
3187unlock:
3188 hci_dev_unlock(hdev);
3189}
3190
3191static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3192 struct sk_buff *skb)
3193{
3194 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3195 int loc_mitm, rem_mitm, confirm_hint = 0;
3196 struct hci_conn *conn;
3197
3198 BT_DBG("%s", hdev->name);
3199
3200 hci_dev_lock(hdev);
3201
3202 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3203 goto unlock;
3204
3205 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3206 if (!conn)
3207 goto unlock;
3208
3209 loc_mitm = (conn->auth_type & 0x01);
3210 rem_mitm = (conn->remote_auth & 0x01);
3211
3212 /* If we require MITM but the remote device can't provide that
3213 * (it has NoInputNoOutput) then reject the confirmation
3214 * request. The only exception is when we're dedicated bonding
3215 * initiators (connect_cfm_cb set) since then we always have the MITM
3216 * bit set. */
3217 if (!conn->connect_cfm_cb && loc_mitm &&
3218 conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
3219 BT_DBG("Rejecting request: remote device can't provide MITM");
3220 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3221 sizeof(ev->bdaddr), &ev->bdaddr);
3222 goto unlock;
3223 }
3224
3225	/* If no side requires MITM protection, auto-accept */
3226 if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
3227 (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {
3228
3229		/* If we're not the initiators, request authorization to
3230 * proceed from user space (mgmt_user_confirm with
3231 * confirm_hint set to 1). */
3232 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3233 BT_DBG("Confirming auto-accept as acceptor");
3234 confirm_hint = 1;
3235 goto confirm;
3236 }
3237
3238 BT_DBG("Auto-accept of user confirmation with %ums delay",
3239 hdev->auto_accept_delay);
3240
3241 if (hdev->auto_accept_delay > 0) {
3242 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3243 queue_delayed_work(conn->hdev->workqueue,
3244 &conn->auto_accept_work, delay);
3245 goto unlock;
3246 }
3247
3248 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3249 sizeof(ev->bdaddr), &ev->bdaddr);
3250 goto unlock;
3251 }
3252
3253confirm:
3254 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3255 confirm_hint);
3256
3257unlock:
3258 hci_dev_unlock(hdev);
3259}
3260
3261static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3262 struct sk_buff *skb)
3263{
3264 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3265
3266 BT_DBG("%s", hdev->name);
3267
3268 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3269 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3270}
3271
3272static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3273 struct sk_buff *skb)
3274{
3275 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3276 struct hci_conn *conn;
3277
3278 BT_DBG("%s", hdev->name);
3279
3280 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3281 if (!conn)
3282 return;
3283
3284 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3285 conn->passkey_entered = 0;
3286
3287 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3288 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3289 conn->dst_type, conn->passkey_notify,
3290 conn->passkey_entered);
3291}
3292
3293static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3294{
3295 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3296 struct hci_conn *conn;
3297
3298 BT_DBG("%s", hdev->name);
3299
3300 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3301 if (!conn)
3302 return;
3303
3304 switch (ev->type) {
3305 case HCI_KEYPRESS_STARTED:
3306 conn->passkey_entered = 0;
3307 return;
3308
3309 case HCI_KEYPRESS_ENTERED:
3310 conn->passkey_entered++;
3311 break;
3312
3313 case HCI_KEYPRESS_ERASED:
3314 conn->passkey_entered--;
3315 break;
3316
3317 case HCI_KEYPRESS_CLEARED:
3318 conn->passkey_entered = 0;
3319 break;
3320
3321 case HCI_KEYPRESS_COMPLETED:
3322 return;
3323 }
3324
3325 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3326 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3327 conn->dst_type, conn->passkey_notify,
3328 conn->passkey_entered);
3329}
3330
3331static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3332 struct sk_buff *skb)
3333{
3334 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3335 struct hci_conn *conn;
3336
3337 BT_DBG("%s", hdev->name);
3338
3339 hci_dev_lock(hdev);
3340
3341 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3342 if (!conn)
3343 goto unlock;
3344
3345	/* To avoid duplicate auth_failed events to user space, check the
3346	 * HCI_CONN_AUTH_PEND flag, which is set if we initiated the
3347	 * authentication. A traditional auth_complete event is always
3348	 * produced when we are the initiator and is also mapped to the
3349	 * mgmt_auth_failed event. */
3350 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3351 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3352 ev->status);
3353
3354 hci_conn_drop(conn);
3355
3356unlock:
3357 hci_dev_unlock(hdev);
3358}
3359
3360static void hci_remote_host_features_evt(struct hci_dev *hdev,
3361 struct sk_buff *skb)
3362{
3363 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3364 struct inquiry_entry *ie;
3365 struct hci_conn *conn;
3366
3367 BT_DBG("%s", hdev->name);
3368
3369 hci_dev_lock(hdev);
3370
3371 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3372 if (conn)
3373 memcpy(conn->features[1], ev->features, 8);
3374
3375 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3376 if (ie)
3377 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3378
3379 hci_dev_unlock(hdev);
3380}
3381
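/* Remote OOB Data Request event: when the management interface is in
 * use, reply with the stored out-of-band hash/randomizer for this
 * address, using the extended (P-256) variant if Secure Connections is
 * enabled, or send a negative reply if no data is stored. */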
3382static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3383 struct sk_buff *skb)
3384{
3385 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3386 struct oob_data *data;
3387
3388 BT_DBG("%s", hdev->name);
3389
3390 hci_dev_lock(hdev);
3391
3392 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3393 goto unlock;
3394
3395 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3396 if (data) {
3397 if (test_bit(HCI_SC_ENABLED, &hdev->dev_flags)) {
3398 struct hci_cp_remote_oob_ext_data_reply cp;
3399
3400 bacpy(&cp.bdaddr, &ev->bdaddr);
3401 memcpy(cp.hash192, data->hash192, sizeof(cp.hash192));
3402 memcpy(cp.randomizer192, data->randomizer192,
3403 sizeof(cp.randomizer192));
3404 memcpy(cp.hash256, data->hash256, sizeof(cp.hash256));
3405 memcpy(cp.randomizer256, data->randomizer256,
3406 sizeof(cp.randomizer256));
3407
3408 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY,
3409 sizeof(cp), &cp);
3410 } else {
3411 struct hci_cp_remote_oob_data_reply cp;
3412
3413 bacpy(&cp.bdaddr, &ev->bdaddr);
3414 memcpy(cp.hash, data->hash192, sizeof(cp.hash));
3415 memcpy(cp.randomizer, data->randomizer192,
3416 sizeof(cp.randomizer));
3417
3418 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY,
3419 sizeof(cp), &cp);
3420 }
3421 } else {
3422 struct hci_cp_remote_oob_data_neg_reply cp;
3423
3424 bacpy(&cp.bdaddr, &ev->bdaddr);
3425 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY,
3426 sizeof(cp), &cp);
3427 }
3428
3429unlock:
3430 hci_dev_unlock(hdev);
3431}
3432
3433static void hci_phy_link_complete_evt(struct hci_dev *hdev,
3434 struct sk_buff *skb)
3435{
3436 struct hci_ev_phy_link_complete *ev = (void *) skb->data;
3437 struct hci_conn *hcon, *bredr_hcon;
3438
3439 BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
3440 ev->status);
3441
3442 hci_dev_lock(hdev);
3443
3444 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3445 if (!hcon) {
3446 hci_dev_unlock(hdev);
3447 return;
3448 }
3449
3450 if (ev->status) {
3451 hci_conn_del(hcon);
3452 hci_dev_unlock(hdev);
3453 return;
3454 }
3455
3456 bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;
3457
3458 hcon->state = BT_CONNECTED;
3459 bacpy(&hcon->dst, &bredr_hcon->dst);
3460
3461 hci_conn_hold(hcon);
3462 hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3463 hci_conn_drop(hcon);
3464
3465 hci_conn_add_sysfs(hcon);
3466
3467 amp_physical_cfm(bredr_hcon, hcon);
3468
3469 hci_dev_unlock(hdev);
3470}
3471
3472static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3473{
3474 struct hci_ev_logical_link_complete *ev = (void *) skb->data;
3475 struct hci_conn *hcon;
3476 struct hci_chan *hchan;
3477 struct amp_mgr *mgr;
3478
3479 BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
3480 hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
3481 ev->status);
3482
3483 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3484 if (!hcon)
3485 return;
3486
3487 /* Create AMP hchan */
3488 hchan = hci_chan_create(hcon);
3489 if (!hchan)
3490 return;
3491
3492 hchan->handle = le16_to_cpu(ev->handle);
3493
3494 BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);
3495
3496 mgr = hcon->amp_mgr;
3497 if (mgr && mgr->bredr_chan) {
3498 struct l2cap_chan *bredr_chan = mgr->bredr_chan;
3499
3500 l2cap_chan_lock(bredr_chan);
3501
3502 bredr_chan->conn->mtu = hdev->block_mtu;
3503 l2cap_logical_cfm(bredr_chan, hchan, 0);
3504 hci_conn_hold(hcon);
3505
3506 l2cap_chan_unlock(bredr_chan);
3507 }
3508}
3509
3510static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3511 struct sk_buff *skb)
3512{
3513 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3514 struct hci_chan *hchan;
3515
3516 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3517 le16_to_cpu(ev->handle), ev->status);
3518
3519 if (ev->status)
3520 return;
3521
3522 hci_dev_lock(hdev);
3523
3524 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3525 if (!hchan)
3526 goto unlock;
3527
3528 amp_destroy_logical_link(hchan, ev->reason);
3529
3530unlock:
3531 hci_dev_unlock(hdev);
3532}
3533
3534static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3535 struct sk_buff *skb)
3536{
3537 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3538 struct hci_conn *hcon;
3539
3540 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3541
3542 if (ev->status)
3543 return;
3544
3545 hci_dev_lock(hdev);
3546
3547 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3548 if (hcon) {
3549 hcon->state = BT_CLOSED;
3550 hci_conn_del(hcon);
3551 }
3552
3553 hci_dev_unlock(hdev);
3554}
3555
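/* LE Connection Complete event: reuse the connection object created
 * when an outgoing connection was initiated, or allocate a new one for
 * an incoming connection, fix up the source address/type and report
 * the result to mgmt. */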
3556static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3557{
3558 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3559 struct hci_conn *conn;
3560
3561 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3562
3563 hci_dev_lock(hdev);
3564
3565 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3566 if (!conn) {
3567 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3568 if (!conn) {
3569 BT_ERR("No memory for new connection");
3570 goto unlock;
3571 }
3572
3573 conn->dst_type = ev->bdaddr_type;
3574
3575 /* The advertising parameters for own address type
3576 * define which source address and source address
3577		 * type this connection has.
3578 */
3579 if (bacmp(&conn->src, BDADDR_ANY)) {
3580 conn->src_type = ADDR_LE_DEV_PUBLIC;
3581 } else {
3582 bacpy(&conn->src, &hdev->static_addr);
3583 conn->src_type = ADDR_LE_DEV_RANDOM;
3584 }
3585
3586 if (ev->role == LE_CONN_ROLE_MASTER) {
3587 conn->out = true;
3588 conn->link_mode |= HCI_LM_MASTER;
3589 }
3590 }
3591
3592 if (ev->status) {
3593 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3594 conn->dst_type, ev->status);
3595 hci_proto_connect_cfm(conn, ev->status);
3596 conn->state = BT_CLOSED;
3597 hci_conn_del(conn);
3598 goto unlock;
3599 }
3600
3601 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3602 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3603 conn->dst_type, 0, NULL, 0, NULL);
3604
3605 conn->sec_level = BT_SECURITY_LOW;
3606 conn->handle = __le16_to_cpu(ev->handle);
3607 conn->state = BT_CONNECTED;
3608
3609 if (test_bit(HCI_6LOWPAN_ENABLED, &hdev->dev_flags))
3610 set_bit(HCI_CONN_6LOWPAN, &conn->flags);
3611
3612 hci_conn_add_sysfs(conn);
3613
3614 hci_proto_connect_cfm(conn, ev->status);
3615
3616unlock:
3617 hci_dev_unlock(hdev);
3618}
3619
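/* LE Advertising Report event: each report carries variable-length
 * advertising data followed by a single RSSI byte, so the RSSI is read
 * from just past the data before the report is passed to mgmt. */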
3620static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3621{
3622 u8 num_reports = skb->data[0];
3623 void *ptr = &skb->data[1];
3624 s8 rssi;
3625
3626 while (num_reports--) {
3627 struct hci_ev_le_advertising_info *ev = ptr;
3628
3629 rssi = ev->data[ev->length];
3630 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3631 NULL, rssi, 0, 1, ev->data, ev->length);
3632
3633 ptr += sizeof(*ev) + ev->length + 1;
3634 }
3635}
3636
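/* LE Long Term Key Request event: look up a stored LTK matching the
 * EDiv/Rand values and hand it to the controller, or send a negative
 * reply. Short term keys (STKs) are single-use and removed once
 * delivered. */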
3637static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3638{
3639 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3640 struct hci_cp_le_ltk_reply cp;
3641 struct hci_cp_le_ltk_neg_reply neg;
3642 struct hci_conn *conn;
3643 struct smp_ltk *ltk;
3644
3645 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3646
3647 hci_dev_lock(hdev);
3648
3649 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3650 if (conn == NULL)
3651 goto not_found;
3652
3653 ltk = hci_find_ltk(hdev, ev->ediv, ev->random, conn->out);
3654 if (ltk == NULL)
3655 goto not_found;
3656
3657 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3658 cp.handle = cpu_to_le16(conn->handle);
3659
3660 if (ltk->authenticated)
3661 conn->pending_sec_level = BT_SECURITY_HIGH;
3662 else
3663 conn->pending_sec_level = BT_SECURITY_MEDIUM;
3664
3665 conn->enc_key_size = ltk->enc_size;
3666
3667 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3668
3669 if (ltk->type & HCI_SMP_STK) {
3670 list_del(&ltk->list);
3671 kfree(ltk);
3672 }
3673
3674 hci_dev_unlock(hdev);
3675
3676 return;
3677
3678not_found:
3679 neg.handle = ev->handle;
3680 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3681 hci_dev_unlock(hdev);
3682}
3683
3684static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3685{
3686 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3687
3688 skb_pull(skb, sizeof(*le_ev));
3689
3690 switch (le_ev->subevent) {
3691 case HCI_EV_LE_CONN_COMPLETE:
3692 hci_le_conn_complete_evt(hdev, skb);
3693 break;
3694
3695 case HCI_EV_LE_ADVERTISING_REPORT:
3696 hci_le_adv_report_evt(hdev, skb);
3697 break;
3698
3699 case HCI_EV_LE_LTK_REQ:
3700 hci_le_ltk_request_evt(hdev, skb);
3701 break;
3702
3703 default:
3704 break;
3705 }
3706}
3707
3708static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3709{
3710 struct hci_ev_channel_selected *ev = (void *) skb->data;
3711 struct hci_conn *hcon;
3712
3713 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3714
3715 skb_pull(skb, sizeof(*ev));
3716
3717 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3718 if (!hcon)
3719 return;
3720
3721 amp_read_loc_assoc_final_data(hdev, hcon);
3722}
3723
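/* Main HCI event demultiplexer. If a synchronous request is pending,
 * the event is also cloned into hdev->recv_evt, and a command waiting
 * for this specific event has its request completed before the event
 * is dispatched. */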
3724void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3725{
3726 struct hci_event_hdr *hdr = (void *) skb->data;
3727 __u8 event = hdr->evt;
3728
3729 hci_dev_lock(hdev);
3730
3731 /* Received events are (currently) only needed when a request is
3732	 * ongoing, so avoid unnecessary memory allocation.
3733 */
3734 if (hdev->req_status == HCI_REQ_PEND) {
3735 kfree_skb(hdev->recv_evt);
3736 hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
3737 }
3738
3739 hci_dev_unlock(hdev);
3740
3741 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3742
3743 if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
3744 struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
3745 u16 opcode = __le16_to_cpu(cmd_hdr->opcode);
3746
3747 hci_req_cmd_complete(hdev, opcode, 0);
3748 }
3749
3750 switch (event) {
3751 case HCI_EV_INQUIRY_COMPLETE:
3752 hci_inquiry_complete_evt(hdev, skb);
3753 break;
3754
3755 case HCI_EV_INQUIRY_RESULT:
3756 hci_inquiry_result_evt(hdev, skb);
3757 break;
3758
3759 case HCI_EV_CONN_COMPLETE:
3760 hci_conn_complete_evt(hdev, skb);
3761 break;
3762
3763 case HCI_EV_CONN_REQUEST:
3764 hci_conn_request_evt(hdev, skb);
3765 break;
3766
3767 case HCI_EV_DISCONN_COMPLETE:
3768 hci_disconn_complete_evt(hdev, skb);
3769 break;
3770
3771 case HCI_EV_AUTH_COMPLETE:
3772 hci_auth_complete_evt(hdev, skb);
3773 break;
3774
3775 case HCI_EV_REMOTE_NAME:
3776 hci_remote_name_evt(hdev, skb);
3777 break;
3778
3779 case HCI_EV_ENCRYPT_CHANGE:
3780 hci_encrypt_change_evt(hdev, skb);
3781 break;
3782
3783 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3784 hci_change_link_key_complete_evt(hdev, skb);
3785 break;
3786
3787 case HCI_EV_REMOTE_FEATURES:
3788 hci_remote_features_evt(hdev, skb);
3789 break;
3790
3791 case HCI_EV_CMD_COMPLETE:
3792 hci_cmd_complete_evt(hdev, skb);
3793 break;
3794
3795 case HCI_EV_CMD_STATUS:
3796 hci_cmd_status_evt(hdev, skb);
3797 break;
3798
3799 case HCI_EV_ROLE_CHANGE:
3800 hci_role_change_evt(hdev, skb);
3801 break;
3802
3803 case HCI_EV_NUM_COMP_PKTS:
3804 hci_num_comp_pkts_evt(hdev, skb);
3805 break;
3806
3807 case HCI_EV_MODE_CHANGE:
3808 hci_mode_change_evt(hdev, skb);
3809 break;
3810
3811 case HCI_EV_PIN_CODE_REQ:
3812 hci_pin_code_request_evt(hdev, skb);
3813 break;
3814
3815 case HCI_EV_LINK_KEY_REQ:
3816 hci_link_key_request_evt(hdev, skb);
3817 break;
3818
3819 case HCI_EV_LINK_KEY_NOTIFY:
3820 hci_link_key_notify_evt(hdev, skb);
3821 break;
3822
3823 case HCI_EV_CLOCK_OFFSET:
3824 hci_clock_offset_evt(hdev, skb);
3825 break;
3826
3827 case HCI_EV_PKT_TYPE_CHANGE:
3828 hci_pkt_type_change_evt(hdev, skb);
3829 break;
3830
3831 case HCI_EV_PSCAN_REP_MODE:
3832 hci_pscan_rep_mode_evt(hdev, skb);
3833 break;
3834
3835 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3836 hci_inquiry_result_with_rssi_evt(hdev, skb);
3837 break;
3838
3839 case HCI_EV_REMOTE_EXT_FEATURES:
3840 hci_remote_ext_features_evt(hdev, skb);
3841 break;
3842
3843 case HCI_EV_SYNC_CONN_COMPLETE:
3844 hci_sync_conn_complete_evt(hdev, skb);
3845 break;
3846
3847 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3848 hci_extended_inquiry_result_evt(hdev, skb);
3849 break;
3850
3851 case HCI_EV_KEY_REFRESH_COMPLETE:
3852 hci_key_refresh_complete_evt(hdev, skb);
3853 break;
3854
3855 case HCI_EV_IO_CAPA_REQUEST:
3856 hci_io_capa_request_evt(hdev, skb);
3857 break;
3858
3859 case HCI_EV_IO_CAPA_REPLY:
3860 hci_io_capa_reply_evt(hdev, skb);
3861 break;
3862
3863 case HCI_EV_USER_CONFIRM_REQUEST:
3864 hci_user_confirm_request_evt(hdev, skb);
3865 break;
3866
3867 case HCI_EV_USER_PASSKEY_REQUEST:
3868 hci_user_passkey_request_evt(hdev, skb);
3869 break;
3870
3871 case HCI_EV_USER_PASSKEY_NOTIFY:
3872 hci_user_passkey_notify_evt(hdev, skb);
3873 break;
3874
3875 case HCI_EV_KEYPRESS_NOTIFY:
3876 hci_keypress_notify_evt(hdev, skb);
3877 break;
3878
3879 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3880 hci_simple_pair_complete_evt(hdev, skb);
3881 break;
3882
3883 case HCI_EV_REMOTE_HOST_FEATURES:
3884 hci_remote_host_features_evt(hdev, skb);
3885 break;
3886
3887 case HCI_EV_LE_META:
3888 hci_le_meta_evt(hdev, skb);
3889 break;
3890
3891 case HCI_EV_CHANNEL_SELECTED:
3892 hci_chan_selected_evt(hdev, skb);
3893 break;
3894
3895 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3896 hci_remote_oob_data_request_evt(hdev, skb);
3897 break;
3898
3899 case HCI_EV_PHY_LINK_COMPLETE:
3900 hci_phy_link_complete_evt(hdev, skb);
3901 break;
3902
3903 case HCI_EV_LOGICAL_LINK_COMPLETE:
3904 hci_loglink_complete_evt(hdev, skb);
3905 break;
3906
3907 case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
3908 hci_disconn_loglink_complete_evt(hdev, skb);
3909 break;
3910
3911 case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
3912 hci_disconn_phylink_complete_evt(hdev, skb);
3913 break;
3914
3915 case HCI_EV_NUM_COMP_BLOCKS:
3916 hci_num_comp_blocks_evt(hdev, skb);
3917 break;
3918
3919 default:
3920 BT_DBG("%s event 0x%2.2x", hdev->name, event);
3921 break;
3922 }
3923
3924 kfree_skb(skb);
3925 hdev->stat.evt_rx++;
3926}