Bluetooth: Remove hdev->ioctl driver callback
[deliverable/linux.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <asm/unaligned.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/mgmt.h>
32 #include <net/bluetooth/a2mp.h>
33 #include <net/bluetooth/amp.h>
34
35 /* Handle HCI Event packets */
36
/* Command Complete handler for HCI_Inquiry_Cancel.
 *
 * On success, clears the inquiry-in-progress flag, wakes anyone sleeping
 * on it, and retries any connection attempts deferred during inquiry.
 */
static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
{
        __u8 status = *((__u8 *) skb->data);

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        if (status)
                return;

        clear_bit(HCI_INQUIRY, &hdev->flags);
        /* Barrier must sit between the clear and the wakeup so waiters
         * observe the cleared bit. */
        smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
        wake_up_bit(&hdev->flags, HCI_INQUIRY);

        hci_conn_check_pending(hdev);
}
52
/* Command Complete handler for HCI_Periodic_Inquiry_Mode.
 * On success, marks periodic inquiry as active on the device.
 */
static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
        __u8 status = *((__u8 *) skb->data);

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        if (status)
                return;

        set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
}
64
/* Command Complete handler for HCI_Exit_Periodic_Inquiry_Mode.
 *
 * On success, clears the periodic-inquiry flag and retries connection
 * attempts that were deferred while inquiry was running.
 */
static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
{
        __u8 status = *((__u8 *) skb->data);

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        if (status)
                return;

        clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);

        hci_conn_check_pending(hdev);
}
78
79 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
80 struct sk_buff *skb)
81 {
82 BT_DBG("%s", hdev->name);
83 }
84
/* Command Complete handler for HCI_Role_Discovery.
 *
 * On success, updates the master/slave bit of the connection's link mode
 * according to the reported role (0 = master, 1 = slave).
 */
static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_rp_role_discovery *rp = (void *) skb->data;
        struct hci_conn *conn;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        if (rp->status)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
        if (conn) {
                if (rp->role)
                        conn->link_mode &= ~HCI_LM_MASTER;
                else
                        conn->link_mode |= HCI_LM_MASTER;
        }

        hci_dev_unlock(hdev);
}
107
/* Command Complete handler for HCI_Read_Link_Policy_Settings.
 * On success, caches the reported policy on the matching connection.
 */
static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_rp_read_link_policy *rp = (void *) skb->data;
        struct hci_conn *conn;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        if (rp->status)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
        if (conn)
                conn->link_policy = __le16_to_cpu(rp->policy);

        hci_dev_unlock(hdev);
}
126
/* Command Complete handler for HCI_Write_Link_Policy_Settings.
 *
 * The response only carries the handle, so the policy value that was
 * written is recovered from the sent command (offset 2 skips the
 * little-endian handle at the start of the command parameters).
 */
static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_rp_write_link_policy *rp = (void *) skb->data;
        struct hci_conn *conn;
        void *sent;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        if (rp->status)
                return;

        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
        if (!sent)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
        if (conn)
                conn->link_policy = get_unaligned_le16(sent + 2);

        hci_dev_unlock(hdev);
}
150
151 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
152 struct sk_buff *skb)
153 {
154 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
155
156 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
157
158 if (rp->status)
159 return;
160
161 hdev->link_policy = __le16_to_cpu(rp->policy);
162 }
163
/* Command Complete handler for HCI_Write_Default_Link_Policy_Settings.
 *
 * The written value is not echoed back, so it is recovered from the
 * sent command and cached only when the command succeeded.
 */
static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
                                         struct sk_buff *skb)
{
        __u8 status = *((__u8 *) skb->data);
        void *sent;

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
        if (!sent)
                return;

        if (!status)
                hdev->link_policy = get_unaligned_le16(sent);
}
179
/* Command Complete handler for HCI_Reset.
 *
 * Clears the in-progress reset flag and reinitializes all state that
 * does not survive a controller reset: non-persistent device flags,
 * discovery state, cached TX power values and advertising data.
 */
static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
{
        __u8 status = *((__u8 *) skb->data);

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        clear_bit(HCI_RESET, &hdev->flags);

        /* Reset all non-persistent flags */
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        hdev->discovery.state = DISCOVERY_STOPPED;
        hdev->inq_tx_power = HCI_TX_POWER_INVALID;
        hdev->adv_tx_power = HCI_TX_POWER_INVALID;

        memset(hdev->adv_data, 0, sizeof(hdev->adv_data));
        hdev->adv_data_len = 0;
}
198
/* Command Complete handler for HCI_Write_Local_Name.
 *
 * When the management interface is active, completion (success or
 * failure) is forwarded to mgmt; otherwise the name from the sent
 * command is cached locally on success.
 */
static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
{
        __u8 status = *((__u8 *) skb->data);
        void *sent;

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
        if (!sent)
                return;

        hci_dev_lock(hdev);

        if (test_bit(HCI_MGMT, &hdev->dev_flags))
                mgmt_set_local_name_complete(hdev, sent, status);
        else if (!status)
                memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);

        hci_dev_unlock(hdev);
}
219
220 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
221 {
222 struct hci_rp_read_local_name *rp = (void *) skb->data;
223
224 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
225
226 if (rp->status)
227 return;
228
229 if (test_bit(HCI_SETUP, &hdev->dev_flags))
230 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
231 }
232
/* Command Complete handler for HCI_Write_Authentication_Enable.
 *
 * On success, mirrors the requested mode into the HCI_AUTH flag; the
 * result is always forwarded to mgmt when it is active.
 */
static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
        __u8 status = *((__u8 *) skb->data);
        void *sent;

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
        if (!sent)
                return;

        if (!status) {
                __u8 param = *((__u8 *) sent);

                if (param == AUTH_ENABLED)
                        set_bit(HCI_AUTH, &hdev->flags);
                else
                        clear_bit(HCI_AUTH, &hdev->flags);
        }

        if (test_bit(HCI_MGMT, &hdev->dev_flags))
                mgmt_auth_enable_complete(hdev, status);
}
256
/* Command Complete handler for HCI_Write_Encryption_Mode.
 * On success, mirrors the requested mode into the HCI_ENCRYPT flag.
 */
static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
        __u8 status = *((__u8 *) skb->data);
        void *sent;

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
        if (!sent)
                return;

        if (!status) {
                __u8 param = *((__u8 *) sent);

                if (param)
                        set_bit(HCI_ENCRYPT, &hdev->flags);
                else
                        clear_bit(HCI_ENCRYPT, &hdev->flags);
        }
}
277
/* Command Complete handler for HCI_Write_Scan_Enable.
 *
 * Synchronizes HCI_ISCAN/HCI_PSCAN flags with the scan mode that was
 * written, notifies mgmt about discoverable/connectable transitions and
 * arms the discoverable timeout when one is configured. On failure the
 * timeout is dropped and mgmt is told the write failed.
 */
static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
        __u8 param, status = *((__u8 *) skb->data);
        int old_pscan, old_iscan;
        void *sent;

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
        if (!sent)
                return;

        param = *((__u8 *) sent);

        hci_dev_lock(hdev);

        if (status) {
                mgmt_write_scan_failed(hdev, param, status);
                hdev->discov_timeout = 0;
                goto done;
        }

        /* We need to ensure that we set this back on if someone changed
         * the scan mode through a raw HCI socket.
         */
        set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        /* Capture the previous state while clearing, so transitions can
         * be reported to mgmt only when something actually changed. */
        old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
        old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);

        if (param & SCAN_INQUIRY) {
                set_bit(HCI_ISCAN, &hdev->flags);
                if (!old_iscan)
                        mgmt_discoverable(hdev, 1);
                if (hdev->discov_timeout > 0) {
                        int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
                        queue_delayed_work(hdev->workqueue, &hdev->discov_off,
                                           to);
                }
        } else if (old_iscan)
                mgmt_discoverable(hdev, 0);

        if (param & SCAN_PAGE) {
                set_bit(HCI_PSCAN, &hdev->flags);
                if (!old_pscan)
                        mgmt_connectable(hdev, 1);
        } else if (old_pscan)
                mgmt_connectable(hdev, 0);

done:
        hci_dev_unlock(hdev);
}
330
/* Command Complete handler for HCI_Read_Class_of_Device.
 * On success, caches the 3-byte class of device.
 */
static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_rp_read_class_of_dev *rp = (void *) skb->data;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        if (rp->status)
                return;

        memcpy(hdev->dev_class, rp->dev_class, 3);

        /* Class of device is stored little-endian; print MSB first */
        BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
               hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
}
345
/* Command Complete handler for HCI_Write_Class_of_Device.
 *
 * On success, caches the class that was written; completion is always
 * forwarded to mgmt when the management interface is active.
 */
static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
{
        __u8 status = *((__u8 *) skb->data);
        void *sent;

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
        if (!sent)
                return;

        hci_dev_lock(hdev);

        if (status == 0)
                memcpy(hdev->dev_class, sent, 3);

        if (test_bit(HCI_MGMT, &hdev->dev_flags))
                mgmt_set_class_of_dev_complete(hdev, sent, status);

        hci_dev_unlock(hdev);
}
367
/* Command Complete handler for HCI_Read_Voice_Setting.
 *
 * Caches the voice setting and notifies the driver (if it registered a
 * notify callback) only when the value actually changed.
 */
static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_rp_read_voice_setting *rp = (void *) skb->data;
        __u16 setting;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        if (rp->status)
                return;

        setting = __le16_to_cpu(rp->voice_setting);

        if (hdev->voice_setting == setting)
                return;

        hdev->voice_setting = setting;

        BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
390
/* Command Complete handler for HCI_Write_Voice_Setting.
 *
 * The written value is recovered from the sent command; the cached
 * setting is updated and the driver notified only when it changed.
 */
static void hci_cc_write_voice_setting(struct hci_dev *hdev,
                                       struct sk_buff *skb)
{
        __u8 status = *((__u8 *) skb->data);
        __u16 setting;
        void *sent;

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        if (status)
                return;

        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
        if (!sent)
                return;

        setting = get_unaligned_le16(sent);

        if (hdev->voice_setting == setting)
                return;

        hdev->voice_setting = setting;

        BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);

        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
}
419
/* Command Complete handler for HCI_Write_Simple_Pairing_Mode.
 *
 * On success, mirrors the requested mode into the host SSP feature bit.
 * With mgmt active, completion is forwarded there; otherwise the
 * HCI_SSP_ENABLED device flag is updated directly.
 */
static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
{
        __u8 status = *((__u8 *) skb->data);
        struct hci_cp_write_ssp_mode *sent;

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
        if (!sent)
                return;

        if (!status) {
                if (sent->mode)
                        hdev->features[1][0] |= LMP_HOST_SSP;
                else
                        hdev->features[1][0] &= ~LMP_HOST_SSP;
        }

        if (test_bit(HCI_MGMT, &hdev->dev_flags))
                mgmt_ssp_enable_complete(hdev, sent->mode, status);
        else if (!status) {
                if (sent->mode)
                        set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
                else
                        clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
        }
}
447
/* Command Complete handler for HCI_Read_Local_Version_Information.
 * On success, caches HCI/LMP versions and the manufacturer ID.
 */
static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_rp_read_local_version *rp = (void *) skb->data;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        if (rp->status)
                return;

        hdev->hci_ver = rp->hci_ver;
        hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
        hdev->lmp_ver = rp->lmp_ver;
        hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
        hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);

        BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
               hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
}
466
467 static void hci_cc_read_local_commands(struct hci_dev *hdev,
468 struct sk_buff *skb)
469 {
470 struct hci_rp_read_local_commands *rp = (void *) skb->data;
471
472 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
473
474 if (!rp->status)
475 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
476 }
477
/* Command Complete handler for HCI_Read_Local_Supported_Features.
 *
 * Caches the LMP feature page 0 and derives the default ACL/SCO/eSCO
 * packet types the controller can use from the feature bits.
 */
static void hci_cc_read_local_features(struct hci_dev *hdev,
                                       struct sk_buff *skb)
{
        struct hci_rp_read_local_features *rp = (void *) skb->data;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        if (rp->status)
                return;

        memcpy(hdev->features, rp->features, 8);

        /* Adjust default settings according to features
         * supported by device. */

        if (hdev->features[0][0] & LMP_3SLOT)
                hdev->pkt_type |= (HCI_DM3 | HCI_DH3);

        if (hdev->features[0][0] & LMP_5SLOT)
                hdev->pkt_type |= (HCI_DM5 | HCI_DH5);

        if (hdev->features[0][1] & LMP_HV2) {
                hdev->pkt_type |= (HCI_HV2);
                hdev->esco_type |= (ESCO_HV2);
        }

        if (hdev->features[0][1] & LMP_HV3) {
                hdev->pkt_type |= (HCI_HV3);
                hdev->esco_type |= (ESCO_HV3);
        }

        if (lmp_esco_capable(hdev))
                hdev->esco_type |= (ESCO_EV3);

        if (hdev->features[0][4] & LMP_EV4)
                hdev->esco_type |= (ESCO_EV4);

        if (hdev->features[0][4] & LMP_EV5)
                hdev->esco_type |= (ESCO_EV5);

        if (hdev->features[0][5] & LMP_EDR_ESCO_2M)
                hdev->esco_type |= (ESCO_2EV3);

        if (hdev->features[0][5] & LMP_EDR_ESCO_3M)
                hdev->esco_type |= (ESCO_3EV3);

        if (hdev->features[0][5] & LMP_EDR_3S_ESCO)
                hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);

        BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
               hdev->features[0][0], hdev->features[0][1],
               hdev->features[0][2], hdev->features[0][3],
               hdev->features[0][4], hdev->features[0][5],
               hdev->features[0][6], hdev->features[0][7]);
}
533
/* Command Complete handler for HCI_Read_Local_Extended_Features.
 *
 * Records the highest available feature page and caches the returned
 * page, bounds-checked against the local feature-page array.
 */
static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
                                           struct sk_buff *skb)
{
        struct hci_rp_read_local_ext_features *rp = (void *) skb->data;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        if (rp->status)
                return;

        hdev->max_page = rp->max_page;

        if (rp->page < HCI_MAX_PAGES)
                memcpy(hdev->features[rp->page], rp->features, 8);
}
549
550 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
551 struct sk_buff *skb)
552 {
553 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
554
555 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
556
557 if (!rp->status)
558 hdev->flow_ctl_mode = rp->mode;
559 }
560
/* Command Complete handler for HCI_Read_Buffer_Size.
 *
 * Caches ACL/SCO MTUs and packet counts and initializes the available
 * packet counters. Controllers with the FIXUP_BUFFER_SIZE quirk report
 * bogus SCO values, so those are overridden with safe defaults.
 */
static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_rp_read_buffer_size *rp = (void *) skb->data;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        if (rp->status)
                return;

        hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
        hdev->sco_mtu = rp->sco_mtu;
        hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
        hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);

        if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
                hdev->sco_mtu = 64;
                hdev->sco_pkts = 8;
        }

        hdev->acl_cnt = hdev->acl_pkts;
        hdev->sco_cnt = hdev->sco_pkts;

        BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
               hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
}
586
587 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
588 {
589 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
590
591 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
592
593 if (!rp->status)
594 bacpy(&hdev->bdaddr, &rp->bdaddr);
595 }
596
597 static void hci_cc_read_page_scan_activity(struct hci_dev *hdev,
598 struct sk_buff *skb)
599 {
600 struct hci_rp_read_page_scan_activity *rp = (void *) skb->data;
601
602 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
603
604 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status) {
605 hdev->page_scan_interval = __le16_to_cpu(rp->interval);
606 hdev->page_scan_window = __le16_to_cpu(rp->window);
607 }
608 }
609
/* Command Complete handler for HCI_Write_Page_Scan_Activity.
 *
 * On success, caches the interval and window recovered from the sent
 * command (they are not echoed back in the response).
 */
static void hci_cc_write_page_scan_activity(struct hci_dev *hdev,
                                            struct sk_buff *skb)
{
        u8 status = *((u8 *) skb->data);
        struct hci_cp_write_page_scan_activity *sent;

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        if (status)
                return;

        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY);
        if (!sent)
                return;

        hdev->page_scan_interval = __le16_to_cpu(sent->interval);
        hdev->page_scan_window = __le16_to_cpu(sent->window);
}
628
629 static void hci_cc_read_page_scan_type(struct hci_dev *hdev,
630 struct sk_buff *skb)
631 {
632 struct hci_rp_read_page_scan_type *rp = (void *) skb->data;
633
634 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
635
636 if (test_bit(HCI_INIT, &hdev->flags) && !rp->status)
637 hdev->page_scan_type = rp->type;
638 }
639
640 static void hci_cc_write_page_scan_type(struct hci_dev *hdev,
641 struct sk_buff *skb)
642 {
643 u8 status = *((u8 *) skb->data);
644 u8 *type;
645
646 BT_DBG("%s status 0x%2.2x", hdev->name, status);
647
648 if (status)
649 return;
650
651 type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE);
652 if (type)
653 hdev->page_scan_type = *type;
654 }
655
/* Command Complete handler for HCI_Read_Data_Block_Size.
 *
 * Caches the block-based flow control parameters and initializes the
 * available block counter (used instead of packet counters when the
 * controller operates in block flow control mode).
 */
static void hci_cc_read_data_block_size(struct hci_dev *hdev,
                                        struct sk_buff *skb)
{
        struct hci_rp_read_data_block_size *rp = (void *) skb->data;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        if (rp->status)
                return;

        hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
        hdev->block_len = __le16_to_cpu(rp->block_len);
        hdev->num_blocks = __le16_to_cpu(rp->num_blocks);

        hdev->block_cnt = hdev->num_blocks;

        BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
               hdev->block_cnt, hdev->block_len);
}
675
/* Command Complete handler for HCI_Read_Local_AMP_Info.
 *
 * On success, caches all AMP controller capabilities. In both the
 * success and failure case an A2MP Get Info response is sent, since
 * a peer may be waiting on it.
 */
static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
                                       struct sk_buff *skb)
{
        struct hci_rp_read_local_amp_info *rp = (void *) skb->data;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        if (rp->status)
                goto a2mp_rsp;

        hdev->amp_status = rp->amp_status;
        hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
        hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
        hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
        hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
        hdev->amp_type = rp->amp_type;
        hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
        hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
        hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
        hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);

a2mp_rsp:
        a2mp_send_getinfo_rsp(hdev);
}
700
701 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
702 struct sk_buff *skb)
703 {
704 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
705 struct amp_assoc *assoc = &hdev->loc_assoc;
706 size_t rem_len, frag_len;
707
708 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
709
710 if (rp->status)
711 goto a2mp_rsp;
712
713 frag_len = skb->len - sizeof(*rp);
714 rem_len = __le16_to_cpu(rp->rem_len);
715
716 if (rem_len > frag_len) {
717 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
718
719 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
720 assoc->offset += frag_len;
721
722 /* Read other fragments */
723 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
724
725 return;
726 }
727
728 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
729 assoc->len = assoc->offset + rem_len;
730 assoc->offset = 0;
731
732 a2mp_rsp:
733 /* Send A2MP Rsp when all fragments are received */
734 a2mp_send_getampassoc_rsp(hdev, rp->status);
735 a2mp_send_create_phy_link_req(hdev, rp->status);
736 }
737
738 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
739 struct sk_buff *skb)
740 {
741 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
742
743 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
744
745 if (!rp->status)
746 hdev->inq_tx_power = rp->tx_power;
747 }
748
/* Command Complete handler for HCI_PIN_Code_Request_Reply.
 *
 * Forwards the result to mgmt when active, then on success records the
 * PIN length (taken from the sent command) on the matching ACL
 * connection for later key-type decisions.
 */
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_rp_pin_code_reply *rp = (void *) skb->data;
        struct hci_cp_pin_code_reply *cp;
        struct hci_conn *conn;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        hci_dev_lock(hdev);

        if (test_bit(HCI_MGMT, &hdev->dev_flags))
                mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);

        if (rp->status)
                goto unlock;

        cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
        if (!cp)
                goto unlock;

        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
        if (conn)
                conn->pin_length = cp->pin_len;

unlock:
        hci_dev_unlock(hdev);
}
776
/* Command Complete handler for HCI_PIN_Code_Request_Negative_Reply.
 * Forwards the result to mgmt when the management interface is active.
 */
static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        hci_dev_lock(hdev);

        if (test_bit(HCI_MGMT, &hdev->dev_flags))
                mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
                                                 rp->status);

        hci_dev_unlock(hdev);
}
791
/* Command Complete handler for HCI_LE_Read_Buffer_Size.
 * Caches the LE ACL MTU/packet count and initializes the LE counter.
 */
static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
                                       struct sk_buff *skb)
{
        struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        if (rp->status)
                return;

        hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
        hdev->le_pkts = rp->le_max_pkt;

        hdev->le_cnt = hdev->le_pkts;

        BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
}
809
810 static void hci_cc_le_read_local_features(struct hci_dev *hdev,
811 struct sk_buff *skb)
812 {
813 struct hci_rp_le_read_local_features *rp = (void *) skb->data;
814
815 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
816
817 if (!rp->status)
818 memcpy(hdev->le_features, rp->features, 8);
819 }
820
821 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
822 struct sk_buff *skb)
823 {
824 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
825
826 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
827
828 if (!rp->status)
829 hdev->adv_tx_power = rp->tx_power;
830 }
831
/* Command Complete handler for HCI_User_Confirmation_Request_Reply.
 * Forwards the result to mgmt when the management interface is active.
 */
static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        hci_dev_lock(hdev);

        if (test_bit(HCI_MGMT, &hdev->dev_flags))
                mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
                                                 rp->status);

        hci_dev_unlock(hdev);
}
846
/* Command Complete handler for
 * HCI_User_Confirmation_Request_Negative_Reply.
 * Forwards the result to mgmt when the management interface is active.
 */
static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
                                          struct sk_buff *skb)
{
        struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        hci_dev_lock(hdev);

        if (test_bit(HCI_MGMT, &hdev->dev_flags))
                mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
                                                     ACL_LINK, 0, rp->status);

        hci_dev_unlock(hdev);
}
862
/* Command Complete handler for HCI_User_Passkey_Request_Reply.
 * Forwards the result to mgmt when the management interface is active.
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        hci_dev_lock(hdev);

        if (test_bit(HCI_MGMT, &hdev->dev_flags))
                mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
                                                 0, rp->status);

        hci_dev_unlock(hdev);
}
877
/* Command Complete handler for HCI_User_Passkey_Request_Negative_Reply.
 * Forwards the result to mgmt when the management interface is active.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
                                          struct sk_buff *skb)
{
        struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        hci_dev_lock(hdev);

        if (test_bit(HCI_MGMT, &hdev->dev_flags))
                mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
                                                     ACL_LINK, 0, rp->status);

        hci_dev_unlock(hdev);
}
893
/* Command Complete handler for HCI_Read_Local_OOB_Data.
 * Hands hash and randomizer (and status) to mgmt unconditionally.
 */
static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
                                             struct sk_buff *skb)
{
        struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

        BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

        hci_dev_lock(hdev);
        mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
                                                rp->randomizer, rp->status);
        hci_dev_unlock(hdev);
}
906
/* Command Complete handler for HCI_LE_Set_Advertise_Enable.
 *
 * On success, mirrors the requested state into HCI_ADVERTISING.
 * Outside controller init, the advertising data is regenerated so it
 * reflects the new state.
 */
static void hci_cc_le_set_adv_enable(struct hci_dev *hdev, struct sk_buff *skb)
{
        __u8 *sent, status = *((__u8 *) skb->data);

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE);
        if (!sent)
                return;

        hci_dev_lock(hdev);

        if (!status) {
                if (*sent)
                        set_bit(HCI_ADVERTISING, &hdev->dev_flags);
                else
                        clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
        }

        /* During init the AD update is handled by the init sequence */
        if (!test_bit(HCI_INIT, &hdev->flags)) {
                struct hci_request req;

                hci_req_init(&req, hdev);
                hci_update_ad(&req);
                hci_req_run(&req, NULL);
        }

        hci_dev_unlock(hdev);
}
936
/* Command Complete handler for HCI_LE_Set_Scan_Enable.
 *
 * On success, mirrors the enable value from the sent command into the
 * HCI_LE_SCAN device flag; reserved enable values are only logged.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
                                      struct sk_buff *skb)
{
        struct hci_cp_le_set_scan_enable *cp;
        __u8 status = *((__u8 *) skb->data);

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
        if (!cp)
                return;

        if (status)
                return;

        switch (cp->enable) {
        case LE_SCAN_ENABLE:
                set_bit(HCI_LE_SCAN, &hdev->dev_flags);
                break;

        case LE_SCAN_DISABLE:
                clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
                break;

        default:
                BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
                break;
        }
}
966
967 static void hci_cc_le_read_white_list_size(struct hci_dev *hdev,
968 struct sk_buff *skb)
969 {
970 struct hci_rp_le_read_white_list_size *rp = (void *) skb->data;
971
972 BT_DBG("%s status 0x%2.2x size %u", hdev->name, rp->status, rp->size);
973
974 if (!rp->status)
975 hdev->le_white_list_size = rp->size;
976 }
977
978 static void hci_cc_le_read_supported_states(struct hci_dev *hdev,
979 struct sk_buff *skb)
980 {
981 struct hci_rp_le_read_supported_states *rp = (void *) skb->data;
982
983 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
984
985 if (!rp->status)
986 memcpy(hdev->le_states, rp->le_states, 8);
987 }
988
/* Command Complete handler for HCI_Write_LE_Host_Supported.
 *
 * On success, mirrors the written le/simul values into the host
 * feature bits and the HCI_LE_ENABLED flag. Disabling LE also clears
 * HCI_ADVERTISING since advertising cannot continue without LE.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
                                           struct sk_buff *skb)
{
        struct hci_cp_write_le_host_supported *sent;
        __u8 status = *((__u8 *) skb->data);

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
        if (!sent)
                return;

        if (!status) {
                if (sent->le) {
                        hdev->features[1][0] |= LMP_HOST_LE;
                        set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
                } else {
                        hdev->features[1][0] &= ~LMP_HOST_LE;
                        clear_bit(HCI_LE_ENABLED, &hdev->dev_flags);
                        clear_bit(HCI_ADVERTISING, &hdev->dev_flags);
                }

                if (sent->simul)
                        hdev->features[1][0] |= LMP_HOST_LE_BREDR;
                else
                        hdev->features[1][0] &= ~LMP_HOST_LE_BREDR;
        }
}
1017
/* Command Complete handler for HCI_Write_Remote_AMP_ASSOC.
 * On success, continues writing the next association fragment.
 */
static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
                                          struct sk_buff *skb)
{
        struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;

        BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
               hdev->name, rp->status, rp->phy_handle);

        if (rp->status)
                return;

        amp_write_rem_assoc_continue(hdev, rp->phy_handle);
}
1031
1032 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1033 {
1034 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1035
1036 if (status) {
1037 hci_conn_check_pending(hdev);
1038 return;
1039 }
1040
1041 set_bit(HCI_INQUIRY, &hdev->flags);
1042 }
1043
/* Command Status handler for HCI_Create_Connection.
 *
 * On failure, an outgoing connection in BT_CONNECT is torn down —
 * except for status 0x0c (Command Disallowed) on the first attempts,
 * where the connection is parked in BT_CONNECT2 for a retry.
 * On success, a connection object is created if none exists yet
 * (e.g. the command was sent via a raw HCI socket).
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
        struct hci_cp_create_conn *cp;
        struct hci_conn *conn;

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
        if (!cp)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

        BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

        if (status) {
                if (conn && conn->state == BT_CONNECT) {
                        if (status != 0x0c || conn->attempt > 2) {
                                conn->state = BT_CLOSED;
                                hci_proto_connect_cfm(conn, status);
                                hci_conn_del(conn);
                        } else
                                conn->state = BT_CONNECT2;
                }
        } else {
                if (!conn) {
                        conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
                        if (conn) {
                                conn->out = true;
                                conn->link_mode |= HCI_LM_MASTER;
                        } else
                                BT_ERR("No memory for new connection");
                }
        }

        hci_dev_unlock(hdev);
}
1083
/* Command Status handler for HCI_Add_SCO_Connection.
 *
 * Only failures need handling: the SCO link hanging off the parent ACL
 * connection is closed, the failure is confirmed to the protocol layer
 * and the SCO connection object is deleted.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
        struct hci_cp_add_sco *cp;
        struct hci_conn *acl, *sco;
        __u16 handle;

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        if (!status)
                return;

        cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
        if (!cp)
                return;

        handle = __le16_to_cpu(cp->handle);

        BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

        hci_dev_lock(hdev);

        acl = hci_conn_hash_lookup_handle(hdev, handle);
        if (acl) {
                sco = acl->link;
                if (sco) {
                        sco->state = BT_CLOSED;

                        hci_proto_connect_cfm(sco, status);
                        hci_conn_del(sco);
                }
        }

        hci_dev_unlock(hdev);
}
1118
/* Command Status handler for HCI_Authentication_Requested.
 *
 * Only failures need handling: a connection still in the config phase
 * gets the failure confirmed and its extra reference dropped.
 */
static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
{
        struct hci_cp_auth_requested *cp;
        struct hci_conn *conn;

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        if (!status)
                return;

        cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
        if (!cp)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
        if (conn) {
                if (conn->state == BT_CONFIG) {
                        hci_proto_connect_cfm(conn, status);
                        hci_conn_drop(conn);
                }
        }

        hci_dev_unlock(hdev);
}
1145
/* Command Status handler for HCI_Set_Connection_Encryption.
 *
 * Only failures need handling: a connection still in the config phase
 * gets the failure confirmed and its extra reference dropped.
 */
static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
{
        struct hci_cp_set_conn_encrypt *cp;
        struct hci_conn *conn;

        BT_DBG("%s status 0x%2.2x", hdev->name, status);

        if (!status)
                return;

        cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
        if (!cp)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
        if (conn) {
                if (conn->state == BT_CONFIG) {
                        hci_proto_connect_cfm(conn, status);
                        hci_conn_drop(conn);
                }
        }

        hci_dev_unlock(hdev);
}
1172
1173 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1174 struct hci_conn *conn)
1175 {
1176 if (conn->state != BT_CONFIG || !conn->out)
1177 return 0;
1178
1179 if (conn->pending_sec_level == BT_SECURITY_SDP)
1180 return 0;
1181
1182 /* Only request authentication for SSP connections or non-SSP
1183 * devices with sec_level HIGH or if MITM protection is requested */
1184 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1185 conn->pending_sec_level != BT_SECURITY_HIGH)
1186 return 0;
1187
1188 return 1;
1189 }
1190
1191 static int hci_resolve_name(struct hci_dev *hdev,
1192 struct inquiry_entry *e)
1193 {
1194 struct hci_cp_remote_name_req cp;
1195
1196 memset(&cp, 0, sizeof(cp));
1197
1198 bacpy(&cp.bdaddr, &e->data.bdaddr);
1199 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1200 cp.pscan_mode = e->data.pscan_mode;
1201 cp.clock_offset = e->data.clock_offset;
1202
1203 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1204 }
1205
1206 static bool hci_resolve_next_name(struct hci_dev *hdev)
1207 {
1208 struct discovery_state *discov = &hdev->discovery;
1209 struct inquiry_entry *e;
1210
1211 if (list_empty(&discov->resolve))
1212 return false;
1213
1214 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1215 if (!e)
1216 return false;
1217
1218 if (hci_resolve_name(hdev, e) == 0) {
1219 e->name_state = NAME_PENDING;
1220 return true;
1221 }
1222
1223 return false;
1224 }
1225
/* Deliver a freshly learned (or failed) remote name to mgmt and drive
 * the name-resolving phase of discovery forward.
 *
 * @conn may be NULL; if set and not yet reported to mgmt, a
 * device_connected event carrying the name is emitted first.
 * @name is NULL when the Remote Name Request failed.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	/* Report the connection (including the name, if any) exactly once. */
	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* A stop was requested; finish discovery without resolving more. */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names pending: stay in the resolving state. */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1268
/* Command status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * On success, nothing happens until the Remote Name Request Complete
 * event arrives.  On failure, the discovery machinery is told the name
 * could not be obtained and, for an outgoing connection that needs it,
 * authentication is started directly as a fallback.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	/* Report the failed name lookup (name == NULL) to mgmt discovery. */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Start authentication unless one is already pending. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested auth_cp;

		auth_cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(auth_cp), &auth_cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1309
1310 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1311 {
1312 struct hci_cp_read_remote_features *cp;
1313 struct hci_conn *conn;
1314
1315 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1316
1317 if (!status)
1318 return;
1319
1320 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1321 if (!cp)
1322 return;
1323
1324 hci_dev_lock(hdev);
1325
1326 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1327 if (conn) {
1328 if (conn->state == BT_CONFIG) {
1329 hci_proto_connect_cfm(conn, status);
1330 hci_conn_drop(conn);
1331 }
1332 }
1333
1334 hci_dev_unlock(hdev);
1335 }
1336
1337 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1338 {
1339 struct hci_cp_read_remote_ext_features *cp;
1340 struct hci_conn *conn;
1341
1342 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1343
1344 if (!status)
1345 return;
1346
1347 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1348 if (!cp)
1349 return;
1350
1351 hci_dev_lock(hdev);
1352
1353 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1354 if (conn) {
1355 if (conn->state == BT_CONFIG) {
1356 hci_proto_connect_cfm(conn, status);
1357 hci_conn_drop(conn);
1358 }
1359 }
1360
1361 hci_dev_unlock(hdev);
1362 }
1363
1364 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1365 {
1366 struct hci_cp_setup_sync_conn *cp;
1367 struct hci_conn *acl, *sco;
1368 __u16 handle;
1369
1370 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1371
1372 if (!status)
1373 return;
1374
1375 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1376 if (!cp)
1377 return;
1378
1379 handle = __le16_to_cpu(cp->handle);
1380
1381 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1382
1383 hci_dev_lock(hdev);
1384
1385 acl = hci_conn_hash_lookup_handle(hdev, handle);
1386 if (acl) {
1387 sco = acl->link;
1388 if (sco) {
1389 sco->state = BT_CLOSED;
1390
1391 hci_proto_connect_cfm(sco, status);
1392 hci_conn_del(sco);
1393 }
1394 }
1395
1396 hci_dev_unlock(hdev);
1397 }
1398
1399 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1400 {
1401 struct hci_cp_sniff_mode *cp;
1402 struct hci_conn *conn;
1403
1404 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1405
1406 if (!status)
1407 return;
1408
1409 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1410 if (!cp)
1411 return;
1412
1413 hci_dev_lock(hdev);
1414
1415 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1416 if (conn) {
1417 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1418
1419 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1420 hci_sco_setup(conn, status);
1421 }
1422
1423 hci_dev_unlock(hdev);
1424 }
1425
1426 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1427 {
1428 struct hci_cp_exit_sniff_mode *cp;
1429 struct hci_conn *conn;
1430
1431 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1432
1433 if (!status)
1434 return;
1435
1436 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1437 if (!cp)
1438 return;
1439
1440 hci_dev_lock(hdev);
1441
1442 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1443 if (conn) {
1444 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1445
1446 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1447 hci_sco_setup(conn, status);
1448 }
1449
1450 hci_dev_unlock(hdev);
1451 }
1452
1453 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1454 {
1455 struct hci_cp_disconnect *cp;
1456 struct hci_conn *conn;
1457
1458 if (!status)
1459 return;
1460
1461 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1462 if (!cp)
1463 return;
1464
1465 hci_dev_lock(hdev);
1466
1467 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1468 if (conn)
1469 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1470 conn->dst_type, status);
1471
1472 hci_dev_unlock(hdev);
1473 }
1474
1475 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1476 {
1477 struct hci_cp_create_phy_link *cp;
1478
1479 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1480
1481 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1482 if (!cp)
1483 return;
1484
1485 hci_dev_lock(hdev);
1486
1487 if (status) {
1488 struct hci_conn *hcon;
1489
1490 hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle);
1491 if (hcon)
1492 hci_conn_del(hcon);
1493 } else {
1494 amp_write_remote_assoc(hdev, cp->phy_handle);
1495 }
1496
1497 hci_dev_unlock(hdev);
1498 }
1499
1500 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1501 {
1502 struct hci_cp_accept_phy_link *cp;
1503
1504 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1505
1506 if (status)
1507 return;
1508
1509 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1510 if (!cp)
1511 return;
1512
1513 amp_write_remote_assoc(hdev, cp->phy_handle);
1514 }
1515
/* Inquiry Complete event: the inquiry phase of discovery ended.
 *
 * Wakes any waiter blocked on the HCI_INQUIRY bit and, when
 * mgmt-driven discovery is active, either moves on to resolving cached
 * device names or marks discovery as stopped.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_conn_check_pending(hdev);

	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	smp_mb__after_clear_bit(); /* wake_up_bit advises about this barrier */
	wake_up_bit(&hdev->flags, HCI_INQUIRY);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	/* Nothing to resolve: discovery is done. */
	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	/* Start resolving the first cached device still needing a name. */
	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1556
1557 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1558 {
1559 struct inquiry_data data;
1560 struct inquiry_info *info = (void *) (skb->data + 1);
1561 int num_rsp = *((__u8 *) skb->data);
1562
1563 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1564
1565 if (!num_rsp)
1566 return;
1567
1568 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1569 return;
1570
1571 hci_dev_lock(hdev);
1572
1573 for (; num_rsp; num_rsp--, info++) {
1574 bool name_known, ssp;
1575
1576 bacpy(&data.bdaddr, &info->bdaddr);
1577 data.pscan_rep_mode = info->pscan_rep_mode;
1578 data.pscan_period_mode = info->pscan_period_mode;
1579 data.pscan_mode = info->pscan_mode;
1580 memcpy(data.dev_class, info->dev_class, 3);
1581 data.clock_offset = info->clock_offset;
1582 data.rssi = 0x00;
1583 data.ssp_mode = 0x00;
1584
1585 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1586 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1587 info->dev_class, 0, !name_known, ssp, NULL,
1588 0);
1589 }
1590
1591 hci_dev_unlock(hdev);
1592 }
1593
/* Connection Complete event: the controller finished (or failed) an
 * ACL/SCO connection attempt.
 *
 * On success the connection moves to BT_CONFIG (ACL, pending remote
 * feature exchange) or BT_CONNECTED (SCO), is registered in sysfs, and
 * follow-up commands are issued.  On failure the connection object is
 * deleted and mgmt is notified for ACL links.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		/* A SCO completion may answer an eSCO request: retry the
		 * lookup as ESCO and downgrade the link type. */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Without SSP or a stored link key, keep the link
			 * alive long enough for pairing to happen. */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
					    conn->dst_type, ev->status);
	}

	/* Drive any SCO setup that was waiting on this ACL link. */
	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
1675
/* Connection Request event: a remote device wants to connect.
 *
 * Accepts the request (as master if possible) unless the address is
 * blacklisted and no protocol wants the link, in which case it is
 * rejected.  eSCO-capable controllers may defer the accept to the
 * protocol layer via HCI_PROTO_DEFER.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;
	__u8 flags = 0;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Ask the protocol layers whether they want this connection. */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type,
				      &flags);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached device class if we know this device. */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK ||
		    (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) {
			struct hci_cp_accept_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else if (!(flags & HCI_PROTO_DEFER)) {
			/* Synchronous link: accept with default SCO/eSCO
			 * parameters. */
			struct hci_cp_accept_sync_conn_req cp;
			conn->state = BT_CONNECT;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
			cp.max_latency    = __constant_cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		} else {
			/* Deferred: let the protocol layer decide later. */
			conn->state = BT_CONNECT2;
			hci_proto_connect_cfm(conn, 0);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
1757
1758 static u8 hci_to_mgmt_reason(u8 err)
1759 {
1760 switch (err) {
1761 case HCI_ERROR_CONNECTION_TIMEOUT:
1762 return MGMT_DEV_DISCONN_TIMEOUT;
1763 case HCI_ERROR_REMOTE_USER_TERM:
1764 case HCI_ERROR_REMOTE_LOW_RESOURCES:
1765 case HCI_ERROR_REMOTE_POWER_OFF:
1766 return MGMT_DEV_DISCONN_REMOTE;
1767 case HCI_ERROR_LOCAL_HOST_TERM:
1768 return MGMT_DEV_DISCONN_LOCAL_HOST;
1769 default:
1770 return MGMT_DEV_DISCONN_UNKNOWN;
1771 }
1772 }
1773
/* Disconnection Complete event: a link was torn down (or the local
 * disconnect attempt failed).
 *
 * Notifies mgmt (once per connection), removes the link key if the
 * connection asked for it, confirms the disconnect to the protocol
 * layers and deletes the connection object.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status == 0)
		conn->state = BT_CLOSED;

	/* Only report to mgmt once, and only for links mgmt knows about. */
	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
		if (ev->status) {
			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
					       conn->dst_type, ev->status);
		} else {
			u8 reason = hci_to_mgmt_reason(ev->reason);

			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
						 conn->dst_type, reason);
		}
	}

	if (ev->status == 0) {
		u8 type = conn->type;

		if (type == ACL_LINK && conn->flush_key)
			hci_remove_link_key(hdev, &conn->dst);
		hci_proto_disconn_cfm(conn, ev->reason);
		hci_conn_del(conn);

		/* Re-enable advertising if necessary, since it might
		 * have been disabled by the connection. From the
		 * HCI_LE_Set_Advertise_Enable command description in
		 * the core specification (v4.0):
		 * "The Controller shall continue advertising until the Host
		 * issues an LE_Set_Advertise_Enable command with
		 * Advertising_Enable set to 0x00 (Advertising is disabled)
		 * or until a connection is created or until the Advertising
		 * is timed out due to Directed Advertising."
		 */
		if (type == LE_LINK)
			mgmt_reenable_advertising(hdev);
	}

unlock:
	hci_dev_unlock(hdev);
}
1828
/* Authentication Complete event.
 *
 * Updates the link mode and security level on success (except for
 * re-authentication of legacy, non-SSP devices, which controllers
 * cannot do), reports failures to mgmt, and then either continues the
 * connection setup with encryption (SSP) or confirms the result to the
 * waiting callers.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		/* For SSP links, authentication is followed by encryption
		 * before the connection is reported up. */
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

	/* An encryption request was queued behind this authentication. */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
1894
/* Remote Name Request Complete event.
 *
 * Feeds the (possibly failed) name into the discovery machinery when
 * mgmt is active, then checks whether the connection should now be
 * authenticated.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	/* Pass NULL as the name on failure. */
	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Start authentication unless one is already pending. */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1933
/* Encryption Change event.
 *
 * Updates the link-mode bits on success, disconnects a connected link
 * whose encryption change failed, and completes connection setup or
 * notifies waiters as appropriate.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		/* A failed encryption change on an established link is a
		 * security failure: drop the link. */
		if (ev->status && conn->state == BT_CONNECTED) {
			hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_drop(conn);
			goto unlock;
		}

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_drop(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
1976
1977 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
1978 struct sk_buff *skb)
1979 {
1980 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
1981 struct hci_conn *conn;
1982
1983 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1984
1985 hci_dev_lock(hdev);
1986
1987 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1988 if (conn) {
1989 if (!ev->status)
1990 conn->link_mode |= HCI_LM_SECURE;
1991
1992 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1993
1994 hci_key_change_cfm(conn, ev->status);
1995 }
1996
1997 hci_dev_unlock(hdev);
1998 }
1999
/* Read Remote Supported Features Complete event.
 *
 * Stores the remote feature page and, for connections still being
 * configured, continues setup: fetch extended features if both sides
 * support SSP, otherwise request the remote name or report the
 * connection to mgmt, and finally complete setup if no authentication
 * is needed.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features[0], ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Both sides SSP-capable: read extended features (page 1) next. */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	/* Otherwise ask for the remote name, or report the connection to
	 * mgmt if that already happened. */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2049
/* Command Complete event: dispatch to the per-opcode hci_cc_* handler,
 * cancel the command timeout, complete any pending request, and let
 * the command queue continue if the controller has credit again.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	/* The command's status byte immediately follows the event header. */
	u8 status = skb->data[sizeof(*ev)];
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_ACTIVITY:
		hci_cc_read_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_ACTIVITY:
		hci_cc_write_page_scan_activity(hdev, skb);
		break;

	case HCI_OP_READ_PAGE_SCAN_TYPE:
		hci_cc_read_page_scan_type(hdev, skb);
		break;

	case HCI_OP_WRITE_PAGE_SCAN_TYPE:
		hci_cc_write_page_scan_type(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_LOCAL_FEATURES:
		hci_cc_le_read_local_features(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_ADV_ENABLE:
		hci_cc_le_set_adv_enable(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_READ_WHITE_LIST_SIZE:
		hci_cc_le_read_white_list_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_SUPPORTED_STATES:
		hci_cc_le_read_supported_states(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	hci_req_cmd_complete(hdev, opcode, status);

	/* The controller reported free command credit: restart the command
	 * queue, unless a reset (which re-initializes the count) is in
	 * flight. */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2281
/* Command Status event: dispatch to the per-opcode hci_cs_* handler,
 * cancel the command timeout, complete the pending request where
 * appropriate, and let the command queue continue if the controller
 * has credit again.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	if (opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* A failed status always completes the request; a successful one
	 * only does so if the sent command did not ask to wait for a
	 * specific follow-up event. */
	if (ev->status ||
	    (hdev->sent_cmd && !bt_cb(hdev->sent_cmd)->req.event))
		hci_req_cmd_complete(hdev, opcode, ev->status);

	/* Restart the command queue on new credit, unless resetting. */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2366
2367 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2368 {
2369 struct hci_ev_role_change *ev = (void *) skb->data;
2370 struct hci_conn *conn;
2371
2372 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2373
2374 hci_dev_lock(hdev);
2375
2376 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2377 if (conn) {
2378 if (!ev->status) {
2379 if (ev->role)
2380 conn->link_mode &= ~HCI_LM_MASTER;
2381 else
2382 conn->link_mode |= HCI_LM_MASTER;
2383 }
2384
2385 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2386
2387 hci_role_switch_cfm(conn, ev->status, ev->role);
2388 }
2389
2390 hci_dev_unlock(hdev);
2391 }
2392
/* Number Of Completed Packets event (packet-based flow control).
 *
 * For each reported handle, credits the completed packets back to the
 * per-connection counter and the appropriate per-type controller
 * counter (capped at the controller-advertised maximum), then kicks
 * the TX work to send more data.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the claimed handle count fits in the skb. */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16  handle, count;

		handle = __le16_to_cpu(info->handle);
		count  = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL one. */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2458
2459 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2460 __u16 handle)
2461 {
2462 struct hci_chan *chan;
2463
2464 switch (hdev->dev_type) {
2465 case HCI_BREDR:
2466 return hci_conn_hash_lookup_handle(hdev, handle);
2467 case HCI_AMP:
2468 chan = hci_chan_lookup_handle(hdev, handle);
2469 if (chan)
2470 return chan->conn;
2471 break;
2472 default:
2473 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2474 break;
2475 }
2476
2477 return NULL;
2478 }
2479
/* Number of Completed Data Blocks event: block-based flow control
 * counterpart of Number of Completed Packets.  Return the freed data
 * blocks to the shared block pool and reschedule the TX work.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* Only valid when the controller uses block-based flow control */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Make sure the skb really contains num_hndl handle entries */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle resolution depends on dev_type (BR/EDR vs AMP) */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* All links share one block pool, clamped at the
			 * controller-reported total.
			 */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2529
/* Mode Change event: a connection moved between active and sniff/hold
 * mode.  Record the new mode and resume any SCO setup that was waiting
 * for the mode change to complete.
 */
static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_mode_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		conn->mode = ev->mode;
		conn->interval = __le16_to_cpu(ev->interval);

		/* Only mode changes we did not request ourselves update
		 * the power save flag (our own request already set the
		 * MODE_CHANGE_PEND bit, cleared here).
		 */
		if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
					&conn->flags)) {
			if (conn->mode == HCI_CM_ACTIVE)
				set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
			else
				clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
		}

		/* SCO setup deferred until the ACL left sniff mode */
		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
2558
/* PIN Code Request event: the remote device started legacy PIN
 * pairing.  Send a negative reply when we are not pairable, otherwise
 * forward the request to user space through mgmt.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Extend the disconnect timeout while pairing is in progress */
	if (conn->state == BT_CONNECTED) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_drop(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* Flag the request as secure when high security is
		 * required for the pending connection.
		 */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
2595
/* Link Key Request event: the controller asks whether we have a stored
 * link key for the peer.  Reply with the key when an acceptable one is
 * on file, otherwise send a negative reply.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Stored keys are only managed when mgmt is in use */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	/* Debug combination keys are only usable when debug keys have
	 * been explicitly enabled.
	 */
	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* An unauthenticated key cannot satisfy a connection
		 * that requests MITM protection (auth_type bit 0).
		 */
		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* A combination key from a short (< 16 digit) PIN is
		 * not strong enough for high security.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
2658
/* Link Key Notification event: the controller generated a new link key
 * for the peer.  Update the connection's key metadata and hand the key
 * to storage when mgmt is in use.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed combination key keeps the original key type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_drop(conn);
	}

	/* NOTE(review): conn may be NULL here; presumably
	 * hci_add_link_key() tolerates a NULL conn — verify.
	 */
	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				 ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
2687
2688 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2689 {
2690 struct hci_ev_clock_offset *ev = (void *) skb->data;
2691 struct hci_conn *conn;
2692
2693 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2694
2695 hci_dev_lock(hdev);
2696
2697 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2698 if (conn && !ev->status) {
2699 struct inquiry_entry *ie;
2700
2701 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2702 if (ie) {
2703 ie->data.clock_offset = ev->clock_offset;
2704 ie->timestamp = jiffies;
2705 }
2706 }
2707
2708 hci_dev_unlock(hdev);
2709 }
2710
2711 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2712 {
2713 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2714 struct hci_conn *conn;
2715
2716 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2717
2718 hci_dev_lock(hdev);
2719
2720 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2721 if (conn && !ev->status)
2722 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2723
2724 hci_dev_unlock(hdev);
2725 }
2726
2727 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2728 {
2729 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2730 struct inquiry_entry *ie;
2731
2732 BT_DBG("%s", hdev->name);
2733
2734 hci_dev_lock(hdev);
2735
2736 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2737 if (ie) {
2738 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2739 ie->timestamp = jiffies;
2740 }
2741
2742 hci_dev_unlock(hdev);
2743 }
2744
/* Inquiry Result with RSSI event: one or more devices were found
 * during inquiry.  Two wire formats exist (with or without an extra
 * pscan_mode byte); they are distinguished by the per-response record
 * size.  Each response updates the inquiry cache and is reported via
 * mgmt.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known, ssp;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results from periodic inquiry are not forwarded */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Record size mismatch means the variant with pscan_mode */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
2804
/* Read Remote Extended Features Complete event: cache the requested
 * features page, derive the remote SSP support from page 1, and, while
 * the connection is still in BT_CONFIG, continue setup with a remote
 * name request or mgmt notification plus optional authentication.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	/* Cache the page; pages beyond HCI_MAX_PAGES are dropped */
	if (ev->page < HCI_MAX_PAGES)
		memcpy(conn->features[ev->page], ev->features, 8);

	/* Page 1 carries the remote host's SSP support bit */
	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP) {
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		} else {
			/* It is mandatory by the Bluetooth specification that
			 * Extended Inquiry Results are only used when Secure
			 * Simple Pairing is enabled, but some devices violate
			 * this.
			 *
			 * To make these devices work, the internal SSP
			 * enabled flag needs to be cleared if the remote host
			 * features do not indicate SSP support */
			clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
		}
	}

	/* The remainder only applies while connection setup is ongoing */
	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	/* No authentication needed: the connection is fully up */
	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2867
/* Synchronous Connection Complete event: finish SCO/eSCO connection
 * setup.  On certain failure codes an outgoing eSCO attempt is retried
 * with a downgraded packet type before giving up.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* A requested eSCO link may complete as plain SCO;
		 * look the pending connection up under ESCO_LINK and
		 * downgrade its type.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_add_sysfs(conn);
		break;

	case 0x0d:	/* Connection Rejected due to Limited Resources */
	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		/* Retry an outgoing setup with a reduced packet type
		 * selection before reporting failure.
		 */
		if (conn->out) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
			if (hci_setup_sync(conn, conn->link->handle))
				goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
2923
/* Extended Inquiry Result event: inquiry responses that carry EIR
 * data.  Update the inquiry cache and report each device, including
 * its EIR payload, through mgmt.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Results from periodic inquiry are not forwarded */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode	= info->pscan_rep_mode;
		data.pscan_period_mode	= info->pscan_period_mode;
		data.pscan_mode		= 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset	= info->clock_offset;
		data.rssi		= info->rssi;
		data.ssp_mode		= 0x01;

		/* With mgmt, a name request is only needed when the EIR
		 * data does not already contain the complete name.
		 */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
						      &ssp);
		eir_len = eir_get_length(info->data, sizeof(info->data));
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi, !name_known,
				  ssp, info->data, eir_len);
	}

	hci_dev_unlock(hdev);
}
2971
/* Encryption Key Refresh Complete event: the encryption key on a
 * connection was refreshed.  Promote the pending security level on
 * success, disconnect on failure of an established link, and continue
 * connection setup when still in BT_CONFIG.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on an established link is treated as an
	 * authentication failure and the link is torn down.
	 */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_drop(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_drop(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_drop(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3015
3016 static u8 hci_get_auth_req(struct hci_conn *conn)
3017 {
3018 /* If remote requests dedicated bonding follow that lead */
3019 if (conn->remote_auth == HCI_AT_DEDICATED_BONDING ||
3020 conn->remote_auth == HCI_AT_DEDICATED_BONDING_MITM) {
3021 /* If both remote and local IO capabilities allow MITM
3022 * protection then require it, otherwise don't */
3023 if (conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT ||
3024 conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)
3025 return HCI_AT_DEDICATED_BONDING;
3026 else
3027 return HCI_AT_DEDICATED_BONDING_MITM;
3028 }
3029
3030 /* If remote requests no-bonding follow that lead */
3031 if (conn->remote_auth == HCI_AT_NO_BONDING ||
3032 conn->remote_auth == HCI_AT_NO_BONDING_MITM)
3033 return conn->remote_auth | (conn->auth_type & 0x01);
3034
3035 return conn->auth_type;
3036 }
3037
/* IO Capability Request event: the controller needs our IO
 * capabilities for Secure Simple Pairing.  Reply with them when
 * pairing is allowed, otherwise send a negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of pairing */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* Accept when we are pairable, or when the remote only asks
	 * for no-bonding (general pairing without persistent keys).
	 */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				HCI_IO_DISPLAY_YESNO : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		/* Advertise OOB data only when we hold it for the peer
		 * and this side can actually make use of it.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3089
3090 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3091 {
3092 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3093 struct hci_conn *conn;
3094
3095 BT_DBG("%s", hdev->name);
3096
3097 hci_dev_lock(hdev);
3098
3099 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3100 if (!conn)
3101 goto unlock;
3102
3103 conn->remote_cap = ev->capability;
3104 conn->remote_auth = ev->authentication;
3105 if (ev->oob_data)
3106 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3107
3108 unlock:
3109 hci_dev_unlock(hdev);
3110 }
3111
/* User Confirmation Request event: numeric comparison during SSP.
 * Auto-accept or auto-reject where policy allows, otherwise forward
 * the request to user space via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirement encodes the MITM demand */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm &&
	    conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) &&
	    (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Defer the accept through a timer when a delay is
		 * configured, otherwise accept immediately.
		 */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
				  confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3180
3181 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3182 struct sk_buff *skb)
3183 {
3184 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3185
3186 BT_DBG("%s", hdev->name);
3187
3188 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3189 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3190 }
3191
3192 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3193 struct sk_buff *skb)
3194 {
3195 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3196 struct hci_conn *conn;
3197
3198 BT_DBG("%s", hdev->name);
3199
3200 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3201 if (!conn)
3202 return;
3203
3204 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3205 conn->passkey_entered = 0;
3206
3207 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3208 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3209 conn->dst_type, conn->passkey_notify,
3210 conn->passkey_entered);
3211 }
3212
3213 static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
3214 {
3215 struct hci_ev_keypress_notify *ev = (void *) skb->data;
3216 struct hci_conn *conn;
3217
3218 BT_DBG("%s", hdev->name);
3219
3220 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3221 if (!conn)
3222 return;
3223
3224 switch (ev->type) {
3225 case HCI_KEYPRESS_STARTED:
3226 conn->passkey_entered = 0;
3227 return;
3228
3229 case HCI_KEYPRESS_ENTERED:
3230 conn->passkey_entered++;
3231 break;
3232
3233 case HCI_KEYPRESS_ERASED:
3234 conn->passkey_entered--;
3235 break;
3236
3237 case HCI_KEYPRESS_CLEARED:
3238 conn->passkey_entered = 0;
3239 break;
3240
3241 case HCI_KEYPRESS_COMPLETED:
3242 return;
3243 }
3244
3245 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3246 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3247 conn->dst_type, conn->passkey_notify,
3248 conn->passkey_entered);
3249 }
3250
/* Simple Pairing Complete event: report a failed SSP attempt to user
 * space, unless we initiated authentication ourselves (in which case
 * the Auth Complete event already covers it), and release the hold
 * taken when pairing started.
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	hci_conn_drop(conn);

unlock:
	hci_dev_unlock(hdev);
}
3279
3280 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3281 struct sk_buff *skb)
3282 {
3283 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3284 struct inquiry_entry *ie;
3285 struct hci_conn *conn;
3286
3287 BT_DBG("%s", hdev->name);
3288
3289 hci_dev_lock(hdev);
3290
3291 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3292 if (conn)
3293 memcpy(conn->features[1], ev->features, 8);
3294
3295 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3296 if (ie)
3297 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3298
3299 hci_dev_unlock(hdev);
3300 }
3301
/* Remote OOB Data Request event: the controller asks for the OOB hash
 * and randomizer received out of band for this peer.  Reply with the
 * stored values or send a negative reply if none are on file.
 */
static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
	struct oob_data *data;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* OOB data is only stored when mgmt is in use */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
	if (data) {
		struct hci_cp_remote_oob_data_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		memcpy(cp.hash, data->hash, sizeof(cp.hash));
		memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));

		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
			     &cp);
	} else {
		struct hci_cp_remote_oob_data_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
			     &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3336
/* Physical Link Complete event (AMP): finish bringing up the AMP
 * physical link, inherit the peer address from the underlying BR/EDR
 * connection and confirm to the AMP manager.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	/* Setup failed: drop the half-created connection object */
	if (ev->status) {
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* The BR/EDR connection that carries the AMP manager's L2CAP
	 * signalling for this physical link.
	 */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_drop(hcon);

	hci_conn_add_sysfs(hcon);

	amp_physical_cfm(bredr_hcon, hcon);

	hci_dev_unlock(hdev);
}
3375
/* Logical Link Complete event (AMP): create the HCI channel for the
 * new logical link and confirm it to the waiting L2CAP channel.
 *
 * NOTE(review): unlike the neighbouring handlers this one does not
 * take hci_dev_lock — presumably the lookups used here are safe
 * without it; verify against the locking rules for the conn hash.
 */
static void hci_loglink_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_logical_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon;
	struct hci_chan *hchan;
	struct amp_mgr *mgr;

	BT_DBG("%s log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x",
	       hdev->name, le16_to_cpu(ev->handle), ev->phy_handle,
	       ev->status);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon)
		return;

	/* Create AMP hchan */
	hchan = hci_chan_create(hcon);
	if (!hchan)
		return;

	hchan->handle = le16_to_cpu(ev->handle);

	BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan);

	mgr = hcon->amp_mgr;
	if (mgr && mgr->bredr_chan) {
		struct l2cap_chan *bredr_chan = mgr->bredr_chan;

		l2cap_chan_lock(bredr_chan);

		/* Switch the channel's MTU to the AMP controller's
		 * block MTU and confirm the logical link.
		 */
		bredr_chan->conn->mtu = hdev->block_mtu;
		l2cap_logical_cfm(bredr_chan, hchan, 0);
		hci_conn_hold(hcon);

		l2cap_chan_unlock(bredr_chan);
	}
}
3413
3414 static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev,
3415 struct sk_buff *skb)
3416 {
3417 struct hci_ev_disconn_logical_link_complete *ev = (void *) skb->data;
3418 struct hci_chan *hchan;
3419
3420 BT_DBG("%s log handle 0x%4.4x status 0x%2.2x", hdev->name,
3421 le16_to_cpu(ev->handle), ev->status);
3422
3423 if (ev->status)
3424 return;
3425
3426 hci_dev_lock(hdev);
3427
3428 hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle));
3429 if (!hchan)
3430 goto unlock;
3431
3432 amp_destroy_logical_link(hchan, ev->reason);
3433
3434 unlock:
3435 hci_dev_unlock(hdev);
3436 }
3437
3438 static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev,
3439 struct sk_buff *skb)
3440 {
3441 struct hci_ev_disconn_phy_link_complete *ev = (void *) skb->data;
3442 struct hci_conn *hcon;
3443
3444 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3445
3446 if (ev->status)
3447 return;
3448
3449 hci_dev_lock(hdev);
3450
3451 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3452 if (hcon) {
3453 hcon->state = BT_CLOSED;
3454 hci_conn_del(hcon);
3455 }
3456
3457 hci_dev_unlock(hdev);
3458 }
3459
/* LE Connection Complete event: finish LE connection setup.  Reuse a
 * pending BT_CONNECT object when one exists (outgoing connection),
 * otherwise create a fresh one for an incoming connection.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* Master role on an unsolicited completion means we
		 * initiated the connection.
		 */
		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}
	}

	/* Setup failed: notify user space and drop the object */
	if (ev->status) {
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3509
/* LE Advertising Report event: walk the packed list of advertising
 * reports and forward each one to user space via mgmt.  The RSSI byte
 * follows the variable-length advertising data of every report.
 *
 * NOTE(review): neither num_reports nor each ev->length is validated
 * against skb->len, so a malformed event could walk past the buffer —
 * verify whether the caller guarantees well-formed events.
 */
static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	u8 num_reports = skb->data[0];
	void *ptr = &skb->data[1];
	s8 rssi;

	while (num_reports--) {
		struct hci_ev_le_advertising_info *ev = ptr;

		/* RSSI is the byte immediately after the AD payload */
		rssi = ev->data[ev->length];
		mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
				  NULL, rssi, 0, 1, ev->data, ev->length);

		ptr += sizeof(*ev) + ev->length + 1;
	}
}
3526
/* LE Long Term Key Request event: the controller asks for the LTK
 * matching the given ediv/random values.  Reply with the stored key
 * (one-shot STKs are deleted after use) or send a negative reply.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* The security level resulting from encryption depends on
	 * whether the key is authenticated.
	 */
	if (ltk->authenticated)
		conn->pending_sec_level = BT_SECURITY_HIGH;
	else
		conn->pending_sec_level = BT_SECURITY_MEDIUM;

	conn->enc_key_size = ltk->enc_size;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* Short term keys are single-use: discard after replying */
	if (ltk->type & HCI_SMP_STK) {
		list_del(&ltk->list);
		kfree(ltk);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
3573
3574 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3575 {
3576 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3577
3578 skb_pull(skb, sizeof(*le_ev));
3579
3580 switch (le_ev->subevent) {
3581 case HCI_EV_LE_CONN_COMPLETE:
3582 hci_le_conn_complete_evt(hdev, skb);
3583 break;
3584
3585 case HCI_EV_LE_ADVERTISING_REPORT:
3586 hci_le_adv_report_evt(hdev, skb);
3587 break;
3588
3589 case HCI_EV_LE_LTK_REQ:
3590 hci_le_ltk_request_evt(hdev, skb);
3591 break;
3592
3593 default:
3594 break;
3595 }
3596 }
3597
3598 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3599 {
3600 struct hci_ev_channel_selected *ev = (void *) skb->data;
3601 struct hci_conn *hcon;
3602
3603 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3604
3605 skb_pull(skb, sizeof(*ev));
3606
3607 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3608 if (!hcon)
3609 return;
3610
3611 amp_read_loc_assoc_final_data(hdev, hcon);
3612 }
3613
/* Top-level HCI event demultiplexer: called for every event packet
 * received from the controller.  Optionally snapshots the event for a
 * pending synchronous request, completes the matching sent command if
 * this event was the one it was waiting for, then dispatches to the
 * per-event handler.  Consumes the skb.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	hci_dev_lock(hdev);

	/* Received events are (currently) only needed when a request is
	 * ongoing so avoid unnecessary memory allocation.
	 */
	if (hdev->req_status == HCI_REQ_PEND) {
		kfree_skb(hdev->recv_evt);
		hdev->recv_evt = skb_clone(skb, GFP_KERNEL);
	}

	hci_dev_unlock(hdev);

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* If the last sent command registered interest in this specific
	 * event, treat its arrival as completion of that command.
	 */
	if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->req.event == event) {
		struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(cmd_hdr->opcode);

		hci_req_cmd_complete(hdev, opcode, 0);
	}

	/* Dispatch to the handler for this event code; the handlers see
	 * the skb with the event header already stripped.
	 */
	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_LOGICAL_LINK_COMPLETE:
		hci_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE:
		hci_disconn_loglink_complete_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_PHY_LINK_COMPLETE:
		hci_disconn_phylink_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		/* Unknown events are logged and dropped */
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}