Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
[deliverable/linux.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32
33 /* Handle HCI Event packets */
34
35 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
36 {
37 __u8 status = *((__u8 *) skb->data);
38
39 BT_DBG("%s status 0x%2.2x", hdev->name, status);
40
41 if (status) {
42 hci_dev_lock(hdev);
43 mgmt_stop_discovery_failed(hdev, status);
44 hci_dev_unlock(hdev);
45 return;
46 }
47
48 clear_bit(HCI_INQUIRY, &hdev->flags);
49
50 hci_dev_lock(hdev);
51 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
52 hci_dev_unlock(hdev);
53
54 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
55
56 hci_conn_check_pending(hdev);
57 }
58
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60 {
61 __u8 status = *((__u8 *) skb->data);
62
63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
64
65 if (status)
66 return;
67
68 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69 }
70
71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 {
73 __u8 status = *((__u8 *) skb->data);
74
75 BT_DBG("%s status 0x%2.2x", hdev->name, status);
76
77 if (status)
78 return;
79
80 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81
82 hci_conn_check_pending(hdev);
83 }
84
85 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
86 struct sk_buff *skb)
87 {
88 BT_DBG("%s", hdev->name);
89 }
90
91 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
92 {
93 struct hci_rp_role_discovery *rp = (void *) skb->data;
94 struct hci_conn *conn;
95
96 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
97
98 if (rp->status)
99 return;
100
101 hci_dev_lock(hdev);
102
103 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104 if (conn) {
105 if (rp->role)
106 conn->link_mode &= ~HCI_LM_MASTER;
107 else
108 conn->link_mode |= HCI_LM_MASTER;
109 }
110
111 hci_dev_unlock(hdev);
112 }
113
114 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
115 {
116 struct hci_rp_read_link_policy *rp = (void *) skb->data;
117 struct hci_conn *conn;
118
119 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
120
121 if (rp->status)
122 return;
123
124 hci_dev_lock(hdev);
125
126 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
127 if (conn)
128 conn->link_policy = __le16_to_cpu(rp->policy);
129
130 hci_dev_unlock(hdev);
131 }
132
133 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
134 {
135 struct hci_rp_write_link_policy *rp = (void *) skb->data;
136 struct hci_conn *conn;
137 void *sent;
138
139 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
140
141 if (rp->status)
142 return;
143
144 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
145 if (!sent)
146 return;
147
148 hci_dev_lock(hdev);
149
150 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
151 if (conn)
152 conn->link_policy = get_unaligned_le16(sent + 2);
153
154 hci_dev_unlock(hdev);
155 }
156
157 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
158 struct sk_buff *skb)
159 {
160 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
161
162 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
163
164 if (rp->status)
165 return;
166
167 hdev->link_policy = __le16_to_cpu(rp->policy);
168 }
169
170 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
171 struct sk_buff *skb)
172 {
173 __u8 status = *((__u8 *) skb->data);
174 void *sent;
175
176 BT_DBG("%s status 0x%2.2x", hdev->name, status);
177
178 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
179 if (!sent)
180 return;
181
182 if (!status)
183 hdev->link_policy = get_unaligned_le16(sent);
184
185 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
186 }
187
188 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
189 {
190 __u8 status = *((__u8 *) skb->data);
191
192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
193
194 clear_bit(HCI_RESET, &hdev->flags);
195
196 hci_req_complete(hdev, HCI_OP_RESET, status);
197
198 /* Reset all non-persistent flags */
199 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
200 BIT(HCI_PERIODIC_INQ));
201
202 hdev->discovery.state = DISCOVERY_STOPPED;
203 }
204
205 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
206 {
207 __u8 status = *((__u8 *) skb->data);
208 void *sent;
209
210 BT_DBG("%s status 0x%2.2x", hdev->name, status);
211
212 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
213 if (!sent)
214 return;
215
216 hci_dev_lock(hdev);
217
218 if (test_bit(HCI_MGMT, &hdev->dev_flags))
219 mgmt_set_local_name_complete(hdev, sent, status);
220 else if (!status)
221 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
222
223 hci_dev_unlock(hdev);
224
225 hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
226 }
227
228 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
229 {
230 struct hci_rp_read_local_name *rp = (void *) skb->data;
231
232 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
233
234 if (rp->status)
235 return;
236
237 if (test_bit(HCI_SETUP, &hdev->dev_flags))
238 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
239 }
240
241 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
242 {
243 __u8 status = *((__u8 *) skb->data);
244 void *sent;
245
246 BT_DBG("%s status 0x%2.2x", hdev->name, status);
247
248 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
249 if (!sent)
250 return;
251
252 if (!status) {
253 __u8 param = *((__u8 *) sent);
254
255 if (param == AUTH_ENABLED)
256 set_bit(HCI_AUTH, &hdev->flags);
257 else
258 clear_bit(HCI_AUTH, &hdev->flags);
259 }
260
261 if (test_bit(HCI_MGMT, &hdev->dev_flags))
262 mgmt_auth_enable_complete(hdev, status);
263
264 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
265 }
266
267 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
268 {
269 __u8 status = *((__u8 *) skb->data);
270 void *sent;
271
272 BT_DBG("%s status 0x%2.2x", hdev->name, status);
273
274 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
275 if (!sent)
276 return;
277
278 if (!status) {
279 __u8 param = *((__u8 *) sent);
280
281 if (param)
282 set_bit(HCI_ENCRYPT, &hdev->flags);
283 else
284 clear_bit(HCI_ENCRYPT, &hdev->flags);
285 }
286
287 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
288 }
289
290 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
291 {
292 __u8 param, status = *((__u8 *) skb->data);
293 int old_pscan, old_iscan;
294 void *sent;
295
296 BT_DBG("%s status 0x%2.2x", hdev->name, status);
297
298 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
299 if (!sent)
300 return;
301
302 param = *((__u8 *) sent);
303
304 hci_dev_lock(hdev);
305
306 if (status != 0) {
307 mgmt_write_scan_failed(hdev, param, status);
308 hdev->discov_timeout = 0;
309 goto done;
310 }
311
312 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
313 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
314
315 if (param & SCAN_INQUIRY) {
316 set_bit(HCI_ISCAN, &hdev->flags);
317 if (!old_iscan)
318 mgmt_discoverable(hdev, 1);
319 if (hdev->discov_timeout > 0) {
320 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
321 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
322 to);
323 }
324 } else if (old_iscan)
325 mgmt_discoverable(hdev, 0);
326
327 if (param & SCAN_PAGE) {
328 set_bit(HCI_PSCAN, &hdev->flags);
329 if (!old_pscan)
330 mgmt_connectable(hdev, 1);
331 } else if (old_pscan)
332 mgmt_connectable(hdev, 0);
333
334 done:
335 hci_dev_unlock(hdev);
336 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
337 }
338
339 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
340 {
341 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
342
343 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
344
345 if (rp->status)
346 return;
347
348 memcpy(hdev->dev_class, rp->dev_class, 3);
349
350 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
351 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
352 }
353
354 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
355 {
356 __u8 status = *((__u8 *) skb->data);
357 void *sent;
358
359 BT_DBG("%s status 0x%2.2x", hdev->name, status);
360
361 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
362 if (!sent)
363 return;
364
365 hci_dev_lock(hdev);
366
367 if (status == 0)
368 memcpy(hdev->dev_class, sent, 3);
369
370 if (test_bit(HCI_MGMT, &hdev->dev_flags))
371 mgmt_set_class_of_dev_complete(hdev, sent, status);
372
373 hci_dev_unlock(hdev);
374 }
375
376 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
377 {
378 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
379 __u16 setting;
380
381 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
382
383 if (rp->status)
384 return;
385
386 setting = __le16_to_cpu(rp->voice_setting);
387
388 if (hdev->voice_setting == setting)
389 return;
390
391 hdev->voice_setting = setting;
392
393 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
394
395 if (hdev->notify)
396 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
397 }
398
399 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
400 struct sk_buff *skb)
401 {
402 __u8 status = *((__u8 *) skb->data);
403 __u16 setting;
404 void *sent;
405
406 BT_DBG("%s status 0x%2.2x", hdev->name, status);
407
408 if (status)
409 return;
410
411 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
412 if (!sent)
413 return;
414
415 setting = get_unaligned_le16(sent);
416
417 if (hdev->voice_setting == setting)
418 return;
419
420 hdev->voice_setting = setting;
421
422 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
423
424 if (hdev->notify)
425 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
426 }
427
428 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
429 {
430 __u8 status = *((__u8 *) skb->data);
431
432 BT_DBG("%s status 0x%2.2x", hdev->name, status);
433
434 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
435 }
436
437 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
438 {
439 __u8 status = *((__u8 *) skb->data);
440 void *sent;
441
442 BT_DBG("%s status 0x%2.2x", hdev->name, status);
443
444 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
445 if (!sent)
446 return;
447
448 if (test_bit(HCI_MGMT, &hdev->dev_flags))
449 mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
450 else if (!status) {
451 if (*((u8 *) sent))
452 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
453 else
454 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
455 }
456 }
457
458 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
459 {
460 if (hdev->features[6] & LMP_EXT_INQ)
461 return 2;
462
463 if (hdev->features[3] & LMP_RSSI_INQ)
464 return 1;
465
466 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
467 hdev->lmp_subver == 0x0757)
468 return 1;
469
470 if (hdev->manufacturer == 15) {
471 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
472 return 1;
473 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
474 return 1;
475 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
476 return 1;
477 }
478
479 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
480 hdev->lmp_subver == 0x1805)
481 return 1;
482
483 return 0;
484 }
485
486 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
487 {
488 u8 mode;
489
490 mode = hci_get_inquiry_mode(hdev);
491
492 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
493 }
494
495 static void hci_setup_event_mask(struct hci_dev *hdev)
496 {
497 /* The second byte is 0xff instead of 0x9f (two reserved bits
498 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
499 * command otherwise */
500 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
501
502 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
503 * any event mask for pre 1.2 devices */
504 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
505 return;
506
507 events[4] |= 0x01; /* Flow Specification Complete */
508 events[4] |= 0x02; /* Inquiry Result with RSSI */
509 events[4] |= 0x04; /* Read Remote Extended Features Complete */
510 events[5] |= 0x08; /* Synchronous Connection Complete */
511 events[5] |= 0x10; /* Synchronous Connection Changed */
512
513 if (hdev->features[3] & LMP_RSSI_INQ)
514 events[4] |= 0x02; /* Inquiry Result with RSSI */
515
516 if (lmp_sniffsubr_capable(hdev))
517 events[5] |= 0x20; /* Sniff Subrating */
518
519 if (hdev->features[5] & LMP_PAUSE_ENC)
520 events[5] |= 0x80; /* Encryption Key Refresh Complete */
521
522 if (hdev->features[6] & LMP_EXT_INQ)
523 events[5] |= 0x40; /* Extended Inquiry Result */
524
525 if (lmp_no_flush_capable(hdev))
526 events[7] |= 0x01; /* Enhanced Flush Complete */
527
528 if (hdev->features[7] & LMP_LSTO)
529 events[6] |= 0x80; /* Link Supervision Timeout Changed */
530
531 if (lmp_ssp_capable(hdev)) {
532 events[6] |= 0x01; /* IO Capability Request */
533 events[6] |= 0x02; /* IO Capability Response */
534 events[6] |= 0x04; /* User Confirmation Request */
535 events[6] |= 0x08; /* User Passkey Request */
536 events[6] |= 0x10; /* Remote OOB Data Request */
537 events[6] |= 0x20; /* Simple Pairing Complete */
538 events[7] |= 0x04; /* User Passkey Notification */
539 events[7] |= 0x08; /* Keypress Notification */
540 events[7] |= 0x10; /* Remote Host Supported
541 * Features Notification */
542 }
543
544 if (lmp_le_capable(hdev))
545 events[7] |= 0x20; /* LE Meta-Event */
546
547 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
548 }
549
550 static void hci_setup(struct hci_dev *hdev)
551 {
552 if (hdev->dev_type != HCI_BREDR)
553 return;
554
555 hci_setup_event_mask(hdev);
556
557 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
558 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
559
560 if (lmp_ssp_capable(hdev)) {
561 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
562 u8 mode = 0x01;
563 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
564 sizeof(mode), &mode);
565 } else {
566 struct hci_cp_write_eir cp;
567
568 memset(hdev->eir, 0, sizeof(hdev->eir));
569 memset(&cp, 0, sizeof(cp));
570
571 hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
572 }
573 }
574
575 if (hdev->features[3] & LMP_RSSI_INQ)
576 hci_setup_inquiry_mode(hdev);
577
578 if (hdev->features[7] & LMP_INQ_TX_PWR)
579 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
580
581 if (hdev->features[7] & LMP_EXTFEATURES) {
582 struct hci_cp_read_local_ext_features cp;
583
584 cp.page = 0x01;
585 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
586 &cp);
587 }
588
589 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
590 u8 enable = 1;
591 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
592 &enable);
593 }
594 }
595
596 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
597 {
598 struct hci_rp_read_local_version *rp = (void *) skb->data;
599
600 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
601
602 if (rp->status)
603 goto done;
604
605 hdev->hci_ver = rp->hci_ver;
606 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
607 hdev->lmp_ver = rp->lmp_ver;
608 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
609 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
610
611 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
612 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
613
614 if (test_bit(HCI_INIT, &hdev->flags))
615 hci_setup(hdev);
616
617 done:
618 hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
619 }
620
621 static void hci_setup_link_policy(struct hci_dev *hdev)
622 {
623 struct hci_cp_write_def_link_policy cp;
624 u16 link_policy = 0;
625
626 if (lmp_rswitch_capable(hdev))
627 link_policy |= HCI_LP_RSWITCH;
628 if (hdev->features[0] & LMP_HOLD)
629 link_policy |= HCI_LP_HOLD;
630 if (lmp_sniff_capable(hdev))
631 link_policy |= HCI_LP_SNIFF;
632 if (hdev->features[1] & LMP_PARK)
633 link_policy |= HCI_LP_PARK;
634
635 cp.policy = cpu_to_le16(link_policy);
636 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
637 }
638
639 static void hci_cc_read_local_commands(struct hci_dev *hdev,
640 struct sk_buff *skb)
641 {
642 struct hci_rp_read_local_commands *rp = (void *) skb->data;
643
644 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
645
646 if (rp->status)
647 goto done;
648
649 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
650
651 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
652 hci_setup_link_policy(hdev);
653
654 done:
655 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
656 }
657
658 static void hci_cc_read_local_features(struct hci_dev *hdev,
659 struct sk_buff *skb)
660 {
661 struct hci_rp_read_local_features *rp = (void *) skb->data;
662
663 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
664
665 if (rp->status)
666 return;
667
668 memcpy(hdev->features, rp->features, 8);
669
670 /* Adjust default settings according to features
671 * supported by device. */
672
673 if (hdev->features[0] & LMP_3SLOT)
674 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
675
676 if (hdev->features[0] & LMP_5SLOT)
677 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
678
679 if (hdev->features[1] & LMP_HV2) {
680 hdev->pkt_type |= (HCI_HV2);
681 hdev->esco_type |= (ESCO_HV2);
682 }
683
684 if (hdev->features[1] & LMP_HV3) {
685 hdev->pkt_type |= (HCI_HV3);
686 hdev->esco_type |= (ESCO_HV3);
687 }
688
689 if (lmp_esco_capable(hdev))
690 hdev->esco_type |= (ESCO_EV3);
691
692 if (hdev->features[4] & LMP_EV4)
693 hdev->esco_type |= (ESCO_EV4);
694
695 if (hdev->features[4] & LMP_EV5)
696 hdev->esco_type |= (ESCO_EV5);
697
698 if (hdev->features[5] & LMP_EDR_ESCO_2M)
699 hdev->esco_type |= (ESCO_2EV3);
700
701 if (hdev->features[5] & LMP_EDR_ESCO_3M)
702 hdev->esco_type |= (ESCO_3EV3);
703
704 if (hdev->features[5] & LMP_EDR_3S_ESCO)
705 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
706
707 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
708 hdev->features[0], hdev->features[1],
709 hdev->features[2], hdev->features[3],
710 hdev->features[4], hdev->features[5],
711 hdev->features[6], hdev->features[7]);
712 }
713
714 static void hci_set_le_support(struct hci_dev *hdev)
715 {
716 struct hci_cp_write_le_host_supported cp;
717
718 memset(&cp, 0, sizeof(cp));
719
720 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
721 cp.le = 1;
722 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
723 }
724
725 if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
726 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
727 &cp);
728 }
729
730 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
731 struct sk_buff *skb)
732 {
733 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
734
735 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
736
737 if (rp->status)
738 goto done;
739
740 switch (rp->page) {
741 case 0:
742 memcpy(hdev->features, rp->features, 8);
743 break;
744 case 1:
745 memcpy(hdev->host_features, rp->features, 8);
746 break;
747 }
748
749 if (test_bit(HCI_INIT, &hdev->flags) && lmp_le_capable(hdev))
750 hci_set_le_support(hdev);
751
752 done:
753 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
754 }
755
756 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
757 struct sk_buff *skb)
758 {
759 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
760
761 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
762
763 if (rp->status)
764 return;
765
766 hdev->flow_ctl_mode = rp->mode;
767
768 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
769 }
770
771 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
772 {
773 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
774
775 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
776
777 if (rp->status)
778 return;
779
780 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
781 hdev->sco_mtu = rp->sco_mtu;
782 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
783 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
784
785 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
786 hdev->sco_mtu = 64;
787 hdev->sco_pkts = 8;
788 }
789
790 hdev->acl_cnt = hdev->acl_pkts;
791 hdev->sco_cnt = hdev->sco_pkts;
792
793 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
794 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
795 }
796
797 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
798 {
799 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
800
801 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
802
803 if (!rp->status)
804 bacpy(&hdev->bdaddr, &rp->bdaddr);
805
806 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
807 }
808
809 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
810 struct sk_buff *skb)
811 {
812 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
813
814 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
815
816 if (rp->status)
817 return;
818
819 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
820 hdev->block_len = __le16_to_cpu(rp->block_len);
821 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
822
823 hdev->block_cnt = hdev->num_blocks;
824
825 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
826 hdev->block_cnt, hdev->block_len);
827
828 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
829 }
830
831 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
832 {
833 __u8 status = *((__u8 *) skb->data);
834
835 BT_DBG("%s status 0x%2.2x", hdev->name, status);
836
837 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
838 }
839
840 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
841 struct sk_buff *skb)
842 {
843 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
844
845 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
846
847 if (rp->status)
848 return;
849
850 hdev->amp_status = rp->amp_status;
851 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
852 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
853 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
854 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
855 hdev->amp_type = rp->amp_type;
856 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
857 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
858 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
859 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
860
861 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
862 }
863
864 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
865 struct sk_buff *skb)
866 {
867 __u8 status = *((__u8 *) skb->data);
868
869 BT_DBG("%s status 0x%2.2x", hdev->name, status);
870
871 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
872 }
873
874 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
875 {
876 __u8 status = *((__u8 *) skb->data);
877
878 BT_DBG("%s status 0x%2.2x", hdev->name, status);
879
880 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
881 }
882
883 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
884 struct sk_buff *skb)
885 {
886 __u8 status = *((__u8 *) skb->data);
887
888 BT_DBG("%s status 0x%2.2x", hdev->name, status);
889
890 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
891 }
892
893 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
894 struct sk_buff *skb)
895 {
896 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
897
898 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
899
900 if (!rp->status)
901 hdev->inq_tx_power = rp->tx_power;
902
903 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
904 }
905
906 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
907 {
908 __u8 status = *((__u8 *) skb->data);
909
910 BT_DBG("%s status 0x%2.2x", hdev->name, status);
911
912 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
913 }
914
915 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
916 {
917 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
918 struct hci_cp_pin_code_reply *cp;
919 struct hci_conn *conn;
920
921 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
922
923 hci_dev_lock(hdev);
924
925 if (test_bit(HCI_MGMT, &hdev->dev_flags))
926 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
927
928 if (rp->status != 0)
929 goto unlock;
930
931 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
932 if (!cp)
933 goto unlock;
934
935 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
936 if (conn)
937 conn->pin_length = cp->pin_len;
938
939 unlock:
940 hci_dev_unlock(hdev);
941 }
942
943 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
944 {
945 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
946
947 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
948
949 hci_dev_lock(hdev);
950
951 if (test_bit(HCI_MGMT, &hdev->dev_flags))
952 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
953 rp->status);
954
955 hci_dev_unlock(hdev);
956 }
957
958 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
959 struct sk_buff *skb)
960 {
961 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
962
963 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
964
965 if (rp->status)
966 return;
967
968 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
969 hdev->le_pkts = rp->le_max_pkt;
970
971 hdev->le_cnt = hdev->le_pkts;
972
973 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
974
975 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
976 }
977
978 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
979 {
980 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
981
982 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
983
984 hci_dev_lock(hdev);
985
986 if (test_bit(HCI_MGMT, &hdev->dev_flags))
987 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
988 rp->status);
989
990 hci_dev_unlock(hdev);
991 }
992
993 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
994 struct sk_buff *skb)
995 {
996 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
997
998 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
999
1000 hci_dev_lock(hdev);
1001
1002 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1003 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1004 ACL_LINK, 0, rp->status);
1005
1006 hci_dev_unlock(hdev);
1007 }
1008
1009 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1010 {
1011 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1012
1013 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1014
1015 hci_dev_lock(hdev);
1016
1017 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1018 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1019 0, rp->status);
1020
1021 hci_dev_unlock(hdev);
1022 }
1023
1024 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1025 struct sk_buff *skb)
1026 {
1027 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1028
1029 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1030
1031 hci_dev_lock(hdev);
1032
1033 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1034 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1035 ACL_LINK, 0, rp->status);
1036
1037 hci_dev_unlock(hdev);
1038 }
1039
1040 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1041 struct sk_buff *skb)
1042 {
1043 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1044
1045 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1046
1047 hci_dev_lock(hdev);
1048 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1049 rp->randomizer, rp->status);
1050 hci_dev_unlock(hdev);
1051 }
1052
1053 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1054 {
1055 __u8 status = *((__u8 *) skb->data);
1056
1057 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1058
1059 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1060
1061 if (status) {
1062 hci_dev_lock(hdev);
1063 mgmt_start_discovery_failed(hdev, status);
1064 hci_dev_unlock(hdev);
1065 return;
1066 }
1067 }
1068
1069 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1070 struct sk_buff *skb)
1071 {
1072 struct hci_cp_le_set_scan_enable *cp;
1073 __u8 status = *((__u8 *) skb->data);
1074
1075 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1076
1077 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1078 if (!cp)
1079 return;
1080
1081 switch (cp->enable) {
1082 case LE_SCANNING_ENABLED:
1083 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1084
1085 if (status) {
1086 hci_dev_lock(hdev);
1087 mgmt_start_discovery_failed(hdev, status);
1088 hci_dev_unlock(hdev);
1089 return;
1090 }
1091
1092 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1093
1094 hci_dev_lock(hdev);
1095 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1096 hci_dev_unlock(hdev);
1097 break;
1098
1099 case LE_SCANNING_DISABLED:
1100 if (status) {
1101 hci_dev_lock(hdev);
1102 mgmt_stop_discovery_failed(hdev, status);
1103 hci_dev_unlock(hdev);
1104 return;
1105 }
1106
1107 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1108
1109 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
1110 hdev->discovery.state == DISCOVERY_FINDING) {
1111 mgmt_interleaved_discovery(hdev);
1112 } else {
1113 hci_dev_lock(hdev);
1114 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1115 hci_dev_unlock(hdev);
1116 }
1117
1118 break;
1119
1120 default:
1121 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1122 break;
1123 }
1124 }
1125
1126 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1127 {
1128 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1129
1130 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1131
1132 if (rp->status)
1133 return;
1134
1135 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1136 }
1137
1138 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1139 {
1140 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1141
1142 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1143
1144 if (rp->status)
1145 return;
1146
1147 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1148 }
1149
1150 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1151 struct sk_buff *skb)
1152 {
1153 struct hci_cp_write_le_host_supported *sent;
1154 __u8 status = *((__u8 *) skb->data);
1155
1156 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1157
1158 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1159 if (!sent)
1160 return;
1161
1162 if (!status) {
1163 if (sent->le)
1164 hdev->host_features[0] |= LMP_HOST_LE;
1165 else
1166 hdev->host_features[0] &= ~LMP_HOST_LE;
1167 }
1168
1169 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1170 !test_bit(HCI_INIT, &hdev->flags))
1171 mgmt_le_enable_complete(hdev, sent->le, status);
1172
1173 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1174 }
1175
1176 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1177 {
1178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1179
1180 if (status) {
1181 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1182 hci_conn_check_pending(hdev);
1183 hci_dev_lock(hdev);
1184 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1185 mgmt_start_discovery_failed(hdev, status);
1186 hci_dev_unlock(hdev);
1187 return;
1188 }
1189
1190 set_bit(HCI_INQUIRY, &hdev->flags);
1191
1192 hci_dev_lock(hdev);
1193 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1194 hci_dev_unlock(hdev);
1195 }
1196
1197 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1198 {
1199 struct hci_cp_create_conn *cp;
1200 struct hci_conn *conn;
1201
1202 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1203
1204 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1205 if (!cp)
1206 return;
1207
1208 hci_dev_lock(hdev);
1209
1210 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1211
1212 BT_DBG("%s bdaddr %s hcon %p", hdev->name, batostr(&cp->bdaddr), conn);
1213
1214 if (status) {
1215 if (conn && conn->state == BT_CONNECT) {
1216 if (status != 0x0c || conn->attempt > 2) {
1217 conn->state = BT_CLOSED;
1218 hci_proto_connect_cfm(conn, status);
1219 hci_conn_del(conn);
1220 } else
1221 conn->state = BT_CONNECT2;
1222 }
1223 } else {
1224 if (!conn) {
1225 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1226 if (conn) {
1227 conn->out = true;
1228 conn->link_mode |= HCI_LM_MASTER;
1229 } else
1230 BT_ERR("No memory for new connection");
1231 }
1232 }
1233
1234 hci_dev_unlock(hdev);
1235 }
1236
1237 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1238 {
1239 struct hci_cp_add_sco *cp;
1240 struct hci_conn *acl, *sco;
1241 __u16 handle;
1242
1243 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1244
1245 if (!status)
1246 return;
1247
1248 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1249 if (!cp)
1250 return;
1251
1252 handle = __le16_to_cpu(cp->handle);
1253
1254 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1255
1256 hci_dev_lock(hdev);
1257
1258 acl = hci_conn_hash_lookup_handle(hdev, handle);
1259 if (acl) {
1260 sco = acl->link;
1261 if (sco) {
1262 sco->state = BT_CLOSED;
1263
1264 hci_proto_connect_cfm(sco, status);
1265 hci_conn_del(sco);
1266 }
1267 }
1268
1269 hci_dev_unlock(hdev);
1270 }
1271
1272 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1273 {
1274 struct hci_cp_auth_requested *cp;
1275 struct hci_conn *conn;
1276
1277 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1278
1279 if (!status)
1280 return;
1281
1282 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1283 if (!cp)
1284 return;
1285
1286 hci_dev_lock(hdev);
1287
1288 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1289 if (conn) {
1290 if (conn->state == BT_CONFIG) {
1291 hci_proto_connect_cfm(conn, status);
1292 hci_conn_put(conn);
1293 }
1294 }
1295
1296 hci_dev_unlock(hdev);
1297 }
1298
1299 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1300 {
1301 struct hci_cp_set_conn_encrypt *cp;
1302 struct hci_conn *conn;
1303
1304 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1305
1306 if (!status)
1307 return;
1308
1309 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1310 if (!cp)
1311 return;
1312
1313 hci_dev_lock(hdev);
1314
1315 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1316 if (conn) {
1317 if (conn->state == BT_CONFIG) {
1318 hci_proto_connect_cfm(conn, status);
1319 hci_conn_put(conn);
1320 }
1321 }
1322
1323 hci_dev_unlock(hdev);
1324 }
1325
1326 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1327 struct hci_conn *conn)
1328 {
1329 if (conn->state != BT_CONFIG || !conn->out)
1330 return 0;
1331
1332 if (conn->pending_sec_level == BT_SECURITY_SDP)
1333 return 0;
1334
1335 /* Only request authentication for SSP connections or non-SSP
1336 * devices with sec_level HIGH or if MITM protection is requested */
1337 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1338 conn->pending_sec_level != BT_SECURITY_HIGH)
1339 return 0;
1340
1341 return 1;
1342 }
1343
1344 static int hci_resolve_name(struct hci_dev *hdev,
1345 struct inquiry_entry *e)
1346 {
1347 struct hci_cp_remote_name_req cp;
1348
1349 memset(&cp, 0, sizeof(cp));
1350
1351 bacpy(&cp.bdaddr, &e->data.bdaddr);
1352 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1353 cp.pscan_mode = e->data.pscan_mode;
1354 cp.clock_offset = e->data.clock_offset;
1355
1356 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1357 }
1358
1359 static bool hci_resolve_next_name(struct hci_dev *hdev)
1360 {
1361 struct discovery_state *discov = &hdev->discovery;
1362 struct inquiry_entry *e;
1363
1364 if (list_empty(&discov->resolve))
1365 return false;
1366
1367 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1368 if (!e)
1369 return false;
1370
1371 if (hci_resolve_name(hdev, e) == 0) {
1372 e->name_state = NAME_PENDING;
1373 return true;
1374 }
1375
1376 return false;
1377 }
1378
1379 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1380 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1381 {
1382 struct discovery_state *discov = &hdev->discovery;
1383 struct inquiry_entry *e;
1384
1385 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1386 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1387 name_len, conn->dev_class);
1388
1389 if (discov->state == DISCOVERY_STOPPED)
1390 return;
1391
1392 if (discov->state == DISCOVERY_STOPPING)
1393 goto discov_complete;
1394
1395 if (discov->state != DISCOVERY_RESOLVING)
1396 return;
1397
1398 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1399 /* If the device was not found in a list of found devices names of which
1400 * are pending. there is no need to continue resolving a next name as it
1401 * will be done upon receiving another Remote Name Request Complete
1402 * Event */
1403 if (!e)
1404 return;
1405
1406 list_del(&e->list);
1407 if (name) {
1408 e->name_state = NAME_KNOWN;
1409 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1410 e->data.rssi, name, name_len);
1411 } else {
1412 e->name_state = NAME_NOT_KNOWN;
1413 }
1414
1415 if (hci_resolve_next_name(hdev))
1416 return;
1417
1418 discov_complete:
1419 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1420 }
1421
1422 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1423 {
1424 struct hci_cp_remote_name_req *cp;
1425 struct hci_conn *conn;
1426
1427 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1428
1429 /* If successful wait for the name req complete event before
1430 * checking for the need to do authentication */
1431 if (!status)
1432 return;
1433
1434 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1435 if (!cp)
1436 return;
1437
1438 hci_dev_lock(hdev);
1439
1440 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1441
1442 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1443 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1444
1445 if (!conn)
1446 goto unlock;
1447
1448 if (!hci_outgoing_auth_needed(hdev, conn))
1449 goto unlock;
1450
1451 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1452 struct hci_cp_auth_requested cp;
1453 cp.handle = __cpu_to_le16(conn->handle);
1454 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1455 }
1456
1457 unlock:
1458 hci_dev_unlock(hdev);
1459 }
1460
1461 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1462 {
1463 struct hci_cp_read_remote_features *cp;
1464 struct hci_conn *conn;
1465
1466 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1467
1468 if (!status)
1469 return;
1470
1471 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1472 if (!cp)
1473 return;
1474
1475 hci_dev_lock(hdev);
1476
1477 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1478 if (conn) {
1479 if (conn->state == BT_CONFIG) {
1480 hci_proto_connect_cfm(conn, status);
1481 hci_conn_put(conn);
1482 }
1483 }
1484
1485 hci_dev_unlock(hdev);
1486 }
1487
1488 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1489 {
1490 struct hci_cp_read_remote_ext_features *cp;
1491 struct hci_conn *conn;
1492
1493 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1494
1495 if (!status)
1496 return;
1497
1498 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1499 if (!cp)
1500 return;
1501
1502 hci_dev_lock(hdev);
1503
1504 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1505 if (conn) {
1506 if (conn->state == BT_CONFIG) {
1507 hci_proto_connect_cfm(conn, status);
1508 hci_conn_put(conn);
1509 }
1510 }
1511
1512 hci_dev_unlock(hdev);
1513 }
1514
1515 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1516 {
1517 struct hci_cp_setup_sync_conn *cp;
1518 struct hci_conn *acl, *sco;
1519 __u16 handle;
1520
1521 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1522
1523 if (!status)
1524 return;
1525
1526 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1527 if (!cp)
1528 return;
1529
1530 handle = __le16_to_cpu(cp->handle);
1531
1532 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1533
1534 hci_dev_lock(hdev);
1535
1536 acl = hci_conn_hash_lookup_handle(hdev, handle);
1537 if (acl) {
1538 sco = acl->link;
1539 if (sco) {
1540 sco->state = BT_CLOSED;
1541
1542 hci_proto_connect_cfm(sco, status);
1543 hci_conn_del(sco);
1544 }
1545 }
1546
1547 hci_dev_unlock(hdev);
1548 }
1549
1550 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1551 {
1552 struct hci_cp_sniff_mode *cp;
1553 struct hci_conn *conn;
1554
1555 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1556
1557 if (!status)
1558 return;
1559
1560 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1561 if (!cp)
1562 return;
1563
1564 hci_dev_lock(hdev);
1565
1566 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1567 if (conn) {
1568 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1569
1570 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1571 hci_sco_setup(conn, status);
1572 }
1573
1574 hci_dev_unlock(hdev);
1575 }
1576
1577 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1578 {
1579 struct hci_cp_exit_sniff_mode *cp;
1580 struct hci_conn *conn;
1581
1582 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1583
1584 if (!status)
1585 return;
1586
1587 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1588 if (!cp)
1589 return;
1590
1591 hci_dev_lock(hdev);
1592
1593 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1594 if (conn) {
1595 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1596
1597 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1598 hci_sco_setup(conn, status);
1599 }
1600
1601 hci_dev_unlock(hdev);
1602 }
1603
1604 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1605 {
1606 struct hci_cp_disconnect *cp;
1607 struct hci_conn *conn;
1608
1609 if (!status)
1610 return;
1611
1612 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1613 if (!cp)
1614 return;
1615
1616 hci_dev_lock(hdev);
1617
1618 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1619 if (conn)
1620 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1621 conn->dst_type, status);
1622
1623 hci_dev_unlock(hdev);
1624 }
1625
1626 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1627 {
1628 struct hci_conn *conn;
1629
1630 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1631
1632 if (status) {
1633 hci_dev_lock(hdev);
1634
1635 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1636 if (!conn) {
1637 hci_dev_unlock(hdev);
1638 return;
1639 }
1640
1641 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&conn->dst),
1642 conn);
1643
1644 conn->state = BT_CLOSED;
1645 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1646 conn->dst_type, status);
1647 hci_proto_connect_cfm(conn, status);
1648 hci_conn_del(conn);
1649
1650 hci_dev_unlock(hdev);
1651 }
1652 }
1653
1654 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1655 {
1656 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1657 }
1658
1659 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1660 {
1661 __u8 status = *((__u8 *) skb->data);
1662 struct discovery_state *discov = &hdev->discovery;
1663 struct inquiry_entry *e;
1664
1665 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1666
1667 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1668
1669 hci_conn_check_pending(hdev);
1670
1671 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1672 return;
1673
1674 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1675 return;
1676
1677 hci_dev_lock(hdev);
1678
1679 if (discov->state != DISCOVERY_FINDING)
1680 goto unlock;
1681
1682 if (list_empty(&discov->resolve)) {
1683 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1684 goto unlock;
1685 }
1686
1687 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1688 if (e && hci_resolve_name(hdev, e) == 0) {
1689 e->name_state = NAME_PENDING;
1690 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1691 } else {
1692 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1693 }
1694
1695 unlock:
1696 hci_dev_unlock(hdev);
1697 }
1698
1699 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1700 {
1701 struct inquiry_data data;
1702 struct inquiry_info *info = (void *) (skb->data + 1);
1703 int num_rsp = *((__u8 *) skb->data);
1704
1705 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1706
1707 if (!num_rsp)
1708 return;
1709
1710 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1711 return;
1712
1713 hci_dev_lock(hdev);
1714
1715 for (; num_rsp; num_rsp--, info++) {
1716 bool name_known, ssp;
1717
1718 bacpy(&data.bdaddr, &info->bdaddr);
1719 data.pscan_rep_mode = info->pscan_rep_mode;
1720 data.pscan_period_mode = info->pscan_period_mode;
1721 data.pscan_mode = info->pscan_mode;
1722 memcpy(data.dev_class, info->dev_class, 3);
1723 data.clock_offset = info->clock_offset;
1724 data.rssi = 0x00;
1725 data.ssp_mode = 0x00;
1726
1727 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1728 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1729 info->dev_class, 0, !name_known, ssp, NULL,
1730 0);
1731 }
1732
1733 hci_dev_unlock(hdev);
1734 }
1735
1736 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1737 {
1738 struct hci_ev_conn_complete *ev = (void *) skb->data;
1739 struct hci_conn *conn;
1740
1741 BT_DBG("%s", hdev->name);
1742
1743 hci_dev_lock(hdev);
1744
1745 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1746 if (!conn) {
1747 if (ev->link_type != SCO_LINK)
1748 goto unlock;
1749
1750 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1751 if (!conn)
1752 goto unlock;
1753
1754 conn->type = SCO_LINK;
1755 }
1756
1757 if (!ev->status) {
1758 conn->handle = __le16_to_cpu(ev->handle);
1759
1760 if (conn->type == ACL_LINK) {
1761 conn->state = BT_CONFIG;
1762 hci_conn_hold(conn);
1763
1764 if (!conn->out && !hci_conn_ssp_enabled(conn) &&
1765 !hci_find_link_key(hdev, &ev->bdaddr))
1766 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
1767 else
1768 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1769 } else
1770 conn->state = BT_CONNECTED;
1771
1772 hci_conn_hold_device(conn);
1773 hci_conn_add_sysfs(conn);
1774
1775 if (test_bit(HCI_AUTH, &hdev->flags))
1776 conn->link_mode |= HCI_LM_AUTH;
1777
1778 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1779 conn->link_mode |= HCI_LM_ENCRYPT;
1780
1781 /* Get remote features */
1782 if (conn->type == ACL_LINK) {
1783 struct hci_cp_read_remote_features cp;
1784 cp.handle = ev->handle;
1785 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1786 sizeof(cp), &cp);
1787 }
1788
1789 /* Set packet type for incoming connection */
1790 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1791 struct hci_cp_change_conn_ptype cp;
1792 cp.handle = ev->handle;
1793 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1794 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1795 &cp);
1796 }
1797 } else {
1798 conn->state = BT_CLOSED;
1799 if (conn->type == ACL_LINK)
1800 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1801 conn->dst_type, ev->status);
1802 }
1803
1804 if (conn->type == ACL_LINK)
1805 hci_sco_setup(conn, ev->status);
1806
1807 if (ev->status) {
1808 hci_proto_connect_cfm(conn, ev->status);
1809 hci_conn_del(conn);
1810 } else if (ev->link_type != ACL_LINK)
1811 hci_proto_connect_cfm(conn, ev->status);
1812
1813 unlock:
1814 hci_dev_unlock(hdev);
1815
1816 hci_conn_check_pending(hdev);
1817 }
1818
1819 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1820 {
1821 struct hci_ev_conn_request *ev = (void *) skb->data;
1822 int mask = hdev->link_mode;
1823
1824 BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr),
1825 ev->link_type);
1826
1827 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1828
1829 if ((mask & HCI_LM_ACCEPT) &&
1830 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1831 /* Connection accepted */
1832 struct inquiry_entry *ie;
1833 struct hci_conn *conn;
1834
1835 hci_dev_lock(hdev);
1836
1837 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1838 if (ie)
1839 memcpy(ie->data.dev_class, ev->dev_class, 3);
1840
1841 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1842 &ev->bdaddr);
1843 if (!conn) {
1844 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1845 if (!conn) {
1846 BT_ERR("No memory for new connection");
1847 hci_dev_unlock(hdev);
1848 return;
1849 }
1850 }
1851
1852 memcpy(conn->dev_class, ev->dev_class, 3);
1853 conn->state = BT_CONNECT;
1854
1855 hci_dev_unlock(hdev);
1856
1857 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1858 struct hci_cp_accept_conn_req cp;
1859
1860 bacpy(&cp.bdaddr, &ev->bdaddr);
1861
1862 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1863 cp.role = 0x00; /* Become master */
1864 else
1865 cp.role = 0x01; /* Remain slave */
1866
1867 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1868 &cp);
1869 } else {
1870 struct hci_cp_accept_sync_conn_req cp;
1871
1872 bacpy(&cp.bdaddr, &ev->bdaddr);
1873 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1874
1875 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1876 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1877 cp.max_latency = __constant_cpu_to_le16(0xffff);
1878 cp.content_format = cpu_to_le16(hdev->voice_setting);
1879 cp.retrans_effort = 0xff;
1880
1881 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1882 sizeof(cp), &cp);
1883 }
1884 } else {
1885 /* Connection rejected */
1886 struct hci_cp_reject_conn_req cp;
1887
1888 bacpy(&cp.bdaddr, &ev->bdaddr);
1889 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1890 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1891 }
1892 }
1893
1894 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1895 {
1896 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1897 struct hci_conn *conn;
1898
1899 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1900
1901 hci_dev_lock(hdev);
1902
1903 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1904 if (!conn)
1905 goto unlock;
1906
1907 if (ev->status == 0)
1908 conn->state = BT_CLOSED;
1909
1910 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1911 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1912 if (ev->status != 0)
1913 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1914 conn->dst_type, ev->status);
1915 else
1916 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1917 conn->dst_type);
1918 }
1919
1920 if (ev->status == 0) {
1921 if (conn->type == ACL_LINK && conn->flush_key)
1922 hci_remove_link_key(hdev, &conn->dst);
1923 hci_proto_disconn_cfm(conn, ev->reason);
1924 hci_conn_del(conn);
1925 }
1926
1927 unlock:
1928 hci_dev_unlock(hdev);
1929 }
1930
1931 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1932 {
1933 struct hci_ev_auth_complete *ev = (void *) skb->data;
1934 struct hci_conn *conn;
1935
1936 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1937
1938 hci_dev_lock(hdev);
1939
1940 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1941 if (!conn)
1942 goto unlock;
1943
1944 if (!ev->status) {
1945 if (!hci_conn_ssp_enabled(conn) &&
1946 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1947 BT_INFO("re-auth of legacy device is not possible.");
1948 } else {
1949 conn->link_mode |= HCI_LM_AUTH;
1950 conn->sec_level = conn->pending_sec_level;
1951 }
1952 } else {
1953 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1954 ev->status);
1955 }
1956
1957 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1958 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1959
1960 if (conn->state == BT_CONFIG) {
1961 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1962 struct hci_cp_set_conn_encrypt cp;
1963 cp.handle = ev->handle;
1964 cp.encrypt = 0x01;
1965 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1966 &cp);
1967 } else {
1968 conn->state = BT_CONNECTED;
1969 hci_proto_connect_cfm(conn, ev->status);
1970 hci_conn_put(conn);
1971 }
1972 } else {
1973 hci_auth_cfm(conn, ev->status);
1974
1975 hci_conn_hold(conn);
1976 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1977 hci_conn_put(conn);
1978 }
1979
1980 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1981 if (!ev->status) {
1982 struct hci_cp_set_conn_encrypt cp;
1983 cp.handle = ev->handle;
1984 cp.encrypt = 0x01;
1985 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1986 &cp);
1987 } else {
1988 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1989 hci_encrypt_cfm(conn, ev->status, 0x00);
1990 }
1991 }
1992
1993 unlock:
1994 hci_dev_unlock(hdev);
1995 }
1996
1997 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1998 {
1999 struct hci_ev_remote_name *ev = (void *) skb->data;
2000 struct hci_conn *conn;
2001
2002 BT_DBG("%s", hdev->name);
2003
2004 hci_conn_check_pending(hdev);
2005
2006 hci_dev_lock(hdev);
2007
2008 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2009
2010 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2011 goto check_auth;
2012
2013 if (ev->status == 0)
2014 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2015 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2016 else
2017 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2018
2019 check_auth:
2020 if (!conn)
2021 goto unlock;
2022
2023 if (!hci_outgoing_auth_needed(hdev, conn))
2024 goto unlock;
2025
2026 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2027 struct hci_cp_auth_requested cp;
2028 cp.handle = __cpu_to_le16(conn->handle);
2029 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2030 }
2031
2032 unlock:
2033 hci_dev_unlock(hdev);
2034 }
2035
2036 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2037 {
2038 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2039 struct hci_conn *conn;
2040
2041 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2042
2043 hci_dev_lock(hdev);
2044
2045 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2046 if (conn) {
2047 if (!ev->status) {
2048 if (ev->encrypt) {
2049 /* Encryption implies authentication */
2050 conn->link_mode |= HCI_LM_AUTH;
2051 conn->link_mode |= HCI_LM_ENCRYPT;
2052 conn->sec_level = conn->pending_sec_level;
2053 } else
2054 conn->link_mode &= ~HCI_LM_ENCRYPT;
2055 }
2056
2057 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2058
2059 if (ev->status && conn->state == BT_CONNECTED) {
2060 hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
2061 hci_conn_put(conn);
2062 goto unlock;
2063 }
2064
2065 if (conn->state == BT_CONFIG) {
2066 if (!ev->status)
2067 conn->state = BT_CONNECTED;
2068
2069 hci_proto_connect_cfm(conn, ev->status);
2070 hci_conn_put(conn);
2071 } else
2072 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2073 }
2074
2075 unlock:
2076 hci_dev_unlock(hdev);
2077 }
2078
2079 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2080 struct sk_buff *skb)
2081 {
2082 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2083 struct hci_conn *conn;
2084
2085 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2086
2087 hci_dev_lock(hdev);
2088
2089 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2090 if (conn) {
2091 if (!ev->status)
2092 conn->link_mode |= HCI_LM_SECURE;
2093
2094 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2095
2096 hci_key_change_cfm(conn, ev->status);
2097 }
2098
2099 hci_dev_unlock(hdev);
2100 }
2101
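/* Read Remote Supported Features complete: cache the remote feature
 * mask and, while the connection is still in BT_CONFIG, continue setup
 * by reading extended features (when both sides are SSP capable) or by
 * requesting the remote name.
 */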
2102 static void hci_remote_features_evt(struct hci_dev *hdev,
2103 struct sk_buff *skb)
2104 {
2105 struct hci_ev_remote_features *ev = (void *) skb->data;
2106 struct hci_conn *conn;
2107
2108 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2109
2110 hci_dev_lock(hdev);
2111
2112 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2113 if (!conn)
2114 goto unlock;
2115
2116 if (!ev->status)
2117 memcpy(conn->features, ev->features, 8);
2118
2119 if (conn->state != BT_CONFIG)
2120 goto unlock;
2121
2122 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2123 struct hci_cp_read_remote_ext_features cp;
2124 cp.handle = ev->handle;
2125 cp.page = 0x01;
2126 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2127 sizeof(cp), &cp);
2128 goto unlock;
2129 }
2130
2131 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2132 struct hci_cp_remote_name_req cp;
2133 memset(&cp, 0, sizeof(cp));
2134 bacpy(&cp.bdaddr, &conn->dst);
2135 cp.pscan_rep_mode = 0x02;
2136 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2137 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2138 mgmt_device_connected(hdev, &conn->dst, conn->type,
2139 conn->dst_type, 0, NULL, 0,
2140 conn->dev_class);
2141
2142 if (!hci_outgoing_auth_needed(hdev, conn)) {
2143 conn->state = BT_CONNECTED;
2144 hci_proto_connect_cfm(conn, ev->status);
2145 hci_conn_put(conn);
2146 }
2147
2148 unlock:
2149 hci_dev_unlock(hdev);
2150 }
2151
2152 static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2153 {
2154 BT_DBG("%s", hdev->name);
2155 }
2156
2157 static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
2158 struct sk_buff *skb)
2159 {
2160 BT_DBG("%s", hdev->name);
2161 }
2162
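/* Command Complete event: dispatch to the per-opcode completion
 * handler, cancel the command timeout and, when the controller reports
 * free command slots, restart the command queue.
 */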
2163 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2164 {
2165 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2166 __u16 opcode;
2167
2168 skb_pull(skb, sizeof(*ev));
2169
2170 opcode = __le16_to_cpu(ev->opcode);
2171
2172 switch (opcode) {
2173 case HCI_OP_INQUIRY_CANCEL:
2174 hci_cc_inquiry_cancel(hdev, skb);
2175 break;
2176
2177 case HCI_OP_PERIODIC_INQ:
2178 hci_cc_periodic_inq(hdev, skb);
2179 break;
2180
2181 case HCI_OP_EXIT_PERIODIC_INQ:
2182 hci_cc_exit_periodic_inq(hdev, skb);
2183 break;
2184
2185 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2186 hci_cc_remote_name_req_cancel(hdev, skb);
2187 break;
2188
2189 case HCI_OP_ROLE_DISCOVERY:
2190 hci_cc_role_discovery(hdev, skb);
2191 break;
2192
2193 case HCI_OP_READ_LINK_POLICY:
2194 hci_cc_read_link_policy(hdev, skb);
2195 break;
2196
2197 case HCI_OP_WRITE_LINK_POLICY:
2198 hci_cc_write_link_policy(hdev, skb);
2199 break;
2200
2201 case HCI_OP_READ_DEF_LINK_POLICY:
2202 hci_cc_read_def_link_policy(hdev, skb);
2203 break;
2204
2205 case HCI_OP_WRITE_DEF_LINK_POLICY:
2206 hci_cc_write_def_link_policy(hdev, skb);
2207 break;
2208
2209 case HCI_OP_RESET:
2210 hci_cc_reset(hdev, skb);
2211 break;
2212
2213 case HCI_OP_WRITE_LOCAL_NAME:
2214 hci_cc_write_local_name(hdev, skb);
2215 break;
2216
2217 case HCI_OP_READ_LOCAL_NAME:
2218 hci_cc_read_local_name(hdev, skb);
2219 break;
2220
2221 case HCI_OP_WRITE_AUTH_ENABLE:
2222 hci_cc_write_auth_enable(hdev, skb);
2223 break;
2224
2225 case HCI_OP_WRITE_ENCRYPT_MODE:
2226 hci_cc_write_encrypt_mode(hdev, skb);
2227 break;
2228
2229 case HCI_OP_WRITE_SCAN_ENABLE:
2230 hci_cc_write_scan_enable(hdev, skb);
2231 break;
2232
2233 case HCI_OP_READ_CLASS_OF_DEV:
2234 hci_cc_read_class_of_dev(hdev, skb);
2235 break;
2236
2237 case HCI_OP_WRITE_CLASS_OF_DEV:
2238 hci_cc_write_class_of_dev(hdev, skb);
2239 break;
2240
2241 case HCI_OP_READ_VOICE_SETTING:
2242 hci_cc_read_voice_setting(hdev, skb);
2243 break;
2244
2245 case HCI_OP_WRITE_VOICE_SETTING:
2246 hci_cc_write_voice_setting(hdev, skb);
2247 break;
2248
2249 case HCI_OP_HOST_BUFFER_SIZE:
2250 hci_cc_host_buffer_size(hdev, skb);
2251 break;
2252
2253 case HCI_OP_WRITE_SSP_MODE:
2254 hci_cc_write_ssp_mode(hdev, skb);
2255 break;
2256
2257 case HCI_OP_READ_LOCAL_VERSION:
2258 hci_cc_read_local_version(hdev, skb);
2259 break;
2260
2261 case HCI_OP_READ_LOCAL_COMMANDS:
2262 hci_cc_read_local_commands(hdev, skb);
2263 break;
2264
2265 case HCI_OP_READ_LOCAL_FEATURES:
2266 hci_cc_read_local_features(hdev, skb);
2267 break;
2268
2269 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2270 hci_cc_read_local_ext_features(hdev, skb);
2271 break;
2272
2273 case HCI_OP_READ_BUFFER_SIZE:
2274 hci_cc_read_buffer_size(hdev, skb);
2275 break;
2276
2277 case HCI_OP_READ_BD_ADDR:
2278 hci_cc_read_bd_addr(hdev, skb);
2279 break;
2280
2281 case HCI_OP_READ_DATA_BLOCK_SIZE:
2282 hci_cc_read_data_block_size(hdev, skb);
2283 break;
2284
2285 case HCI_OP_WRITE_CA_TIMEOUT:
2286 hci_cc_write_ca_timeout(hdev, skb);
2287 break;
2288
2289 case HCI_OP_READ_FLOW_CONTROL_MODE:
2290 hci_cc_read_flow_control_mode(hdev, skb);
2291 break;
2292
2293 case HCI_OP_READ_LOCAL_AMP_INFO:
2294 hci_cc_read_local_amp_info(hdev, skb);
2295 break;
2296
2297 case HCI_OP_DELETE_STORED_LINK_KEY:
2298 hci_cc_delete_stored_link_key(hdev, skb);
2299 break;
2300
2301 case HCI_OP_SET_EVENT_MASK:
2302 hci_cc_set_event_mask(hdev, skb);
2303 break;
2304
2305 case HCI_OP_WRITE_INQUIRY_MODE:
2306 hci_cc_write_inquiry_mode(hdev, skb);
2307 break;
2308
2309 case HCI_OP_READ_INQ_RSP_TX_POWER:
2310 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2311 break;
2312
2313 case HCI_OP_SET_EVENT_FLT:
2314 hci_cc_set_event_flt(hdev, skb);
2315 break;
2316
2317 case HCI_OP_PIN_CODE_REPLY:
2318 hci_cc_pin_code_reply(hdev, skb);
2319 break;
2320
2321 case HCI_OP_PIN_CODE_NEG_REPLY:
2322 hci_cc_pin_code_neg_reply(hdev, skb);
2323 break;
2324
2325 case HCI_OP_READ_LOCAL_OOB_DATA:
2326 hci_cc_read_local_oob_data_reply(hdev, skb);
2327 break;
2328
2329 case HCI_OP_LE_READ_BUFFER_SIZE:
2330 hci_cc_le_read_buffer_size(hdev, skb);
2331 break;
2332
2333 case HCI_OP_USER_CONFIRM_REPLY:
2334 hci_cc_user_confirm_reply(hdev, skb);
2335 break;
2336
2337 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2338 hci_cc_user_confirm_neg_reply(hdev, skb);
2339 break;
2340
2341 case HCI_OP_USER_PASSKEY_REPLY:
2342 hci_cc_user_passkey_reply(hdev, skb);
2343 break;
2344
2345 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2346 hci_cc_user_passkey_neg_reply(hdev, skb);
2347 break;
2348
2349 case HCI_OP_LE_SET_SCAN_PARAM:
2350 hci_cc_le_set_scan_param(hdev, skb);
2351 break;
2352
2353 case HCI_OP_LE_SET_SCAN_ENABLE:
2354 hci_cc_le_set_scan_enable(hdev, skb);
2355 break;
2356
2357 case HCI_OP_LE_LTK_REPLY:
2358 hci_cc_le_ltk_reply(hdev, skb);
2359 break;
2360
2361 case HCI_OP_LE_LTK_NEG_REPLY:
2362 hci_cc_le_ltk_neg_reply(hdev, skb);
2363 break;
2364
2365 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2366 hci_cc_write_le_host_supported(hdev, skb);
2367 break;
2368
2369 default:
2370 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2371 break;
2372 }
2373
2374 if (ev->opcode != HCI_OP_NOP)
2375 del_timer(&hdev->cmd_timer);
2376
2377 if (ev->ncmd) {
2378 atomic_set(&hdev->cmd_cnt, 1);
2379 if (!skb_queue_empty(&hdev->cmd_q))
2380 queue_work(hdev->workqueue, &hdev->cmd_work);
2381 }
2382 }
2383
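/* Command Status event: dispatch the reported status to the matching
 * command status handler and restart the command queue when the
 * controller has free slots and no reset is in progress.
 */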
2384 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2385 {
2386 struct hci_ev_cmd_status *ev = (void *) skb->data;
2387 __u16 opcode;
2388
2389 skb_pull(skb, sizeof(*ev));
2390
2391 opcode = __le16_to_cpu(ev->opcode);
2392
2393 switch (opcode) {
2394 case HCI_OP_INQUIRY:
2395 hci_cs_inquiry(hdev, ev->status);
2396 break;
2397
2398 case HCI_OP_CREATE_CONN:
2399 hci_cs_create_conn(hdev, ev->status);
2400 break;
2401
2402 case HCI_OP_ADD_SCO:
2403 hci_cs_add_sco(hdev, ev->status);
2404 break;
2405
2406 case HCI_OP_AUTH_REQUESTED:
2407 hci_cs_auth_requested(hdev, ev->status);
2408 break;
2409
2410 case HCI_OP_SET_CONN_ENCRYPT:
2411 hci_cs_set_conn_encrypt(hdev, ev->status);
2412 break;
2413
2414 case HCI_OP_REMOTE_NAME_REQ:
2415 hci_cs_remote_name_req(hdev, ev->status);
2416 break;
2417
2418 case HCI_OP_READ_REMOTE_FEATURES:
2419 hci_cs_read_remote_features(hdev, ev->status);
2420 break;
2421
2422 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2423 hci_cs_read_remote_ext_features(hdev, ev->status);
2424 break;
2425
2426 case HCI_OP_SETUP_SYNC_CONN:
2427 hci_cs_setup_sync_conn(hdev, ev->status);
2428 break;
2429
2430 case HCI_OP_SNIFF_MODE:
2431 hci_cs_sniff_mode(hdev, ev->status);
2432 break;
2433
2434 case HCI_OP_EXIT_SNIFF_MODE:
2435 hci_cs_exit_sniff_mode(hdev, ev->status);
2436 break;
2437
2438 case HCI_OP_DISCONNECT:
2439 hci_cs_disconnect(hdev, ev->status);
2440 break;
2441
2442 case HCI_OP_LE_CREATE_CONN:
2443 hci_cs_le_create_conn(hdev, ev->status);
2444 break;
2445
2446 case HCI_OP_LE_START_ENC:
2447 hci_cs_le_start_enc(hdev, ev->status);
2448 break;
2449
2450 default:
2451 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2452 break;
2453 }
2454
2455 if (ev->opcode != HCI_OP_NOP)
2456 del_timer(&hdev->cmd_timer);
2457
2458 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2459 atomic_set(&hdev->cmd_cnt, 1);
2460 if (!skb_queue_empty(&hdev->cmd_q))
2461 queue_work(hdev->workqueue, &hdev->cmd_work);
2462 }
2463 }
2464
2465 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2466 {
2467 struct hci_ev_role_change *ev = (void *) skb->data;
2468 struct hci_conn *conn;
2469
2470 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2471
2472 hci_dev_lock(hdev);
2473
2474 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2475 if (conn) {
2476 if (!ev->status) {
2477 if (ev->role)
2478 conn->link_mode &= ~HCI_LM_MASTER;
2479 else
2480 conn->link_mode |= HCI_LM_MASTER;
2481 }
2482
2483 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2484
2485 hci_role_switch_cfm(conn, ev->status, ev->role);
2486 }
2487
2488 hci_dev_unlock(hdev);
2489 }
2490
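/* Number of Completed Packets event (packet-based flow control):
 * return the completed-packet credits to the per-link-type counters
 * and kick the TX work.
 */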
2491 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2492 {
2493 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2494 int i;
2495
2496 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2497 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2498 return;
2499 }
2500
2501 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2502 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2503 BT_DBG("%s bad parameters", hdev->name);
2504 return;
2505 }
2506
2507 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2508
2509 for (i = 0; i < ev->num_hndl; i++) {
2510 struct hci_comp_pkts_info *info = &ev->handles[i];
2511 struct hci_conn *conn;
2512 __u16 handle, count;
2513
2514 handle = __le16_to_cpu(info->handle);
2515 count = __le16_to_cpu(info->count);
2516
2517 conn = hci_conn_hash_lookup_handle(hdev, handle);
2518 if (!conn)
2519 continue;
2520
2521 conn->sent -= count;
2522
2523 switch (conn->type) {
2524 case ACL_LINK:
2525 hdev->acl_cnt += count;
2526 if (hdev->acl_cnt > hdev->acl_pkts)
2527 hdev->acl_cnt = hdev->acl_pkts;
2528 break;
2529
2530 case LE_LINK:
2531 if (hdev->le_pkts) {
2532 hdev->le_cnt += count;
2533 if (hdev->le_cnt > hdev->le_pkts)
2534 hdev->le_cnt = hdev->le_pkts;
2535 } else {
2536 hdev->acl_cnt += count;
2537 if (hdev->acl_cnt > hdev->acl_pkts)
2538 hdev->acl_cnt = hdev->acl_pkts;
2539 }
2540 break;
2541
2542 case SCO_LINK:
2543 hdev->sco_cnt += count;
2544 if (hdev->sco_cnt > hdev->sco_pkts)
2545 hdev->sco_cnt = hdev->sco_pkts;
2546 break;
2547
2548 default:
2549 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2550 break;
2551 }
2552 }
2553
2554 queue_work(hdev->workqueue, &hdev->tx_work);
2555 }
2556
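/* Number of Completed Data Blocks event (block-based flow control):
 * return the completed block credits to the shared block counter and
 * kick the TX work.
 */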
2557 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2558 {
2559 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2560 int i;
2561
2562 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2563 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2564 return;
2565 }
2566
2567 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2568 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2569 BT_DBG("%s bad parameters", hdev->name);
2570 return;
2571 }
2572
2573 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2574 ev->num_hndl);
2575
2576 for (i = 0; i < ev->num_hndl; i++) {
2577 struct hci_comp_blocks_info *info = &ev->handles[i];
2578 struct hci_conn *conn;
2579 __u16 handle, block_count;
2580
2581 handle = __le16_to_cpu(info->handle);
2582 block_count = __le16_to_cpu(info->blocks);
2583
2584 conn = hci_conn_hash_lookup_handle(hdev, handle);
2585 if (!conn)
2586 continue;
2587
2588 conn->sent -= block_count;
2589
2590 switch (conn->type) {
2591 case ACL_LINK:
2592 hdev->block_cnt += block_count;
2593 if (hdev->block_cnt > hdev->num_blocks)
2594 hdev->block_cnt = hdev->num_blocks;
2595 break;
2596
2597 default:
2598 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2599 break;
2600 }
2601 }
2602
2603 queue_work(hdev->workqueue, &hdev->tx_work);
2604 }
2605
2606 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2607 {
2608 struct hci_ev_mode_change *ev = (void *) skb->data;
2609 struct hci_conn *conn;
2610
2611 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2612
2613 hci_dev_lock(hdev);
2614
2615 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2616 if (conn) {
2617 conn->mode = ev->mode;
2618 conn->interval = __le16_to_cpu(ev->interval);
2619
2620 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2621 &conn->flags)) {
2622 if (conn->mode == HCI_CM_ACTIVE)
2623 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2624 else
2625 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2626 }
2627
2628 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2629 hci_sco_setup(conn, ev->status);
2630 }
2631
2632 hci_dev_unlock(hdev);
2633 }
2634
2635 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2636 {
2637 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2638 struct hci_conn *conn;
2639
2640 BT_DBG("%s", hdev->name);
2641
2642 hci_dev_lock(hdev);
2643
2644 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2645 if (!conn)
2646 goto unlock;
2647
2648 if (conn->state == BT_CONNECTED) {
2649 hci_conn_hold(conn);
2650 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2651 hci_conn_put(conn);
2652 }
2653
2654 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2655 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2656 sizeof(ev->bdaddr), &ev->bdaddr);
2657 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2658 u8 secure;
2659
2660 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2661 secure = 1;
2662 else
2663 secure = 0;
2664
2665 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2666 }
2667
2668 unlock:
2669 hci_dev_unlock(hdev);
2670 }
2671
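/* Link Key Request event: look up a stored key for the peer address
 * and reply with it, unless the key does not satisfy the pending
 * security requirements, in which case a negative reply is sent.
 */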
2672 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2673 {
2674 struct hci_ev_link_key_req *ev = (void *) skb->data;
2675 struct hci_cp_link_key_reply cp;
2676 struct hci_conn *conn;
2677 struct link_key *key;
2678
2679 BT_DBG("%s", hdev->name);
2680
2681 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2682 return;
2683
2684 hci_dev_lock(hdev);
2685
2686 key = hci_find_link_key(hdev, &ev->bdaddr);
2687 if (!key) {
2688 BT_DBG("%s link key not found for %s", hdev->name,
2689 batostr(&ev->bdaddr));
2690 goto not_found;
2691 }
2692
2693 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2694 batostr(&ev->bdaddr));
2695
2696 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2697 key->type == HCI_LK_DEBUG_COMBINATION) {
2698 BT_DBG("%s ignoring debug key", hdev->name);
2699 goto not_found;
2700 }
2701
2702 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2703 if (conn) {
2704 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2705 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2706 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2707 goto not_found;
2708 }
2709
2710 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2711 conn->pending_sec_level == BT_SECURITY_HIGH) {
2712 BT_DBG("%s ignoring key unauthenticated for high security",
2713 hdev->name);
2714 goto not_found;
2715 }
2716
2717 conn->key_type = key->type;
2718 conn->pin_length = key->pin_len;
2719 }
2720
2721 bacpy(&cp.bdaddr, &ev->bdaddr);
2722 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2723
2724 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2725
2726 hci_dev_unlock(hdev);
2727
2728 return;
2729
2730 not_found:
2731 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2732 hci_dev_unlock(hdev);
2733 }
2734
2735 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2736 {
2737 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2738 struct hci_conn *conn;
2739 u8 pin_len = 0;
2740
2741 BT_DBG("%s", hdev->name);
2742
2743 hci_dev_lock(hdev);
2744
2745 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2746 if (conn) {
2747 hci_conn_hold(conn);
2748 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2749 pin_len = conn->pin_length;
2750
2751 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2752 conn->key_type = ev->key_type;
2753
2754 hci_conn_put(conn);
2755 }
2756
2757 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2758 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2759 ev->key_type, pin_len);
2760
2761 hci_dev_unlock(hdev);
2762 }
2763
2764 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2765 {
2766 struct hci_ev_clock_offset *ev = (void *) skb->data;
2767 struct hci_conn *conn;
2768
2769 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2770
2771 hci_dev_lock(hdev);
2772
2773 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2774 if (conn && !ev->status) {
2775 struct inquiry_entry *ie;
2776
2777 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2778 if (ie) {
2779 ie->data.clock_offset = ev->clock_offset;
2780 ie->timestamp = jiffies;
2781 }
2782 }
2783
2784 hci_dev_unlock(hdev);
2785 }
2786
2787 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2788 {
2789 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2790 struct hci_conn *conn;
2791
2792 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2793
2794 hci_dev_lock(hdev);
2795
2796 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2797 if (conn && !ev->status)
2798 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2799
2800 hci_dev_unlock(hdev);
2801 }
2802
2803 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2804 {
2805 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2806 struct inquiry_entry *ie;
2807
2808 BT_DBG("%s", hdev->name);
2809
2810 hci_dev_lock(hdev);
2811
2812 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2813 if (ie) {
2814 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2815 ie->timestamp = jiffies;
2816 }
2817
2818 hci_dev_unlock(hdev);
2819 }
2820
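/* Inquiry Result with RSSI event: the responses come in two wire
 * formats (with or without the page scan mode field), distinguished by
 * the per-response size; update the inquiry cache and report each
 * device to the management interface.
 */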
2821 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2822 struct sk_buff *skb)
2823 {
2824 struct inquiry_data data;
2825 int num_rsp = *((__u8 *) skb->data);
2826 bool name_known, ssp;
2827
2828 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2829
2830 if (!num_rsp)
2831 return;
2832
2833 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2834 return;
2835
2836 hci_dev_lock(hdev);
2837
2838 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2839 struct inquiry_info_with_rssi_and_pscan_mode *info;
2840 info = (void *) (skb->data + 1);
2841
2842 for (; num_rsp; num_rsp--, info++) {
2843 bacpy(&data.bdaddr, &info->bdaddr);
2844 data.pscan_rep_mode = info->pscan_rep_mode;
2845 data.pscan_period_mode = info->pscan_period_mode;
2846 data.pscan_mode = info->pscan_mode;
2847 memcpy(data.dev_class, info->dev_class, 3);
2848 data.clock_offset = info->clock_offset;
2849 data.rssi = info->rssi;
2850 data.ssp_mode = 0x00;
2851
2852 name_known = hci_inquiry_cache_update(hdev, &data,
2853 false, &ssp);
2854 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2855 info->dev_class, info->rssi,
2856 !name_known, ssp, NULL, 0);
2857 }
2858 } else {
2859 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2860
2861 for (; num_rsp; num_rsp--, info++) {
2862 bacpy(&data.bdaddr, &info->bdaddr);
2863 data.pscan_rep_mode = info->pscan_rep_mode;
2864 data.pscan_period_mode = info->pscan_period_mode;
2865 data.pscan_mode = 0x00;
2866 memcpy(data.dev_class, info->dev_class, 3);
2867 data.clock_offset = info->clock_offset;
2868 data.rssi = info->rssi;
2869 data.ssp_mode = 0x00;
2870 name_known = hci_inquiry_cache_update(hdev, &data,
2871 false, &ssp);
2872 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2873 info->dev_class, info->rssi,
2874 !name_known, ssp, NULL, 0);
2875 }
2876 }
2877
2878 hci_dev_unlock(hdev);
2879 }
2880
2881 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2882 struct sk_buff *skb)
2883 {
2884 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2885 struct hci_conn *conn;
2886
2887 BT_DBG("%s", hdev->name);
2888
2889 hci_dev_lock(hdev);
2890
2891 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2892 if (!conn)
2893 goto unlock;
2894
2895 if (!ev->status && ev->page == 0x01) {
2896 struct inquiry_entry *ie;
2897
2898 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2899 if (ie)
2900 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2901
2902 if (ev->features[0] & LMP_HOST_SSP)
2903 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2904 }
2905
2906 if (conn->state != BT_CONFIG)
2907 goto unlock;
2908
2909 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2910 struct hci_cp_remote_name_req cp;
2911 memset(&cp, 0, sizeof(cp));
2912 bacpy(&cp.bdaddr, &conn->dst);
2913 cp.pscan_rep_mode = 0x02;
2914 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2915 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2916 mgmt_device_connected(hdev, &conn->dst, conn->type,
2917 conn->dst_type, 0, NULL, 0,
2918 conn->dev_class);
2919
2920 if (!hci_outgoing_auth_needed(hdev, conn)) {
2921 conn->state = BT_CONNECTED;
2922 hci_proto_connect_cfm(conn, ev->status);
2923 hci_conn_put(conn);
2924 }
2925
2926 unlock:
2927 hci_dev_unlock(hdev);
2928 }
2929
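/* Synchronous Connection Complete event: finish SCO/eSCO setup, or
 * retry once with a reduced packet type selection when the remote
 * rejects the eSCO parameters.
 */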
2930 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2931 struct sk_buff *skb)
2932 {
2933 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2934 struct hci_conn *conn;
2935
2936 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2937
2938 hci_dev_lock(hdev);
2939
2940 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2941 if (!conn) {
2942 if (ev->link_type == ESCO_LINK)
2943 goto unlock;
2944
2945 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2946 if (!conn)
2947 goto unlock;
2948
2949 conn->type = SCO_LINK;
2950 }
2951
2952 switch (ev->status) {
2953 case 0x00:
2954 conn->handle = __le16_to_cpu(ev->handle);
2955 conn->state = BT_CONNECTED;
2956
2957 hci_conn_hold_device(conn);
2958 hci_conn_add_sysfs(conn);
2959 break;
2960
2961 case 0x11: /* Unsupported Feature or Parameter Value */
2962 case 0x1c: /* SCO interval rejected */
2963 case 0x1a: /* Unsupported Remote Feature */
2964 case 0x1f: /* Unspecified error */
2965 if (conn->out && conn->attempt < 2) {
2966 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2967 (hdev->esco_type & EDR_ESCO_MASK);
2968 hci_setup_sync(conn, conn->link->handle);
2969 goto unlock;
2970 }
2971 /* fall through */
2972
2973 default:
2974 conn->state = BT_CLOSED;
2975 break;
2976 }
2977
2978 hci_proto_connect_cfm(conn, ev->status);
2979 if (ev->status)
2980 hci_conn_del(conn);
2981
2982 unlock:
2983 hci_dev_unlock(hdev);
2984 }
2985
2986 static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2987 {
2988 BT_DBG("%s", hdev->name);
2989 }
2990
2991 static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2992 {
2993 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2994
2995 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2996 }
2997
2998 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
2999 struct sk_buff *skb)
3000 {
3001 struct inquiry_data data;
3002 struct extended_inquiry_info *info = (void *) (skb->data + 1);
3003 int num_rsp = *((__u8 *) skb->data);
3004 size_t eir_len;
3005
3006 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
3007
3008 if (!num_rsp)
3009 return;
3010
3011 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
3012 return;
3013
3014 hci_dev_lock(hdev);
3015
3016 for (; num_rsp; num_rsp--, info++) {
3017 bool name_known, ssp;
3018
3019 bacpy(&data.bdaddr, &info->bdaddr);
3020 data.pscan_rep_mode = info->pscan_rep_mode;
3021 data.pscan_period_mode = info->pscan_period_mode;
3022 data.pscan_mode = 0x00;
3023 memcpy(data.dev_class, info->dev_class, 3);
3024 data.clock_offset = info->clock_offset;
3025 data.rssi = info->rssi;
3026 data.ssp_mode = 0x01;
3027
3028 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3029 name_known = eir_has_data_type(info->data,
3030 sizeof(info->data),
3031 EIR_NAME_COMPLETE);
3032 else
3033 name_known = true;
3034
3035 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3036 &ssp);
3037 eir_len = eir_get_length(info->data, sizeof(info->data));
3038 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3039 info->dev_class, info->rssi, !name_known,
3040 ssp, info->data, eir_len);
3041 }
3042
3043 hci_dev_unlock(hdev);
3044 }
3045
3046 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3047 struct sk_buff *skb)
3048 {
3049 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3050 struct hci_conn *conn;
3051
3052 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3053 __le16_to_cpu(ev->handle));
3054
3055 hci_dev_lock(hdev);
3056
3057 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3058 if (!conn)
3059 goto unlock;
3060
3061 if (!ev->status)
3062 conn->sec_level = conn->pending_sec_level;
3063
3064 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3065
3066 if (ev->status && conn->state == BT_CONNECTED) {
3067 hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
3068 hci_conn_put(conn);
3069 goto unlock;
3070 }
3071
3072 if (conn->state == BT_CONFIG) {
3073 if (!ev->status)
3074 conn->state = BT_CONNECTED;
3075
3076 hci_proto_connect_cfm(conn, ev->status);
3077 hci_conn_put(conn);
3078 } else {
3079 hci_auth_cfm(conn, ev->status);
3080
3081 hci_conn_hold(conn);
3082 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3083 hci_conn_put(conn);
3084 }
3085
3086 unlock:
3087 hci_dev_unlock(hdev);
3088 }
3089
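/* Derive the authentication requirements to use in the IO Capability
 * Reply from what the remote side requested and our own settings.
 */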
3090 static u8 hci_get_auth_req(struct hci_conn *conn)
3091 {
3092 /* If remote requests dedicated bonding follow that lead */
3093 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3094 /* If both remote and local IO capabilities allow MITM
3095 * protection then require it, otherwise don't */
3096 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3097 return 0x02;
3098 else
3099 return 0x03;
3100 }
3101
3102 /* If remote requests no-bonding follow that lead */
3103 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3104 return conn->remote_auth | (conn->auth_type & 0x01);
3105
3106 return conn->auth_type;
3107 }
3108
3109 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3110 {
3111 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3112 struct hci_conn *conn;
3113
3114 BT_DBG("%s", hdev->name);
3115
3116 hci_dev_lock(hdev);
3117
3118 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3119 if (!conn)
3120 goto unlock;
3121
3122 hci_conn_hold(conn);
3123
3124 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3125 goto unlock;
3126
3127 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3128 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3129 struct hci_cp_io_capability_reply cp;
3130
3131 bacpy(&cp.bdaddr, &ev->bdaddr);
3132 /* Change the IO capability from KeyboardDisplay
3133 * to DisplayYesNo as it is not supported by the BT spec. */
3134 cp.capability = (conn->io_capability == 0x04) ?
3135 0x01 : conn->io_capability;
3136 conn->auth_type = hci_get_auth_req(conn);
3137 cp.authentication = conn->auth_type;
3138
3139 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3140 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3141 cp.oob_data = 0x01;
3142 else
3143 cp.oob_data = 0x00;
3144
3145 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3146 sizeof(cp), &cp);
3147 } else {
3148 struct hci_cp_io_capability_neg_reply cp;
3149
3150 bacpy(&cp.bdaddr, &ev->bdaddr);
3151 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3152
3153 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3154 sizeof(cp), &cp);
3155 }
3156
3157 unlock:
3158 hci_dev_unlock(hdev);
3159 }
3160
3161 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3162 {
3163 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3164 struct hci_conn *conn;
3165
3166 BT_DBG("%s", hdev->name);
3167
3168 hci_dev_lock(hdev);
3169
3170 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3171 if (!conn)
3172 goto unlock;
3173
3174 conn->remote_cap = ev->capability;
3175 conn->remote_auth = ev->authentication;
3176 if (ev->oob_data)
3177 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3178
3179 unlock:
3180 hci_dev_unlock(hdev);
3181 }
3182
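/* User Confirmation Request event: reject when we need MITM protection
 * but the remote side cannot provide it, auto-accept when neither side
 * requires MITM (optionally after a configurable delay), and otherwise
 * hand the request to user space.
 */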
3183 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3184 struct sk_buff *skb)
3185 {
3186 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3187 int loc_mitm, rem_mitm, confirm_hint = 0;
3188 struct hci_conn *conn;
3189
3190 BT_DBG("%s", hdev->name);
3191
3192 hci_dev_lock(hdev);
3193
3194 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3195 goto unlock;
3196
3197 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3198 if (!conn)
3199 goto unlock;
3200
3201 loc_mitm = (conn->auth_type & 0x01);
3202 rem_mitm = (conn->remote_auth & 0x01);
3203
3204 /* If we require MITM but the remote device can't provide that
3205 * (it has NoInputNoOutput) then reject the confirmation
3206 * request. The only exception is when we're dedicated bonding
3207 * initiators (connect_cfm_cb set) since then we always have the MITM
3208 * bit set. */
3209 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3210 BT_DBG("Rejecting request: remote device can't provide MITM");
3211 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3212 sizeof(ev->bdaddr), &ev->bdaddr);
3213 goto unlock;
3214 }
3215
3216 /* If no side requires MITM protection, auto-accept */
3217 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3218 (!rem_mitm || conn->io_capability == 0x03)) {
3219
3220 /* If we're not the initiators, request authorization to
3221 * proceed from user space (mgmt_user_confirm with
3222 * confirm_hint set to 1). */
3223 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3224 BT_DBG("Confirming auto-accept as acceptor");
3225 confirm_hint = 1;
3226 goto confirm;
3227 }
3228
3229 BT_DBG("Auto-accept of user confirmation with %ums delay",
3230 hdev->auto_accept_delay);
3231
3232 if (hdev->auto_accept_delay > 0) {
3233 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3234 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3235 goto unlock;
3236 }
3237
3238 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3239 sizeof(ev->bdaddr), &ev->bdaddr);
3240 goto unlock;
3241 }
3242
3243 confirm:
3244 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3245 confirm_hint);
3246
3247 unlock:
3248 hci_dev_unlock(hdev);
3249 }
3250
3251 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3252 struct sk_buff *skb)
3253 {
3254 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3255
3256 BT_DBG("%s", hdev->name);
3257
3258 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3259 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3260 }
3261
3262 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3263 struct sk_buff *skb)
3264 {
3265 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3266 struct hci_conn *conn;
3267
3268 BT_DBG("%s", hdev->name);
3269
3270 hci_dev_lock(hdev);
3271
3272 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3273 if (!conn)
3274 goto unlock;
3275
3276 /* To avoid duplicate auth_failed events to user space we check
3277 * the HCI_CONN_AUTH_PEND flag which will be set if we
3278 * initiated the authentication. A traditional auth_complete
3279 * event gets always produced as initiator and is also mapped to
3280 * the mgmt_auth_failed event */
3281 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
3282 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3283 ev->status);
3284
3285 hci_conn_put(conn);
3286
3287 unlock:
3288 hci_dev_unlock(hdev);
3289 }
3290
3291 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3292 struct sk_buff *skb)
3293 {
3294 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3295 struct inquiry_entry *ie;
3296
3297 BT_DBG("%s", hdev->name);
3298
3299 hci_dev_lock(hdev);
3300
3301 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3302 if (ie)
3303 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3304
3305 hci_dev_unlock(hdev);
3306 }
3307
3308 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3309 struct sk_buff *skb)
3310 {
3311 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3312 struct oob_data *data;
3313
3314 BT_DBG("%s", hdev->name);
3315
3316 hci_dev_lock(hdev);
3317
3318 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3319 goto unlock;
3320
3321 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3322 if (data) {
3323 struct hci_cp_remote_oob_data_reply cp;
3324
3325 bacpy(&cp.bdaddr, &ev->bdaddr);
3326 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3327 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3328
3329 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3330 &cp);
3331 } else {
3332 struct hci_cp_remote_oob_data_neg_reply cp;
3333
3334 bacpy(&cp.bdaddr, &ev->bdaddr);
3335 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3336 &cp);
3337 }
3338
3339 unlock:
3340 hci_dev_unlock(hdev);
3341 }
3342
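/* LE Connection Complete event: attach the new handle to the pending
 * LE connection (adding one for incoming connections), notify the
 * management interface and confirm the connection to the upper layers.
 */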
3343 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3344 {
3345 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3346 struct hci_conn *conn;
3347
3348 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3349
3350 hci_dev_lock(hdev);
3351
3352 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3353 if (!conn) {
3354 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3355 if (!conn) {
3356 BT_ERR("No memory for new connection");
3357 goto unlock;
3358 }
3359
3360 conn->dst_type = ev->bdaddr_type;
3361
3362 if (ev->role == LE_CONN_ROLE_MASTER) {
3363 conn->out = true;
3364 conn->link_mode |= HCI_LM_MASTER;
3365 }
3366 }
3367
3368 if (ev->status) {
3369 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3370 conn->dst_type, ev->status);
3371 hci_proto_connect_cfm(conn, ev->status);
3372 conn->state = BT_CLOSED;
3373 hci_conn_del(conn);
3374 goto unlock;
3375 }
3376
3377 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3378 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3379 conn->dst_type, 0, NULL, 0, NULL);
3380
3381 conn->sec_level = BT_SECURITY_LOW;
3382 conn->handle = __le16_to_cpu(ev->handle);
3383 conn->state = BT_CONNECTED;
3384
3385 hci_conn_hold_device(conn);
3386 hci_conn_add_sysfs(conn);
3387
3388 hci_proto_connect_cfm(conn, ev->status);
3389
3390 unlock:
3391 hci_dev_unlock(hdev);
3392 }
3393
3394 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3395 {
3396 u8 num_reports = skb->data[0];
3397 void *ptr = &skb->data[1];
3398 s8 rssi;
3399
3400 hci_dev_lock(hdev);
3401
3402 while (num_reports--) {
3403 struct hci_ev_le_advertising_info *ev = ptr;
3404
3405 rssi = ev->data[ev->length];
3406 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3407 NULL, rssi, 0, 1, ev->data, ev->length);
3408
3409 ptr += sizeof(*ev) + ev->length + 1;
3410 }
3411
3412 hci_dev_unlock(hdev);
3413 }
3414
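/* LE Long Term Key Request event: look up the LTK matching the
 * requested EDiv/Rand values and reply with it; short term keys are
 * removed after a single use. A negative reply is sent when no
 * matching key is found.
 */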
3415 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3416 {
3417 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3418 struct hci_cp_le_ltk_reply cp;
3419 struct hci_cp_le_ltk_neg_reply neg;
3420 struct hci_conn *conn;
3421 struct smp_ltk *ltk;
3422
3423 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3424
3425 hci_dev_lock(hdev);
3426
3427 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3428 if (conn == NULL)
3429 goto not_found;
3430
3431 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3432 if (ltk == NULL)
3433 goto not_found;
3434
3435 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3436 cp.handle = cpu_to_le16(conn->handle);
3437
3438 if (ltk->authenticated)
3439 conn->sec_level = BT_SECURITY_HIGH;
3440
3441 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3442
3443 if (ltk->type & HCI_SMP_STK) {
3444 list_del(&ltk->list);
3445 kfree(ltk);
3446 }
3447
3448 hci_dev_unlock(hdev);
3449
3450 return;
3451
3452 not_found:
3453 neg.handle = ev->handle;
3454 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3455 hci_dev_unlock(hdev);
3456 }
3457
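/* LE Meta event: strip the meta event header and dispatch on the
 * subevent code.
 */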
3458 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3459 {
3460 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3461
3462 skb_pull(skb, sizeof(*le_ev));
3463
3464 switch (le_ev->subevent) {
3465 case HCI_EV_LE_CONN_COMPLETE:
3466 hci_le_conn_complete_evt(hdev, skb);
3467 break;
3468
3469 case HCI_EV_LE_ADVERTISING_REPORT:
3470 hci_le_adv_report_evt(hdev, skb);
3471 break;
3472
3473 case HCI_EV_LE_LTK_REQ:
3474 hci_le_ltk_request_evt(hdev, skb);
3475 break;
3476
3477 default:
3478 break;
3479 }
3480 }
3481
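/* Main dispatcher for incoming HCI event packets. */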
3482 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3483 {
3484 struct hci_event_hdr *hdr = (void *) skb->data;
3485 __u8 event = hdr->evt;
3486
3487 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3488
3489 switch (event) {
3490 case HCI_EV_INQUIRY_COMPLETE:
3491 hci_inquiry_complete_evt(hdev, skb);
3492 break;
3493
3494 case HCI_EV_INQUIRY_RESULT:
3495 hci_inquiry_result_evt(hdev, skb);
3496 break;
3497
3498 case HCI_EV_CONN_COMPLETE:
3499 hci_conn_complete_evt(hdev, skb);
3500 break;
3501
3502 case HCI_EV_CONN_REQUEST:
3503 hci_conn_request_evt(hdev, skb);
3504 break;
3505
3506 case HCI_EV_DISCONN_COMPLETE:
3507 hci_disconn_complete_evt(hdev, skb);
3508 break;
3509
3510 case HCI_EV_AUTH_COMPLETE:
3511 hci_auth_complete_evt(hdev, skb);
3512 break;
3513
3514 case HCI_EV_REMOTE_NAME:
3515 hci_remote_name_evt(hdev, skb);
3516 break;
3517
3518 case HCI_EV_ENCRYPT_CHANGE:
3519 hci_encrypt_change_evt(hdev, skb);
3520 break;
3521
3522 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3523 hci_change_link_key_complete_evt(hdev, skb);
3524 break;
3525
3526 case HCI_EV_REMOTE_FEATURES:
3527 hci_remote_features_evt(hdev, skb);
3528 break;
3529
3530 case HCI_EV_REMOTE_VERSION:
3531 hci_remote_version_evt(hdev, skb);
3532 break;
3533
3534 case HCI_EV_QOS_SETUP_COMPLETE:
3535 hci_qos_setup_complete_evt(hdev, skb);
3536 break;
3537
3538 case HCI_EV_CMD_COMPLETE:
3539 hci_cmd_complete_evt(hdev, skb);
3540 break;
3541
3542 case HCI_EV_CMD_STATUS:
3543 hci_cmd_status_evt(hdev, skb);
3544 break;
3545
3546 case HCI_EV_ROLE_CHANGE:
3547 hci_role_change_evt(hdev, skb);
3548 break;
3549
3550 case HCI_EV_NUM_COMP_PKTS:
3551 hci_num_comp_pkts_evt(hdev, skb);
3552 break;
3553
3554 case HCI_EV_MODE_CHANGE:
3555 hci_mode_change_evt(hdev, skb);
3556 break;
3557
3558 case HCI_EV_PIN_CODE_REQ:
3559 hci_pin_code_request_evt(hdev, skb);
3560 break;
3561
3562 case HCI_EV_LINK_KEY_REQ:
3563 hci_link_key_request_evt(hdev, skb);
3564 break;
3565
3566 case HCI_EV_LINK_KEY_NOTIFY:
3567 hci_link_key_notify_evt(hdev, skb);
3568 break;
3569
3570 case HCI_EV_CLOCK_OFFSET:
3571 hci_clock_offset_evt(hdev, skb);
3572 break;
3573
3574 case HCI_EV_PKT_TYPE_CHANGE:
3575 hci_pkt_type_change_evt(hdev, skb);
3576 break;
3577
3578 case HCI_EV_PSCAN_REP_MODE:
3579 hci_pscan_rep_mode_evt(hdev, skb);
3580 break;
3581
3582 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3583 hci_inquiry_result_with_rssi_evt(hdev, skb);
3584 break;
3585
3586 case HCI_EV_REMOTE_EXT_FEATURES:
3587 hci_remote_ext_features_evt(hdev, skb);
3588 break;
3589
3590 case HCI_EV_SYNC_CONN_COMPLETE:
3591 hci_sync_conn_complete_evt(hdev, skb);
3592 break;
3593
3594 case HCI_EV_SYNC_CONN_CHANGED:
3595 hci_sync_conn_changed_evt(hdev, skb);
3596 break;
3597
3598 case HCI_EV_SNIFF_SUBRATE:
3599 hci_sniff_subrate_evt(hdev, skb);
3600 break;
3601
3602 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3603 hci_extended_inquiry_result_evt(hdev, skb);
3604 break;
3605
3606 case HCI_EV_KEY_REFRESH_COMPLETE:
3607 hci_key_refresh_complete_evt(hdev, skb);
3608 break;
3609
3610 case HCI_EV_IO_CAPA_REQUEST:
3611 hci_io_capa_request_evt(hdev, skb);
3612 break;
3613
3614 case HCI_EV_IO_CAPA_REPLY:
3615 hci_io_capa_reply_evt(hdev, skb);
3616 break;
3617
3618 case HCI_EV_USER_CONFIRM_REQUEST:
3619 hci_user_confirm_request_evt(hdev, skb);
3620 break;
3621
3622 case HCI_EV_USER_PASSKEY_REQUEST:
3623 hci_user_passkey_request_evt(hdev, skb);
3624 break;
3625
3626 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3627 hci_simple_pair_complete_evt(hdev, skb);
3628 break;
3629
3630 case HCI_EV_REMOTE_HOST_FEATURES:
3631 hci_remote_host_features_evt(hdev, skb);
3632 break;
3633
3634 case HCI_EV_LE_META:
3635 hci_le_meta_evt(hdev, skb);
3636 break;
3637
3638 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3639 hci_remote_oob_data_request_evt(hdev, skb);
3640 break;
3641
3642 case HCI_EV_NUM_COMP_BLOCKS:
3643 hci_num_comp_blocks_evt(hdev, skb);
3644 break;
3645
3646 default:
3647 BT_DBG("%s event 0x%2.2x", hdev->name, event);
3648 break;
3649 }
3650
3651 kfree_skb(skb);
3652 hdev->stat.evt_rx++;
3653 }