Bluetooth: Fix minor coding style in hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32
33 /* Handle HCI Event packets */
34
35 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
36 {
37 __u8 status = *((__u8 *) skb->data);
38
39 BT_DBG("%s status 0x%2.2x", hdev->name, status);
40
41 if (status) {
42 hci_dev_lock(hdev);
43 mgmt_stop_discovery_failed(hdev, status);
44 hci_dev_unlock(hdev);
45 return;
46 }
47
48 clear_bit(HCI_INQUIRY, &hdev->flags);
49
50 hci_dev_lock(hdev);
51 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
52 hci_dev_unlock(hdev);
53
54 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
55
56 hci_conn_check_pending(hdev);
57 }
58
59 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
60 {
61 __u8 status = *((__u8 *) skb->data);
62
63 BT_DBG("%s status 0x%2.2x", hdev->name, status);
64
65 if (status)
66 return;
67
68 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
69 }
70
71 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
72 {
73 __u8 status = *((__u8 *) skb->data);
74
75 BT_DBG("%s status 0x%2.2x", hdev->name, status);
76
77 if (status)
78 return;
79
80 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
81
82 hci_conn_check_pending(hdev);
83 }
84
85 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
86 struct sk_buff *skb)
87 {
88 BT_DBG("%s", hdev->name);
89 }
90
91 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
92 {
93 struct hci_rp_role_discovery *rp = (void *) skb->data;
94 struct hci_conn *conn;
95
96 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
97
98 if (rp->status)
99 return;
100
101 hci_dev_lock(hdev);
102
103 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
104 if (conn) {
105 if (rp->role)
106 conn->link_mode &= ~HCI_LM_MASTER;
107 else
108 conn->link_mode |= HCI_LM_MASTER;
109 }
110
111 hci_dev_unlock(hdev);
112 }
113
114 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
115 {
116 struct hci_rp_read_link_policy *rp = (void *) skb->data;
117 struct hci_conn *conn;
118
119 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
120
121 if (rp->status)
122 return;
123
124 hci_dev_lock(hdev);
125
126 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
127 if (conn)
128 conn->link_policy = __le16_to_cpu(rp->policy);
129
130 hci_dev_unlock(hdev);
131 }
132
133 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
134 {
135 struct hci_rp_write_link_policy *rp = (void *) skb->data;
136 struct hci_conn *conn;
137 void *sent;
138
139 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
140
141 if (rp->status)
142 return;
143
144 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
145 if (!sent)
146 return;
147
148 hci_dev_lock(hdev);
149
150 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
151 if (conn)
152 conn->link_policy = get_unaligned_le16(sent + 2);
153
154 hci_dev_unlock(hdev);
155 }
156
157 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
158 struct sk_buff *skb)
159 {
160 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
161
162 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
163
164 if (rp->status)
165 return;
166
167 hdev->link_policy = __le16_to_cpu(rp->policy);
168 }
169
170 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
171 struct sk_buff *skb)
172 {
173 __u8 status = *((__u8 *) skb->data);
174 void *sent;
175
176 BT_DBG("%s status 0x%2.2x", hdev->name, status);
177
178 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
179 if (!sent)
180 return;
181
182 if (!status)
183 hdev->link_policy = get_unaligned_le16(sent);
184
185 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
186 }
187
188 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
189 {
190 __u8 status = *((__u8 *) skb->data);
191
192 BT_DBG("%s status 0x%2.2x", hdev->name, status);
193
194 clear_bit(HCI_RESET, &hdev->flags);
195
196 hci_req_complete(hdev, HCI_OP_RESET, status);
197
198 /* Reset all non-persistent flags */
199 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
200 BIT(HCI_PERIODIC_INQ));
201
202 hdev->discovery.state = DISCOVERY_STOPPED;
203 }
204
205 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
206 {
207 __u8 status = *((__u8 *) skb->data);
208 void *sent;
209
210 BT_DBG("%s status 0x%2.2x", hdev->name, status);
211
212 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
213 if (!sent)
214 return;
215
216 hci_dev_lock(hdev);
217
218 if (test_bit(HCI_MGMT, &hdev->dev_flags))
219 mgmt_set_local_name_complete(hdev, sent, status);
220 else if (!status)
221 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
222
223 hci_dev_unlock(hdev);
224
225 hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
226 }
227
228 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
229 {
230 struct hci_rp_read_local_name *rp = (void *) skb->data;
231
232 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
233
234 if (rp->status)
235 return;
236
237 if (test_bit(HCI_SETUP, &hdev->dev_flags))
238 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
239 }
240
241 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
242 {
243 __u8 status = *((__u8 *) skb->data);
244 void *sent;
245
246 BT_DBG("%s status 0x%2.2x", hdev->name, status);
247
248 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
249 if (!sent)
250 return;
251
252 if (!status) {
253 __u8 param = *((__u8 *) sent);
254
255 if (param == AUTH_ENABLED)
256 set_bit(HCI_AUTH, &hdev->flags);
257 else
258 clear_bit(HCI_AUTH, &hdev->flags);
259 }
260
261 if (test_bit(HCI_MGMT, &hdev->dev_flags))
262 mgmt_auth_enable_complete(hdev, status);
263
264 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
265 }
266
267 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
268 {
269 __u8 status = *((__u8 *) skb->data);
270 void *sent;
271
272 BT_DBG("%s status 0x%2.2x", hdev->name, status);
273
274 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
275 if (!sent)
276 return;
277
278 if (!status) {
279 __u8 param = *((__u8 *) sent);
280
281 if (param)
282 set_bit(HCI_ENCRYPT, &hdev->flags);
283 else
284 clear_bit(HCI_ENCRYPT, &hdev->flags);
285 }
286
287 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
288 }
289
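/* Complete handler for Write Scan Enable. Mirrors the new scan setting
 * in the HCI_PSCAN/HCI_ISCAN flags and notifies the management
 * interface about connectable/discoverable changes. */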
290 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
291 {
292 __u8 param, status = *((__u8 *) skb->data);
293 int old_pscan, old_iscan;
294 void *sent;
295
296 BT_DBG("%s status 0x%2.2x", hdev->name, status);
297
298 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
299 if (!sent)
300 return;
301
302 param = *((__u8 *) sent);
303
304 hci_dev_lock(hdev);
305
306 if (status) {
307 mgmt_write_scan_failed(hdev, param, status);
308 hdev->discov_timeout = 0;
309 goto done;
310 }
311
312 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
313 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
314
315 if (param & SCAN_INQUIRY) {
316 set_bit(HCI_ISCAN, &hdev->flags);
317 if (!old_iscan)
318 mgmt_discoverable(hdev, 1);
319 if (hdev->discov_timeout > 0) {
320 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
321 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
322 to);
323 }
324 } else if (old_iscan)
325 mgmt_discoverable(hdev, 0);
326
327 if (param & SCAN_PAGE) {
328 set_bit(HCI_PSCAN, &hdev->flags);
329 if (!old_pscan)
330 mgmt_connectable(hdev, 1);
331 } else if (old_pscan)
332 mgmt_connectable(hdev, 0);
333
334 done:
335 hci_dev_unlock(hdev);
336 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
337 }
338
339 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
340 {
341 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
342
343 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
344
345 if (rp->status)
346 return;
347
348 memcpy(hdev->dev_class, rp->dev_class, 3);
349
350 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
351 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
352 }
353
354 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
355 {
356 __u8 status = *((__u8 *) skb->data);
357 void *sent;
358
359 BT_DBG("%s status 0x%2.2x", hdev->name, status);
360
361 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
362 if (!sent)
363 return;
364
365 hci_dev_lock(hdev);
366
367 if (status == 0)
368 memcpy(hdev->dev_class, sent, 3);
369
370 if (test_bit(HCI_MGMT, &hdev->dev_flags))
371 mgmt_set_class_of_dev_complete(hdev, sent, status);
372
373 hci_dev_unlock(hdev);
374 }
375
376 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
377 {
378 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
379 __u16 setting;
380
381 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
382
383 if (rp->status)
384 return;
385
386 setting = __le16_to_cpu(rp->voice_setting);
387
388 if (hdev->voice_setting == setting)
389 return;
390
391 hdev->voice_setting = setting;
392
393 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
394
395 if (hdev->notify)
396 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
397 }
398
399 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
400 struct sk_buff *skb)
401 {
402 __u8 status = *((__u8 *) skb->data);
403 __u16 setting;
404 void *sent;
405
406 BT_DBG("%s status 0x%2.2x", hdev->name, status);
407
408 if (status)
409 return;
410
411 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
412 if (!sent)
413 return;
414
415 setting = get_unaligned_le16(sent);
416
417 if (hdev->voice_setting == setting)
418 return;
419
420 hdev->voice_setting = setting;
421
422 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
423
424 if (hdev->notify)
425 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
426 }
427
428 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
429 {
430 __u8 status = *((__u8 *) skb->data);
431
432 BT_DBG("%s status 0x%2.2x", hdev->name, status);
433
434 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
435 }
436
437 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
438 {
439 __u8 status = *((__u8 *) skb->data);
440 void *sent;
441
442 BT_DBG("%s status 0x%2.2x", hdev->name, status);
443
444 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
445 if (!sent)
446 return;
447
448 if (test_bit(HCI_MGMT, &hdev->dev_flags))
449 mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
450 else if (!status) {
451 if (*((u8 *) sent))
452 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
453 else
454 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
455 }
456 }
457
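/* Pick the inquiry mode to configure: 0x02 for extended inquiry result,
 * 0x01 for inquiry result with RSSI, 0x00 for the standard format. The
 * manufacturer/revision checks are quirks for controllers that appear
 * to support RSSI inquiry results without advertising it in their LMP
 * features (assumption based on the explicit list below). */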
458 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
459 {
460 if (hdev->features[6] & LMP_EXT_INQ)
461 return 2;
462
463 if (hdev->features[3] & LMP_RSSI_INQ)
464 return 1;
465
466 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
467 hdev->lmp_subver == 0x0757)
468 return 1;
469
470 if (hdev->manufacturer == 15) {
471 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
472 return 1;
473 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
474 return 1;
475 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
476 return 1;
477 }
478
479 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
480 hdev->lmp_subver == 0x1805)
481 return 1;
482
483 return 0;
484 }
485
486 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
487 {
488 u8 mode;
489
490 mode = hci_get_inquiry_mode(hdev);
491
492 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
493 }
494
495 static void hci_setup_event_mask(struct hci_dev *hdev)
496 {
497 /* The second byte is 0xff instead of 0x9f (two reserved bits
498 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
499 * command otherwise */
500 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
501
502 	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
503 * any event mask for pre 1.2 devices */
504 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
505 return;
506
507 events[4] |= 0x01; /* Flow Specification Complete */
508 events[4] |= 0x02; /* Inquiry Result with RSSI */
509 events[4] |= 0x04; /* Read Remote Extended Features Complete */
510 events[5] |= 0x08; /* Synchronous Connection Complete */
511 events[5] |= 0x10; /* Synchronous Connection Changed */
512
513 if (hdev->features[3] & LMP_RSSI_INQ)
514 events[4] |= 0x02; /* Inquiry Result with RSSI */
515
516 if (lmp_sniffsubr_capable(hdev))
517 events[5] |= 0x20; /* Sniff Subrating */
518
519 if (hdev->features[5] & LMP_PAUSE_ENC)
520 events[5] |= 0x80; /* Encryption Key Refresh Complete */
521
522 if (hdev->features[6] & LMP_EXT_INQ)
523 events[5] |= 0x40; /* Extended Inquiry Result */
524
525 if (lmp_no_flush_capable(hdev))
526 events[7] |= 0x01; /* Enhanced Flush Complete */
527
528 if (hdev->features[7] & LMP_LSTO)
529 events[6] |= 0x80; /* Link Supervision Timeout Changed */
530
531 if (lmp_ssp_capable(hdev)) {
532 events[6] |= 0x01; /* IO Capability Request */
533 events[6] |= 0x02; /* IO Capability Response */
534 events[6] |= 0x04; /* User Confirmation Request */
535 events[6] |= 0x08; /* User Passkey Request */
536 events[6] |= 0x10; /* Remote OOB Data Request */
537 events[6] |= 0x20; /* Simple Pairing Complete */
538 events[7] |= 0x04; /* User Passkey Notification */
539 events[7] |= 0x08; /* Keypress Notification */
540 events[7] |= 0x10; /* Remote Host Supported
541 * Features Notification */
542 }
543
544 if (lmp_le_capable(hdev))
545 events[7] |= 0x20; /* LE Meta-Event */
546
547 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
548 }
549
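/* BR/EDR controller initialization run once the local version is known:
 * program the event mask, enable SSP or clear the stored EIR, select the
 * inquiry mode and issue the remaining capability reads and writes. */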
550 static void hci_setup(struct hci_dev *hdev)
551 {
552 if (hdev->dev_type != HCI_BREDR)
553 return;
554
555 hci_setup_event_mask(hdev);
556
557 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
558 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
559
560 if (lmp_ssp_capable(hdev)) {
561 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
562 u8 mode = 0x01;
563 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
564 sizeof(mode), &mode);
565 } else {
566 struct hci_cp_write_eir cp;
567
568 memset(hdev->eir, 0, sizeof(hdev->eir));
569 memset(&cp, 0, sizeof(cp));
570
571 hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
572 }
573 }
574
575 if (hdev->features[3] & LMP_RSSI_INQ)
576 hci_setup_inquiry_mode(hdev);
577
578 if (hdev->features[7] & LMP_INQ_TX_PWR)
579 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
580
581 if (hdev->features[7] & LMP_EXTFEATURES) {
582 struct hci_cp_read_local_ext_features cp;
583
584 cp.page = 0x01;
585 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
586 &cp);
587 }
588
589 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
590 u8 enable = 1;
591 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
592 &enable);
593 }
594 }
595
596 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
597 {
598 struct hci_rp_read_local_version *rp = (void *) skb->data;
599
600 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
601
602 if (rp->status)
603 goto done;
604
605 hdev->hci_ver = rp->hci_ver;
606 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
607 hdev->lmp_ver = rp->lmp_ver;
608 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
609 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
610
611 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
612 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
613
614 if (test_bit(HCI_INIT, &hdev->flags))
615 hci_setup(hdev);
616
617 done:
618 hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
619 }
620
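/* Build the default link policy from the local LMP features and write
 * it to the controller. */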
621 static void hci_setup_link_policy(struct hci_dev *hdev)
622 {
623 struct hci_cp_write_def_link_policy cp;
624 u16 link_policy = 0;
625
626 if (lmp_rswitch_capable(hdev))
627 link_policy |= HCI_LP_RSWITCH;
628 if (hdev->features[0] & LMP_HOLD)
629 link_policy |= HCI_LP_HOLD;
630 if (lmp_sniff_capable(hdev))
631 link_policy |= HCI_LP_SNIFF;
632 if (hdev->features[1] & LMP_PARK)
633 link_policy |= HCI_LP_PARK;
634
635 cp.policy = cpu_to_le16(link_policy);
636 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
637 }
638
639 static void hci_cc_read_local_commands(struct hci_dev *hdev,
640 struct sk_buff *skb)
641 {
642 struct hci_rp_read_local_commands *rp = (void *) skb->data;
643
644 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
645
646 if (rp->status)
647 goto done;
648
649 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
650
651 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
652 hci_setup_link_policy(hdev);
653
654 done:
655 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
656 }
657
658 static void hci_cc_read_local_features(struct hci_dev *hdev,
659 struct sk_buff *skb)
660 {
661 struct hci_rp_read_local_features *rp = (void *) skb->data;
662
663 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
664
665 if (rp->status)
666 return;
667
668 memcpy(hdev->features, rp->features, 8);
669
670 /* Adjust default settings according to features
671 	 * supported by the device. */
672
673 if (hdev->features[0] & LMP_3SLOT)
674 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
675
676 if (hdev->features[0] & LMP_5SLOT)
677 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
678
679 if (hdev->features[1] & LMP_HV2) {
680 hdev->pkt_type |= (HCI_HV2);
681 hdev->esco_type |= (ESCO_HV2);
682 }
683
684 if (hdev->features[1] & LMP_HV3) {
685 hdev->pkt_type |= (HCI_HV3);
686 hdev->esco_type |= (ESCO_HV3);
687 }
688
689 if (lmp_esco_capable(hdev))
690 hdev->esco_type |= (ESCO_EV3);
691
692 if (hdev->features[4] & LMP_EV4)
693 hdev->esco_type |= (ESCO_EV4);
694
695 if (hdev->features[4] & LMP_EV5)
696 hdev->esco_type |= (ESCO_EV5);
697
698 if (hdev->features[5] & LMP_EDR_ESCO_2M)
699 hdev->esco_type |= (ESCO_2EV3);
700
701 if (hdev->features[5] & LMP_EDR_ESCO_3M)
702 hdev->esco_type |= (ESCO_3EV3);
703
704 if (hdev->features[5] & LMP_EDR_3S_ESCO)
705 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
706
707 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
708 hdev->features[0], hdev->features[1],
709 hdev->features[2], hdev->features[3],
710 hdev->features[4], hdev->features[5],
711 hdev->features[6], hdev->features[7]);
712 }
713
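/* Keep the controller's LE Host Supported setting in sync with the
 * HCI_LE_ENABLED flag; the command is only sent when the value would
 * actually change. */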
714 static void hci_set_le_support(struct hci_dev *hdev)
715 {
716 struct hci_cp_write_le_host_supported cp;
717
718 memset(&cp, 0, sizeof(cp));
719
720 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
721 cp.le = 1;
722 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
723 }
724
725 if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
726 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
727 &cp);
728 }
729
730 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
731 struct sk_buff *skb)
732 {
733 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
734
735 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
736
737 if (rp->status)
738 goto done;
739
740 switch (rp->page) {
741 case 0:
742 memcpy(hdev->features, rp->features, 8);
743 break;
744 case 1:
745 memcpy(hdev->host_features, rp->features, 8);
746 break;
747 }
748
749 if (test_bit(HCI_INIT, &hdev->flags) && lmp_le_capable(hdev))
750 hci_set_le_support(hdev);
751
752 done:
753 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
754 }
755
756 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
757 struct sk_buff *skb)
758 {
759 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
760
761 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
762
763 if (rp->status)
764 return;
765
766 hdev->flow_ctl_mode = rp->mode;
767
768 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
769 }
770
771 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
772 {
773 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
774
775 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
776
777 if (rp->status)
778 return;
779
780 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
781 hdev->sco_mtu = rp->sco_mtu;
782 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
783 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
784
785 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
786 hdev->sco_mtu = 64;
787 hdev->sco_pkts = 8;
788 }
789
790 hdev->acl_cnt = hdev->acl_pkts;
791 hdev->sco_cnt = hdev->sco_pkts;
792
793 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
794 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
795 }
796
797 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
798 {
799 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
800
801 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
802
803 if (!rp->status)
804 bacpy(&hdev->bdaddr, &rp->bdaddr);
805
806 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
807 }
808
809 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
810 struct sk_buff *skb)
811 {
812 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
813
814 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
815
816 if (rp->status)
817 return;
818
819 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
820 hdev->block_len = __le16_to_cpu(rp->block_len);
821 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
822
823 hdev->block_cnt = hdev->num_blocks;
824
825 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
826 hdev->block_cnt, hdev->block_len);
827
828 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
829 }
830
831 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
832 {
833 __u8 status = *((__u8 *) skb->data);
834
835 BT_DBG("%s status 0x%2.2x", hdev->name, status);
836
837 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
838 }
839
840 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
841 struct sk_buff *skb)
842 {
843 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
844
845 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
846
847 if (rp->status)
848 return;
849
850 hdev->amp_status = rp->amp_status;
851 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
852 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
853 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
854 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
855 hdev->amp_type = rp->amp_type;
856 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
857 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
858 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
859 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
860
861 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
862 }
863
864 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
865 struct sk_buff *skb)
866 {
867 __u8 status = *((__u8 *) skb->data);
868
869 BT_DBG("%s status 0x%2.2x", hdev->name, status);
870
871 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
872 }
873
874 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
875 {
876 __u8 status = *((__u8 *) skb->data);
877
878 BT_DBG("%s status 0x%2.2x", hdev->name, status);
879
880 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
881 }
882
883 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
884 struct sk_buff *skb)
885 {
886 __u8 status = *((__u8 *) skb->data);
887
888 BT_DBG("%s status 0x%2.2x", hdev->name, status);
889
890 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
891 }
892
893 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
894 struct sk_buff *skb)
895 {
896 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
897
898 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
899
900 if (!rp->status)
901 hdev->inq_tx_power = rp->tx_power;
902
903 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
904 }
905
906 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
907 {
908 __u8 status = *((__u8 *) skb->data);
909
910 BT_DBG("%s status 0x%2.2x", hdev->name, status);
911
912 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
913 }
914
915 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
916 {
917 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
918 struct hci_cp_pin_code_reply *cp;
919 struct hci_conn *conn;
920
921 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
922
923 hci_dev_lock(hdev);
924
925 if (test_bit(HCI_MGMT, &hdev->dev_flags))
926 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
927
928 if (rp->status)
929 goto unlock;
930
931 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
932 if (!cp)
933 goto unlock;
934
935 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
936 if (conn)
937 conn->pin_length = cp->pin_len;
938
939 unlock:
940 hci_dev_unlock(hdev);
941 }
942
943 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
944 {
945 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
946
947 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
948
949 hci_dev_lock(hdev);
950
951 if (test_bit(HCI_MGMT, &hdev->dev_flags))
952 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
953 rp->status);
954
955 hci_dev_unlock(hdev);
956 }
957
958 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
959 struct sk_buff *skb)
960 {
961 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
962
963 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
964
965 if (rp->status)
966 return;
967
968 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
969 hdev->le_pkts = rp->le_max_pkt;
970
971 hdev->le_cnt = hdev->le_pkts;
972
973 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
974
975 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
976 }
977
978 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
979 {
980 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
981
982 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
983
984 hci_dev_lock(hdev);
985
986 if (test_bit(HCI_MGMT, &hdev->dev_flags))
987 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
988 rp->status);
989
990 hci_dev_unlock(hdev);
991 }
992
993 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
994 struct sk_buff *skb)
995 {
996 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
997
998 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
999
1000 hci_dev_lock(hdev);
1001
1002 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1003 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1004 ACL_LINK, 0, rp->status);
1005
1006 hci_dev_unlock(hdev);
1007 }
1008
1009 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1010 {
1011 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1012
1013 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1014
1015 hci_dev_lock(hdev);
1016
1017 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1018 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1019 0, rp->status);
1020
1021 hci_dev_unlock(hdev);
1022 }
1023
1024 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1025 struct sk_buff *skb)
1026 {
1027 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1028
1029 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1030
1031 hci_dev_lock(hdev);
1032
1033 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1034 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1035 ACL_LINK, 0, rp->status);
1036
1037 hci_dev_unlock(hdev);
1038 }
1039
1040 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1041 struct sk_buff *skb)
1042 {
1043 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1044
1045 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1046
1047 hci_dev_lock(hdev);
1048 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1049 rp->randomizer, rp->status);
1050 hci_dev_unlock(hdev);
1051 }
1052
1053 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1054 {
1055 __u8 status = *((__u8 *) skb->data);
1056
1057 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1058
1059 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1060
1061 if (status) {
1062 hci_dev_lock(hdev);
1063 mgmt_start_discovery_failed(hdev, status);
1064 hci_dev_unlock(hdev);
1065 return;
1066 }
1067 }
1068
1069 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1070 struct sk_buff *skb)
1071 {
1072 struct hci_cp_le_set_scan_enable *cp;
1073 __u8 status = *((__u8 *) skb->data);
1074
1075 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1076
1077 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1078 if (!cp)
1079 return;
1080
1081 switch (cp->enable) {
1082 case LE_SCANNING_ENABLED:
1083 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1084
1085 if (status) {
1086 hci_dev_lock(hdev);
1087 mgmt_start_discovery_failed(hdev, status);
1088 hci_dev_unlock(hdev);
1089 return;
1090 }
1091
1092 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1093
1094 hci_dev_lock(hdev);
1095 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1096 hci_dev_unlock(hdev);
1097 break;
1098
1099 case LE_SCANNING_DISABLED:
1100 if (status) {
1101 hci_dev_lock(hdev);
1102 mgmt_stop_discovery_failed(hdev, status);
1103 hci_dev_unlock(hdev);
1104 return;
1105 }
1106
1107 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1108
1109 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
1110 hdev->discovery.state == DISCOVERY_FINDING) {
1111 mgmt_interleaved_discovery(hdev);
1112 } else {
1113 hci_dev_lock(hdev);
1114 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1115 hci_dev_unlock(hdev);
1116 }
1117
1118 break;
1119
1120 default:
1121 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1122 break;
1123 }
1124 }
1125
1126 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1127 {
1128 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1129
1130 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1131
1132 if (rp->status)
1133 return;
1134
1135 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1136 }
1137
1138 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1139 {
1140 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1141
1142 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1143
1144 if (rp->status)
1145 return;
1146
1147 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1148 }
1149
1150 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1151 struct sk_buff *skb)
1152 {
1153 struct hci_cp_write_le_host_supported *sent;
1154 __u8 status = *((__u8 *) skb->data);
1155
1156 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1157
1158 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1159 if (!sent)
1160 return;
1161
1162 if (!status) {
1163 if (sent->le)
1164 hdev->host_features[0] |= LMP_HOST_LE;
1165 else
1166 hdev->host_features[0] &= ~LMP_HOST_LE;
1167 }
1168
1169 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1170 !test_bit(HCI_INIT, &hdev->flags))
1171 mgmt_le_enable_complete(hdev, sent->le, status);
1172
1173 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1174 }
1175
1176 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1177 {
1178 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1179
1180 if (status) {
1181 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1182 hci_conn_check_pending(hdev);
1183 hci_dev_lock(hdev);
1184 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1185 mgmt_start_discovery_failed(hdev, status);
1186 hci_dev_unlock(hdev);
1187 return;
1188 }
1189
1190 set_bit(HCI_INQUIRY, &hdev->flags);
1191
1192 hci_dev_lock(hdev);
1193 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1194 hci_dev_unlock(hdev);
1195 }
1196
1197 static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1198 {
1199 struct hci_cp_create_conn *cp;
1200 struct hci_conn *conn;
1201
1202 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1203
1204 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1205 if (!cp)
1206 return;
1207
1208 hci_dev_lock(hdev);
1209
1210 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1211
1212 BT_DBG("%s bdaddr %s hcon %p", hdev->name, batostr(&cp->bdaddr), conn);
1213
1214 if (status) {
1215 if (conn && conn->state == BT_CONNECT) {
1216 if (status != 0x0c || conn->attempt > 2) {
1217 conn->state = BT_CLOSED;
1218 hci_proto_connect_cfm(conn, status);
1219 hci_conn_del(conn);
1220 } else
1221 conn->state = BT_CONNECT2;
1222 }
1223 } else {
1224 if (!conn) {
1225 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1226 if (conn) {
1227 conn->out = true;
1228 conn->link_mode |= HCI_LM_MASTER;
1229 } else
1230 BT_ERR("No memory for new connection");
1231 }
1232 }
1233
1234 hci_dev_unlock(hdev);
1235 }
1236
1237 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1238 {
1239 struct hci_cp_add_sco *cp;
1240 struct hci_conn *acl, *sco;
1241 __u16 handle;
1242
1243 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1244
1245 if (!status)
1246 return;
1247
1248 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1249 if (!cp)
1250 return;
1251
1252 handle = __le16_to_cpu(cp->handle);
1253
1254 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1255
1256 hci_dev_lock(hdev);
1257
1258 acl = hci_conn_hash_lookup_handle(hdev, handle);
1259 if (acl) {
1260 sco = acl->link;
1261 if (sco) {
1262 sco->state = BT_CLOSED;
1263
1264 hci_proto_connect_cfm(sco, status);
1265 hci_conn_del(sco);
1266 }
1267 }
1268
1269 hci_dev_unlock(hdev);
1270 }
1271
1272 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1273 {
1274 struct hci_cp_auth_requested *cp;
1275 struct hci_conn *conn;
1276
1277 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1278
1279 if (!status)
1280 return;
1281
1282 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1283 if (!cp)
1284 return;
1285
1286 hci_dev_lock(hdev);
1287
1288 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1289 if (conn) {
1290 if (conn->state == BT_CONFIG) {
1291 hci_proto_connect_cfm(conn, status);
1292 hci_conn_put(conn);
1293 }
1294 }
1295
1296 hci_dev_unlock(hdev);
1297 }
1298
1299 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1300 {
1301 struct hci_cp_set_conn_encrypt *cp;
1302 struct hci_conn *conn;
1303
1304 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1305
1306 if (!status)
1307 return;
1308
1309 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1310 if (!cp)
1311 return;
1312
1313 hci_dev_lock(hdev);
1314
1315 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1316 if (conn) {
1317 if (conn->state == BT_CONFIG) {
1318 hci_proto_connect_cfm(conn, status);
1319 hci_conn_put(conn);
1320 }
1321 }
1322
1323 hci_dev_unlock(hdev);
1324 }
1325
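/* Decide whether an outgoing connection in BT_CONFIG still needs
 * authentication before it can be reported as connected. */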
1326 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1327 struct hci_conn *conn)
1328 {
1329 if (conn->state != BT_CONFIG || !conn->out)
1330 return 0;
1331
1332 if (conn->pending_sec_level == BT_SECURITY_SDP)
1333 return 0;
1334
1335 /* Only request authentication for SSP connections or non-SSP
1336 * devices with sec_level HIGH or if MITM protection is requested */
1337 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1338 conn->pending_sec_level != BT_SECURITY_HIGH)
1339 return 0;
1340
1341 return 1;
1342 }
1343
1344 static int hci_resolve_name(struct hci_dev *hdev,
1345 struct inquiry_entry *e)
1346 {
1347 struct hci_cp_remote_name_req cp;
1348
1349 memset(&cp, 0, sizeof(cp));
1350
1351 bacpy(&cp.bdaddr, &e->data.bdaddr);
1352 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1353 cp.pscan_mode = e->data.pscan_mode;
1354 cp.clock_offset = e->data.clock_offset;
1355
1356 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1357 }
1358
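/* Send a remote name request for the next inquiry cache entry that
 * still needs resolving; returns true if a request was issued. */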
1359 static bool hci_resolve_next_name(struct hci_dev *hdev)
1360 {
1361 struct discovery_state *discov = &hdev->discovery;
1362 struct inquiry_entry *e;
1363
1364 if (list_empty(&discov->resolve))
1365 return false;
1366
1367 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1368 if (hci_resolve_name(hdev, e) == 0) {
1369 e->name_state = NAME_PENDING;
1370 return true;
1371 }
1372
1373 return false;
1374 }
1375
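/* Called when a remote name becomes known (or the request fails):
 * update the management interface and the inquiry cache entry, then
 * move on to the next pending name or finish discovery. */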
1376 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1377 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1378 {
1379 struct discovery_state *discov = &hdev->discovery;
1380 struct inquiry_entry *e;
1381
1382 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1383 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
1384 name_len, conn->dev_class);
1385
1386 if (discov->state == DISCOVERY_STOPPED)
1387 return;
1388
1389 if (discov->state == DISCOVERY_STOPPING)
1390 goto discov_complete;
1391
1392 if (discov->state != DISCOVERY_RESOLVING)
1393 return;
1394
1395 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1396 if (e) {
1397 e->name_state = NAME_KNOWN;
1398 list_del(&e->list);
1399 if (name)
1400 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1401 e->data.rssi, name, name_len);
1402 }
1403
1404 if (hci_resolve_next_name(hdev))
1405 return;
1406
1407 discov_complete:
1408 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1409 }
1410
1411 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1412 {
1413 struct hci_cp_remote_name_req *cp;
1414 struct hci_conn *conn;
1415
1416 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1417
1418 	/* If successful, wait for the name req complete event before
1419 * checking for the need to do authentication */
1420 if (!status)
1421 return;
1422
1423 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1424 if (!cp)
1425 return;
1426
1427 hci_dev_lock(hdev);
1428
1429 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1430
1431 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1432 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1433
1434 if (!conn)
1435 goto unlock;
1436
1437 if (!hci_outgoing_auth_needed(hdev, conn))
1438 goto unlock;
1439
1440 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1441 struct hci_cp_auth_requested cp;
1442 cp.handle = __cpu_to_le16(conn->handle);
1443 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1444 }
1445
1446 unlock:
1447 hci_dev_unlock(hdev);
1448 }
1449
1450 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1451 {
1452 struct hci_cp_read_remote_features *cp;
1453 struct hci_conn *conn;
1454
1455 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1456
1457 if (!status)
1458 return;
1459
1460 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1461 if (!cp)
1462 return;
1463
1464 hci_dev_lock(hdev);
1465
1466 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1467 if (conn) {
1468 if (conn->state == BT_CONFIG) {
1469 hci_proto_connect_cfm(conn, status);
1470 hci_conn_put(conn);
1471 }
1472 }
1473
1474 hci_dev_unlock(hdev);
1475 }
1476
1477 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1478 {
1479 struct hci_cp_read_remote_ext_features *cp;
1480 struct hci_conn *conn;
1481
1482 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1483
1484 if (!status)
1485 return;
1486
1487 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1488 if (!cp)
1489 return;
1490
1491 hci_dev_lock(hdev);
1492
1493 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1494 if (conn) {
1495 if (conn->state == BT_CONFIG) {
1496 hci_proto_connect_cfm(conn, status);
1497 hci_conn_put(conn);
1498 }
1499 }
1500
1501 hci_dev_unlock(hdev);
1502 }
1503
1504 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1505 {
1506 struct hci_cp_setup_sync_conn *cp;
1507 struct hci_conn *acl, *sco;
1508 __u16 handle;
1509
1510 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1511
1512 if (!status)
1513 return;
1514
1515 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1516 if (!cp)
1517 return;
1518
1519 handle = __le16_to_cpu(cp->handle);
1520
1521 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1522
1523 hci_dev_lock(hdev);
1524
1525 acl = hci_conn_hash_lookup_handle(hdev, handle);
1526 if (acl) {
1527 sco = acl->link;
1528 if (sco) {
1529 sco->state = BT_CLOSED;
1530
1531 hci_proto_connect_cfm(sco, status);
1532 hci_conn_del(sco);
1533 }
1534 }
1535
1536 hci_dev_unlock(hdev);
1537 }
1538
1539 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1540 {
1541 struct hci_cp_sniff_mode *cp;
1542 struct hci_conn *conn;
1543
1544 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1545
1546 if (!status)
1547 return;
1548
1549 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1550 if (!cp)
1551 return;
1552
1553 hci_dev_lock(hdev);
1554
1555 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1556 if (conn) {
1557 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1558
1559 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1560 hci_sco_setup(conn, status);
1561 }
1562
1563 hci_dev_unlock(hdev);
1564 }
1565
1566 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1567 {
1568 struct hci_cp_exit_sniff_mode *cp;
1569 struct hci_conn *conn;
1570
1571 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1572
1573 if (!status)
1574 return;
1575
1576 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1577 if (!cp)
1578 return;
1579
1580 hci_dev_lock(hdev);
1581
1582 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1583 if (conn) {
1584 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1585
1586 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1587 hci_sco_setup(conn, status);
1588 }
1589
1590 hci_dev_unlock(hdev);
1591 }
1592
1593 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1594 {
1595 struct hci_cp_disconnect *cp;
1596 struct hci_conn *conn;
1597
1598 if (!status)
1599 return;
1600
1601 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1602 if (!cp)
1603 return;
1604
1605 hci_dev_lock(hdev);
1606
1607 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1608 if (conn)
1609 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1610 conn->dst_type, status);
1611
1612 hci_dev_unlock(hdev);
1613 }
1614
1615 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1616 {
1617 struct hci_conn *conn;
1618
1619 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1620
1621 if (status) {
1622 hci_dev_lock(hdev);
1623
1624 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1625 if (!conn) {
1626 hci_dev_unlock(hdev);
1627 return;
1628 }
1629
1630 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&conn->dst),
1631 conn);
1632
1633 conn->state = BT_CLOSED;
1634 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1635 conn->dst_type, status);
1636 hci_proto_connect_cfm(conn, status);
1637 hci_conn_del(conn);
1638
1639 hci_dev_unlock(hdev);
1640 }
1641 }
1642
1643 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1644 {
1645 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1646 }
1647
1648 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1649 {
1650 __u8 status = *((__u8 *) skb->data);
1651 struct discovery_state *discov = &hdev->discovery;
1652 struct inquiry_entry *e;
1653
1654 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1655
1656 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1657
1658 hci_conn_check_pending(hdev);
1659
1660 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1661 return;
1662
1663 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1664 return;
1665
1666 hci_dev_lock(hdev);
1667
1668 if (discov->state != DISCOVERY_FINDING)
1669 goto unlock;
1670
1671 if (list_empty(&discov->resolve)) {
1672 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1673 goto unlock;
1674 }
1675
1676 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1677 if (e && hci_resolve_name(hdev, e) == 0) {
1678 e->name_state = NAME_PENDING;
1679 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1680 } else {
1681 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1682 }
1683
1684 unlock:
1685 hci_dev_unlock(hdev);
1686 }
1687
1688 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1689 {
1690 struct inquiry_data data;
1691 struct inquiry_info *info = (void *) (skb->data + 1);
1692 int num_rsp = *((__u8 *) skb->data);
1693
1694 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1695
1696 if (!num_rsp)
1697 return;
1698
1699 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1700 return;
1701
1702 hci_dev_lock(hdev);
1703
1704 for (; num_rsp; num_rsp--, info++) {
1705 bool name_known, ssp;
1706
1707 bacpy(&data.bdaddr, &info->bdaddr);
1708 data.pscan_rep_mode = info->pscan_rep_mode;
1709 data.pscan_period_mode = info->pscan_period_mode;
1710 data.pscan_mode = info->pscan_mode;
1711 memcpy(data.dev_class, info->dev_class, 3);
1712 data.clock_offset = info->clock_offset;
1713 data.rssi = 0x00;
1714 data.ssp_mode = 0x00;
1715
1716 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1717 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1718 info->dev_class, 0, !name_known, ssp, NULL,
1719 0);
1720 }
1721
1722 hci_dev_unlock(hdev);
1723 }
1724
1725 static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1726 {
1727 struct hci_ev_conn_complete *ev = (void *) skb->data;
1728 struct hci_conn *conn;
1729
1730 BT_DBG("%s", hdev->name);
1731
1732 hci_dev_lock(hdev);
1733
1734 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1735 if (!conn) {
1736 if (ev->link_type != SCO_LINK)
1737 goto unlock;
1738
1739 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1740 if (!conn)
1741 goto unlock;
1742
1743 conn->type = SCO_LINK;
1744 }
1745
1746 if (!ev->status) {
1747 conn->handle = __le16_to_cpu(ev->handle);
1748
1749 if (conn->type == ACL_LINK) {
1750 conn->state = BT_CONFIG;
1751 hci_conn_hold(conn);
1752 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1753 } else
1754 conn->state = BT_CONNECTED;
1755
1756 hci_conn_hold_device(conn);
1757 hci_conn_add_sysfs(conn);
1758
1759 if (test_bit(HCI_AUTH, &hdev->flags))
1760 conn->link_mode |= HCI_LM_AUTH;
1761
1762 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1763 conn->link_mode |= HCI_LM_ENCRYPT;
1764
1765 /* Get remote features */
1766 if (conn->type == ACL_LINK) {
1767 struct hci_cp_read_remote_features cp;
1768 cp.handle = ev->handle;
1769 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1770 sizeof(cp), &cp);
1771 }
1772
1773 /* Set packet type for incoming connection */
1774 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1775 struct hci_cp_change_conn_ptype cp;
1776 cp.handle = ev->handle;
1777 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1778 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
1779 &cp);
1780 }
1781 } else {
1782 conn->state = BT_CLOSED;
1783 if (conn->type == ACL_LINK)
1784 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1785 conn->dst_type, ev->status);
1786 }
1787
1788 if (conn->type == ACL_LINK)
1789 hci_sco_setup(conn, ev->status);
1790
1791 if (ev->status) {
1792 hci_proto_connect_cfm(conn, ev->status);
1793 hci_conn_del(conn);
1794 } else if (ev->link_type != ACL_LINK)
1795 hci_proto_connect_cfm(conn, ev->status);
1796
1797 unlock:
1798 hci_dev_unlock(hdev);
1799
1800 hci_conn_check_pending(hdev);
1801 }
1802
1803 static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1804 {
1805 struct hci_ev_conn_request *ev = (void *) skb->data;
1806 int mask = hdev->link_mode;
1807
1808 BT_DBG("%s bdaddr %s type 0x%x", hdev->name, batostr(&ev->bdaddr),
1809 ev->link_type);
1810
1811 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1812
1813 if ((mask & HCI_LM_ACCEPT) &&
1814 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1815 /* Connection accepted */
1816 struct inquiry_entry *ie;
1817 struct hci_conn *conn;
1818
1819 hci_dev_lock(hdev);
1820
1821 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1822 if (ie)
1823 memcpy(ie->data.dev_class, ev->dev_class, 3);
1824
1825 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
1826 &ev->bdaddr);
1827 if (!conn) {
1828 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1829 if (!conn) {
1830 BT_ERR("No memory for new connection");
1831 hci_dev_unlock(hdev);
1832 return;
1833 }
1834 }
1835
1836 memcpy(conn->dev_class, ev->dev_class, 3);
1837 conn->state = BT_CONNECT;
1838
1839 hci_dev_unlock(hdev);
1840
1841 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1842 struct hci_cp_accept_conn_req cp;
1843
1844 bacpy(&cp.bdaddr, &ev->bdaddr);
1845
1846 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1847 cp.role = 0x00; /* Become master */
1848 else
1849 cp.role = 0x01; /* Remain slave */
1850
1851 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
1852 &cp);
1853 } else {
1854 struct hci_cp_accept_sync_conn_req cp;
1855
1856 bacpy(&cp.bdaddr, &ev->bdaddr);
1857 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1858
1859 cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1860 cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
1861 cp.max_latency = __constant_cpu_to_le16(0xffff);
1862 cp.content_format = cpu_to_le16(hdev->voice_setting);
1863 cp.retrans_effort = 0xff;
1864
1865 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1866 sizeof(cp), &cp);
1867 }
1868 } else {
1869 /* Connection rejected */
1870 struct hci_cp_reject_conn_req cp;
1871
1872 bacpy(&cp.bdaddr, &ev->bdaddr);
1873 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1874 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1875 }
1876 }
1877
1878 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1879 {
1880 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1881 struct hci_conn *conn;
1882
1883 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1884
1885 hci_dev_lock(hdev);
1886
1887 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1888 if (!conn)
1889 goto unlock;
1890
1891 if (ev->status == 0)
1892 conn->state = BT_CLOSED;
1893
1894 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1895 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1896 if (ev->status)
1897 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1898 conn->dst_type, ev->status);
1899 else
1900 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1901 conn->dst_type);
1902 }
1903
1904 if (ev->status == 0) {
1905 if (conn->type == ACL_LINK && conn->flush_key)
1906 hci_remove_link_key(hdev, &conn->dst);
1907 hci_proto_disconn_cfm(conn, ev->reason);
1908 hci_conn_del(conn);
1909 }
1910
1911 unlock:
1912 hci_dev_unlock(hdev);
1913 }
1914
1915 static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1916 {
1917 struct hci_ev_auth_complete *ev = (void *) skb->data;
1918 struct hci_conn *conn;
1919
1920 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
1921
1922 hci_dev_lock(hdev);
1923
1924 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1925 if (!conn)
1926 goto unlock;
1927
1928 if (!ev->status) {
1929 if (!hci_conn_ssp_enabled(conn) &&
1930 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1931 BT_INFO("re-auth of legacy device is not possible.");
1932 } else {
1933 conn->link_mode |= HCI_LM_AUTH;
1934 conn->sec_level = conn->pending_sec_level;
1935 }
1936 } else {
1937 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1938 ev->status);
1939 }
1940
1941 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1942 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1943
1944 if (conn->state == BT_CONFIG) {
1945 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1946 struct hci_cp_set_conn_encrypt cp;
1947 cp.handle = ev->handle;
1948 cp.encrypt = 0x01;
1949 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1950 &cp);
1951 } else {
1952 conn->state = BT_CONNECTED;
1953 hci_proto_connect_cfm(conn, ev->status);
1954 hci_conn_put(conn);
1955 }
1956 } else {
1957 hci_auth_cfm(conn, ev->status);
1958
1959 hci_conn_hold(conn);
1960 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1961 hci_conn_put(conn);
1962 }
1963
1964 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1965 if (!ev->status) {
1966 struct hci_cp_set_conn_encrypt cp;
1967 cp.handle = ev->handle;
1968 cp.encrypt = 0x01;
1969 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1970 &cp);
1971 } else {
1972 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1973 hci_encrypt_cfm(conn, ev->status, 0x00);
1974 }
1975 }
1976
1977 unlock:
1978 hci_dev_unlock(hdev);
1979 }
1980
1981 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1982 {
1983 struct hci_ev_remote_name *ev = (void *) skb->data;
1984 struct hci_conn *conn;
1985
1986 BT_DBG("%s", hdev->name);
1987
1988 hci_conn_check_pending(hdev);
1989
1990 hci_dev_lock(hdev);
1991
1992 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1993
1994 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1995 goto check_auth;
1996
1997 if (ev->status == 0)
1998 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1999 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2000 else
2001 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2002
2003 check_auth:
2004 if (!conn)
2005 goto unlock;
2006
2007 if (!hci_outgoing_auth_needed(hdev, conn))
2008 goto unlock;
2009
2010 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2011 struct hci_cp_auth_requested cp;
2012 cp.handle = __cpu_to_le16(conn->handle);
2013 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2014 }
2015
2016 unlock:
2017 hci_dev_unlock(hdev);
2018 }
2019
2020 static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2021 {
2022 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2023 struct hci_conn *conn;
2024
2025 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2026
2027 hci_dev_lock(hdev);
2028
2029 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2030 if (conn) {
2031 if (!ev->status) {
2032 if (ev->encrypt) {
2033 /* Encryption implies authentication */
2034 conn->link_mode |= HCI_LM_AUTH;
2035 conn->link_mode |= HCI_LM_ENCRYPT;
2036 conn->sec_level = conn->pending_sec_level;
2037 } else
2038 conn->link_mode &= ~HCI_LM_ENCRYPT;
2039 }
2040
2041 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2042
2043 if (ev->status && conn->state == BT_CONNECTED) {
2044 hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
2045 hci_conn_put(conn);
2046 goto unlock;
2047 }
2048
2049 if (conn->state == BT_CONFIG) {
2050 if (!ev->status)
2051 conn->state = BT_CONNECTED;
2052
2053 hci_proto_connect_cfm(conn, ev->status);
2054 hci_conn_put(conn);
2055 } else
2056 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2057 }
2058
2059 unlock:
2060 hci_dev_unlock(hdev);
2061 }
2062
2063 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2064 struct sk_buff *skb)
2065 {
2066 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2067 struct hci_conn *conn;
2068
2069 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2070
2071 hci_dev_lock(hdev);
2072
2073 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2074 if (conn) {
2075 if (!ev->status)
2076 conn->link_mode |= HCI_LM_SECURE;
2077
2078 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2079
2080 hci_key_change_cfm(conn, ev->status);
2081 }
2082
2083 hci_dev_unlock(hdev);
2084 }
2085
2086 static void hci_remote_features_evt(struct hci_dev *hdev,
2087 struct sk_buff *skb)
2088 {
2089 struct hci_ev_remote_features *ev = (void *) skb->data;
2090 struct hci_conn *conn;
2091
2092 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2093
2094 hci_dev_lock(hdev);
2095
2096 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2097 if (!conn)
2098 goto unlock;
2099
2100 if (!ev->status)
2101 memcpy(conn->features, ev->features, 8);
2102
2103 if (conn->state != BT_CONFIG)
2104 goto unlock;
2105
2106 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2107 struct hci_cp_read_remote_ext_features cp;
2108 cp.handle = ev->handle;
2109 cp.page = 0x01;
2110 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2111 sizeof(cp), &cp);
2112 goto unlock;
2113 }
2114
2115 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2116 struct hci_cp_remote_name_req cp;
2117 memset(&cp, 0, sizeof(cp));
2118 bacpy(&cp.bdaddr, &conn->dst);
2119 cp.pscan_rep_mode = 0x02;
2120 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2121 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2122 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2123 				      conn->dst_type, 0, NULL, 0,
2124 				      conn->dev_class);
	}
2125
2126 if (!hci_outgoing_auth_needed(hdev, conn)) {
2127 conn->state = BT_CONNECTED;
2128 hci_proto_connect_cfm(conn, ev->status);
2129 hci_conn_put(conn);
2130 }
2131
2132 unlock:
2133 hci_dev_unlock(hdev);
2134 }
2135
2136 static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2137 {
2138 BT_DBG("%s", hdev->name);
2139 }
2140
2141 static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
2142 struct sk_buff *skb)
2143 {
2144 BT_DBG("%s", hdev->name);
2145 }
2146
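/* Handle the HCI Command Complete event: strip the event header,
 * dispatch to the hci_cc_* handler matching the completed opcode,
 * stop the command timer (except for HCI_OP_NOP) and kick the
 * command queue again when the controller reports free command
 * slots. */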
2147 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2148 {
2149 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2150 __u16 opcode;
2151
2152 skb_pull(skb, sizeof(*ev));
2153
2154 opcode = __le16_to_cpu(ev->opcode);
2155
2156 switch (opcode) {
2157 case HCI_OP_INQUIRY_CANCEL:
2158 hci_cc_inquiry_cancel(hdev, skb);
2159 break;
2160
2161 case HCI_OP_PERIODIC_INQ:
2162 hci_cc_periodic_inq(hdev, skb);
2163 break;
2164
2165 case HCI_OP_EXIT_PERIODIC_INQ:
2166 hci_cc_exit_periodic_inq(hdev, skb);
2167 break;
2168
2169 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2170 hci_cc_remote_name_req_cancel(hdev, skb);
2171 break;
2172
2173 case HCI_OP_ROLE_DISCOVERY:
2174 hci_cc_role_discovery(hdev, skb);
2175 break;
2176
2177 case HCI_OP_READ_LINK_POLICY:
2178 hci_cc_read_link_policy(hdev, skb);
2179 break;
2180
2181 case HCI_OP_WRITE_LINK_POLICY:
2182 hci_cc_write_link_policy(hdev, skb);
2183 break;
2184
2185 case HCI_OP_READ_DEF_LINK_POLICY:
2186 hci_cc_read_def_link_policy(hdev, skb);
2187 break;
2188
2189 case HCI_OP_WRITE_DEF_LINK_POLICY:
2190 hci_cc_write_def_link_policy(hdev, skb);
2191 break;
2192
2193 case HCI_OP_RESET:
2194 hci_cc_reset(hdev, skb);
2195 break;
2196
2197 case HCI_OP_WRITE_LOCAL_NAME:
2198 hci_cc_write_local_name(hdev, skb);
2199 break;
2200
2201 case HCI_OP_READ_LOCAL_NAME:
2202 hci_cc_read_local_name(hdev, skb);
2203 break;
2204
2205 case HCI_OP_WRITE_AUTH_ENABLE:
2206 hci_cc_write_auth_enable(hdev, skb);
2207 break;
2208
2209 case HCI_OP_WRITE_ENCRYPT_MODE:
2210 hci_cc_write_encrypt_mode(hdev, skb);
2211 break;
2212
2213 case HCI_OP_WRITE_SCAN_ENABLE:
2214 hci_cc_write_scan_enable(hdev, skb);
2215 break;
2216
2217 case HCI_OP_READ_CLASS_OF_DEV:
2218 hci_cc_read_class_of_dev(hdev, skb);
2219 break;
2220
2221 case HCI_OP_WRITE_CLASS_OF_DEV:
2222 hci_cc_write_class_of_dev(hdev, skb);
2223 break;
2224
2225 case HCI_OP_READ_VOICE_SETTING:
2226 hci_cc_read_voice_setting(hdev, skb);
2227 break;
2228
2229 case HCI_OP_WRITE_VOICE_SETTING:
2230 hci_cc_write_voice_setting(hdev, skb);
2231 break;
2232
2233 case HCI_OP_HOST_BUFFER_SIZE:
2234 hci_cc_host_buffer_size(hdev, skb);
2235 break;
2236
2237 case HCI_OP_WRITE_SSP_MODE:
2238 hci_cc_write_ssp_mode(hdev, skb);
2239 break;
2240
2241 case HCI_OP_READ_LOCAL_VERSION:
2242 hci_cc_read_local_version(hdev, skb);
2243 break;
2244
2245 case HCI_OP_READ_LOCAL_COMMANDS:
2246 hci_cc_read_local_commands(hdev, skb);
2247 break;
2248
2249 case HCI_OP_READ_LOCAL_FEATURES:
2250 hci_cc_read_local_features(hdev, skb);
2251 break;
2252
2253 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2254 hci_cc_read_local_ext_features(hdev, skb);
2255 break;
2256
2257 case HCI_OP_READ_BUFFER_SIZE:
2258 hci_cc_read_buffer_size(hdev, skb);
2259 break;
2260
2261 case HCI_OP_READ_BD_ADDR:
2262 hci_cc_read_bd_addr(hdev, skb);
2263 break;
2264
2265 case HCI_OP_READ_DATA_BLOCK_SIZE:
2266 hci_cc_read_data_block_size(hdev, skb);
2267 break;
2268
2269 case HCI_OP_WRITE_CA_TIMEOUT:
2270 hci_cc_write_ca_timeout(hdev, skb);
2271 break;
2272
2273 case HCI_OP_READ_FLOW_CONTROL_MODE:
2274 hci_cc_read_flow_control_mode(hdev, skb);
2275 break;
2276
2277 case HCI_OP_READ_LOCAL_AMP_INFO:
2278 hci_cc_read_local_amp_info(hdev, skb);
2279 break;
2280
2281 case HCI_OP_DELETE_STORED_LINK_KEY:
2282 hci_cc_delete_stored_link_key(hdev, skb);
2283 break;
2284
2285 case HCI_OP_SET_EVENT_MASK:
2286 hci_cc_set_event_mask(hdev, skb);
2287 break;
2288
2289 case HCI_OP_WRITE_INQUIRY_MODE:
2290 hci_cc_write_inquiry_mode(hdev, skb);
2291 break;
2292
2293 case HCI_OP_READ_INQ_RSP_TX_POWER:
2294 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2295 break;
2296
2297 case HCI_OP_SET_EVENT_FLT:
2298 hci_cc_set_event_flt(hdev, skb);
2299 break;
2300
2301 case HCI_OP_PIN_CODE_REPLY:
2302 hci_cc_pin_code_reply(hdev, skb);
2303 break;
2304
2305 case HCI_OP_PIN_CODE_NEG_REPLY:
2306 hci_cc_pin_code_neg_reply(hdev, skb);
2307 break;
2308
2309 case HCI_OP_READ_LOCAL_OOB_DATA:
2310 hci_cc_read_local_oob_data_reply(hdev, skb);
2311 break;
2312
2313 case HCI_OP_LE_READ_BUFFER_SIZE:
2314 hci_cc_le_read_buffer_size(hdev, skb);
2315 break;
2316
2317 case HCI_OP_USER_CONFIRM_REPLY:
2318 hci_cc_user_confirm_reply(hdev, skb);
2319 break;
2320
2321 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2322 hci_cc_user_confirm_neg_reply(hdev, skb);
2323 break;
2324
2325 case HCI_OP_USER_PASSKEY_REPLY:
2326 hci_cc_user_passkey_reply(hdev, skb);
2327 break;
2328
2329 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2330 hci_cc_user_passkey_neg_reply(hdev, skb);
2331 break;
2332
2333 case HCI_OP_LE_SET_SCAN_PARAM:
2334 hci_cc_le_set_scan_param(hdev, skb);
2335 break;
2336
2337 case HCI_OP_LE_SET_SCAN_ENABLE:
2338 hci_cc_le_set_scan_enable(hdev, skb);
2339 break;
2340
2341 case HCI_OP_LE_LTK_REPLY:
2342 hci_cc_le_ltk_reply(hdev, skb);
2343 break;
2344
2345 case HCI_OP_LE_LTK_NEG_REPLY:
2346 hci_cc_le_ltk_neg_reply(hdev, skb);
2347 break;
2348
2349 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2350 hci_cc_write_le_host_supported(hdev, skb);
2351 break;
2352
2353 default:
2354 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2355 break;
2356 }
2357
2358 if (ev->opcode != HCI_OP_NOP)
2359 del_timer(&hdev->cmd_timer);
2360
2361 if (ev->ncmd) {
2362 atomic_set(&hdev->cmd_cnt, 1);
2363 if (!skb_queue_empty(&hdev->cmd_q))
2364 queue_work(hdev->workqueue, &hdev->cmd_work);
2365 }
2366 }
2367
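/* Handle the HCI Command Status event: forward the status to the
 * hci_cs_* handler for the pending opcode and, as with Command
 * Complete, restart command processing when the controller reports
 * free command slots (unless a reset is in progress). */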
2368 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2369 {
2370 struct hci_ev_cmd_status *ev = (void *) skb->data;
2371 __u16 opcode;
2372
2373 skb_pull(skb, sizeof(*ev));
2374
2375 opcode = __le16_to_cpu(ev->opcode);
2376
2377 switch (opcode) {
2378 case HCI_OP_INQUIRY:
2379 hci_cs_inquiry(hdev, ev->status);
2380 break;
2381
2382 case HCI_OP_CREATE_CONN:
2383 hci_cs_create_conn(hdev, ev->status);
2384 break;
2385
2386 case HCI_OP_ADD_SCO:
2387 hci_cs_add_sco(hdev, ev->status);
2388 break;
2389
2390 case HCI_OP_AUTH_REQUESTED:
2391 hci_cs_auth_requested(hdev, ev->status);
2392 break;
2393
2394 case HCI_OP_SET_CONN_ENCRYPT:
2395 hci_cs_set_conn_encrypt(hdev, ev->status);
2396 break;
2397
2398 case HCI_OP_REMOTE_NAME_REQ:
2399 hci_cs_remote_name_req(hdev, ev->status);
2400 break;
2401
2402 case HCI_OP_READ_REMOTE_FEATURES:
2403 hci_cs_read_remote_features(hdev, ev->status);
2404 break;
2405
2406 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2407 hci_cs_read_remote_ext_features(hdev, ev->status);
2408 break;
2409
2410 case HCI_OP_SETUP_SYNC_CONN:
2411 hci_cs_setup_sync_conn(hdev, ev->status);
2412 break;
2413
2414 case HCI_OP_SNIFF_MODE:
2415 hci_cs_sniff_mode(hdev, ev->status);
2416 break;
2417
2418 case HCI_OP_EXIT_SNIFF_MODE:
2419 hci_cs_exit_sniff_mode(hdev, ev->status);
2420 break;
2421
2422 case HCI_OP_DISCONNECT:
2423 hci_cs_disconnect(hdev, ev->status);
2424 break;
2425
2426 case HCI_OP_LE_CREATE_CONN:
2427 hci_cs_le_create_conn(hdev, ev->status);
2428 break;
2429
2430 case HCI_OP_LE_START_ENC:
2431 hci_cs_le_start_enc(hdev, ev->status);
2432 break;
2433
2434 default:
2435 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2436 break;
2437 }
2438
2439 if (ev->opcode != HCI_OP_NOP)
2440 del_timer(&hdev->cmd_timer);
2441
2442 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2443 atomic_set(&hdev->cmd_cnt, 1);
2444 if (!skb_queue_empty(&hdev->cmd_q))
2445 queue_work(hdev->workqueue, &hdev->cmd_work);
2446 }
2447 }
2448
2449 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2450 {
2451 struct hci_ev_role_change *ev = (void *) skb->data;
2452 struct hci_conn *conn;
2453
2454 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2455
2456 hci_dev_lock(hdev);
2457
2458 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2459 if (conn) {
2460 if (!ev->status) {
2461 if (ev->role)
2462 conn->link_mode &= ~HCI_LM_MASTER;
2463 else
2464 conn->link_mode |= HCI_LM_MASTER;
2465 }
2466
2467 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2468
2469 hci_role_switch_cfm(conn, ev->status, ev->role);
2470 }
2471
2472 hci_dev_unlock(hdev);
2473 }
2474
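/* Number of Completed Packets: return the reported packet credits to
 * the per-link-type counters (ACL, SCO, or LE; LE credits fall back
 * to the ACL pool when the controller has no dedicated LE buffers)
 * and schedule the TX work queue. Only valid in packet-based flow
 * control mode. */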
2475 static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2476 {
2477 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2478 int i;
2479
2480 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2481 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2482 return;
2483 }
2484
2485 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2486 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2487 BT_DBG("%s bad parameters", hdev->name);
2488 return;
2489 }
2490
2491 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2492
2493 for (i = 0; i < ev->num_hndl; i++) {
2494 struct hci_comp_pkts_info *info = &ev->handles[i];
2495 struct hci_conn *conn;
2496 __u16 handle, count;
2497
2498 handle = __le16_to_cpu(info->handle);
2499 count = __le16_to_cpu(info->count);
2500
2501 conn = hci_conn_hash_lookup_handle(hdev, handle);
2502 if (!conn)
2503 continue;
2504
2505 conn->sent -= count;
2506
2507 switch (conn->type) {
2508 case ACL_LINK:
2509 hdev->acl_cnt += count;
2510 if (hdev->acl_cnt > hdev->acl_pkts)
2511 hdev->acl_cnt = hdev->acl_pkts;
2512 break;
2513
2514 case LE_LINK:
2515 if (hdev->le_pkts) {
2516 hdev->le_cnt += count;
2517 if (hdev->le_cnt > hdev->le_pkts)
2518 hdev->le_cnt = hdev->le_pkts;
2519 } else {
2520 hdev->acl_cnt += count;
2521 if (hdev->acl_cnt > hdev->acl_pkts)
2522 hdev->acl_cnt = hdev->acl_pkts;
2523 }
2524 break;
2525
2526 case SCO_LINK:
2527 hdev->sco_cnt += count;
2528 if (hdev->sco_cnt > hdev->sco_pkts)
2529 hdev->sco_cnt = hdev->sco_pkts;
2530 break;
2531
2532 default:
2533 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2534 break;
2535 }
2536 }
2537
2538 queue_work(hdev->workqueue, &hdev->tx_work);
2539 }
2540
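/* Number of Completed Data Blocks: block-based flow control variant
 * of the event above; returns data block credits to the controller
 * pool and reschedules transmission. */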
2541 static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
2542 {
2543 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2544 int i;
2545
2546 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2547 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2548 return;
2549 }
2550
2551 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2552 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2553 BT_DBG("%s bad parameters", hdev->name);
2554 return;
2555 }
2556
2557 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2558 ev->num_hndl);
2559
2560 for (i = 0; i < ev->num_hndl; i++) {
2561 struct hci_comp_blocks_info *info = &ev->handles[i];
2562 struct hci_conn *conn;
2563 __u16 handle, block_count;
2564
2565 handle = __le16_to_cpu(info->handle);
2566 block_count = __le16_to_cpu(info->blocks);
2567
2568 conn = hci_conn_hash_lookup_handle(hdev, handle);
2569 if (!conn)
2570 continue;
2571
2572 conn->sent -= block_count;
2573
2574 switch (conn->type) {
2575 case ACL_LINK:
2576 hdev->block_cnt += block_count;
2577 if (hdev->block_cnt > hdev->num_blocks)
2578 hdev->block_cnt = hdev->num_blocks;
2579 break;
2580
2581 default:
2582 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2583 break;
2584 }
2585 }
2586
2587 queue_work(hdev->workqueue, &hdev->tx_work);
2588 }
2589
2590 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2591 {
2592 struct hci_ev_mode_change *ev = (void *) skb->data;
2593 struct hci_conn *conn;
2594
2595 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2596
2597 hci_dev_lock(hdev);
2598
2599 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2600 if (conn) {
2601 conn->mode = ev->mode;
2602 conn->interval = __le16_to_cpu(ev->interval);
2603
2604 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2605 &conn->flags)) {
2606 if (conn->mode == HCI_CM_ACTIVE)
2607 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2608 else
2609 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2610 }
2611
2612 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2613 hci_sco_setup(conn, ev->status);
2614 }
2615
2616 hci_dev_unlock(hdev);
2617 }
2618
2619 static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2620 {
2621 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2622 struct hci_conn *conn;
2623
2624 BT_DBG("%s", hdev->name);
2625
2626 hci_dev_lock(hdev);
2627
2628 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2629 if (!conn)
2630 goto unlock;
2631
2632 if (conn->state == BT_CONNECTED) {
2633 hci_conn_hold(conn);
2634 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2635 hci_conn_put(conn);
2636 }
2637
2638 	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags)) {
2639 		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2640 			     sizeof(ev->bdaddr), &ev->bdaddr);
2641 	} else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2642 u8 secure;
2643
2644 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2645 secure = 1;
2646 else
2647 secure = 0;
2648
2649 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2650 }
2651
2652 unlock:
2653 hci_dev_unlock(hdev);
2654 }
2655
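/* Link Key Request: look up a stored link key for the peer and reply
 * with it, rejecting debug keys and keys too weak for the pending
 * security level; otherwise send a negative reply. Only handled when
 * the host is storing link keys. */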
2656 static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2657 {
2658 struct hci_ev_link_key_req *ev = (void *) skb->data;
2659 struct hci_cp_link_key_reply cp;
2660 struct hci_conn *conn;
2661 struct link_key *key;
2662
2663 BT_DBG("%s", hdev->name);
2664
2665 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2666 return;
2667
2668 hci_dev_lock(hdev);
2669
2670 key = hci_find_link_key(hdev, &ev->bdaddr);
2671 if (!key) {
2672 BT_DBG("%s link key not found for %s", hdev->name,
2673 batostr(&ev->bdaddr));
2674 goto not_found;
2675 }
2676
2677 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2678 batostr(&ev->bdaddr));
2679
2680 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2681 key->type == HCI_LK_DEBUG_COMBINATION) {
2682 BT_DBG("%s ignoring debug key", hdev->name);
2683 goto not_found;
2684 }
2685
2686 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2687 if (conn) {
2688 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2689 conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
2690 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2691 goto not_found;
2692 }
2693
2694 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2695 conn->pending_sec_level == BT_SECURITY_HIGH) {
2696 BT_DBG("%s ignoring key unauthenticated for high security",
2697 hdev->name);
2698 goto not_found;
2699 }
2700
2701 conn->key_type = key->type;
2702 conn->pin_length = key->pin_len;
2703 }
2704
2705 bacpy(&cp.bdaddr, &ev->bdaddr);
2706 memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);
2707
2708 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2709
2710 hci_dev_unlock(hdev);
2711
2712 return;
2713
2714 not_found:
2715 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2716 hci_dev_unlock(hdev);
2717 }
2718
2719 static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2720 {
2721 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2722 struct hci_conn *conn;
2723 u8 pin_len = 0;
2724
2725 BT_DBG("%s", hdev->name);
2726
2727 hci_dev_lock(hdev);
2728
2729 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2730 if (conn) {
2731 hci_conn_hold(conn);
2732 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2733 pin_len = conn->pin_length;
2734
2735 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2736 conn->key_type = ev->key_type;
2737
2738 hci_conn_put(conn);
2739 }
2740
2741 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2742 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2743 ev->key_type, pin_len);
2744
2745 hci_dev_unlock(hdev);
2746 }
2747
2748 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2749 {
2750 struct hci_ev_clock_offset *ev = (void *) skb->data;
2751 struct hci_conn *conn;
2752
2753 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2754
2755 hci_dev_lock(hdev);
2756
2757 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2758 if (conn && !ev->status) {
2759 struct inquiry_entry *ie;
2760
2761 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2762 if (ie) {
2763 ie->data.clock_offset = ev->clock_offset;
2764 ie->timestamp = jiffies;
2765 }
2766 }
2767
2768 hci_dev_unlock(hdev);
2769 }
2770
2771 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2772 {
2773 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2774 struct hci_conn *conn;
2775
2776 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2777
2778 hci_dev_lock(hdev);
2779
2780 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2781 if (conn && !ev->status)
2782 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2783
2784 hci_dev_unlock(hdev);
2785 }
2786
2787 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2788 {
2789 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2790 struct inquiry_entry *ie;
2791
2792 BT_DBG("%s", hdev->name);
2793
2794 hci_dev_lock(hdev);
2795
2796 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2797 if (ie) {
2798 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2799 ie->timestamp = jiffies;
2800 }
2801
2802 hci_dev_unlock(hdev);
2803 }
2804
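/* Inquiry Result with RSSI: controllers may use either of two
 * response layouts (with or without the page scan mode field), so
 * the per-response size selects the right structure before updating
 * the inquiry cache and notifying mgmt. */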
2805 static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
2806 struct sk_buff *skb)
2807 {
2808 struct inquiry_data data;
2809 int num_rsp = *((__u8 *) skb->data);
2810 bool name_known, ssp;
2811
2812 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2813
2814 if (!num_rsp)
2815 return;
2816
2817 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2818 return;
2819
2820 hci_dev_lock(hdev);
2821
2822 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2823 struct inquiry_info_with_rssi_and_pscan_mode *info;
2824 info = (void *) (skb->data + 1);
2825
2826 for (; num_rsp; num_rsp--, info++) {
2827 bacpy(&data.bdaddr, &info->bdaddr);
2828 data.pscan_rep_mode = info->pscan_rep_mode;
2829 data.pscan_period_mode = info->pscan_period_mode;
2830 data.pscan_mode = info->pscan_mode;
2831 memcpy(data.dev_class, info->dev_class, 3);
2832 data.clock_offset = info->clock_offset;
2833 data.rssi = info->rssi;
2834 data.ssp_mode = 0x00;
2835
2836 name_known = hci_inquiry_cache_update(hdev, &data,
2837 false, &ssp);
2838 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2839 info->dev_class, info->rssi,
2840 !name_known, ssp, NULL, 0);
2841 }
2842 } else {
2843 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2844
2845 for (; num_rsp; num_rsp--, info++) {
2846 bacpy(&data.bdaddr, &info->bdaddr);
2847 data.pscan_rep_mode = info->pscan_rep_mode;
2848 data.pscan_period_mode = info->pscan_period_mode;
2849 data.pscan_mode = 0x00;
2850 memcpy(data.dev_class, info->dev_class, 3);
2851 data.clock_offset = info->clock_offset;
2852 data.rssi = info->rssi;
2853 data.ssp_mode = 0x00;
2854 name_known = hci_inquiry_cache_update(hdev, &data,
2855 false, &ssp);
2856 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2857 info->dev_class, info->rssi,
2858 !name_known, ssp, NULL, 0);
2859 }
2860 }
2861
2862 hci_dev_unlock(hdev);
2863 }
2864
2865 static void hci_remote_ext_features_evt(struct hci_dev *hdev,
2866 struct sk_buff *skb)
2867 {
2868 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2869 struct hci_conn *conn;
2870
2871 BT_DBG("%s", hdev->name);
2872
2873 hci_dev_lock(hdev);
2874
2875 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2876 if (!conn)
2877 goto unlock;
2878
2879 if (!ev->status && ev->page == 0x01) {
2880 struct inquiry_entry *ie;
2881
2882 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2883 if (ie)
2884 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
2885
2886 if (ev->features[0] & LMP_HOST_SSP)
2887 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2888 }
2889
2890 if (conn->state != BT_CONFIG)
2891 goto unlock;
2892
2893 if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2894 struct hci_cp_remote_name_req cp;
2895 memset(&cp, 0, sizeof(cp));
2896 bacpy(&cp.bdaddr, &conn->dst);
2897 cp.pscan_rep_mode = 0x02;
2898 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2899 	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
2900 		mgmt_device_connected(hdev, &conn->dst, conn->type,
2901 				      conn->dst_type, 0, NULL, 0,
2902 				      conn->dev_class);
	}
2903
2904 if (!hci_outgoing_auth_needed(hdev, conn)) {
2905 conn->state = BT_CONNECTED;
2906 hci_proto_connect_cfm(conn, ev->status);
2907 hci_conn_put(conn);
2908 }
2909
2910 unlock:
2911 hci_dev_unlock(hdev);
2912 }
2913
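/* Synchronous Connection Complete: finish SCO/eSCO setup. On the
 * listed failure codes an outgoing eSCO attempt is retried once with
 * a more conservative packet type before the link is torn down. */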
2914 static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
2915 struct sk_buff *skb)
2916 {
2917 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2918 struct hci_conn *conn;
2919
2920 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2921
2922 hci_dev_lock(hdev);
2923
2924 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2925 if (!conn) {
2926 if (ev->link_type == ESCO_LINK)
2927 goto unlock;
2928
2929 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2930 if (!conn)
2931 goto unlock;
2932
2933 conn->type = SCO_LINK;
2934 }
2935
2936 switch (ev->status) {
2937 case 0x00:
2938 conn->handle = __le16_to_cpu(ev->handle);
2939 conn->state = BT_CONNECTED;
2940
2941 hci_conn_hold_device(conn);
2942 hci_conn_add_sysfs(conn);
2943 break;
2944
2945 case 0x11: /* Unsupported Feature or Parameter Value */
2946 case 0x1c: /* SCO interval rejected */
2947 case 0x1a: /* Unsupported Remote Feature */
2948 case 0x1f: /* Unspecified error */
2949 if (conn->out && conn->attempt < 2) {
2950 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2951 (hdev->esco_type & EDR_ESCO_MASK);
2952 hci_setup_sync(conn, conn->link->handle);
2953 goto unlock;
2954 }
2955 /* fall through */
2956
2957 default:
2958 conn->state = BT_CLOSED;
2959 break;
2960 }
2961
2962 hci_proto_connect_cfm(conn, ev->status);
2963 if (ev->status)
2964 hci_conn_del(conn);
2965
2966 unlock:
2967 hci_dev_unlock(hdev);
2968 }
2969
2970 static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2971 {
2972 BT_DBG("%s", hdev->name);
2973 }
2974
2975 static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2976 {
2977 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2978
2979 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2980 }
2981
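/* Extended Inquiry Result: like the RSSI variant, but each response
 * carries EIR data which may already include the complete remote
 * name; the EIR payload and its length are passed on to mgmt. */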
2982 static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
2983 struct sk_buff *skb)
2984 {
2985 struct inquiry_data data;
2986 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2987 int num_rsp = *((__u8 *) skb->data);
2988 size_t eir_len;
2989
2990 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2991
2992 if (!num_rsp)
2993 return;
2994
2995 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
2996 return;
2997
2998 hci_dev_lock(hdev);
2999
3000 for (; num_rsp; num_rsp--, info++) {
3001 bool name_known, ssp;
3002
3003 bacpy(&data.bdaddr, &info->bdaddr);
3004 data.pscan_rep_mode = info->pscan_rep_mode;
3005 data.pscan_period_mode = info->pscan_period_mode;
3006 data.pscan_mode = 0x00;
3007 memcpy(data.dev_class, info->dev_class, 3);
3008 data.clock_offset = info->clock_offset;
3009 data.rssi = info->rssi;
3010 data.ssp_mode = 0x01;
3011
3012 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3013 name_known = eir_has_data_type(info->data,
3014 sizeof(info->data),
3015 EIR_NAME_COMPLETE);
3016 else
3017 name_known = true;
3018
3019 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
3020 &ssp);
3021 eir_len = eir_get_length(info->data, sizeof(info->data));
3022 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
3023 info->dev_class, info->rssi, !name_known,
3024 ssp, info->data, eir_len);
3025 }
3026
3027 hci_dev_unlock(hdev);
3028 }
3029
3030 static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
3031 struct sk_buff *skb)
3032 {
3033 struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
3034 struct hci_conn *conn;
3035
3036 BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
3037 __le16_to_cpu(ev->handle));
3038
3039 hci_dev_lock(hdev);
3040
3041 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3042 if (!conn)
3043 goto unlock;
3044
3045 if (!ev->status)
3046 conn->sec_level = conn->pending_sec_level;
3047
3048 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
3049
3050 if (ev->status && conn->state == BT_CONNECTED) {
3051 hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
3052 hci_conn_put(conn);
3053 goto unlock;
3054 }
3055
3056 if (conn->state == BT_CONFIG) {
3057 if (!ev->status)
3058 conn->state = BT_CONNECTED;
3059
3060 hci_proto_connect_cfm(conn, ev->status);
3061 hci_conn_put(conn);
3062 } else {
3063 hci_auth_cfm(conn, ev->status);
3064
3065 hci_conn_hold(conn);
3066 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
3067 hci_conn_put(conn);
3068 }
3069
3070 unlock:
3071 hci_dev_unlock(hdev);
3072 }
3073
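/* Derive the authentication requirements to report in the IO
 * Capability Reply from what the remote side requested and from our
 * own IO capability and bonding settings. */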
3074 static u8 hci_get_auth_req(struct hci_conn *conn)
3075 {
3076 	/* If the remote requests dedicated bonding, follow that lead */
3077 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3078 /* If both remote and local IO capabilities allow MITM
3079 * protection then require it, otherwise don't */
3080 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3081 return 0x02;
3082 else
3083 return 0x03;
3084 }
3085
3086 /* If remote requests no-bonding follow that lead */
3087 	/* If the remote requests no-bonding, follow that lead */
3088 return conn->remote_auth | (conn->auth_type & 0x01);
3089
3090 return conn->auth_type;
3091 }
3092
3093 static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3094 {
3095 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3096 struct hci_conn *conn;
3097
3098 BT_DBG("%s", hdev->name);
3099
3100 hci_dev_lock(hdev);
3101
3102 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3103 if (!conn)
3104 goto unlock;
3105
3106 hci_conn_hold(conn);
3107
3108 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3109 goto unlock;
3110
3111 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3112 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3113 struct hci_cp_io_capability_reply cp;
3114
3115 bacpy(&cp.bdaddr, &ev->bdaddr);
3116 /* Change the IO capability from KeyboardDisplay
3117 		 * to DisplayYesNo as it is not supported by the BT spec. */
3118 cp.capability = (conn->io_capability == 0x04) ?
3119 0x01 : conn->io_capability;
3120 conn->auth_type = hci_get_auth_req(conn);
3121 cp.authentication = conn->auth_type;
3122
3123 if (hci_find_remote_oob_data(hdev, &conn->dst) &&
3124 (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
3125 cp.oob_data = 0x01;
3126 else
3127 cp.oob_data = 0x00;
3128
3129 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3130 sizeof(cp), &cp);
3131 } else {
3132 struct hci_cp_io_capability_neg_reply cp;
3133
3134 bacpy(&cp.bdaddr, &ev->bdaddr);
3135 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3136
3137 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3138 sizeof(cp), &cp);
3139 }
3140
3141 unlock:
3142 hci_dev_unlock(hdev);
3143 }
3144
3145 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3146 {
3147 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3148 struct hci_conn *conn;
3149
3150 BT_DBG("%s", hdev->name);
3151
3152 hci_dev_lock(hdev);
3153
3154 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3155 if (!conn)
3156 goto unlock;
3157
3158 conn->remote_cap = ev->capability;
3159 conn->remote_auth = ev->authentication;
3160 if (ev->oob_data)
3161 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3162
3163 unlock:
3164 hci_dev_unlock(hdev);
3165 }
3166
3167 static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3168 struct sk_buff *skb)
3169 {
3170 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3171 int loc_mitm, rem_mitm, confirm_hint = 0;
3172 struct hci_conn *conn;
3173
3174 BT_DBG("%s", hdev->name);
3175
3176 hci_dev_lock(hdev);
3177
3178 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3179 goto unlock;
3180
3181 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3182 if (!conn)
3183 goto unlock;
3184
3185 loc_mitm = (conn->auth_type & 0x01);
3186 rem_mitm = (conn->remote_auth & 0x01);
3187
3188 /* If we require MITM but the remote device can't provide that
3189 	 * (it has NoInputNoOutput), then reject the confirmation
3190 * request. The only exception is when we're dedicated bonding
3191 * initiators (connect_cfm_cb set) since then we always have the MITM
3192 * bit set. */
3193 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3194 BT_DBG("Rejecting request: remote device can't provide MITM");
3195 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3196 sizeof(ev->bdaddr), &ev->bdaddr);
3197 goto unlock;
3198 }
3199
3200 	/* If neither side requires MITM protection, auto-accept */
3201 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3202 (!rem_mitm || conn->io_capability == 0x03)) {
3203
3204 		/* If we're not the initiator, request authorization to
3205 * proceed from user space (mgmt_user_confirm with
3206 * confirm_hint set to 1). */
3207 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3208 BT_DBG("Confirming auto-accept as acceptor");
3209 confirm_hint = 1;
3210 goto confirm;
3211 }
3212
3213 BT_DBG("Auto-accept of user confirmation with %ums delay",
3214 hdev->auto_accept_delay);
3215
3216 if (hdev->auto_accept_delay > 0) {
3217 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3218 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3219 goto unlock;
3220 }
3221
3222 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3223 sizeof(ev->bdaddr), &ev->bdaddr);
3224 goto unlock;
3225 }
3226
3227 confirm:
3228 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3229 confirm_hint);
3230
3231 unlock:
3232 hci_dev_unlock(hdev);
3233 }
3234
3235 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3236 struct sk_buff *skb)
3237 {
3238 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3239
3240 BT_DBG("%s", hdev->name);
3241
3242 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3243 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3244 }
3245
3246 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3247 struct sk_buff *skb)
3248 {
3249 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3250 struct hci_conn *conn;
3251
3252 BT_DBG("%s", hdev->name);
3253
3254 hci_dev_lock(hdev);
3255
3256 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3257 if (!conn)
3258 goto unlock;
3259
3260 /* To avoid duplicate auth_failed events to user space we check
3261 * the HCI_CONN_AUTH_PEND flag which will be set if we
3262 	 * initiated the authentication. A traditional auth_complete
3263 	 * event is always produced when we are the initiator and is
3264 	 * also mapped to the mgmt_auth_failed event. */
3265 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3266 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3267 ev->status);
3268
3269 hci_conn_put(conn);
3270
3271 unlock:
3272 hci_dev_unlock(hdev);
3273 }
3274
3275 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3276 struct sk_buff *skb)
3277 {
3278 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3279 struct inquiry_entry *ie;
3280
3281 BT_DBG("%s", hdev->name);
3282
3283 hci_dev_lock(hdev);
3284
3285 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3286 if (ie)
3287 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3288
3289 hci_dev_unlock(hdev);
3290 }
3291
3292 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3293 struct sk_buff *skb)
3294 {
3295 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3296 struct oob_data *data;
3297
3298 BT_DBG("%s", hdev->name);
3299
3300 hci_dev_lock(hdev);
3301
3302 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3303 goto unlock;
3304
3305 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3306 if (data) {
3307 struct hci_cp_remote_oob_data_reply cp;
3308
3309 bacpy(&cp.bdaddr, &ev->bdaddr);
3310 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3311 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3312
3313 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3314 &cp);
3315 } else {
3316 struct hci_cp_remote_oob_data_neg_reply cp;
3317
3318 bacpy(&cp.bdaddr, &ev->bdaddr);
3319 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3320 &cp);
3321 }
3322
3323 unlock:
3324 hci_dev_unlock(hdev);
3325 }
3326
3327 static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3328 {
3329 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3330 struct hci_conn *conn;
3331
3332 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3333
3334 hci_dev_lock(hdev);
3335
3336 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
3337 if (!conn) {
3338 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3339 if (!conn) {
3340 BT_ERR("No memory for new connection");
3341 goto unlock;
3342 }
3343
3344 conn->dst_type = ev->bdaddr_type;
3345
3346 if (ev->role == LE_CONN_ROLE_MASTER) {
3347 conn->out = true;
3348 conn->link_mode |= HCI_LM_MASTER;
3349 }
3350 }
3351
3352 if (ev->status) {
3353 mgmt_connect_failed(hdev, &conn->dst, conn->type,
3354 conn->dst_type, ev->status);
3355 hci_proto_connect_cfm(conn, ev->status);
3356 conn->state = BT_CLOSED;
3357 hci_conn_del(conn);
3358 goto unlock;
3359 }
3360
3361 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3362 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3363 conn->dst_type, 0, NULL, 0, NULL);
3364
3365 conn->sec_level = BT_SECURITY_LOW;
3366 conn->handle = __le16_to_cpu(ev->handle);
3367 conn->state = BT_CONNECTED;
3368
3369 hci_conn_hold_device(conn);
3370 hci_conn_add_sysfs(conn);
3371
3372 hci_proto_connect_cfm(conn, ev->status);
3373
3374 unlock:
3375 hci_dev_unlock(hdev);
3376 }
3377
3378 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3379 {
3380 u8 num_reports = skb->data[0];
3381 void *ptr = &skb->data[1];
3382 s8 rssi;
3383
3384 hci_dev_lock(hdev);
3385
3386 while (num_reports--) {
3387 struct hci_ev_le_advertising_info *ev = ptr;
3388
3389 rssi = ev->data[ev->length];
3390 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3391 NULL, rssi, 0, 1, ev->data, ev->length);
3392
3393 ptr += sizeof(*ev) + ev->length + 1;
3394 }
3395
3396 hci_dev_unlock(hdev);
3397 }
3398
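/* LE Long Term Key Request: look up the LTK matching the EDiv/Rand
 * values and hand it to the controller, removing one-time STKs after
 * use; send a negative reply if no key is found. */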
3399 static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3400 {
3401 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3402 struct hci_cp_le_ltk_reply cp;
3403 struct hci_cp_le_ltk_neg_reply neg;
3404 struct hci_conn *conn;
3405 struct smp_ltk *ltk;
3406
3407 BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));
3408
3409 hci_dev_lock(hdev);
3410
3411 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3412 if (conn == NULL)
3413 goto not_found;
3414
3415 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3416 if (ltk == NULL)
3417 goto not_found;
3418
3419 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3420 cp.handle = cpu_to_le16(conn->handle);
3421
3422 if (ltk->authenticated)
3423 conn->sec_level = BT_SECURITY_HIGH;
3424
3425 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3426
3427 if (ltk->type & HCI_SMP_STK) {
3428 list_del(&ltk->list);
3429 kfree(ltk);
3430 }
3431
3432 hci_dev_unlock(hdev);
3433
3434 return;
3435
3436 not_found:
3437 neg.handle = ev->handle;
3438 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3439 hci_dev_unlock(hdev);
3440 }
3441
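/* LE Meta event: the subevent code selects the actual LE event
 * handler. */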
3442 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3443 {
3444 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3445
3446 skb_pull(skb, sizeof(*le_ev));
3447
3448 switch (le_ev->subevent) {
3449 case HCI_EV_LE_CONN_COMPLETE:
3450 hci_le_conn_complete_evt(hdev, skb);
3451 break;
3452
3453 case HCI_EV_LE_ADVERTISING_REPORT:
3454 hci_le_adv_report_evt(hdev, skb);
3455 break;
3456
3457 case HCI_EV_LE_LTK_REQ:
3458 hci_le_ltk_request_evt(hdev, skb);
3459 break;
3460
3461 default:
3462 break;
3463 }
3464 }
3465
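/* Main HCI event demultiplexer: called for every incoming event
 * packet; strips the event header, dispatches to the handler for the
 * event code, then frees the skb and updates the RX statistics. */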
3466 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3467 {
3468 struct hci_event_hdr *hdr = (void *) skb->data;
3469 __u8 event = hdr->evt;
3470
3471 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3472
3473 switch (event) {
3474 case HCI_EV_INQUIRY_COMPLETE:
3475 hci_inquiry_complete_evt(hdev, skb);
3476 break;
3477
3478 case HCI_EV_INQUIRY_RESULT:
3479 hci_inquiry_result_evt(hdev, skb);
3480 break;
3481
3482 case HCI_EV_CONN_COMPLETE:
3483 hci_conn_complete_evt(hdev, skb);
3484 break;
3485
3486 case HCI_EV_CONN_REQUEST:
3487 hci_conn_request_evt(hdev, skb);
3488 break;
3489
3490 case HCI_EV_DISCONN_COMPLETE:
3491 hci_disconn_complete_evt(hdev, skb);
3492 break;
3493
3494 case HCI_EV_AUTH_COMPLETE:
3495 hci_auth_complete_evt(hdev, skb);
3496 break;
3497
3498 case HCI_EV_REMOTE_NAME:
3499 hci_remote_name_evt(hdev, skb);
3500 break;
3501
3502 case HCI_EV_ENCRYPT_CHANGE:
3503 hci_encrypt_change_evt(hdev, skb);
3504 break;
3505
3506 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3507 hci_change_link_key_complete_evt(hdev, skb);
3508 break;
3509
3510 case HCI_EV_REMOTE_FEATURES:
3511 hci_remote_features_evt(hdev, skb);
3512 break;
3513
3514 case HCI_EV_REMOTE_VERSION:
3515 hci_remote_version_evt(hdev, skb);
3516 break;
3517
3518 case HCI_EV_QOS_SETUP_COMPLETE:
3519 hci_qos_setup_complete_evt(hdev, skb);
3520 break;
3521
3522 case HCI_EV_CMD_COMPLETE:
3523 hci_cmd_complete_evt(hdev, skb);
3524 break;
3525
3526 case HCI_EV_CMD_STATUS:
3527 hci_cmd_status_evt(hdev, skb);
3528 break;
3529
3530 case HCI_EV_ROLE_CHANGE:
3531 hci_role_change_evt(hdev, skb);
3532 break;
3533
3534 case HCI_EV_NUM_COMP_PKTS:
3535 hci_num_comp_pkts_evt(hdev, skb);
3536 break;
3537
3538 case HCI_EV_MODE_CHANGE:
3539 hci_mode_change_evt(hdev, skb);
3540 break;
3541
3542 case HCI_EV_PIN_CODE_REQ:
3543 hci_pin_code_request_evt(hdev, skb);
3544 break;
3545
3546 case HCI_EV_LINK_KEY_REQ:
3547 hci_link_key_request_evt(hdev, skb);
3548 break;
3549
3550 case HCI_EV_LINK_KEY_NOTIFY:
3551 hci_link_key_notify_evt(hdev, skb);
3552 break;
3553
3554 case HCI_EV_CLOCK_OFFSET:
3555 hci_clock_offset_evt(hdev, skb);
3556 break;
3557
3558 case HCI_EV_PKT_TYPE_CHANGE:
3559 hci_pkt_type_change_evt(hdev, skb);
3560 break;
3561
3562 case HCI_EV_PSCAN_REP_MODE:
3563 hci_pscan_rep_mode_evt(hdev, skb);
3564 break;
3565
3566 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3567 hci_inquiry_result_with_rssi_evt(hdev, skb);
3568 break;
3569
3570 case HCI_EV_REMOTE_EXT_FEATURES:
3571 hci_remote_ext_features_evt(hdev, skb);
3572 break;
3573
3574 case HCI_EV_SYNC_CONN_COMPLETE:
3575 hci_sync_conn_complete_evt(hdev, skb);
3576 break;
3577
3578 case HCI_EV_SYNC_CONN_CHANGED:
3579 hci_sync_conn_changed_evt(hdev, skb);
3580 break;
3581
3582 case HCI_EV_SNIFF_SUBRATE:
3583 hci_sniff_subrate_evt(hdev, skb);
3584 break;
3585
3586 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3587 hci_extended_inquiry_result_evt(hdev, skb);
3588 break;
3589
3590 case HCI_EV_KEY_REFRESH_COMPLETE:
3591 hci_key_refresh_complete_evt(hdev, skb);
3592 break;
3593
3594 case HCI_EV_IO_CAPA_REQUEST:
3595 hci_io_capa_request_evt(hdev, skb);
3596 break;
3597
3598 case HCI_EV_IO_CAPA_REPLY:
3599 hci_io_capa_reply_evt(hdev, skb);
3600 break;
3601
3602 case HCI_EV_USER_CONFIRM_REQUEST:
3603 hci_user_confirm_request_evt(hdev, skb);
3604 break;
3605
3606 case HCI_EV_USER_PASSKEY_REQUEST:
3607 hci_user_passkey_request_evt(hdev, skb);
3608 break;
3609
3610 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3611 hci_simple_pair_complete_evt(hdev, skb);
3612 break;
3613
3614 case HCI_EV_REMOTE_HOST_FEATURES:
3615 hci_remote_host_features_evt(hdev, skb);
3616 break;
3617
3618 case HCI_EV_LE_META:
3619 hci_le_meta_evt(hdev, skb);
3620 break;
3621
3622 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3623 hci_remote_oob_data_request_evt(hdev, skb);
3624 break;
3625
3626 case HCI_EV_NUM_COMP_BLOCKS:
3627 hci_num_comp_blocks_evt(hdev, skb);
3628 break;
3629
3630 default:
3631 BT_DBG("%s event 0x%2.2x", hdev->name, event);
3632 break;
3633 }
3634
3635 kfree_skb(skb);
3636 hdev->stat.evt_rx++;
3637 }