Bluetooth: Don't send unnecessary write_le_enable command
[deliverable/linux.git] / net/bluetooth/hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/errno.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/poll.h>
34 #include <linux/fcntl.h>
35 #include <linux/init.h>
36 #include <linux/skbuff.h>
37 #include <linux/interrupt.h>
38 #include <net/sock.h>
39
40 #include <asm/system.h>
41 #include <linux/uaccess.h>
42 #include <asm/unaligned.h>
43
44 #include <net/bluetooth/bluetooth.h>
45 #include <net/bluetooth/hci_core.h>
46
47 /* Handle HCI Event packets */
48
49 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
50 {
51 __u8 status = *((__u8 *) skb->data);
52
53 BT_DBG("%s status 0x%x", hdev->name, status);
54
55 if (status) {
56 hci_dev_lock(hdev);
57 mgmt_stop_discovery_failed(hdev, status);
58 hci_dev_unlock(hdev);
59 return;
60 }
61
62 clear_bit(HCI_INQUIRY, &hdev->flags);
63
64 hci_dev_lock(hdev);
65 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
66 hci_dev_unlock(hdev);
67
68 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
69
70 hci_conn_check_pending(hdev);
71 }
72
73 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
74 {
75 __u8 status = *((__u8 *) skb->data);
76
77 BT_DBG("%s status 0x%x", hdev->name, status);
78
79 if (status)
80 return;
81
82 hci_conn_check_pending(hdev);
83 }
84
85 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
86 {
87 BT_DBG("%s", hdev->name);
88 }
89
90 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
91 {
92 struct hci_rp_role_discovery *rp = (void *) skb->data;
93 struct hci_conn *conn;
94
95 BT_DBG("%s status 0x%x", hdev->name, rp->status);
96
97 if (rp->status)
98 return;
99
100 hci_dev_lock(hdev);
101
102 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
103 if (conn) {
104 if (rp->role)
105 conn->link_mode &= ~HCI_LM_MASTER;
106 else
107 conn->link_mode |= HCI_LM_MASTER;
108 }
109
110 hci_dev_unlock(hdev);
111 }
112
113 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
114 {
115 struct hci_rp_read_link_policy *rp = (void *) skb->data;
116 struct hci_conn *conn;
117
118 BT_DBG("%s status 0x%x", hdev->name, rp->status);
119
120 if (rp->status)
121 return;
122
123 hci_dev_lock(hdev);
124
125 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
126 if (conn)
127 conn->link_policy = __le16_to_cpu(rp->policy);
128
129 hci_dev_unlock(hdev);
130 }
131
132 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
133 {
134 struct hci_rp_write_link_policy *rp = (void *) skb->data;
135 struct hci_conn *conn;
136 void *sent;
137
138 BT_DBG("%s status 0x%x", hdev->name, rp->status);
139
140 if (rp->status)
141 return;
142
143 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
144 if (!sent)
145 return;
146
147 hci_dev_lock(hdev);
148
149 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
150 if (conn)
151 conn->link_policy = get_unaligned_le16(sent + 2);
152
153 hci_dev_unlock(hdev);
154 }
155
156 static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
157 {
158 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
159
160 BT_DBG("%s status 0x%x", hdev->name, rp->status);
161
162 if (rp->status)
163 return;
164
165 hdev->link_policy = __le16_to_cpu(rp->policy);
166 }
167
168 static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
169 {
170 __u8 status = *((__u8 *) skb->data);
171 void *sent;
172
173 BT_DBG("%s status 0x%x", hdev->name, status);
174
175 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
176 if (!sent)
177 return;
178
179 if (!status)
180 hdev->link_policy = get_unaligned_le16(sent);
181
182 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
183 }
184
185 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
186 {
187 __u8 status = *((__u8 *) skb->data);
188
189 BT_DBG("%s status 0x%x", hdev->name, status);
190
191 clear_bit(HCI_RESET, &hdev->flags);
192
193 hci_req_complete(hdev, HCI_OP_RESET, status);
194
195 /* Reset all non-persistent flags */
196 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN));
197
198 hdev->discovery.state = DISCOVERY_STOPPED;
199 }
200
201 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
202 {
203 __u8 status = *((__u8 *) skb->data);
204 void *sent;
205
206 BT_DBG("%s status 0x%x", hdev->name, status);
207
208 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
209 if (!sent)
210 return;
211
212 hci_dev_lock(hdev);
213
214 if (test_bit(HCI_MGMT, &hdev->dev_flags))
215 mgmt_set_local_name_complete(hdev, sent, status);
216 else if (!status)
217 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
218
219 hci_dev_unlock(hdev);
220
221 hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
222 }
223
224 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
225 {
226 struct hci_rp_read_local_name *rp = (void *) skb->data;
227
228 BT_DBG("%s status 0x%x", hdev->name, rp->status);
229
230 if (rp->status)
231 return;
232
233 if (test_bit(HCI_SETUP, &hdev->dev_flags))
234 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
235 }
236
237 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
238 {
239 __u8 status = *((__u8 *) skb->data);
240 void *sent;
241
242 BT_DBG("%s status 0x%x", hdev->name, status);
243
244 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
245 if (!sent)
246 return;
247
248 if (!status) {
249 __u8 param = *((__u8 *) sent);
250
251 if (param == AUTH_ENABLED)
252 set_bit(HCI_AUTH, &hdev->flags);
253 else
254 clear_bit(HCI_AUTH, &hdev->flags);
255 }
256
257 if (test_bit(HCI_MGMT, &hdev->dev_flags))
258 mgmt_auth_enable_complete(hdev, status);
259
260 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
261 }
262
263 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
264 {
265 __u8 status = *((__u8 *) skb->data);
266 void *sent;
267
268 BT_DBG("%s status 0x%x", hdev->name, status);
269
270 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
271 if (!sent)
272 return;
273
274 if (!status) {
275 __u8 param = *((__u8 *) sent);
276
277 if (param)
278 set_bit(HCI_ENCRYPT, &hdev->flags);
279 else
280 clear_bit(HCI_ENCRYPT, &hdev->flags);
281 }
282
283 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
284 }
285
286 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
287 {
288 __u8 param, status = *((__u8 *) skb->data);
289 int old_pscan, old_iscan;
290 void *sent;
291
292 BT_DBG("%s status 0x%x", hdev->name, status);
293
294 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
295 if (!sent)
296 return;
297
298 param = *((__u8 *) sent);
299
300 hci_dev_lock(hdev);
301
302 if (status != 0) {
303 mgmt_write_scan_failed(hdev, param, status);
304 hdev->discov_timeout = 0;
305 goto done;
306 }
307
308 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
309 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
310
311 if (param & SCAN_INQUIRY) {
312 set_bit(HCI_ISCAN, &hdev->flags);
313 if (!old_iscan)
314 mgmt_discoverable(hdev, 1);
315 if (hdev->discov_timeout > 0) {
316 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
317 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
318 to);
319 }
320 } else if (old_iscan)
321 mgmt_discoverable(hdev, 0);
322
323 if (param & SCAN_PAGE) {
324 set_bit(HCI_PSCAN, &hdev->flags);
325 if (!old_pscan)
326 mgmt_connectable(hdev, 1);
327 } else if (old_pscan)
328 mgmt_connectable(hdev, 0);
329
330 done:
331 hci_dev_unlock(hdev);
332 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
333 }
334
335 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
336 {
337 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
338
339 BT_DBG("%s status 0x%x", hdev->name, rp->status);
340
341 if (rp->status)
342 return;
343
344 memcpy(hdev->dev_class, rp->dev_class, 3);
345
346 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
347 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
348 }
349
350 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
351 {
352 __u8 status = *((__u8 *) skb->data);
353 void *sent;
354
355 BT_DBG("%s status 0x%x", hdev->name, status);
356
357 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
358 if (!sent)
359 return;
360
361 hci_dev_lock(hdev);
362
363 if (status == 0)
364 memcpy(hdev->dev_class, sent, 3);
365
366 if (test_bit(HCI_MGMT, &hdev->dev_flags))
367 mgmt_set_class_of_dev_complete(hdev, sent, status);
368
369 hci_dev_unlock(hdev);
370 }
371
372 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
373 {
374 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
375 __u16 setting;
376
377 BT_DBG("%s status 0x%x", hdev->name, rp->status);
378
379 if (rp->status)
380 return;
381
382 setting = __le16_to_cpu(rp->voice_setting);
383
384 if (hdev->voice_setting == setting)
385 return;
386
387 hdev->voice_setting = setting;
388
389 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
390
391 if (hdev->notify)
392 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
393 }
394
395 static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
396 {
397 __u8 status = *((__u8 *) skb->data);
398 __u16 setting;
399 void *sent;
400
401 BT_DBG("%s status 0x%x", hdev->name, status);
402
403 if (status)
404 return;
405
406 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
407 if (!sent)
408 return;
409
410 setting = get_unaligned_le16(sent);
411
412 if (hdev->voice_setting == setting)
413 return;
414
415 hdev->voice_setting = setting;
416
417 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
418
419 if (hdev->notify)
420 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
421 }
422
423 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
424 {
425 __u8 status = *((__u8 *) skb->data);
426
427 BT_DBG("%s status 0x%x", hdev->name, status);
428
429 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
430 }
431
432 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
433 {
434 __u8 status = *((__u8 *) skb->data);
435 void *sent;
436
437 BT_DBG("%s status 0x%x", hdev->name, status);
438
439 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
440 if (!sent)
441 return;
442
443 if (test_bit(HCI_MGMT, &hdev->dev_flags))
444 mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
445 else if (!status) {
446 if (*((u8 *) sent))
447 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
448 else
449 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
450 }
451 }
452
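/* Pick the inquiry result format to request from the controller:
 * 2 for extended inquiry results, 1 for results with RSSI, 0 for the
 * standard format.  A few controllers that deliver RSSI results
 * without advertising the feature bit are matched by manufacturer,
 * HCI revision and LMP subversion. */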
453 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
454 {
455 if (hdev->features[6] & LMP_EXT_INQ)
456 return 2;
457
458 if (hdev->features[3] & LMP_RSSI_INQ)
459 return 1;
460
461 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
462 hdev->lmp_subver == 0x0757)
463 return 1;
464
465 if (hdev->manufacturer == 15) {
466 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
467 return 1;
468 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
469 return 1;
470 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
471 return 1;
472 }
473
474 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
475 hdev->lmp_subver == 0x1805)
476 return 1;
477
478 return 0;
479 }
480
481 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
482 {
483 u8 mode;
484
485 mode = hci_get_inquiry_mode(hdev);
486
487 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
488 }
489
490 static void hci_setup_event_mask(struct hci_dev *hdev)
491 {
492 /* The second byte is 0xff instead of 0x9f (two reserved bits
493 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
494 * command otherwise */
495 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
496
497 /* CSR 1.1 dongles do not accept any bitfield so don't try to set
498 * any event mask for pre 1.2 devices */
499 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
500 return;
501
502 events[4] |= 0x01; /* Flow Specification Complete */
503 events[4] |= 0x02; /* Inquiry Result with RSSI */
504 events[4] |= 0x04; /* Read Remote Extended Features Complete */
505 events[5] |= 0x08; /* Synchronous Connection Complete */
506 events[5] |= 0x10; /* Synchronous Connection Changed */
507
508 if (hdev->features[3] & LMP_RSSI_INQ)
509 events[4] |= 0x04; /* Inquiry Result with RSSI */
510
511 if (hdev->features[5] & LMP_SNIFF_SUBR)
512 events[5] |= 0x20; /* Sniff Subrating */
513
514 if (hdev->features[5] & LMP_PAUSE_ENC)
515 events[5] |= 0x80; /* Encryption Key Refresh Complete */
516
517 if (hdev->features[6] & LMP_EXT_INQ)
518 events[5] |= 0x40; /* Extended Inquiry Result */
519
520 if (hdev->features[6] & LMP_NO_FLUSH)
521 events[7] |= 0x01; /* Enhanced Flush Complete */
522
523 if (hdev->features[7] & LMP_LSTO)
524 events[6] |= 0x80; /* Link Supervision Timeout Changed */
525
526 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
527 events[6] |= 0x01; /* IO Capability Request */
528 events[6] |= 0x02; /* IO Capability Response */
529 events[6] |= 0x04; /* User Confirmation Request */
530 events[6] |= 0x08; /* User Passkey Request */
531 events[6] |= 0x10; /* Remote OOB Data Request */
532 events[6] |= 0x20; /* Simple Pairing Complete */
533 events[7] |= 0x04; /* User Passkey Notification */
534 events[7] |= 0x08; /* Keypress Notification */
535 events[7] |= 0x10; /* Remote Host Supported
536 * Features Notification */
537 }
538
539 if (hdev->features[4] & LMP_LE)
540 events[7] |= 0x20; /* LE Meta-Event */
541
542 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
543 }
544
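/* Tell the controller whether the host supports LE.  Both parameters
 * stay zero unless LE is enabled for this adapter; in that case
 * simultaneous LE and BR/EDR operation is advertised only when the
 * controller's features indicate support for it. */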
545 static void hci_set_le_support(struct hci_dev *hdev)
546 {
547 struct hci_cp_write_le_host_supported cp;
548
549 memset(&cp, 0, sizeof(cp));
550
551 if (enable_le && test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
552 cp.le = 1;
553 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
554 }
555
556 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
557 }
558
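/* Controller configuration issued during initialization of a BR/EDR
 * adapter: program the event mask, then send the feature-dependent
 * setup commands (local name, SSP or EIR, inquiry mode, extended
 * features, link-level security, LE host support). */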
559 static void hci_setup(struct hci_dev *hdev)
560 {
561 if (hdev->dev_type != HCI_BREDR)
562 return;
563
564 hci_setup_event_mask(hdev);
565
566 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
567 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
568
569 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
570 test_bit(HCI_MGMT, &hdev->dev_flags)) {
571 struct hci_cp_write_local_name cp;
572
573 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
574 hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
575 }
576
577 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
578 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
579 u8 mode = 0x01;
580 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
581 sizeof(mode), &mode);
582 } else {
583 struct hci_cp_write_eir cp;
584
585 memset(hdev->eir, 0, sizeof(hdev->eir));
586 memset(&cp, 0, sizeof(cp));
587
588 hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
589 }
590 }
591
592 if (hdev->features[3] & LMP_RSSI_INQ)
593 hci_setup_inquiry_mode(hdev);
594
595 if (hdev->features[7] & LMP_INQ_TX_PWR)
596 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
597
598 if (hdev->features[7] & LMP_EXTFEATURES) {
599 struct hci_cp_read_local_ext_features cp;
600
601 cp.page = 0x01;
602 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
603 sizeof(cp), &cp);
604 }
605
606 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
607 u8 enable = 1;
608 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE,
609 sizeof(enable), &enable);
610 }
611
612 if (hdev->features[4] & LMP_LE)
613 hci_set_le_support(hdev);
614 }
615
616 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
617 {
618 struct hci_rp_read_local_version *rp = (void *) skb->data;
619
620 BT_DBG("%s status 0x%x", hdev->name, rp->status);
621
622 if (rp->status)
623 goto done;
624
625 hdev->hci_ver = rp->hci_ver;
626 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
627 hdev->lmp_ver = rp->lmp_ver;
628 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
629 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
630
631 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
632 hdev->manufacturer,
633 hdev->hci_ver, hdev->hci_rev);
634
635 if (test_bit(HCI_INIT, &hdev->flags))
636 hci_setup(hdev);
637
638 done:
639 hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
640 }
641
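/* Derive the default link policy from the local features (role
 * switch, hold, sniff, park) and write it to the controller. */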
642 static void hci_setup_link_policy(struct hci_dev *hdev)
643 {
644 u16 link_policy = 0;
645
646 if (hdev->features[0] & LMP_RSWITCH)
647 link_policy |= HCI_LP_RSWITCH;
648 if (hdev->features[0] & LMP_HOLD)
649 link_policy |= HCI_LP_HOLD;
650 if (hdev->features[0] & LMP_SNIFF)
651 link_policy |= HCI_LP_SNIFF;
652 if (hdev->features[1] & LMP_PARK)
653 link_policy |= HCI_LP_PARK;
654
655 link_policy = cpu_to_le16(link_policy);
656 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
657 sizeof(link_policy), &link_policy);
658 }
659
660 static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
661 {
662 struct hci_rp_read_local_commands *rp = (void *) skb->data;
663
664 BT_DBG("%s status 0x%x", hdev->name, rp->status);
665
666 if (rp->status)
667 goto done;
668
669 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
670
671 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
672 hci_setup_link_policy(hdev);
673
674 done:
675 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
676 }
677
678 static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
679 {
680 struct hci_rp_read_local_features *rp = (void *) skb->data;
681
682 BT_DBG("%s status 0x%x", hdev->name, rp->status);
683
684 if (rp->status)
685 return;
686
687 memcpy(hdev->features, rp->features, 8);
688
689 /* Adjust default settings according to features
690 * supported by the device. */
691
692 if (hdev->features[0] & LMP_3SLOT)
693 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
694
695 if (hdev->features[0] & LMP_5SLOT)
696 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
697
698 if (hdev->features[1] & LMP_HV2) {
699 hdev->pkt_type |= (HCI_HV2);
700 hdev->esco_type |= (ESCO_HV2);
701 }
702
703 if (hdev->features[1] & LMP_HV3) {
704 hdev->pkt_type |= (HCI_HV3);
705 hdev->esco_type |= (ESCO_HV3);
706 }
707
708 if (hdev->features[3] & LMP_ESCO)
709 hdev->esco_type |= (ESCO_EV3);
710
711 if (hdev->features[4] & LMP_EV4)
712 hdev->esco_type |= (ESCO_EV4);
713
714 if (hdev->features[4] & LMP_EV5)
715 hdev->esco_type |= (ESCO_EV5);
716
717 if (hdev->features[5] & LMP_EDR_ESCO_2M)
718 hdev->esco_type |= (ESCO_2EV3);
719
720 if (hdev->features[5] & LMP_EDR_ESCO_3M)
721 hdev->esco_type |= (ESCO_3EV3);
722
723 if (hdev->features[5] & LMP_EDR_3S_ESCO)
724 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
725
726 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
727 hdev->features[0], hdev->features[1],
728 hdev->features[2], hdev->features[3],
729 hdev->features[4], hdev->features[5],
730 hdev->features[6], hdev->features[7]);
731 }
732
733 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
734 struct sk_buff *skb)
735 {
736 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
737
738 BT_DBG("%s status 0x%x", hdev->name, rp->status);
739
740 if (rp->status)
741 return;
742
743 switch (rp->page) {
744 case 0:
745 memcpy(hdev->features, rp->features, 8);
746 break;
747 case 1:
748 memcpy(hdev->host_features, rp->features, 8);
749 break;
750 }
751
752 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
753 }
754
755 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
756 struct sk_buff *skb)
757 {
758 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
759
760 BT_DBG("%s status 0x%x", hdev->name, rp->status);
761
762 if (rp->status)
763 return;
764
765 hdev->flow_ctl_mode = rp->mode;
766
767 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
768 }
769
770 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
771 {
772 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
773
774 BT_DBG("%s status 0x%x", hdev->name, rp->status);
775
776 if (rp->status)
777 return;
778
779 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
780 hdev->sco_mtu = rp->sco_mtu;
781 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
782 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
783
784 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
785 hdev->sco_mtu = 64;
786 hdev->sco_pkts = 8;
787 }
788
789 hdev->acl_cnt = hdev->acl_pkts;
790 hdev->sco_cnt = hdev->sco_pkts;
791
792 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
793 hdev->acl_mtu, hdev->acl_pkts,
794 hdev->sco_mtu, hdev->sco_pkts);
795 }
796
797 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
798 {
799 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
800
801 BT_DBG("%s status 0x%x", hdev->name, rp->status);
802
803 if (!rp->status)
804 bacpy(&hdev->bdaddr, &rp->bdaddr);
805
806 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
807 }
808
809 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
810 struct sk_buff *skb)
811 {
812 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
813
814 BT_DBG("%s status 0x%x", hdev->name, rp->status);
815
816 if (rp->status)
817 return;
818
819 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
820 hdev->block_len = __le16_to_cpu(rp->block_len);
821 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
822
823 hdev->block_cnt = hdev->num_blocks;
824
825 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
826 hdev->block_cnt, hdev->block_len);
827
828 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
829 }
830
831 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
832 {
833 __u8 status = *((__u8 *) skb->data);
834
835 BT_DBG("%s status 0x%x", hdev->name, status);
836
837 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
838 }
839
840 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
841 struct sk_buff *skb)
842 {
843 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
844
845 BT_DBG("%s status 0x%x", hdev->name, rp->status);
846
847 if (rp->status)
848 return;
849
850 hdev->amp_status = rp->amp_status;
851 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
852 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
853 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
854 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
855 hdev->amp_type = rp->amp_type;
856 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
857 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
858 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
859 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
860
861 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
862 }
863
864 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
865 struct sk_buff *skb)
866 {
867 __u8 status = *((__u8 *) skb->data);
868
869 BT_DBG("%s status 0x%x", hdev->name, status);
870
871 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
872 }
873
874 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
875 {
876 __u8 status = *((__u8 *) skb->data);
877
878 BT_DBG("%s status 0x%x", hdev->name, status);
879
880 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
881 }
882
883 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
884 struct sk_buff *skb)
885 {
886 __u8 status = *((__u8 *) skb->data);
887
888 BT_DBG("%s status 0x%x", hdev->name, status);
889
890 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
891 }
892
893 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
894 struct sk_buff *skb)
895 {
896 __u8 status = *((__u8 *) skb->data);
897
898 BT_DBG("%s status 0x%x", hdev->name, status);
899
900 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
901 }
902
903 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
904 {
905 __u8 status = *((__u8 *) skb->data);
906
907 BT_DBG("%s status 0x%x", hdev->name, status);
908
909 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
910 }
911
912 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
913 {
914 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
915 struct hci_cp_pin_code_reply *cp;
916 struct hci_conn *conn;
917
918 BT_DBG("%s status 0x%x", hdev->name, rp->status);
919
920 hci_dev_lock(hdev);
921
922 if (test_bit(HCI_MGMT, &hdev->dev_flags))
923 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
924
925 if (rp->status != 0)
926 goto unlock;
927
928 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
929 if (!cp)
930 goto unlock;
931
932 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
933 if (conn)
934 conn->pin_length = cp->pin_len;
935
936 unlock:
937 hci_dev_unlock(hdev);
938 }
939
940 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
941 {
942 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
943
944 BT_DBG("%s status 0x%x", hdev->name, rp->status);
945
946 hci_dev_lock(hdev);
947
948 if (test_bit(HCI_MGMT, &hdev->dev_flags))
949 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
950 rp->status);
951
952 hci_dev_unlock(hdev);
953 }
954
955 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
956 struct sk_buff *skb)
957 {
958 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
959
960 BT_DBG("%s status 0x%x", hdev->name, rp->status);
961
962 if (rp->status)
963 return;
964
965 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
966 hdev->le_pkts = rp->le_max_pkt;
967
968 hdev->le_cnt = hdev->le_pkts;
969
970 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
971
972 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
973 }
974
975 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
976 {
977 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
978
979 BT_DBG("%s status 0x%x", hdev->name, rp->status);
980
981 hci_dev_lock(hdev);
982
983 if (test_bit(HCI_MGMT, &hdev->dev_flags))
984 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
985 0, rp->status);
986
987 hci_dev_unlock(hdev);
988 }
989
990 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
991 struct sk_buff *skb)
992 {
993 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
994
995 BT_DBG("%s status 0x%x", hdev->name, rp->status);
996
997 hci_dev_lock(hdev);
998
999 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1000 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1001 ACL_LINK, 0,
1002 rp->status);
1003
1004 hci_dev_unlock(hdev);
1005 }
1006
1007 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1008 {
1009 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1010
1011 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1012
1013 hci_dev_lock(hdev);
1014
1015 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1016 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1017 0, rp->status);
1018
1019 hci_dev_unlock(hdev);
1020 }
1021
1022 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1023 struct sk_buff *skb)
1024 {
1025 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1026
1027 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1028
1029 hci_dev_lock(hdev);
1030
1031 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1032 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1033 ACL_LINK, 0,
1034 rp->status);
1035
1036 hci_dev_unlock(hdev);
1037 }
1038
1039 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1040 struct sk_buff *skb)
1041 {
1042 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1043
1044 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1045
1046 hci_dev_lock(hdev);
1047 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1048 rp->randomizer, rp->status);
1049 hci_dev_unlock(hdev);
1050 }
1051
1052 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1053 {
1054 __u8 status = *((__u8 *) skb->data);
1055
1056 BT_DBG("%s status 0x%x", hdev->name, status);
1057
1058 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1059
1060 if (status) {
1061 hci_dev_lock(hdev);
1062 mgmt_start_discovery_failed(hdev, status);
1063 hci_dev_unlock(hdev);
1064 return;
1065 }
1066 }
1067
1068 static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1069 struct sk_buff *skb)
1070 {
1071 struct hci_cp_le_set_scan_enable *cp;
1072 __u8 status = *((__u8 *) skb->data);
1073
1074 BT_DBG("%s status 0x%x", hdev->name, status);
1075
1076 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1077 if (!cp)
1078 return;
1079
1080 switch (cp->enable) {
1081 case LE_SCANNING_ENABLED:
1082 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1083
1084 if (status) {
1085 hci_dev_lock(hdev);
1086 mgmt_start_discovery_failed(hdev, status);
1087 hci_dev_unlock(hdev);
1088 return;
1089 }
1090
1091 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1092
1093 cancel_delayed_work_sync(&hdev->adv_work);
1094
1095 hci_dev_lock(hdev);
1096 hci_adv_entries_clear(hdev);
1097 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1098 hci_dev_unlock(hdev);
1099 break;
1100
1101 case LE_SCANNING_DISABLED:
1102 if (status)
1103 return;
1104
1105 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1106
1107 schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
1108
1109 if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED) {
1110 mgmt_interleaved_discovery(hdev);
1111 } else {
1112 hci_dev_lock(hdev);
1113 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1114 hci_dev_unlock(hdev);
1115 }
1116
1117 break;
1118
1119 default:
1120 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1121 break;
1122 }
1123 }
1124
1125 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1126 {
1127 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1128
1129 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1130
1131 if (rp->status)
1132 return;
1133
1134 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1135 }
1136
1137 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1138 {
1139 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1140
1141 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1142
1143 if (rp->status)
1144 return;
1145
1146 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1147 }
1148
1149 static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1150 struct sk_buff *skb)
1151 {
1152 struct hci_cp_read_local_ext_features cp;
1153 struct hci_cp_write_le_host_supported *sent;
1154 __u8 status = *((__u8 *) skb->data);
1155
1156 BT_DBG("%s status 0x%x", hdev->name, status);
1157
1158 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1159 if (sent && test_bit(HCI_MGMT, &hdev->dev_flags))
1160 mgmt_le_enable_complete(hdev, sent->le, status);
1161
1162 if (status)
1163 return;
1164
1165 cp.page = 0x01;
1166 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
1167 }
1168
1169 static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1170 {
1171 BT_DBG("%s status 0x%x", hdev->name, status);
1172
1173 if (status) {
1174 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1175 hci_conn_check_pending(hdev);
1176 hci_dev_lock(hdev);
1177 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1178 mgmt_start_discovery_failed(hdev, status);
1179 hci_dev_unlock(hdev);
1180 return;
1181 }
1182
1183 set_bit(HCI_INQUIRY, &hdev->flags);
1184
1185 hci_dev_lock(hdev);
1186 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1187 hci_dev_unlock(hdev);
1188 }
1189
1190 static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1191 {
1192 struct hci_cp_create_conn *cp;
1193 struct hci_conn *conn;
1194
1195 BT_DBG("%s status 0x%x", hdev->name, status);
1196
1197 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1198 if (!cp)
1199 return;
1200
1201 hci_dev_lock(hdev);
1202
1203 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1204
1205 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
1206
1207 if (status) {
1208 if (conn && conn->state == BT_CONNECT) {
1209 if (status != 0x0c || conn->attempt > 2) {
1210 conn->state = BT_CLOSED;
1211 hci_proto_connect_cfm(conn, status);
1212 hci_conn_del(conn);
1213 } else
1214 conn->state = BT_CONNECT2;
1215 }
1216 } else {
1217 if (!conn) {
1218 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1219 if (conn) {
1220 conn->out = true;
1221 conn->link_mode |= HCI_LM_MASTER;
1222 } else
1223 BT_ERR("No memory for new connection");
1224 }
1225 }
1226
1227 hci_dev_unlock(hdev);
1228 }
1229
1230 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1231 {
1232 struct hci_cp_add_sco *cp;
1233 struct hci_conn *acl, *sco;
1234 __u16 handle;
1235
1236 BT_DBG("%s status 0x%x", hdev->name, status);
1237
1238 if (!status)
1239 return;
1240
1241 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1242 if (!cp)
1243 return;
1244
1245 handle = __le16_to_cpu(cp->handle);
1246
1247 BT_DBG("%s handle %d", hdev->name, handle);
1248
1249 hci_dev_lock(hdev);
1250
1251 acl = hci_conn_hash_lookup_handle(hdev, handle);
1252 if (acl) {
1253 sco = acl->link;
1254 if (sco) {
1255 sco->state = BT_CLOSED;
1256
1257 hci_proto_connect_cfm(sco, status);
1258 hci_conn_del(sco);
1259 }
1260 }
1261
1262 hci_dev_unlock(hdev);
1263 }
1264
1265 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1266 {
1267 struct hci_cp_auth_requested *cp;
1268 struct hci_conn *conn;
1269
1270 BT_DBG("%s status 0x%x", hdev->name, status);
1271
1272 if (!status)
1273 return;
1274
1275 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1276 if (!cp)
1277 return;
1278
1279 hci_dev_lock(hdev);
1280
1281 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1282 if (conn) {
1283 if (conn->state == BT_CONFIG) {
1284 hci_proto_connect_cfm(conn, status);
1285 hci_conn_put(conn);
1286 }
1287 }
1288
1289 hci_dev_unlock(hdev);
1290 }
1291
1292 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1293 {
1294 struct hci_cp_set_conn_encrypt *cp;
1295 struct hci_conn *conn;
1296
1297 BT_DBG("%s status 0x%x", hdev->name, status);
1298
1299 if (!status)
1300 return;
1301
1302 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1303 if (!cp)
1304 return;
1305
1306 hci_dev_lock(hdev);
1307
1308 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1309 if (conn) {
1310 if (conn->state == BT_CONFIG) {
1311 hci_proto_connect_cfm(conn, status);
1312 hci_conn_put(conn);
1313 }
1314 }
1315
1316 hci_dev_unlock(hdev);
1317 }
1318
1319 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1320 struct hci_conn *conn)
1321 {
1322 if (conn->state != BT_CONFIG || !conn->out)
1323 return 0;
1324
1325 if (conn->pending_sec_level == BT_SECURITY_SDP)
1326 return 0;
1327
1328 /* Only request authentication for SSP connections or non-SSP
1329 * devices with sec_level HIGH or if MITM protection is requested */
1330 if (!hci_conn_ssp_enabled(conn) &&
1331 conn->pending_sec_level != BT_SECURITY_HIGH &&
1332 !(conn->auth_type & 0x01))
1333 return 0;
1334
1335 return 1;
1336 }
1337
1338 static inline int hci_resolve_name(struct hci_dev *hdev, struct inquiry_entry *e)
1339 {
1340 struct hci_cp_remote_name_req cp;
1341
1342 memset(&cp, 0, sizeof(cp));
1343
1344 bacpy(&cp.bdaddr, &e->data.bdaddr);
1345 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1346 cp.pscan_mode = e->data.pscan_mode;
1347 cp.clock_offset = e->data.clock_offset;
1348
1349 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1350 }
1351
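/* Start a remote name request for the next inquiry cache entry that
 * still needs its name resolved.  Returns true if a request was sent,
 * false if nothing is left to resolve or sending the request failed. */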
1352 static bool hci_resolve_next_name(struct hci_dev *hdev)
1353 {
1354 struct discovery_state *discov = &hdev->discovery;
1355 struct inquiry_entry *e;
1356
1357 if (list_empty(&discov->resolve))
1358 return false;
1359
1360 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1361 if (hci_resolve_name(hdev, e) == 0) {
1362 e->name_state = NAME_PENDING;
1363 return true;
1364 }
1365
1366 return false;
1367 }
1368
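/* Handle the outcome of a remote name lookup: signal the connection
 * over mgmt if needed, update the matching inquiry cache entry, report
 * the received name, and either start resolving the next pending name
 * or mark discovery as stopped. */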
1369 static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1370 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1371 {
1372 struct discovery_state *discov = &hdev->discovery;
1373 struct inquiry_entry *e;
1374
1375 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1376 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0,
1377 name, name_len, conn->dev_class);
1378
1379 if (discov->state == DISCOVERY_STOPPED)
1380 return;
1381
1382 if (discov->state == DISCOVERY_STOPPING)
1383 goto discov_complete;
1384
1385 if (discov->state != DISCOVERY_RESOLVING)
1386 return;
1387
1388 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1389 if (e) {
1390 e->name_state = NAME_KNOWN;
1391 list_del(&e->list);
1392 if (name)
1393 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1394 e->data.rssi, name, name_len);
1395 }
1396
1397 if (hci_resolve_next_name(hdev))
1398 return;
1399
1400 discov_complete:
1401 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1402 }
1403
1404 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1405 {
1406 struct hci_cp_remote_name_req *cp;
1407 struct hci_conn *conn;
1408
1409 BT_DBG("%s status 0x%x", hdev->name, status);
1410
1411 /* If successful, wait for the name req complete event before
1412 * checking whether authentication is needed */
1413 if (!status)
1414 return;
1415
1416 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1417 if (!cp)
1418 return;
1419
1420 hci_dev_lock(hdev);
1421
1422 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1423
1424 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1425 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1426
1427 if (!conn)
1428 goto unlock;
1429
1430 if (!hci_outgoing_auth_needed(hdev, conn))
1431 goto unlock;
1432
1433 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1434 struct hci_cp_auth_requested cp;
1435 cp.handle = __cpu_to_le16(conn->handle);
1436 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1437 }
1438
1439 unlock:
1440 hci_dev_unlock(hdev);
1441 }
1442
1443 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1444 {
1445 struct hci_cp_read_remote_features *cp;
1446 struct hci_conn *conn;
1447
1448 BT_DBG("%s status 0x%x", hdev->name, status);
1449
1450 if (!status)
1451 return;
1452
1453 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1454 if (!cp)
1455 return;
1456
1457 hci_dev_lock(hdev);
1458
1459 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1460 if (conn) {
1461 if (conn->state == BT_CONFIG) {
1462 hci_proto_connect_cfm(conn, status);
1463 hci_conn_put(conn);
1464 }
1465 }
1466
1467 hci_dev_unlock(hdev);
1468 }
1469
1470 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1471 {
1472 struct hci_cp_read_remote_ext_features *cp;
1473 struct hci_conn *conn;
1474
1475 BT_DBG("%s status 0x%x", hdev->name, status);
1476
1477 if (!status)
1478 return;
1479
1480 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1481 if (!cp)
1482 return;
1483
1484 hci_dev_lock(hdev);
1485
1486 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1487 if (conn) {
1488 if (conn->state == BT_CONFIG) {
1489 hci_proto_connect_cfm(conn, status);
1490 hci_conn_put(conn);
1491 }
1492 }
1493
1494 hci_dev_unlock(hdev);
1495 }
1496
1497 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1498 {
1499 struct hci_cp_setup_sync_conn *cp;
1500 struct hci_conn *acl, *sco;
1501 __u16 handle;
1502
1503 BT_DBG("%s status 0x%x", hdev->name, status);
1504
1505 if (!status)
1506 return;
1507
1508 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1509 if (!cp)
1510 return;
1511
1512 handle = __le16_to_cpu(cp->handle);
1513
1514 BT_DBG("%s handle %d", hdev->name, handle);
1515
1516 hci_dev_lock(hdev);
1517
1518 acl = hci_conn_hash_lookup_handle(hdev, handle);
1519 if (acl) {
1520 sco = acl->link;
1521 if (sco) {
1522 sco->state = BT_CLOSED;
1523
1524 hci_proto_connect_cfm(sco, status);
1525 hci_conn_del(sco);
1526 }
1527 }
1528
1529 hci_dev_unlock(hdev);
1530 }
1531
1532 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1533 {
1534 struct hci_cp_sniff_mode *cp;
1535 struct hci_conn *conn;
1536
1537 BT_DBG("%s status 0x%x", hdev->name, status);
1538
1539 if (!status)
1540 return;
1541
1542 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1543 if (!cp)
1544 return;
1545
1546 hci_dev_lock(hdev);
1547
1548 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1549 if (conn) {
1550 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1551
1552 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1553 hci_sco_setup(conn, status);
1554 }
1555
1556 hci_dev_unlock(hdev);
1557 }
1558
1559 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1560 {
1561 struct hci_cp_exit_sniff_mode *cp;
1562 struct hci_conn *conn;
1563
1564 BT_DBG("%s status 0x%x", hdev->name, status);
1565
1566 if (!status)
1567 return;
1568
1569 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1570 if (!cp)
1571 return;
1572
1573 hci_dev_lock(hdev);
1574
1575 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1576 if (conn) {
1577 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1578
1579 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1580 hci_sco_setup(conn, status);
1581 }
1582
1583 hci_dev_unlock(hdev);
1584 }
1585
1586 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1587 {
1588 struct hci_cp_disconnect *cp;
1589 struct hci_conn *conn;
1590
1591 if (!status)
1592 return;
1593
1594 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1595 if (!cp)
1596 return;
1597
1598 hci_dev_lock(hdev);
1599
1600 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1601 if (conn)
1602 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1603 conn->dst_type, status);
1604
1605 hci_dev_unlock(hdev);
1606 }
1607
1608 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1609 {
1610 struct hci_cp_le_create_conn *cp;
1611 struct hci_conn *conn;
1612
1613 BT_DBG("%s status 0x%x", hdev->name, status);
1614
1615 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1616 if (!cp)
1617 return;
1618
1619 hci_dev_lock(hdev);
1620
1621 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1622
1623 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1624 conn);
1625
1626 if (status) {
1627 if (conn && conn->state == BT_CONNECT) {
1628 conn->state = BT_CLOSED;
1629 hci_proto_connect_cfm(conn, status);
1630 hci_conn_del(conn);
1631 }
1632 } else {
1633 if (!conn) {
1634 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1635 if (conn) {
1636 conn->dst_type = cp->peer_addr_type;
1637 conn->out = true;
1638 } else {
1639 BT_ERR("No memory for new connection");
1640 }
1641 }
1642 }
1643
1644 hci_dev_unlock(hdev);
1645 }
1646
1647 static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1648 {
1649 BT_DBG("%s status 0x%x", hdev->name, status);
1650 }
1651
1652 static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1653 {
1654 __u8 status = *((__u8 *) skb->data);
1655 struct discovery_state *discov = &hdev->discovery;
1656 struct inquiry_entry *e;
1657
1658 BT_DBG("%s status %d", hdev->name, status);
1659
1660 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1661
1662 hci_conn_check_pending(hdev);
1663
1664 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1665 return;
1666
1667 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1668 return;
1669
1670 hci_dev_lock(hdev);
1671
1672 if (discov->state != DISCOVERY_FINDING)
1673 goto unlock;
1674
1675 if (list_empty(&discov->resolve)) {
1676 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1677 goto unlock;
1678 }
1679
1680 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1681 if (e && hci_resolve_name(hdev, e) == 0) {
1682 e->name_state = NAME_PENDING;
1683 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1684 } else {
1685 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1686 }
1687
1688 unlock:
1689 hci_dev_unlock(hdev);
1690 }
1691
1692 static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1693 {
1694 struct inquiry_data data;
1695 struct inquiry_info *info = (void *) (skb->data + 1);
1696 int num_rsp = *((__u8 *) skb->data);
1697
1698 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1699
1700 if (!num_rsp)
1701 return;
1702
1703 hci_dev_lock(hdev);
1704
1705 for (; num_rsp; num_rsp--, info++) {
1706 bool name_known, ssp;
1707
1708 bacpy(&data.bdaddr, &info->bdaddr);
1709 data.pscan_rep_mode = info->pscan_rep_mode;
1710 data.pscan_period_mode = info->pscan_period_mode;
1711 data.pscan_mode = info->pscan_mode;
1712 memcpy(data.dev_class, info->dev_class, 3);
1713 data.clock_offset = info->clock_offset;
1714 data.rssi = 0x00;
1715 data.ssp_mode = 0x00;
1716
1717 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1718 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1719 info->dev_class, 0, !name_known, ssp,
1720 NULL, 0);
1721 }
1722
1723 hci_dev_unlock(hdev);
1724 }
1725
1726 static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1727 {
1728 struct hci_ev_conn_complete *ev = (void *) skb->data;
1729 struct hci_conn *conn;
1730
1731 BT_DBG("%s", hdev->name);
1732
1733 hci_dev_lock(hdev);
1734
1735 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1736 if (!conn) {
1737 if (ev->link_type != SCO_LINK)
1738 goto unlock;
1739
1740 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1741 if (!conn)
1742 goto unlock;
1743
1744 conn->type = SCO_LINK;
1745 }
1746
1747 if (!ev->status) {
1748 conn->handle = __le16_to_cpu(ev->handle);
1749
1750 if (conn->type == ACL_LINK) {
1751 conn->state = BT_CONFIG;
1752 hci_conn_hold(conn);
1753 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1754 } else
1755 conn->state = BT_CONNECTED;
1756
1757 hci_conn_hold_device(conn);
1758 hci_conn_add_sysfs(conn);
1759
1760 if (test_bit(HCI_AUTH, &hdev->flags))
1761 conn->link_mode |= HCI_LM_AUTH;
1762
1763 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1764 conn->link_mode |= HCI_LM_ENCRYPT;
1765
1766 /* Get remote features */
1767 if (conn->type == ACL_LINK) {
1768 struct hci_cp_read_remote_features cp;
1769 cp.handle = ev->handle;
1770 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1771 sizeof(cp), &cp);
1772 }
1773
1774 /* Set packet type for incoming connection */
1775 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1776 struct hci_cp_change_conn_ptype cp;
1777 cp.handle = ev->handle;
1778 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1779 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
1780 sizeof(cp), &cp);
1781 }
1782 } else {
1783 conn->state = BT_CLOSED;
1784 if (conn->type == ACL_LINK)
1785 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1786 conn->dst_type, ev->status);
1787 }
1788
1789 if (conn->type == ACL_LINK)
1790 hci_sco_setup(conn, ev->status);
1791
1792 if (ev->status) {
1793 hci_proto_connect_cfm(conn, ev->status);
1794 hci_conn_del(conn);
1795 } else if (ev->link_type != ACL_LINK)
1796 hci_proto_connect_cfm(conn, ev->status);
1797
1798 unlock:
1799 hci_dev_unlock(hdev);
1800
1801 hci_conn_check_pending(hdev);
1802 }
1803
1804 static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1805 {
1806 struct hci_ev_conn_request *ev = (void *) skb->data;
1807 int mask = hdev->link_mode;
1808
1809 BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1810 batostr(&ev->bdaddr), ev->link_type);
1811
1812 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1813
1814 if ((mask & HCI_LM_ACCEPT) &&
1815 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1816 /* Connection accepted */
1817 struct inquiry_entry *ie;
1818 struct hci_conn *conn;
1819
1820 hci_dev_lock(hdev);
1821
1822 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1823 if (ie)
1824 memcpy(ie->data.dev_class, ev->dev_class, 3);
1825
1826 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1827 if (!conn) {
1828 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1829 if (!conn) {
1830 BT_ERR("No memory for new connection");
1831 hci_dev_unlock(hdev);
1832 return;
1833 }
1834 }
1835
1836 memcpy(conn->dev_class, ev->dev_class, 3);
1837 conn->state = BT_CONNECT;
1838
1839 hci_dev_unlock(hdev);
1840
1841 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1842 struct hci_cp_accept_conn_req cp;
1843
1844 bacpy(&cp.bdaddr, &ev->bdaddr);
1845
1846 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1847 cp.role = 0x00; /* Become master */
1848 else
1849 cp.role = 0x01; /* Remain slave */
1850
1851 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
1852 sizeof(cp), &cp);
1853 } else {
1854 struct hci_cp_accept_sync_conn_req cp;
1855
1856 bacpy(&cp.bdaddr, &ev->bdaddr);
1857 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1858
1859 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
1860 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
1861 cp.max_latency = cpu_to_le16(0xffff);
1862 cp.content_format = cpu_to_le16(hdev->voice_setting);
1863 cp.retrans_effort = 0xff;
1864
1865 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1866 sizeof(cp), &cp);
1867 }
1868 } else {
1869 /* Connection rejected */
1870 struct hci_cp_reject_conn_req cp;
1871
1872 bacpy(&cp.bdaddr, &ev->bdaddr);
1873 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1874 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1875 }
1876 }
1877
1878 static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1879 {
1880 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1881 struct hci_conn *conn;
1882
1883 BT_DBG("%s status %d", hdev->name, ev->status);
1884
1885 hci_dev_lock(hdev);
1886
1887 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1888 if (!conn)
1889 goto unlock;
1890
1891 if (ev->status == 0)
1892 conn->state = BT_CLOSED;
1893
1894 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1895 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1896 if (ev->status != 0)
1897 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1898 conn->dst_type, ev->status);
1899 else
1900 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1901 conn->dst_type);
1902 }
1903
1904 if (ev->status == 0) {
1905 hci_proto_disconn_cfm(conn, ev->reason);
1906 hci_conn_del(conn);
1907 }
1908
1909 unlock:
1910 hci_dev_unlock(hdev);
1911 }
1912
1913 static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1914 {
1915 struct hci_ev_auth_complete *ev = (void *) skb->data;
1916 struct hci_conn *conn;
1917
1918 BT_DBG("%s status %d", hdev->name, ev->status);
1919
1920 hci_dev_lock(hdev);
1921
1922 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1923 if (!conn)
1924 goto unlock;
1925
1926 if (!ev->status) {
1927 if (!hci_conn_ssp_enabled(conn) &&
1928 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1929 BT_INFO("re-auth of legacy device is not possible.");
1930 } else {
1931 conn->link_mode |= HCI_LM_AUTH;
1932 conn->sec_level = conn->pending_sec_level;
1933 }
1934 } else {
1935 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1936 ev->status);
1937 }
1938
1939 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1940 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1941
1942 if (conn->state == BT_CONFIG) {
1943 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1944 struct hci_cp_set_conn_encrypt cp;
1945 cp.handle = ev->handle;
1946 cp.encrypt = 0x01;
1947 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1948 &cp);
1949 } else {
1950 conn->state = BT_CONNECTED;
1951 hci_proto_connect_cfm(conn, ev->status);
1952 hci_conn_put(conn);
1953 }
1954 } else {
1955 hci_auth_cfm(conn, ev->status);
1956
1957 hci_conn_hold(conn);
1958 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1959 hci_conn_put(conn);
1960 }
1961
1962 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1963 if (!ev->status) {
1964 struct hci_cp_set_conn_encrypt cp;
1965 cp.handle = ev->handle;
1966 cp.encrypt = 0x01;
1967 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1968 &cp);
1969 } else {
1970 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1971 hci_encrypt_cfm(conn, ev->status, 0x00);
1972 }
1973 }
1974
1975 unlock:
1976 hci_dev_unlock(hdev);
1977 }
1978
1979 static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1980 {
1981 struct hci_ev_remote_name *ev = (void *) skb->data;
1982 struct hci_conn *conn;
1983
1984 BT_DBG("%s", hdev->name);
1985
1986 hci_conn_check_pending(hdev);
1987
1988 hci_dev_lock(hdev);
1989
1990 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1991
1992 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1993 goto check_auth;
1994
1995 if (ev->status == 0)
1996 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1997 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1998 else
1999 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2000
2001 check_auth:
2002 if (!conn)
2003 goto unlock;
2004
2005 if (!hci_outgoing_auth_needed(hdev, conn))
2006 goto unlock;
2007
2008 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2009 struct hci_cp_auth_requested cp;
2010 cp.handle = __cpu_to_le16(conn->handle);
2011 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2012 }
2013
2014 unlock:
2015 hci_dev_unlock(hdev);
2016 }
2017
2018 static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2019 {
2020 struct hci_ev_encrypt_change *ev = (void *) skb->data;
2021 struct hci_conn *conn;
2022
2023 BT_DBG("%s status %d", hdev->name, ev->status);
2024
2025 hci_dev_lock(hdev);
2026
2027 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2028 if (conn) {
2029 if (!ev->status) {
2030 if (ev->encrypt) {
2031 /* Encryption implies authentication */
2032 conn->link_mode |= HCI_LM_AUTH;
2033 conn->link_mode |= HCI_LM_ENCRYPT;
2034 conn->sec_level = conn->pending_sec_level;
2035 } else
2036 conn->link_mode &= ~HCI_LM_ENCRYPT;
2037 }
2038
2039 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2040
2041 if (conn->state == BT_CONFIG) {
2042 if (!ev->status)
2043 conn->state = BT_CONNECTED;
2044
2045 hci_proto_connect_cfm(conn, ev->status);
2046 hci_conn_put(conn);
2047 } else
2048 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2049 }
2050
2051 hci_dev_unlock(hdev);
2052 }
2053
2054 static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2055 {
2056 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2057 struct hci_conn *conn;
2058
2059 BT_DBG("%s status %d", hdev->name, ev->status);
2060
2061 hci_dev_lock(hdev);
2062
2063 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2064 if (conn) {
2065 if (!ev->status)
2066 conn->link_mode |= HCI_LM_SECURE;
2067
2068 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2069
2070 hci_key_change_cfm(conn, ev->status);
2071 }
2072
2073 hci_dev_unlock(hdev);
2074 }
2075
2076 static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2077 {
2078 struct hci_ev_remote_features *ev = (void *) skb->data;
2079 struct hci_conn *conn;
2080
2081 BT_DBG("%s status %d", hdev->name, ev->status);
2082
2083 hci_dev_lock(hdev);
2084
2085 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2086 if (!conn)
2087 goto unlock;
2088
2089 if (!ev->status)
2090 memcpy(conn->features, ev->features, 8);
2091
2092 if (conn->state != BT_CONFIG)
2093 goto unlock;
2094
2095 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2096 struct hci_cp_read_remote_ext_features cp;
2097 cp.handle = ev->handle;
2098 cp.page = 0x01;
2099 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2100 sizeof(cp), &cp);
2101 goto unlock;
2102 }
2103
2104 if (!ev->status) {
2105 struct hci_cp_remote_name_req cp;
2106 memset(&cp, 0, sizeof(cp));
2107 bacpy(&cp.bdaddr, &conn->dst);
2108 cp.pscan_rep_mode = 0x02;
2109 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2110 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2111 mgmt_device_connected(hdev, &conn->dst, conn->type,
2112 conn->dst_type, 0, NULL, 0,
2113 conn->dev_class);
2114
2115 if (!hci_outgoing_auth_needed(hdev, conn)) {
2116 conn->state = BT_CONNECTED;
2117 hci_proto_connect_cfm(conn, ev->status);
2118 hci_conn_put(conn);
2119 }
2120
2121 unlock:
2122 hci_dev_unlock(hdev);
2123 }
2124
2125 static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2126 {
2127 BT_DBG("%s", hdev->name);
2128 }
2129
2130 static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2131 {
2132 BT_DBG("%s", hdev->name);
2133 }
2134
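/* Dispatch a Command Complete event to the matching hci_cc_* handler and,
 * if the controller reports free command slots, restart command sending. */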
2135 static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2136 {
2137 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2138 __u16 opcode;
2139
2140 skb_pull(skb, sizeof(*ev));
2141
2142 opcode = __le16_to_cpu(ev->opcode);
2143
2144 switch (opcode) {
2145 case HCI_OP_INQUIRY_CANCEL:
2146 hci_cc_inquiry_cancel(hdev, skb);
2147 break;
2148
2149 case HCI_OP_EXIT_PERIODIC_INQ:
2150 hci_cc_exit_periodic_inq(hdev, skb);
2151 break;
2152
2153 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2154 hci_cc_remote_name_req_cancel(hdev, skb);
2155 break;
2156
2157 case HCI_OP_ROLE_DISCOVERY:
2158 hci_cc_role_discovery(hdev, skb);
2159 break;
2160
2161 case HCI_OP_READ_LINK_POLICY:
2162 hci_cc_read_link_policy(hdev, skb);
2163 break;
2164
2165 case HCI_OP_WRITE_LINK_POLICY:
2166 hci_cc_write_link_policy(hdev, skb);
2167 break;
2168
2169 case HCI_OP_READ_DEF_LINK_POLICY:
2170 hci_cc_read_def_link_policy(hdev, skb);
2171 break;
2172
2173 case HCI_OP_WRITE_DEF_LINK_POLICY:
2174 hci_cc_write_def_link_policy(hdev, skb);
2175 break;
2176
2177 case HCI_OP_RESET:
2178 hci_cc_reset(hdev, skb);
2179 break;
2180
2181 case HCI_OP_WRITE_LOCAL_NAME:
2182 hci_cc_write_local_name(hdev, skb);
2183 break;
2184
2185 case HCI_OP_READ_LOCAL_NAME:
2186 hci_cc_read_local_name(hdev, skb);
2187 break;
2188
2189 case HCI_OP_WRITE_AUTH_ENABLE:
2190 hci_cc_write_auth_enable(hdev, skb);
2191 break;
2192
2193 case HCI_OP_WRITE_ENCRYPT_MODE:
2194 hci_cc_write_encrypt_mode(hdev, skb);
2195 break;
2196
2197 case HCI_OP_WRITE_SCAN_ENABLE:
2198 hci_cc_write_scan_enable(hdev, skb);
2199 break;
2200
2201 case HCI_OP_READ_CLASS_OF_DEV:
2202 hci_cc_read_class_of_dev(hdev, skb);
2203 break;
2204
2205 case HCI_OP_WRITE_CLASS_OF_DEV:
2206 hci_cc_write_class_of_dev(hdev, skb);
2207 break;
2208
2209 case HCI_OP_READ_VOICE_SETTING:
2210 hci_cc_read_voice_setting(hdev, skb);
2211 break;
2212
2213 case HCI_OP_WRITE_VOICE_SETTING:
2214 hci_cc_write_voice_setting(hdev, skb);
2215 break;
2216
2217 case HCI_OP_HOST_BUFFER_SIZE:
2218 hci_cc_host_buffer_size(hdev, skb);
2219 break;
2220
2221 case HCI_OP_WRITE_SSP_MODE:
2222 hci_cc_write_ssp_mode(hdev, skb);
2223 break;
2224
2225 case HCI_OP_READ_LOCAL_VERSION:
2226 hci_cc_read_local_version(hdev, skb);
2227 break;
2228
2229 case HCI_OP_READ_LOCAL_COMMANDS:
2230 hci_cc_read_local_commands(hdev, skb);
2231 break;
2232
2233 case HCI_OP_READ_LOCAL_FEATURES:
2234 hci_cc_read_local_features(hdev, skb);
2235 break;
2236
2237 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2238 hci_cc_read_local_ext_features(hdev, skb);
2239 break;
2240
2241 case HCI_OP_READ_BUFFER_SIZE:
2242 hci_cc_read_buffer_size(hdev, skb);
2243 break;
2244
2245 case HCI_OP_READ_BD_ADDR:
2246 hci_cc_read_bd_addr(hdev, skb);
2247 break;
2248
2249 case HCI_OP_READ_DATA_BLOCK_SIZE:
2250 hci_cc_read_data_block_size(hdev, skb);
2251 break;
2252
2253 case HCI_OP_WRITE_CA_TIMEOUT:
2254 hci_cc_write_ca_timeout(hdev, skb);
2255 break;
2256
2257 case HCI_OP_READ_FLOW_CONTROL_MODE:
2258 hci_cc_read_flow_control_mode(hdev, skb);
2259 break;
2260
2261 case HCI_OP_READ_LOCAL_AMP_INFO:
2262 hci_cc_read_local_amp_info(hdev, skb);
2263 break;
2264
2265 case HCI_OP_DELETE_STORED_LINK_KEY:
2266 hci_cc_delete_stored_link_key(hdev, skb);
2267 break;
2268
2269 case HCI_OP_SET_EVENT_MASK:
2270 hci_cc_set_event_mask(hdev, skb);
2271 break;
2272
2273 case HCI_OP_WRITE_INQUIRY_MODE:
2274 hci_cc_write_inquiry_mode(hdev, skb);
2275 break;
2276
2277 case HCI_OP_READ_INQ_RSP_TX_POWER:
2278 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2279 break;
2280
2281 case HCI_OP_SET_EVENT_FLT:
2282 hci_cc_set_event_flt(hdev, skb);
2283 break;
2284
2285 case HCI_OP_PIN_CODE_REPLY:
2286 hci_cc_pin_code_reply(hdev, skb);
2287 break;
2288
2289 case HCI_OP_PIN_CODE_NEG_REPLY:
2290 hci_cc_pin_code_neg_reply(hdev, skb);
2291 break;
2292
2293 case HCI_OP_READ_LOCAL_OOB_DATA:
2294 hci_cc_read_local_oob_data_reply(hdev, skb);
2295 break;
2296
2297 case HCI_OP_LE_READ_BUFFER_SIZE:
2298 hci_cc_le_read_buffer_size(hdev, skb);
2299 break;
2300
2301 case HCI_OP_USER_CONFIRM_REPLY:
2302 hci_cc_user_confirm_reply(hdev, skb);
2303 break;
2304
2305 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2306 hci_cc_user_confirm_neg_reply(hdev, skb);
2307 break;
2308
2309 case HCI_OP_USER_PASSKEY_REPLY:
2310 hci_cc_user_passkey_reply(hdev, skb);
2311 break;
2312
2313 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2314 		hci_cc_user_passkey_neg_reply(hdev, skb);
2315 		break;
2316 case HCI_OP_LE_SET_SCAN_PARAM:
2317 hci_cc_le_set_scan_param(hdev, skb);
2318 break;
2319
2320 case HCI_OP_LE_SET_SCAN_ENABLE:
2321 hci_cc_le_set_scan_enable(hdev, skb);
2322 break;
2323
2324 case HCI_OP_LE_LTK_REPLY:
2325 hci_cc_le_ltk_reply(hdev, skb);
2326 break;
2327
2328 case HCI_OP_LE_LTK_NEG_REPLY:
2329 hci_cc_le_ltk_neg_reply(hdev, skb);
2330 break;
2331
2332 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2333 hci_cc_write_le_host_supported(hdev, skb);
2334 break;
2335
2336 default:
2337 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2338 break;
2339 }
2340
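	/* A completed command other than NOP means the controller responded,
	 * so the command timeout timer can be stopped. */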
2341 if (ev->opcode != HCI_OP_NOP)
2342 del_timer(&hdev->cmd_timer);
2343
2344 if (ev->ncmd) {
2345 atomic_set(&hdev->cmd_cnt, 1);
2346 if (!skb_queue_empty(&hdev->cmd_q))
2347 queue_work(hdev->workqueue, &hdev->cmd_work);
2348 }
2349 }
2350
2351 static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2352 {
2353 struct hci_ev_cmd_status *ev = (void *) skb->data;
2354 __u16 opcode;
2355
2356 skb_pull(skb, sizeof(*ev));
2357
2358 opcode = __le16_to_cpu(ev->opcode);
2359
2360 switch (opcode) {
2361 case HCI_OP_INQUIRY:
2362 hci_cs_inquiry(hdev, ev->status);
2363 break;
2364
2365 case HCI_OP_CREATE_CONN:
2366 hci_cs_create_conn(hdev, ev->status);
2367 break;
2368
2369 case HCI_OP_ADD_SCO:
2370 hci_cs_add_sco(hdev, ev->status);
2371 break;
2372
2373 case HCI_OP_AUTH_REQUESTED:
2374 hci_cs_auth_requested(hdev, ev->status);
2375 break;
2376
2377 case HCI_OP_SET_CONN_ENCRYPT:
2378 hci_cs_set_conn_encrypt(hdev, ev->status);
2379 break;
2380
2381 case HCI_OP_REMOTE_NAME_REQ:
2382 hci_cs_remote_name_req(hdev, ev->status);
2383 break;
2384
2385 case HCI_OP_READ_REMOTE_FEATURES:
2386 hci_cs_read_remote_features(hdev, ev->status);
2387 break;
2388
2389 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2390 hci_cs_read_remote_ext_features(hdev, ev->status);
2391 break;
2392
2393 case HCI_OP_SETUP_SYNC_CONN:
2394 hci_cs_setup_sync_conn(hdev, ev->status);
2395 break;
2396
2397 case HCI_OP_SNIFF_MODE:
2398 hci_cs_sniff_mode(hdev, ev->status);
2399 break;
2400
2401 case HCI_OP_EXIT_SNIFF_MODE:
2402 hci_cs_exit_sniff_mode(hdev, ev->status);
2403 break;
2404
2405 case HCI_OP_DISCONNECT:
2406 hci_cs_disconnect(hdev, ev->status);
2407 break;
2408
2409 case HCI_OP_LE_CREATE_CONN:
2410 hci_cs_le_create_conn(hdev, ev->status);
2411 break;
2412
2413 case HCI_OP_LE_START_ENC:
2414 hci_cs_le_start_enc(hdev, ev->status);
2415 break;
2416
2417 default:
2418 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2419 break;
2420 }
2421
2422 if (ev->opcode != HCI_OP_NOP)
2423 del_timer(&hdev->cmd_timer);
2424
2425 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2426 atomic_set(&hdev->cmd_cnt, 1);
2427 if (!skb_queue_empty(&hdev->cmd_q))
2428 queue_work(hdev->workqueue, &hdev->cmd_work);
2429 }
2430 }
2431
2432 static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2433 {
2434 struct hci_ev_role_change *ev = (void *) skb->data;
2435 struct hci_conn *conn;
2436
2437 BT_DBG("%s status %d", hdev->name, ev->status);
2438
2439 hci_dev_lock(hdev);
2440
2441 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2442 if (conn) {
2443 if (!ev->status) {
2444 if (ev->role)
2445 conn->link_mode &= ~HCI_LM_MASTER;
2446 else
2447 conn->link_mode |= HCI_LM_MASTER;
2448 }
2449
2450 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2451
2452 hci_role_switch_cfm(conn, ev->status, ev->role);
2453 }
2454
2455 hci_dev_unlock(hdev);
2456 }
2457
2458 static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2459 {
2460 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2461 int i;
2462
2463 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2464 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2465 return;
2466 }
2467
2468 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2469 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2470 BT_DBG("%s bad parameters", hdev->name);
2471 return;
2472 }
2473
2474 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2475
2476 for (i = 0; i < ev->num_hndl; i++) {
2477 struct hci_comp_pkts_info *info = &ev->handles[i];
2478 struct hci_conn *conn;
2479 __u16 handle, count;
2480
2481 handle = __le16_to_cpu(info->handle);
2482 count = __le16_to_cpu(info->count);
2483
2484 conn = hci_conn_hash_lookup_handle(hdev, handle);
2485 if (!conn)
2486 continue;
2487
2488 conn->sent -= count;
2489
2490 switch (conn->type) {
2491 case ACL_LINK:
2492 hdev->acl_cnt += count;
2493 if (hdev->acl_cnt > hdev->acl_pkts)
2494 hdev->acl_cnt = hdev->acl_pkts;
2495 break;
2496
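		/* Controllers without a dedicated LE buffer pool (le_pkts == 0)
		 * share the ACL buffers for LE traffic. */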
2497 case LE_LINK:
2498 if (hdev->le_pkts) {
2499 hdev->le_cnt += count;
2500 if (hdev->le_cnt > hdev->le_pkts)
2501 hdev->le_cnt = hdev->le_pkts;
2502 } else {
2503 hdev->acl_cnt += count;
2504 if (hdev->acl_cnt > hdev->acl_pkts)
2505 hdev->acl_cnt = hdev->acl_pkts;
2506 }
2507 break;
2508
2509 case SCO_LINK:
2510 hdev->sco_cnt += count;
2511 if (hdev->sco_cnt > hdev->sco_pkts)
2512 hdev->sco_cnt = hdev->sco_pkts;
2513 break;
2514
2515 default:
2516 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2517 break;
2518 }
2519 }
2520
2521 queue_work(hdev->workqueue, &hdev->tx_work);
2522 }
2523
2524 static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2525 struct sk_buff *skb)
2526 {
2527 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2528 int i;
2529
2530 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2531 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2532 return;
2533 }
2534
2535 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2536 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2537 BT_DBG("%s bad parameters", hdev->name);
2538 return;
2539 }
2540
2541 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2542 ev->num_hndl);
2543
2544 for (i = 0; i < ev->num_hndl; i++) {
2545 struct hci_comp_blocks_info *info = &ev->handles[i];
2546 struct hci_conn *conn;
2547 __u16 handle, block_count;
2548
2549 handle = __le16_to_cpu(info->handle);
2550 block_count = __le16_to_cpu(info->blocks);
2551
2552 conn = hci_conn_hash_lookup_handle(hdev, handle);
2553 if (!conn)
2554 continue;
2555
2556 conn->sent -= block_count;
2557
2558 switch (conn->type) {
2559 case ACL_LINK:
2560 hdev->block_cnt += block_count;
2561 if (hdev->block_cnt > hdev->num_blocks)
2562 hdev->block_cnt = hdev->num_blocks;
2563 break;
2564
2565 default:
2566 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2567 break;
2568 }
2569 }
2570
2571 queue_work(hdev->workqueue, &hdev->tx_work);
2572 }
2573
2574 static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2575 {
2576 struct hci_ev_mode_change *ev = (void *) skb->data;
2577 struct hci_conn *conn;
2578
2579 BT_DBG("%s status %d", hdev->name, ev->status);
2580
2581 hci_dev_lock(hdev);
2582
2583 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2584 if (conn) {
2585 conn->mode = ev->mode;
2586 conn->interval = __le16_to_cpu(ev->interval);
2587
2588 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2589 if (conn->mode == HCI_CM_ACTIVE)
2590 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2591 else
2592 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2593 }
2594
2595 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2596 hci_sco_setup(conn, ev->status);
2597 }
2598
2599 hci_dev_unlock(hdev);
2600 }
2601
2602 static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2603 {
2604 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2605 struct hci_conn *conn;
2606
2607 BT_DBG("%s", hdev->name);
2608
2609 hci_dev_lock(hdev);
2610
2611 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2612 if (!conn)
2613 goto unlock;
2614
2615 if (conn->state == BT_CONNECTED) {
2616 hci_conn_hold(conn);
2617 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2618 hci_conn_put(conn);
2619 }
2620
2621 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2622 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2623 sizeof(ev->bdaddr), &ev->bdaddr);
2624 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2625 u8 secure;
2626
2627 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2628 secure = 1;
2629 else
2630 secure = 0;
2631
2632 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2633 }
2634
2635 unlock:
2636 hci_dev_unlock(hdev);
2637 }
2638
2639 static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2640 {
2641 struct hci_ev_link_key_req *ev = (void *) skb->data;
2642 struct hci_cp_link_key_reply cp;
2643 struct hci_conn *conn;
2644 struct link_key *key;
2645
2646 BT_DBG("%s", hdev->name);
2647
2648 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2649 return;
2650
2651 hci_dev_lock(hdev);
2652
2653 key = hci_find_link_key(hdev, &ev->bdaddr);
2654 if (!key) {
2655 BT_DBG("%s link key not found for %s", hdev->name,
2656 batostr(&ev->bdaddr));
2657 goto not_found;
2658 }
2659
2660 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2661 batostr(&ev->bdaddr));
2662
2663 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2664 key->type == HCI_LK_DEBUG_COMBINATION) {
2665 BT_DBG("%s ignoring debug key", hdev->name);
2666 goto not_found;
2667 }
2668
2669 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2670 if (conn) {
2671 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2672 conn->auth_type != 0xff &&
2673 (conn->auth_type & 0x01)) {
2674 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2675 goto not_found;
2676 }
2677
2678 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2679 conn->pending_sec_level == BT_SECURITY_HIGH) {
2680 BT_DBG("%s ignoring key unauthenticated for high \
2681 security", hdev->name);
2682 goto not_found;
2683 }
2684
2685 conn->key_type = key->type;
2686 conn->pin_length = key->pin_len;
2687 }
2688
2689 bacpy(&cp.bdaddr, &ev->bdaddr);
2690 memcpy(cp.link_key, key->val, 16);
2691
2692 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2693
2694 hci_dev_unlock(hdev);
2695
2696 return;
2697
2698 not_found:
2699 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2700 hci_dev_unlock(hdev);
2701 }
2702
2703 static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2704 {
2705 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2706 struct hci_conn *conn;
2707 u8 pin_len = 0;
2708
2709 BT_DBG("%s", hdev->name);
2710
2711 hci_dev_lock(hdev);
2712
2713 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2714 if (conn) {
2715 hci_conn_hold(conn);
2716 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2717 pin_len = conn->pin_length;
2718
2719 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2720 conn->key_type = ev->key_type;
2721
2722 hci_conn_put(conn);
2723 }
2724
2725 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2726 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2727 ev->key_type, pin_len);
2728
2729 hci_dev_unlock(hdev);
2730 }
2731
2732 static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2733 {
2734 struct hci_ev_clock_offset *ev = (void *) skb->data;
2735 struct hci_conn *conn;
2736
2737 BT_DBG("%s status %d", hdev->name, ev->status);
2738
2739 hci_dev_lock(hdev);
2740
2741 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2742 if (conn && !ev->status) {
2743 struct inquiry_entry *ie;
2744
2745 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2746 if (ie) {
2747 ie->data.clock_offset = ev->clock_offset;
2748 ie->timestamp = jiffies;
2749 }
2750 }
2751
2752 hci_dev_unlock(hdev);
2753 }
2754
2755 static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2756 {
2757 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2758 struct hci_conn *conn;
2759
2760 BT_DBG("%s status %d", hdev->name, ev->status);
2761
2762 hci_dev_lock(hdev);
2763
2764 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2765 if (conn && !ev->status)
2766 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2767
2768 hci_dev_unlock(hdev);
2769 }
2770
2771 static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2772 {
2773 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2774 struct inquiry_entry *ie;
2775
2776 BT_DBG("%s", hdev->name);
2777
2778 hci_dev_lock(hdev);
2779
2780 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2781 if (ie) {
2782 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2783 ie->timestamp = jiffies;
2784 }
2785
2786 hci_dev_unlock(hdev);
2787 }
2788
2789 static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2790 {
2791 struct inquiry_data data;
2792 int num_rsp = *((__u8 *) skb->data);
2793 bool name_known, ssp;
2794
2795 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2796
2797 if (!num_rsp)
2798 return;
2799
2800 hci_dev_lock(hdev);
2801
2802 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2803 struct inquiry_info_with_rssi_and_pscan_mode *info;
2804 info = (void *) (skb->data + 1);
2805
2806 for (; num_rsp; num_rsp--, info++) {
2807 bacpy(&data.bdaddr, &info->bdaddr);
2808 data.pscan_rep_mode = info->pscan_rep_mode;
2809 data.pscan_period_mode = info->pscan_period_mode;
2810 data.pscan_mode = info->pscan_mode;
2811 memcpy(data.dev_class, info->dev_class, 3);
2812 data.clock_offset = info->clock_offset;
2813 data.rssi = info->rssi;
2814 data.ssp_mode = 0x00;
2815
2816 name_known = hci_inquiry_cache_update(hdev, &data,
2817 false, &ssp);
2818 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2819 info->dev_class, info->rssi,
2820 !name_known, ssp, NULL, 0);
2821 }
2822 } else {
2823 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2824
2825 for (; num_rsp; num_rsp--, info++) {
2826 bacpy(&data.bdaddr, &info->bdaddr);
2827 data.pscan_rep_mode = info->pscan_rep_mode;
2828 data.pscan_period_mode = info->pscan_period_mode;
2829 data.pscan_mode = 0x00;
2830 memcpy(data.dev_class, info->dev_class, 3);
2831 data.clock_offset = info->clock_offset;
2832 data.rssi = info->rssi;
2833 data.ssp_mode = 0x00;
2834 name_known = hci_inquiry_cache_update(hdev, &data,
2835 false, &ssp);
2836 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2837 info->dev_class, info->rssi,
2838 !name_known, ssp, NULL, 0);
2839 }
2840 }
2841
2842 hci_dev_unlock(hdev);
2843 }
2844
2845 static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2846 {
2847 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2848 struct hci_conn *conn;
2849
2850 BT_DBG("%s", hdev->name);
2851
2852 hci_dev_lock(hdev);
2853
2854 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2855 if (!conn)
2856 goto unlock;
2857
2858 if (!ev->status && ev->page == 0x01) {
2859 struct inquiry_entry *ie;
2860
2861 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2862 if (ie)
2863 ie->data.ssp_mode = (ev->features[0] & 0x01);
2864
2865 if (ev->features[0] & 0x01)
2866 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2867 }
2868
2869 if (conn->state != BT_CONFIG)
2870 goto unlock;
2871
2872 if (!ev->status) {
2873 struct hci_cp_remote_name_req cp;
2874 memset(&cp, 0, sizeof(cp));
2875 bacpy(&cp.bdaddr, &conn->dst);
2876 cp.pscan_rep_mode = 0x02;
2877 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2878 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2879 mgmt_device_connected(hdev, &conn->dst, conn->type,
2880 conn->dst_type, 0, NULL, 0,
2881 conn->dev_class);
2882
2883 if (!hci_outgoing_auth_needed(hdev, conn)) {
2884 conn->state = BT_CONNECTED;
2885 hci_proto_connect_cfm(conn, ev->status);
2886 hci_conn_put(conn);
2887 }
2888
2889 unlock:
2890 hci_dev_unlock(hdev);
2891 }
2892
2893 static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2894 {
2895 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2896 struct hci_conn *conn;
2897
2898 BT_DBG("%s status %d", hdev->name, ev->status);
2899
2900 hci_dev_lock(hdev);
2901
2902 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2903 if (!conn) {
2904 if (ev->link_type == ESCO_LINK)
2905 goto unlock;
2906
2907 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2908 if (!conn)
2909 goto unlock;
2910
2911 conn->type = SCO_LINK;
2912 }
2913
2914 switch (ev->status) {
2915 case 0x00:
2916 conn->handle = __le16_to_cpu(ev->handle);
2917 conn->state = BT_CONNECTED;
2918
2919 hci_conn_hold_device(conn);
2920 hci_conn_add_sysfs(conn);
2921 break;
2922
2923 case 0x11: /* Unsupported Feature or Parameter Value */
2924 case 0x1c: /* SCO interval rejected */
2925 case 0x1a: /* Unsupported Remote Feature */
2926 case 0x1f: /* Unspecified error */
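		/* For failed outgoing attempts, retry the setup once with a
		 * fallback packet type before giving up on the connection. */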
2927 if (conn->out && conn->attempt < 2) {
2928 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2929 (hdev->esco_type & EDR_ESCO_MASK);
2930 hci_setup_sync(conn, conn->link->handle);
2931 goto unlock;
2932 }
2933 /* fall through */
2934
2935 default:
2936 conn->state = BT_CLOSED;
2937 break;
2938 }
2939
2940 hci_proto_connect_cfm(conn, ev->status);
2941 if (ev->status)
2942 hci_conn_del(conn);
2943
2944 unlock:
2945 hci_dev_unlock(hdev);
2946 }
2947
2948 static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2949 {
2950 BT_DBG("%s", hdev->name);
2951 }
2952
2953 static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2954 {
2955 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2956
2957 BT_DBG("%s status %d", hdev->name, ev->status);
2958 }
2959
2960 static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2961 {
2962 struct inquiry_data data;
2963 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2964 int num_rsp = *((__u8 *) skb->data);
2965
2966 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2967
2968 if (!num_rsp)
2969 return;
2970
2971 hci_dev_lock(hdev);
2972
2973 for (; num_rsp; num_rsp--, info++) {
2974 bool name_known, ssp;
2975
2976 bacpy(&data.bdaddr, &info->bdaddr);
2977 data.pscan_rep_mode = info->pscan_rep_mode;
2978 data.pscan_period_mode = info->pscan_period_mode;
2979 data.pscan_mode = 0x00;
2980 memcpy(data.dev_class, info->dev_class, 3);
2981 data.clock_offset = info->clock_offset;
2982 data.rssi = info->rssi;
2983 data.ssp_mode = 0x01;
2984
2985 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2986 name_known = eir_has_data_type(info->data,
2987 sizeof(info->data),
2988 EIR_NAME_COMPLETE);
2989 else
2990 name_known = true;
2991
2992 name_known = hci_inquiry_cache_update(hdev, &data, name_known,
2993 &ssp);
2994 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2995 info->dev_class, info->rssi,
2996 !name_known, ssp, info->data,
2997 sizeof(info->data));
2998 }
2999
3000 hci_dev_unlock(hdev);
3001 }
3002
3003 static inline u8 hci_get_auth_req(struct hci_conn *conn)
3004 {
3005 /* If remote requests dedicated bonding follow that lead */
3006 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3007 /* If both remote and local IO capabilities allow MITM
3008 * protection then require it, otherwise don't */
3009 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3010 return 0x02;
3011 else
3012 return 0x03;
3013 }
3014
3015 /* If remote requests no-bonding follow that lead */
3016 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3017 return conn->remote_auth | (conn->auth_type & 0x01);
3018
3019 return conn->auth_type;
3020 }
3021
3022 static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3023 {
3024 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3025 struct hci_conn *conn;
3026
3027 BT_DBG("%s", hdev->name);
3028
3029 hci_dev_lock(hdev);
3030
3031 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3032 if (!conn)
3033 goto unlock;
3034
3035 hci_conn_hold(conn);
3036
3037 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3038 goto unlock;
3039
3040 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3041 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3042 struct hci_cp_io_capability_reply cp;
3043
3044 bacpy(&cp.bdaddr, &ev->bdaddr);
3045 		/* Change the IO capability from KeyboardDisplay to
3046 		 * DisplayYesNo, since KeyboardDisplay is not supported by the BT spec. */
3047 cp.capability = (conn->io_capability == 0x04) ?
3048 0x01 : conn->io_capability;
3049 conn->auth_type = hci_get_auth_req(conn);
3050 cp.authentication = conn->auth_type;
3051
3052 if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
3053 hci_find_remote_oob_data(hdev, &conn->dst))
3054 cp.oob_data = 0x01;
3055 else
3056 cp.oob_data = 0x00;
3057
3058 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3059 sizeof(cp), &cp);
3060 } else {
3061 struct hci_cp_io_capability_neg_reply cp;
3062
3063 bacpy(&cp.bdaddr, &ev->bdaddr);
3064 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3065
3066 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3067 sizeof(cp), &cp);
3068 }
3069
3070 unlock:
3071 hci_dev_unlock(hdev);
3072 }
3073
3074 static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3075 {
3076 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3077 struct hci_conn *conn;
3078
3079 BT_DBG("%s", hdev->name);
3080
3081 hci_dev_lock(hdev);
3082
3083 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3084 if (!conn)
3085 goto unlock;
3086
3087 conn->remote_cap = ev->capability;
3088 conn->remote_auth = ev->authentication;
3089 if (ev->oob_data)
3090 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3091
3092 unlock:
3093 hci_dev_unlock(hdev);
3094 }
3095
3096 static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3097 struct sk_buff *skb)
3098 {
3099 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3100 int loc_mitm, rem_mitm, confirm_hint = 0;
3101 struct hci_conn *conn;
3102
3103 BT_DBG("%s", hdev->name);
3104
3105 hci_dev_lock(hdev);
3106
3107 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3108 goto unlock;
3109
3110 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3111 if (!conn)
3112 goto unlock;
3113
3114 loc_mitm = (conn->auth_type & 0x01);
3115 rem_mitm = (conn->remote_auth & 0x01);
3116
3117 /* If we require MITM but the remote device can't provide that
3118 * (it has NoInputNoOutput) then reject the confirmation
3119 * request. The only exception is when we're dedicated bonding
3120 * initiators (connect_cfm_cb set) since then we always have the MITM
3121 * bit set. */
3122 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3123 BT_DBG("Rejecting request: remote device can't provide MITM");
3124 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3125 sizeof(ev->bdaddr), &ev->bdaddr);
3126 goto unlock;
3127 }
3128
3129 	/* If no side requires MITM protection, auto-accept */
3130 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3131 (!rem_mitm || conn->io_capability == 0x03)) {
3132
3133 /* If we're not the initiators request authorization to
3134 * proceed from user space (mgmt_user_confirm with
3135 * confirm_hint set to 1). */
3136 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3137 BT_DBG("Confirming auto-accept as acceptor");
3138 confirm_hint = 1;
3139 goto confirm;
3140 }
3141
3142 BT_DBG("Auto-accept of user confirmation with %ums delay",
3143 hdev->auto_accept_delay);
3144
3145 if (hdev->auto_accept_delay > 0) {
3146 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3147 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3148 goto unlock;
3149 }
3150
3151 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3152 sizeof(ev->bdaddr), &ev->bdaddr);
3153 goto unlock;
3154 }
3155
3156 confirm:
3157 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3158 confirm_hint);
3159
3160 unlock:
3161 hci_dev_unlock(hdev);
3162 }
3163
3164 static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3165 struct sk_buff *skb)
3166 {
3167 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3168
3169 BT_DBG("%s", hdev->name);
3170
3171 hci_dev_lock(hdev);
3172
3173 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3174 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3175
3176 hci_dev_unlock(hdev);
3177 }
3178
3179 static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3180 {
3181 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3182 struct hci_conn *conn;
3183
3184 BT_DBG("%s", hdev->name);
3185
3186 hci_dev_lock(hdev);
3187
3188 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3189 if (!conn)
3190 goto unlock;
3191
3192 /* To avoid duplicate auth_failed events to user space we check
3193 * the HCI_CONN_AUTH_PEND flag which will be set if we
3194 * initiated the authentication. A traditional auth_complete
3195 	 * event is always produced when we are the initiator and is also
3196 	 * mapped to the mgmt_auth_failed event. */
3197 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
3198 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3199 ev->status);
3200
3201 hci_conn_put(conn);
3202
3203 unlock:
3204 hci_dev_unlock(hdev);
3205 }
3206
3207 static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3208 {
3209 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3210 struct inquiry_entry *ie;
3211
3212 BT_DBG("%s", hdev->name);
3213
3214 hci_dev_lock(hdev);
3215
3216 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3217 if (ie)
3218 ie->data.ssp_mode = (ev->features[0] & 0x01);
3219
3220 hci_dev_unlock(hdev);
3221 }
3222
3223 static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3224 struct sk_buff *skb)
3225 {
3226 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3227 struct oob_data *data;
3228
3229 BT_DBG("%s", hdev->name);
3230
3231 hci_dev_lock(hdev);
3232
3233 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3234 goto unlock;
3235
3236 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3237 if (data) {
3238 struct hci_cp_remote_oob_data_reply cp;
3239
3240 bacpy(&cp.bdaddr, &ev->bdaddr);
3241 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3242 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3243
3244 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3245 &cp);
3246 } else {
3247 struct hci_cp_remote_oob_data_neg_reply cp;
3248
3249 bacpy(&cp.bdaddr, &ev->bdaddr);
3250 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3251 &cp);
3252 }
3253
3254 unlock:
3255 hci_dev_unlock(hdev);
3256 }
3257
3258 static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3259 {
3260 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3261 struct hci_conn *conn;
3262
3263 BT_DBG("%s status %d", hdev->name, ev->status);
3264
3265 hci_dev_lock(hdev);
3266
3267 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3268 if (!conn) {
3269 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3270 if (!conn) {
3271 BT_ERR("No memory for new connection");
3272 hci_dev_unlock(hdev);
3273 return;
3274 }
3275
3276 conn->dst_type = ev->bdaddr_type;
3277 }
3278
3279 if (ev->status) {
3280 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3281 conn->dst_type, ev->status);
3282 hci_proto_connect_cfm(conn, ev->status);
3283 conn->state = BT_CLOSED;
3284 hci_conn_del(conn);
3285 goto unlock;
3286 }
3287
3288 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3289 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3290 conn->dst_type, 0, NULL, 0, 0);
3291
3292 conn->sec_level = BT_SECURITY_LOW;
3293 conn->handle = __le16_to_cpu(ev->handle);
3294 conn->state = BT_CONNECTED;
3295
3296 hci_conn_hold_device(conn);
3297 hci_conn_add_sysfs(conn);
3298
3299 hci_proto_connect_cfm(conn, ev->status);
3300
3301 unlock:
3302 hci_dev_unlock(hdev);
3303 }
3304
3305 static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3306 struct sk_buff *skb)
3307 {
3308 u8 num_reports = skb->data[0];
3309 void *ptr = &skb->data[1];
3310 s8 rssi;
3311
3312 hci_dev_lock(hdev);
3313
3314 while (num_reports--) {
3315 struct hci_ev_le_advertising_info *ev = ptr;
3316
3317 hci_add_adv_entry(hdev, ev);
3318
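		/* The RSSI is carried as a single byte right after the
		 * advertising data of each report. */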
3319 rssi = ev->data[ev->length];
3320 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3321 NULL, rssi, 0, 1, ev->data,
3322 ev->length);
3323
3324 ptr += sizeof(*ev) + ev->length + 1;
3325 }
3326
3327 hci_dev_unlock(hdev);
3328 }
3329
3330 static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3331 struct sk_buff *skb)
3332 {
3333 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3334 struct hci_cp_le_ltk_reply cp;
3335 struct hci_cp_le_ltk_neg_reply neg;
3336 struct hci_conn *conn;
3337 struct smp_ltk *ltk;
3338
3339 BT_DBG("%s handle %d", hdev->name, cpu_to_le16(ev->handle));
3340
3341 hci_dev_lock(hdev);
3342
3343 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3344 if (conn == NULL)
3345 goto not_found;
3346
3347 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3348 if (ltk == NULL)
3349 goto not_found;
3350
3351 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3352 cp.handle = cpu_to_le16(conn->handle);
3353
3354 if (ltk->authenticated)
3355 conn->sec_level = BT_SECURITY_HIGH;
3356
3357 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3358
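	/* STKs (short term keys) are only meant for a single encryption,
	 * so remove the key now that it has been given to the controller. */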
3359 if (ltk->type & HCI_SMP_STK) {
3360 list_del(&ltk->list);
3361 kfree(ltk);
3362 }
3363
3364 hci_dev_unlock(hdev);
3365
3366 return;
3367
3368 not_found:
3369 neg.handle = ev->handle;
3370 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3371 hci_dev_unlock(hdev);
3372 }
3373
3374 static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3375 {
3376 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3377
3378 skb_pull(skb, sizeof(*le_ev));
3379
3380 switch (le_ev->subevent) {
3381 case HCI_EV_LE_CONN_COMPLETE:
3382 hci_le_conn_complete_evt(hdev, skb);
3383 break;
3384
3385 case HCI_EV_LE_ADVERTISING_REPORT:
3386 hci_le_adv_report_evt(hdev, skb);
3387 break;
3388
3389 case HCI_EV_LE_LTK_REQ:
3390 hci_le_ltk_request_evt(hdev, skb);
3391 break;
3392
3393 default:
3394 break;
3395 }
3396 }
3397
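/* Main HCI event demultiplexer: every event packet received from the
 * controller is routed to its handler from here. */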
3398 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3399 {
3400 struct hci_event_hdr *hdr = (void *) skb->data;
3401 __u8 event = hdr->evt;
3402
3403 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3404
3405 switch (event) {
3406 case HCI_EV_INQUIRY_COMPLETE:
3407 hci_inquiry_complete_evt(hdev, skb);
3408 break;
3409
3410 case HCI_EV_INQUIRY_RESULT:
3411 hci_inquiry_result_evt(hdev, skb);
3412 break;
3413
3414 case HCI_EV_CONN_COMPLETE:
3415 hci_conn_complete_evt(hdev, skb);
3416 break;
3417
3418 case HCI_EV_CONN_REQUEST:
3419 hci_conn_request_evt(hdev, skb);
3420 break;
3421
3422 case HCI_EV_DISCONN_COMPLETE:
3423 hci_disconn_complete_evt(hdev, skb);
3424 break;
3425
3426 case HCI_EV_AUTH_COMPLETE:
3427 hci_auth_complete_evt(hdev, skb);
3428 break;
3429
3430 case HCI_EV_REMOTE_NAME:
3431 hci_remote_name_evt(hdev, skb);
3432 break;
3433
3434 case HCI_EV_ENCRYPT_CHANGE:
3435 hci_encrypt_change_evt(hdev, skb);
3436 break;
3437
3438 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3439 hci_change_link_key_complete_evt(hdev, skb);
3440 break;
3441
3442 case HCI_EV_REMOTE_FEATURES:
3443 hci_remote_features_evt(hdev, skb);
3444 break;
3445
3446 case HCI_EV_REMOTE_VERSION:
3447 hci_remote_version_evt(hdev, skb);
3448 break;
3449
3450 case HCI_EV_QOS_SETUP_COMPLETE:
3451 hci_qos_setup_complete_evt(hdev, skb);
3452 break;
3453
3454 case HCI_EV_CMD_COMPLETE:
3455 hci_cmd_complete_evt(hdev, skb);
3456 break;
3457
3458 case HCI_EV_CMD_STATUS:
3459 hci_cmd_status_evt(hdev, skb);
3460 break;
3461
3462 case HCI_EV_ROLE_CHANGE:
3463 hci_role_change_evt(hdev, skb);
3464 break;
3465
3466 case HCI_EV_NUM_COMP_PKTS:
3467 hci_num_comp_pkts_evt(hdev, skb);
3468 break;
3469
3470 case HCI_EV_MODE_CHANGE:
3471 hci_mode_change_evt(hdev, skb);
3472 break;
3473
3474 case HCI_EV_PIN_CODE_REQ:
3475 hci_pin_code_request_evt(hdev, skb);
3476 break;
3477
3478 case HCI_EV_LINK_KEY_REQ:
3479 hci_link_key_request_evt(hdev, skb);
3480 break;
3481
3482 case HCI_EV_LINK_KEY_NOTIFY:
3483 hci_link_key_notify_evt(hdev, skb);
3484 break;
3485
3486 case HCI_EV_CLOCK_OFFSET:
3487 hci_clock_offset_evt(hdev, skb);
3488 break;
3489
3490 case HCI_EV_PKT_TYPE_CHANGE:
3491 hci_pkt_type_change_evt(hdev, skb);
3492 break;
3493
3494 case HCI_EV_PSCAN_REP_MODE:
3495 hci_pscan_rep_mode_evt(hdev, skb);
3496 break;
3497
3498 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3499 hci_inquiry_result_with_rssi_evt(hdev, skb);
3500 break;
3501
3502 case HCI_EV_REMOTE_EXT_FEATURES:
3503 hci_remote_ext_features_evt(hdev, skb);
3504 break;
3505
3506 case HCI_EV_SYNC_CONN_COMPLETE:
3507 hci_sync_conn_complete_evt(hdev, skb);
3508 break;
3509
3510 case HCI_EV_SYNC_CONN_CHANGED:
3511 hci_sync_conn_changed_evt(hdev, skb);
3512 break;
3513
3514 case HCI_EV_SNIFF_SUBRATE:
3515 hci_sniff_subrate_evt(hdev, skb);
3516 break;
3517
3518 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3519 hci_extended_inquiry_result_evt(hdev, skb);
3520 break;
3521
3522 case HCI_EV_IO_CAPA_REQUEST:
3523 hci_io_capa_request_evt(hdev, skb);
3524 break;
3525
3526 case HCI_EV_IO_CAPA_REPLY:
3527 hci_io_capa_reply_evt(hdev, skb);
3528 break;
3529
3530 case HCI_EV_USER_CONFIRM_REQUEST:
3531 hci_user_confirm_request_evt(hdev, skb);
3532 break;
3533
3534 case HCI_EV_USER_PASSKEY_REQUEST:
3535 hci_user_passkey_request_evt(hdev, skb);
3536 break;
3537
3538 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3539 hci_simple_pair_complete_evt(hdev, skb);
3540 break;
3541
3542 case HCI_EV_REMOTE_HOST_FEATURES:
3543 hci_remote_host_features_evt(hdev, skb);
3544 break;
3545
3546 case HCI_EV_LE_META:
3547 hci_le_meta_evt(hdev, skb);
3548 break;
3549
3550 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3551 hci_remote_oob_data_request_evt(hdev, skb);
3552 break;
3553
3554 case HCI_EV_NUM_COMP_BLOCKS:
3555 hci_num_comp_blocks_evt(hdev, skb);
3556 break;
3557
3558 default:
3559 BT_DBG("%s event 0x%x", hdev->name, event);
3560 break;
3561 }
3562
3563 kfree_skb(skb);
3564 hdev->stat.evt_rx++;
3565 }