Bluetooth: Track discovery type
[deliverable/linux.git] / net / bluetooth / hci_event.c
1/*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23*/
24
25/* Bluetooth HCI event handling. */
26
27#include <linux/module.h>
28
29#include <linux/types.h>
30#include <linux/errno.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/poll.h>
34#include <linux/fcntl.h>
35#include <linux/init.h>
36#include <linux/skbuff.h>
37#include <linux/interrupt.h>
38#include <linux/notifier.h>
39#include <net/sock.h>
40
41#include <asm/system.h>
42#include <linux/uaccess.h>
43#include <asm/unaligned.h>
44
45#include <net/bluetooth/bluetooth.h>
46#include <net/bluetooth/hci_core.h>
47
48static bool enable_le;
49
50/* Handle HCI Event packets */
51
52static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
53{
54 __u8 status = *((__u8 *) skb->data);
55
56 BT_DBG("%s status 0x%x", hdev->name, status);
57
58 if (status) {
59 hci_dev_lock(hdev);
60 mgmt_stop_discovery_failed(hdev, status);
61 hci_dev_unlock(hdev);
62 return;
63 }
64
65 clear_bit(HCI_INQUIRY, &hdev->flags);
66
67 hci_dev_lock(hdev);
68 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
69 hci_dev_unlock(hdev);
70
71 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
72
73 hci_conn_check_pending(hdev);
74}
75
76static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
77{
78 __u8 status = *((__u8 *) skb->data);
79
80 BT_DBG("%s status 0x%x", hdev->name, status);
81
82 if (status)
83 return;
84
85 hci_conn_check_pending(hdev);
86}
87
88static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev, struct sk_buff *skb)
89{
90 BT_DBG("%s", hdev->name);
91}
92
93static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
94{
95 struct hci_rp_role_discovery *rp = (void *) skb->data;
96 struct hci_conn *conn;
97
98 BT_DBG("%s status 0x%x", hdev->name, rp->status);
99
100 if (rp->status)
101 return;
102
103 hci_dev_lock(hdev);
104
105 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
106 if (conn) {
107 if (rp->role)
108 conn->link_mode &= ~HCI_LM_MASTER;
109 else
110 conn->link_mode |= HCI_LM_MASTER;
111 }
112
113 hci_dev_unlock(hdev);
114}
115
116static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
117{
118 struct hci_rp_read_link_policy *rp = (void *) skb->data;
119 struct hci_conn *conn;
120
121 BT_DBG("%s status 0x%x", hdev->name, rp->status);
122
123 if (rp->status)
124 return;
125
126 hci_dev_lock(hdev);
127
128 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
129 if (conn)
130 conn->link_policy = __le16_to_cpu(rp->policy);
131
132 hci_dev_unlock(hdev);
133}
134
135static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
136{
137 struct hci_rp_write_link_policy *rp = (void *) skb->data;
138 struct hci_conn *conn;
139 void *sent;
140
141 BT_DBG("%s status 0x%x", hdev->name, rp->status);
142
143 if (rp->status)
144 return;
145
146 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
147 if (!sent)
148 return;
149
150 hci_dev_lock(hdev);
151
152 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
153 if (conn)
154 conn->link_policy = get_unaligned_le16(sent + 2);
155
156 hci_dev_unlock(hdev);
157}
158
159static void hci_cc_read_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
160{
161 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
162
163 BT_DBG("%s status 0x%x", hdev->name, rp->status);
164
165 if (rp->status)
166 return;
167
168 hdev->link_policy = __le16_to_cpu(rp->policy);
169}
170
171static void hci_cc_write_def_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
172{
173 __u8 status = *((__u8 *) skb->data);
174 void *sent;
175
176 BT_DBG("%s status 0x%x", hdev->name, status);
177
178 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
179 if (!sent)
180 return;
181
182 if (!status)
183 hdev->link_policy = get_unaligned_le16(sent);
184
185 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
186}
187
188static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
189{
190 __u8 status = *((__u8 *) skb->data);
191
192 BT_DBG("%s status 0x%x", hdev->name, status);
193
194 clear_bit(HCI_RESET, &hdev->flags);
195
196 hci_req_complete(hdev, HCI_OP_RESET, status);
197
198 /* Reset all flags, except persistent ones */
199 hdev->dev_flags &= BIT(HCI_MGMT) | BIT(HCI_SETUP) | BIT(HCI_AUTO_OFF) |
200 BIT(HCI_LINK_KEYS) | BIT(HCI_DEBUG_KEYS);
201}
202
203static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
204{
205 __u8 status = *((__u8 *) skb->data);
206 void *sent;
207
208 BT_DBG("%s status 0x%x", hdev->name, status);
209
210 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
211 if (!sent)
212 return;
213
214 hci_dev_lock(hdev);
215
216 if (test_bit(HCI_MGMT, &hdev->dev_flags))
217 mgmt_set_local_name_complete(hdev, sent, status);
218
219 if (status == 0)
220 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
221
222 hci_dev_unlock(hdev);
223}
224
225static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
226{
227 struct hci_rp_read_local_name *rp = (void *) skb->data;
228
229 BT_DBG("%s status 0x%x", hdev->name, rp->status);
230
231 if (rp->status)
232 return;
233
234 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
235}
236
237static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
238{
239 __u8 status = *((__u8 *) skb->data);
240 void *sent;
241
242 BT_DBG("%s status 0x%x", hdev->name, status);
243
244 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
245 if (!sent)
246 return;
247
248 if (!status) {
249 __u8 param = *((__u8 *) sent);
250
251 if (param == AUTH_ENABLED)
252 set_bit(HCI_AUTH, &hdev->flags);
253 else
254 clear_bit(HCI_AUTH, &hdev->flags);
255 }
256
257 if (test_bit(HCI_MGMT, &hdev->dev_flags))
258 mgmt_auth_enable_complete(hdev, status);
259
260 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
261}
262
263static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
264{
265 __u8 status = *((__u8 *) skb->data);
266 void *sent;
267
268 BT_DBG("%s status 0x%x", hdev->name, status);
269
270 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
271 if (!sent)
272 return;
273
274 if (!status) {
275 __u8 param = *((__u8 *) sent);
276
277 if (param)
278 set_bit(HCI_ENCRYPT, &hdev->flags);
279 else
280 clear_bit(HCI_ENCRYPT, &hdev->flags);
281 }
282
283 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
284}
285
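/* Command complete handler for Write Scan Enable: on success the new
 * inquiry/page scan state is mirrored into HCI_ISCAN/HCI_PSCAN, mgmt is
 * told about discoverable/connectable transitions and, if a discoverable
 * timeout is configured, the timer for switching it off again is queued;
 * on failure the error is reported to mgmt and the timeout is cleared. */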
286static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
287{
288 __u8 param, status = *((__u8 *) skb->data);
289 int old_pscan, old_iscan;
290 void *sent;
291
292 BT_DBG("%s status 0x%x", hdev->name, status);
293
294 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
295 if (!sent)
296 return;
297
298 param = *((__u8 *) sent);
299
300 hci_dev_lock(hdev);
301
302 if (status != 0) {
303 mgmt_write_scan_failed(hdev, param, status);
304 hdev->discov_timeout = 0;
305 goto done;
306 }
307
308 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
309 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
310
311 if (param & SCAN_INQUIRY) {
312 set_bit(HCI_ISCAN, &hdev->flags);
313 if (!old_iscan)
314 mgmt_discoverable(hdev, 1);
315 if (hdev->discov_timeout > 0) {
316 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
317 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
318 to);
319 }
320 } else if (old_iscan)
321 mgmt_discoverable(hdev, 0);
322
323 if (param & SCAN_PAGE) {
324 set_bit(HCI_PSCAN, &hdev->flags);
325 if (!old_pscan)
326 mgmt_connectable(hdev, 1);
327 } else if (old_pscan)
328 mgmt_connectable(hdev, 0);
329
330done:
331 hci_dev_unlock(hdev);
332 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
333}
334
335static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
336{
337 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
338
339 BT_DBG("%s status 0x%x", hdev->name, rp->status);
340
341 if (rp->status)
342 return;
343
344 memcpy(hdev->dev_class, rp->dev_class, 3);
345
346 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
347 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
348}
349
350static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
351{
352 __u8 status = *((__u8 *) skb->data);
353 void *sent;
354
355 BT_DBG("%s status 0x%x", hdev->name, status);
356
357 if (status)
358 return;
359
360 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
361 if (!sent)
362 return;
363
364 memcpy(hdev->dev_class, sent, 3);
365}
366
367static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
368{
369 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
370 __u16 setting;
371
372 BT_DBG("%s status 0x%x", hdev->name, rp->status);
373
374 if (rp->status)
375 return;
376
377 setting = __le16_to_cpu(rp->voice_setting);
378
379 if (hdev->voice_setting == setting)
380 return;
381
382 hdev->voice_setting = setting;
383
384 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
385
386 if (hdev->notify)
387 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
388}
389
390static void hci_cc_write_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
391{
392 __u8 status = *((__u8 *) skb->data);
393 __u16 setting;
394 void *sent;
395
396 BT_DBG("%s status 0x%x", hdev->name, status);
397
398 if (status)
399 return;
400
401 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
402 if (!sent)
403 return;
404
405 setting = get_unaligned_le16(sent);
406
407 if (hdev->voice_setting == setting)
408 return;
409
410 hdev->voice_setting = setting;
411
412 BT_DBG("%s voice setting 0x%04x", hdev->name, setting);
413
414 if (hdev->notify)
415 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
416}
417
418static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
419{
420 __u8 status = *((__u8 *) skb->data);
421
422 BT_DBG("%s status 0x%x", hdev->name, status);
423
424 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
425}
426
427static void hci_cc_read_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
428{
429 struct hci_rp_read_ssp_mode *rp = (void *) skb->data;
430
431 BT_DBG("%s status 0x%x", hdev->name, rp->status);
432
433 if (rp->status)
434 return;
435
436 if (rp->mode)
437 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
438 else
439 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
440}
441
442static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
443{
444 __u8 status = *((__u8 *) skb->data);
445 void *sent;
446
447 BT_DBG("%s status 0x%x", hdev->name, status);
448
449 if (status)
450 goto done;
451
452 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
453 if (!sent)
454 return;
455
456 if (*((u8 *) sent))
457 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
458 else
459 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
460
461done:
462 if (test_bit(HCI_MGMT, &hdev->dev_flags))
463 mgmt_ssp_enable_complete(hdev, status);
464}
465
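/* Select the inquiry mode to configure: 2 = inquiry with extended
 * responses, 1 = inquiry with RSSI, 0 = standard. A few controllers,
 * identified by manufacturer/hci_rev/lmp_subver, are put into RSSI mode
 * even though they do not advertise LMP_RSSI_INQ. */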
466static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
467{
468 if (hdev->features[6] & LMP_EXT_INQ)
469 return 2;
470
471 if (hdev->features[3] & LMP_RSSI_INQ)
472 return 1;
473
474 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
475 hdev->lmp_subver == 0x0757)
476 return 1;
477
478 if (hdev->manufacturer == 15) {
479 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
480 return 1;
481 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
482 return 1;
483 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
484 return 1;
485 }
486
487 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
488 hdev->lmp_subver == 0x1805)
489 return 1;
490
491 return 0;
492}
493
494static void hci_setup_inquiry_mode(struct hci_dev *hdev)
495{
496 u8 mode;
497
498 mode = hci_get_inquiry_mode(hdev);
499
500 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
501}
502
503static void hci_setup_event_mask(struct hci_dev *hdev)
504{
505 /* The second byte is 0xff instead of 0x9f (two reserved bits
506 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
507 * command otherwise */
508 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
509
510 /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
511 * any event mask for pre-1.2 devices */
512 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
513 return;
514
515 events[4] |= 0x01; /* Flow Specification Complete */
516 events[4] |= 0x02; /* Inquiry Result with RSSI */
517 events[4] |= 0x04; /* Read Remote Extended Features Complete */
518 events[5] |= 0x08; /* Synchronous Connection Complete */
519 events[5] |= 0x10; /* Synchronous Connection Changed */
520
521 if (hdev->features[3] & LMP_RSSI_INQ)
522 events[4] |= 0x02; /* Inquiry Result with RSSI */
523
524 if (hdev->features[5] & LMP_SNIFF_SUBR)
525 events[5] |= 0x20; /* Sniff Subrating */
526
527 if (hdev->features[5] & LMP_PAUSE_ENC)
528 events[5] |= 0x80; /* Encryption Key Refresh Complete */
529
530 if (hdev->features[6] & LMP_EXT_INQ)
531 events[5] |= 0x40; /* Extended Inquiry Result */
532
533 if (hdev->features[6] & LMP_NO_FLUSH)
534 events[7] |= 0x01; /* Enhanced Flush Complete */
535
536 if (hdev->features[7] & LMP_LSTO)
537 events[6] |= 0x80; /* Link Supervision Timeout Changed */
538
539 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
540 events[6] |= 0x01; /* IO Capability Request */
541 events[6] |= 0x02; /* IO Capability Response */
542 events[6] |= 0x04; /* User Confirmation Request */
543 events[6] |= 0x08; /* User Passkey Request */
544 events[6] |= 0x10; /* Remote OOB Data Request */
545 events[6] |= 0x20; /* Simple Pairing Complete */
546 events[7] |= 0x04; /* User Passkey Notification */
547 events[7] |= 0x08; /* Keypress Notification */
548 events[7] |= 0x10; /* Remote Host Supported
549 * Features Notification */
550 }
551
552 if (hdev->features[4] & LMP_LE)
553 events[7] |= 0x20; /* LE Meta-Event */
554
555 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
556}
557
558static void hci_set_le_support(struct hci_dev *hdev)
559{
560 struct hci_cp_write_le_host_supported cp;
561
562 memset(&cp, 0, sizeof(cp));
563
564 if (enable_le) {
565 cp.le = 1;
566 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
567 }
568
569 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp), &cp);
570}
571
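/* Initial BR/EDR controller setup, run from the Read Local Version
 * command complete while HCI_INIT is set: program the event mask, read
 * the supported local commands and, based on the advertised LMP
 * features, enable SSP, pick the inquiry mode, read the inquiry response
 * TX power, fetch extended features page 1 and write the LE host
 * support setting. */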
572static void hci_setup(struct hci_dev *hdev)
573{
574 if (hdev->dev_type != HCI_BREDR)
575 return;
576
577 hci_setup_event_mask(hdev);
578
579 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
580 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
581
582 if (hdev->features[6] & LMP_SIMPLE_PAIR) {
583 u8 mode = 0x01;
584 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
585 }
586
587 if (hdev->features[3] & LMP_RSSI_INQ)
588 hci_setup_inquiry_mode(hdev);
589
590 if (hdev->features[7] & LMP_INQ_TX_PWR)
591 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
592
593 if (hdev->features[7] & LMP_EXTFEATURES) {
594 struct hci_cp_read_local_ext_features cp;
595
596 cp.page = 0x01;
597 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES,
598 sizeof(cp), &cp);
599 }
600
601 if (hdev->features[4] & LMP_LE)
602 hci_set_le_support(hdev);
603}
604
605static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
606{
607 struct hci_rp_read_local_version *rp = (void *) skb->data;
608
609 BT_DBG("%s status 0x%x", hdev->name, rp->status);
610
611 if (rp->status)
612 return;
613
614 hdev->hci_ver = rp->hci_ver;
615 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
616 hdev->lmp_ver = rp->lmp_ver;
617 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
618 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
619
620 BT_DBG("%s manufacturer %d hci ver %d:%d", hdev->name,
621 hdev->manufacturer,
622 hdev->hci_ver, hdev->hci_rev);
623
624 if (test_bit(HCI_INIT, &hdev->flags))
625 hci_setup(hdev);
626}
627
628static void hci_setup_link_policy(struct hci_dev *hdev)
629{
630 u16 link_policy = 0;
631
632 if (hdev->features[0] & LMP_RSWITCH)
633 link_policy |= HCI_LP_RSWITCH;
634 if (hdev->features[0] & LMP_HOLD)
635 link_policy |= HCI_LP_HOLD;
636 if (hdev->features[0] & LMP_SNIFF)
637 link_policy |= HCI_LP_SNIFF;
638 if (hdev->features[1] & LMP_PARK)
639 link_policy |= HCI_LP_PARK;
640
641 link_policy = cpu_to_le16(link_policy);
642 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY,
643 sizeof(link_policy), &link_policy);
644}
645
646static void hci_cc_read_local_commands(struct hci_dev *hdev, struct sk_buff *skb)
647{
648 struct hci_rp_read_local_commands *rp = (void *) skb->data;
649
650 BT_DBG("%s status 0x%x", hdev->name, rp->status);
651
652 if (rp->status)
653 goto done;
654
655 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
656
657 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
658 hci_setup_link_policy(hdev);
659
660done:
661 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
662}
663
664static void hci_cc_read_local_features(struct hci_dev *hdev, struct sk_buff *skb)
665{
666 struct hci_rp_read_local_features *rp = (void *) skb->data;
667
668 BT_DBG("%s status 0x%x", hdev->name, rp->status);
669
670 if (rp->status)
671 return;
672
673 memcpy(hdev->features, rp->features, 8);
674
675 /* Adjust default settings according to the features
676 * supported by the device. */
677
678 if (hdev->features[0] & LMP_3SLOT)
679 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
680
681 if (hdev->features[0] & LMP_5SLOT)
682 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
683
684 if (hdev->features[1] & LMP_HV2) {
685 hdev->pkt_type |= (HCI_HV2);
686 hdev->esco_type |= (ESCO_HV2);
687 }
688
689 if (hdev->features[1] & LMP_HV3) {
690 hdev->pkt_type |= (HCI_HV3);
691 hdev->esco_type |= (ESCO_HV3);
692 }
693
694 if (hdev->features[3] & LMP_ESCO)
695 hdev->esco_type |= (ESCO_EV3);
696
697 if (hdev->features[4] & LMP_EV4)
698 hdev->esco_type |= (ESCO_EV4);
699
700 if (hdev->features[4] & LMP_EV5)
701 hdev->esco_type |= (ESCO_EV5);
702
703 if (hdev->features[5] & LMP_EDR_ESCO_2M)
704 hdev->esco_type |= (ESCO_2EV3);
705
706 if (hdev->features[5] & LMP_EDR_ESCO_3M)
707 hdev->esco_type |= (ESCO_3EV3);
708
709 if (hdev->features[5] & LMP_EDR_3S_ESCO)
710 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
711
712 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
713 hdev->features[0], hdev->features[1],
714 hdev->features[2], hdev->features[3],
715 hdev->features[4], hdev->features[5],
716 hdev->features[6], hdev->features[7]);
717}
718
719static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
720 struct sk_buff *skb)
721{
722 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
723
724 BT_DBG("%s status 0x%x", hdev->name, rp->status);
725
726 if (rp->status)
727 return;
728
729 switch (rp->page) {
730 case 0:
731 memcpy(hdev->features, rp->features, 8);
732 break;
733 case 1:
734 memcpy(hdev->host_features, rp->features, 8);
735 break;
736 }
737
738 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
739}
740
741static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
742 struct sk_buff *skb)
743{
744 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
745
746 BT_DBG("%s status 0x%x", hdev->name, rp->status);
747
748 if (rp->status)
749 return;
750
751 hdev->flow_ctl_mode = rp->mode;
752
753 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
754}
755
756static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
757{
758 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
759
760 BT_DBG("%s status 0x%x", hdev->name, rp->status);
761
762 if (rp->status)
763 return;
764
765 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
766 hdev->sco_mtu = rp->sco_mtu;
767 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
768 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
769
770 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
771 hdev->sco_mtu = 64;
772 hdev->sco_pkts = 8;
773 }
774
775 hdev->acl_cnt = hdev->acl_pkts;
776 hdev->sco_cnt = hdev->sco_pkts;
777
778 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name,
779 hdev->acl_mtu, hdev->acl_pkts,
780 hdev->sco_mtu, hdev->sco_pkts);
781}
782
783static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
784{
785 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
786
787 BT_DBG("%s status 0x%x", hdev->name, rp->status);
788
789 if (!rp->status)
790 bacpy(&hdev->bdaddr, &rp->bdaddr);
791
792 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
793}
794
795static void hci_cc_read_data_block_size(struct hci_dev *hdev,
796 struct sk_buff *skb)
797{
798 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
799
800 BT_DBG("%s status 0x%x", hdev->name, rp->status);
801
802 if (rp->status)
803 return;
804
805 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
806 hdev->block_len = __le16_to_cpu(rp->block_len);
807 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
808
809 hdev->block_cnt = hdev->num_blocks;
810
811 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
812 hdev->block_cnt, hdev->block_len);
813
814 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
815}
816
817static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
818{
819 __u8 status = *((__u8 *) skb->data);
820
821 BT_DBG("%s status 0x%x", hdev->name, status);
822
823 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
824}
825
826static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
827 struct sk_buff *skb)
828{
829 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
830
831 BT_DBG("%s status 0x%x", hdev->name, rp->status);
832
833 if (rp->status)
834 return;
835
836 hdev->amp_status = rp->amp_status;
837 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
838 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
839 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
840 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
841 hdev->amp_type = rp->amp_type;
842 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
843 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
844 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
845 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
846
847 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
848}
849
850static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
851 struct sk_buff *skb)
852{
853 __u8 status = *((__u8 *) skb->data);
854
855 BT_DBG("%s status 0x%x", hdev->name, status);
856
857 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
858}
859
860static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
861{
862 __u8 status = *((__u8 *) skb->data);
863
864 BT_DBG("%s status 0x%x", hdev->name, status);
865
866 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
867}
868
869static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
870 struct sk_buff *skb)
871{
872 __u8 status = *((__u8 *) skb->data);
873
874 BT_DBG("%s status 0x%x", hdev->name, status);
875
876 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
877}
878
879static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
880 struct sk_buff *skb)
881{
882 __u8 status = *((__u8 *) skb->data);
883
884 BT_DBG("%s status 0x%x", hdev->name, status);
885
886 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, status);
887}
888
889static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
890{
891 __u8 status = *((__u8 *) skb->data);
892
893 BT_DBG("%s status 0x%x", hdev->name, status);
894
895 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
896}
897
898static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
899{
900 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
901 struct hci_cp_pin_code_reply *cp;
902 struct hci_conn *conn;
903
904 BT_DBG("%s status 0x%x", hdev->name, rp->status);
905
906 hci_dev_lock(hdev);
907
908 if (test_bit(HCI_MGMT, &hdev->dev_flags))
909 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
910
911 if (rp->status != 0)
912 goto unlock;
913
914 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
915 if (!cp)
916 goto unlock;
917
918 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
919 if (conn)
920 conn->pin_length = cp->pin_len;
921
922unlock:
923 hci_dev_unlock(hdev);
924}
925
926static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
927{
928 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
929
930 BT_DBG("%s status 0x%x", hdev->name, rp->status);
931
932 hci_dev_lock(hdev);
933
934 if (test_bit(HCI_MGMT, &hdev->dev_flags))
935 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
936 rp->status);
937
938 hci_dev_unlock(hdev);
939}
940
941static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
942 struct sk_buff *skb)
943{
944 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
945
946 BT_DBG("%s status 0x%x", hdev->name, rp->status);
947
948 if (rp->status)
949 return;
950
951 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
952 hdev->le_pkts = rp->le_max_pkt;
953
954 hdev->le_cnt = hdev->le_pkts;
955
956 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
957
958 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
959}
960
961static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
962{
963 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
964
965 BT_DBG("%s status 0x%x", hdev->name, rp->status);
966
967 hci_dev_lock(hdev);
968
969 if (test_bit(HCI_MGMT, &hdev->dev_flags))
970 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
971 0, rp->status);
972
973 hci_dev_unlock(hdev);
974}
975
976static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
977 struct sk_buff *skb)
978{
979 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
980
981 BT_DBG("%s status 0x%x", hdev->name, rp->status);
982
983 hci_dev_lock(hdev);
984
985 if (test_bit(HCI_MGMT, &hdev->dev_flags))
986 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
987 ACL_LINK, 0,
988 rp->status);
989
990 hci_dev_unlock(hdev);
991}
992
993static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
994{
995 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
996
997 BT_DBG("%s status 0x%x", hdev->name, rp->status);
998
999 hci_dev_lock(hdev);
1000
1001 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1002 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1003 0, rp->status);
1004
1005 hci_dev_unlock(hdev);
1006}
1007
1008static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1009 struct sk_buff *skb)
1010{
1011 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1012
1013 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1014
1015 hci_dev_lock(hdev);
1016
1017 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1018 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1019 ACL_LINK, 0,
1020 rp->status);
1021
1022 hci_dev_unlock(hdev);
1023}
1024
1025static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1026 struct sk_buff *skb)
1027{
1028 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1029
1030 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1031
1032 hci_dev_lock(hdev);
1033 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1034 rp->randomizer, rp->status);
1035 hci_dev_unlock(hdev);
1036}
1037
1038static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1039{
1040 __u8 status = *((__u8 *) skb->data);
1041
1042 BT_DBG("%s status 0x%x", hdev->name, status);
1043
1044 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1045
1046 if (status) {
1047 hci_dev_lock(hdev);
1048 mgmt_start_discovery_failed(hdev, status);
1049 hci_dev_unlock(hdev);
1050 return;
1051 }
1052}
1053
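/* Command complete handler for LE Set Scan Enable: when scanning was
 * started, set HCI_LE_SCAN, clear the cached advertising entries and
 * move discovery to DISCOVERY_LE_SCAN (or report the failure to mgmt);
 * when it was stopped, clear the flag, mark discovery stopped and
 * schedule the advertising cache cleanup. */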
1054static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
1055 struct sk_buff *skb)
1056{
1057 struct hci_cp_le_set_scan_enable *cp;
1058 __u8 status = *((__u8 *) skb->data);
1059
1060 BT_DBG("%s status 0x%x", hdev->name, status);
1061
1062 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
1063 if (!cp)
1064 return;
1065
1066 switch (cp->enable) {
1067 case LE_SCANNING_ENABLED:
1068 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);
1069
1070 if (status) {
1071 hci_dev_lock(hdev);
1072 mgmt_start_discovery_failed(hdev, status);
1073 hci_dev_unlock(hdev);
1074 return;
1075 }
1076
1077 set_bit(HCI_LE_SCAN, &hdev->dev_flags);
1078
1079 cancel_delayed_work_sync(&hdev->adv_work);
1080
1081 hci_dev_lock(hdev);
1082 hci_adv_entries_clear(hdev);
1083 hci_discovery_set_state(hdev, DISCOVERY_LE_SCAN);
1084 hci_dev_unlock(hdev);
1085 break;
1086
1087 case LE_SCANNING_DISABLED:
1088 if (status)
1089 return;
1090
1091 clear_bit(HCI_LE_SCAN, &hdev->dev_flags);
1092
1093 hci_dev_lock(hdev);
1094 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1095 hci_dev_unlock(hdev);
1096
1097 schedule_delayed_work(&hdev->adv_work, ADV_CLEAR_TIMEOUT);
1098 break;
1099
1100 default:
1101 BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
1102 break;
1103 }
1104}
1105
1106static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1107{
1108 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1109
1110 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1111
1112 if (rp->status)
1113 return;
1114
1115 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1116}
1117
1118static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1119{
1120 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1121
1122 BT_DBG("%s status 0x%x", hdev->name, rp->status);
1123
1124 if (rp->status)
1125 return;
1126
1127 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1128}
1129
1130static inline void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1131 struct sk_buff *skb)
1132{
1133 struct hci_cp_read_local_ext_features cp;
1134 __u8 status = *((__u8 *) skb->data);
1135
1136 BT_DBG("%s status 0x%x", hdev->name, status);
1137
1138 if (status)
1139 return;
1140
1141 cp.page = 0x01;
1142 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp), &cp);
1143}
1144
1145static inline void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1146{
1147 BT_DBG("%s status 0x%x", hdev->name, status);
1148
1149 if (status) {
1150 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1151 hci_conn_check_pending(hdev);
1152 hci_dev_lock(hdev);
1153 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1154 mgmt_start_discovery_failed(hdev, status);
1155 hci_dev_unlock(hdev);
1156 return;
1157 }
1158
1159 set_bit(HCI_INQUIRY, &hdev->flags);
1160
1161 hci_dev_lock(hdev);
1162 hci_discovery_set_state(hdev, DISCOVERY_INQUIRY);
1163 hci_dev_unlock(hdev);
1164}
1165
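/* Command status for Create Connection: on success make sure a
 * connection object exists and is marked as an outgoing master link; on
 * failure tear the pending connection down, except that a Command
 * Disallowed (0x0c) status with attempts left leaves it queued for a
 * later retry. */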
1166static inline void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
1167{
1168 struct hci_cp_create_conn *cp;
1169 struct hci_conn *conn;
1170
1171 BT_DBG("%s status 0x%x", hdev->name, status);
1172
1173 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
1174 if (!cp)
1175 return;
1176
1177 hci_dev_lock(hdev);
1178
1179 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1180
1181 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->bdaddr), conn);
1182
1183 if (status) {
1184 if (conn && conn->state == BT_CONNECT) {
1185 if (status != 0x0c || conn->attempt > 2) {
1186 conn->state = BT_CLOSED;
1187 hci_proto_connect_cfm(conn, status);
1188 hci_conn_del(conn);
1189 } else
1190 conn->state = BT_CONNECT2;
1191 }
1192 } else {
1193 if (!conn) {
1194 conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
1195 if (conn) {
1196 conn->out = true;
1197 conn->link_mode |= HCI_LM_MASTER;
1198 } else
1199 BT_ERR("No memory for new connection");
1200 }
1201 }
1202
1203 hci_dev_unlock(hdev);
1204}
1205
1206static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1207{
1208 struct hci_cp_add_sco *cp;
1209 struct hci_conn *acl, *sco;
1210 __u16 handle;
1211
1212 BT_DBG("%s status 0x%x", hdev->name, status);
1213
1214 if (!status)
1215 return;
1216
1217 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1218 if (!cp)
1219 return;
1220
1221 handle = __le16_to_cpu(cp->handle);
1222
1223 BT_DBG("%s handle %d", hdev->name, handle);
1224
1225 hci_dev_lock(hdev);
1226
1227 acl = hci_conn_hash_lookup_handle(hdev, handle);
1228 if (acl) {
1229 sco = acl->link;
1230 if (sco) {
1231 sco->state = BT_CLOSED;
1232
1233 hci_proto_connect_cfm(sco, status);
1234 hci_conn_del(sco);
1235 }
1236 }
1237
1238 hci_dev_unlock(hdev);
1239}
1240
1241static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1242{
1243 struct hci_cp_auth_requested *cp;
1244 struct hci_conn *conn;
1245
1246 BT_DBG("%s status 0x%x", hdev->name, status);
1247
1248 if (!status)
1249 return;
1250
1251 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1252 if (!cp)
1253 return;
1254
1255 hci_dev_lock(hdev);
1256
1257 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1258 if (conn) {
1259 if (conn->state == BT_CONFIG) {
1260 hci_proto_connect_cfm(conn, status);
1261 hci_conn_put(conn);
1262 }
1263 }
1264
1265 hci_dev_unlock(hdev);
1266}
1267
1268static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1269{
1270 struct hci_cp_set_conn_encrypt *cp;
1271 struct hci_conn *conn;
1272
1273 BT_DBG("%s status 0x%x", hdev->name, status);
1274
1275 if (!status)
1276 return;
1277
1278 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1279 if (!cp)
1280 return;
1281
1282 hci_dev_lock(hdev);
1283
1284 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1285 if (conn) {
1286 if (conn->state == BT_CONFIG) {
1287 hci_proto_connect_cfm(conn, status);
1288 hci_conn_put(conn);
1289 }
1290 }
1291
1292 hci_dev_unlock(hdev);
1293}
1294
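/* Decide whether an outgoing connection that is still in BT_CONFIG needs
 * authentication: SDP-level security never does, and non-SSP links only
 * do when high security or MITM protection was requested. */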
1295static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1296 struct hci_conn *conn)
1297{
1298 if (conn->state != BT_CONFIG || !conn->out)
1299 return 0;
1300
1301 if (conn->pending_sec_level == BT_SECURITY_SDP)
1302 return 0;
1303
1304 /* Only request authentication for SSP connections or non-SSP
1305 * devices with sec_level HIGH or if MITM protection is requested */
1306 if (!hci_conn_ssp_enabled(conn) &&
1307 conn->pending_sec_level != BT_SECURITY_HIGH &&
1308 !(conn->auth_type & 0x01))
1309 return 0;
1310
1311 return 1;
1312}
1313
1314static inline int hci_resolve_name(struct hci_dev *hdev, struct inquiry_entry *e)
1315{
1316 struct hci_cp_remote_name_req cp;
1317
1318 memset(&cp, 0, sizeof(cp));
1319
1320 bacpy(&cp.bdaddr, &e->data.bdaddr);
1321 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1322 cp.pscan_mode = e->data.pscan_mode;
1323 cp.clock_offset = e->data.clock_offset;
1324
1325 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1326}
1327
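/* Pick the next inquiry cache entry that still needs its name resolved
 * and fire off a remote name request for it; returns true if a request
 * was sent. */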
1328static bool hci_resolve_next_name(struct hci_dev *hdev)
1329{
1330 struct discovery_state *discov = &hdev->discovery;
1331 struct inquiry_entry *e;
1332
1333 if (list_empty(&discov->resolve))
1334 return false;
1335
1336 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1337 if (hci_resolve_name(hdev, e) == 0) {
1338 e->name_state = NAME_PENDING;
1339 return true;
1340 }
1341
1342 return false;
1343}
1344
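/* Handle the result of a remote name request: report the connection to
 * mgmt if one exists and, while discovery is running, mark the matching
 * inquiry cache entry as resolved, forward the name, and either resolve
 * the next pending name or end discovery. */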
1345static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
1346 bdaddr_t *bdaddr, u8 *name, u8 name_len)
1347{
1348 struct discovery_state *discov = &hdev->discovery;
1349 struct inquiry_entry *e;
1350
1351 if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
1352 mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00,
1353 name, name_len, conn->dev_class);
1354
1355 if (discov->state == DISCOVERY_STOPPED)
1356 return;
1357
1358 if (discov->state == DISCOVERY_STOPPING)
1359 goto discov_complete;
1360
1361 if (discov->state != DISCOVERY_RESOLVING)
1362 return;
1363
1364 e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
1365 if (e) {
1366 e->name_state = NAME_KNOWN;
1367 list_del(&e->list);
1368 if (name)
1369 mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
1370 e->data.rssi, name, name_len);
1371 }
1372
1373 if (hci_resolve_next_name(hdev))
1374 return;
1375
1376discov_complete:
1377 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1378}
1379
1380static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1381{
1382 struct hci_cp_remote_name_req *cp;
1383 struct hci_conn *conn;
1384
1385 BT_DBG("%s status 0x%x", hdev->name, status);
1386
1387 /* If successful, wait for the remote name request complete event
1388 * before checking whether authentication is needed */
1389 if (!status)
1390 return;
1391
1392 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1393 if (!cp)
1394 return;
1395
1396 hci_dev_lock(hdev);
1397
1398 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1399
1400 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1401 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1402
1403 if (!conn)
1404 goto unlock;
1405
1406 if (!hci_outgoing_auth_needed(hdev, conn))
1407 goto unlock;
1408
1409 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1410 struct hci_cp_auth_requested cp;
1411 cp.handle = __cpu_to_le16(conn->handle);
1412 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1413 }
1414
1415unlock:
1416 hci_dev_unlock(hdev);
1417}
1418
1419static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1420{
1421 struct hci_cp_read_remote_features *cp;
1422 struct hci_conn *conn;
1423
1424 BT_DBG("%s status 0x%x", hdev->name, status);
1425
1426 if (!status)
1427 return;
1428
1429 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1430 if (!cp)
1431 return;
1432
1433 hci_dev_lock(hdev);
1434
1435 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1436 if (conn) {
1437 if (conn->state == BT_CONFIG) {
1438 hci_proto_connect_cfm(conn, status);
1439 hci_conn_put(conn);
1440 }
1441 }
1442
1443 hci_dev_unlock(hdev);
1444}
1445
1446static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1447{
1448 struct hci_cp_read_remote_ext_features *cp;
1449 struct hci_conn *conn;
1450
1451 BT_DBG("%s status 0x%x", hdev->name, status);
1452
1453 if (!status)
1454 return;
1455
1456 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1457 if (!cp)
1458 return;
1459
1460 hci_dev_lock(hdev);
1461
1462 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1463 if (conn) {
1464 if (conn->state == BT_CONFIG) {
1465 hci_proto_connect_cfm(conn, status);
1466 hci_conn_put(conn);
1467 }
1468 }
1469
1470 hci_dev_unlock(hdev);
1471}
1472
1473static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1474{
1475 struct hci_cp_setup_sync_conn *cp;
1476 struct hci_conn *acl, *sco;
1477 __u16 handle;
1478
1479 BT_DBG("%s status 0x%x", hdev->name, status);
1480
1481 if (!status)
1482 return;
1483
1484 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1485 if (!cp)
1486 return;
1487
1488 handle = __le16_to_cpu(cp->handle);
1489
1490 BT_DBG("%s handle %d", hdev->name, handle);
1491
1492 hci_dev_lock(hdev);
1493
1494 acl = hci_conn_hash_lookup_handle(hdev, handle);
1495 if (acl) {
1496 sco = acl->link;
1497 if (sco) {
1498 sco->state = BT_CLOSED;
1499
1500 hci_proto_connect_cfm(sco, status);
1501 hci_conn_del(sco);
1502 }
1503 }
1504
1505 hci_dev_unlock(hdev);
1506}
1507
1508static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1509{
1510 struct hci_cp_sniff_mode *cp;
1511 struct hci_conn *conn;
1512
1513 BT_DBG("%s status 0x%x", hdev->name, status);
1514
1515 if (!status)
1516 return;
1517
1518 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1519 if (!cp)
1520 return;
1521
1522 hci_dev_lock(hdev);
1523
1524 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1525 if (conn) {
1526 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1527
1528 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1529 hci_sco_setup(conn, status);
1530 }
1531
1532 hci_dev_unlock(hdev);
1533}
1534
1535static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1536{
1537 struct hci_cp_exit_sniff_mode *cp;
1538 struct hci_conn *conn;
1539
1540 BT_DBG("%s status 0x%x", hdev->name, status);
1541
1542 if (!status)
1543 return;
1544
1545 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1546 if (!cp)
1547 return;
1548
1549 hci_dev_lock(hdev);
1550
1551 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1552 if (conn) {
1553 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1554
1555 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1556 hci_sco_setup(conn, status);
1557 }
1558
1559 hci_dev_unlock(hdev);
1560}
1561
1562static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1563{
1564 struct hci_cp_disconnect *cp;
1565 struct hci_conn *conn;
1566
1567 if (!status)
1568 return;
1569
1570 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1571 if (!cp)
1572 return;
1573
1574 hci_dev_lock(hdev);
1575
1576 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1577 if (conn)
1578 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1579 conn->dst_type, status);
1580
1581 hci_dev_unlock(hdev);
1582}
1583
1584static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1585{
1586 struct hci_cp_le_create_conn *cp;
1587 struct hci_conn *conn;
1588
1589 BT_DBG("%s status 0x%x", hdev->name, status);
1590
1591 cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN);
1592 if (!cp)
1593 return;
1594
1595 hci_dev_lock(hdev);
1596
1597 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
1598
1599 BT_DBG("%s bdaddr %s conn %p", hdev->name, batostr(&cp->peer_addr),
1600 conn);
1601
1602 if (status) {
1603 if (conn && conn->state == BT_CONNECT) {
1604 conn->state = BT_CLOSED;
1605 hci_proto_connect_cfm(conn, status);
1606 hci_conn_del(conn);
1607 }
1608 } else {
1609 if (!conn) {
1610 conn = hci_conn_add(hdev, LE_LINK, &cp->peer_addr);
1611 if (conn) {
1612 conn->dst_type = cp->peer_addr_type;
1613 conn->out = true;
1614 } else {
1615 BT_ERR("No memory for new connection");
1616 }
1617 }
1618 }
1619
1620 hci_dev_unlock(hdev);
1621}
1622
1623static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
1624{
1625 BT_DBG("%s status 0x%x", hdev->name, status);
1626}
1627
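/* Inquiry Complete event: clear the inquiry state, resume any pending
 * connection attempts and, for mgmt-driven discovery, either start
 * resolving names from the inquiry cache or mark discovery as stopped. */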
1628static inline void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1629{
1630 __u8 status = *((__u8 *) skb->data);
1631 struct discovery_state *discov = &hdev->discovery;
1632 struct inquiry_entry *e;
1633
1634 BT_DBG("%s status %d", hdev->name, status);
1635
1636 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1637
1638 hci_conn_check_pending(hdev);
1639
1640 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1641 return;
1642
1643 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1644 return;
1645
1646 hci_dev_lock(hdev);
1647
1648 if (discov->state != DISCOVERY_INQUIRY)
1649 goto unlock;
1650
1651 if (list_empty(&discov->resolve)) {
1652 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1653 goto unlock;
1654 }
1655
1656 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1657 if (e && hci_resolve_name(hdev, e) == 0) {
1658 e->name_state = NAME_PENDING;
1659 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1660 } else {
1661 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1662 }
1663
1664unlock:
1665 hci_dev_unlock(hdev);
1666}
1667
1668static inline void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1669{
1670 struct inquiry_data data;
1671 struct inquiry_info *info = (void *) (skb->data + 1);
1672 int num_rsp = *((__u8 *) skb->data);
1673
1674 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1675
1676 if (!num_rsp)
1677 return;
1678
1679 hci_dev_lock(hdev);
1680
1681 for (; num_rsp; num_rsp--, info++) {
1682 bool name_known;
1683
1684 bacpy(&data.bdaddr, &info->bdaddr);
1685 data.pscan_rep_mode = info->pscan_rep_mode;
1686 data.pscan_period_mode = info->pscan_period_mode;
1687 data.pscan_mode = info->pscan_mode;
1688 memcpy(data.dev_class, info->dev_class, 3);
1689 data.clock_offset = info->clock_offset;
1690 data.rssi = 0x00;
1691 data.ssp_mode = 0x00;
1692
1693 name_known = hci_inquiry_cache_update(hdev, &data, false);
1694 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1695 info->dev_class, 0, !name_known,
1696 NULL, 0);
1697 }
1698
1699 hci_dev_unlock(hdev);
1700}
1701
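/* Connection Complete event: on success finish bringing up the ACL or
 * SCO link (read remote features, set the packet type for incoming
 * links on pre-2.0 controllers, add the sysfs entry); on failure report
 * the failed connect to mgmt and drop the connection object. */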
1702static inline void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1703{
1704 struct hci_ev_conn_complete *ev = (void *) skb->data;
1705 struct hci_conn *conn;
1706
1707 BT_DBG("%s", hdev->name);
1708
1709 hci_dev_lock(hdev);
1710
1711 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1712 if (!conn) {
1713 if (ev->link_type != SCO_LINK)
1714 goto unlock;
1715
1716 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
1717 if (!conn)
1718 goto unlock;
1719
1720 conn->type = SCO_LINK;
1721 }
1722
1723 if (!ev->status) {
1724 conn->handle = __le16_to_cpu(ev->handle);
1725
1726 if (conn->type == ACL_LINK) {
1727 conn->state = BT_CONFIG;
1728 hci_conn_hold(conn);
1729 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1730 } else
1731 conn->state = BT_CONNECTED;
1732
1733 hci_conn_hold_device(conn);
1734 hci_conn_add_sysfs(conn);
1735
1736 if (test_bit(HCI_AUTH, &hdev->flags))
1737 conn->link_mode |= HCI_LM_AUTH;
1738
1739 if (test_bit(HCI_ENCRYPT, &hdev->flags))
1740 conn->link_mode |= HCI_LM_ENCRYPT;
1741
1742 /* Get remote features */
1743 if (conn->type == ACL_LINK) {
1744 struct hci_cp_read_remote_features cp;
1745 cp.handle = ev->handle;
1746 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
1747 sizeof(cp), &cp);
1748 }
1749
1750 /* Set packet type for incoming connection */
1751 if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
1752 struct hci_cp_change_conn_ptype cp;
1753 cp.handle = ev->handle;
1754 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1755 hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE,
1756 sizeof(cp), &cp);
1757 }
1758 } else {
1759 conn->state = BT_CLOSED;
1760 if (conn->type == ACL_LINK)
1761 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
1762 conn->dst_type, ev->status);
1763 }
1764
1765 if (conn->type == ACL_LINK)
1766 hci_sco_setup(conn, ev->status);
1767
1768 if (ev->status) {
1769 hci_proto_connect_cfm(conn, ev->status);
1770 hci_conn_del(conn);
1771 } else if (ev->link_type != ACL_LINK)
1772 hci_proto_connect_cfm(conn, ev->status);
1773
1774unlock:
1775 hci_dev_unlock(hdev);
1776
1777 hci_conn_check_pending(hdev);
1778}
1779
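/* Connection Request event: if the link mode and the protocols allow it
 * and the peer is not blacklisted, accept the request as an ACL or
 * synchronous connection (choosing master/slave role based on role
 * switch support); otherwise reject it. */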
1780static inline void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
1781{
1782 struct hci_ev_conn_request *ev = (void *) skb->data;
1783 int mask = hdev->link_mode;
1784
1785 BT_DBG("%s bdaddr %s type 0x%x", hdev->name,
1786 batostr(&ev->bdaddr), ev->link_type);
1787
1788 mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);
1789
1790 if ((mask & HCI_LM_ACCEPT) &&
1791 !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
1792 /* Connection accepted */
1793 struct inquiry_entry *ie;
1794 struct hci_conn *conn;
1795
1796 hci_dev_lock(hdev);
1797
1798 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
1799 if (ie)
1800 memcpy(ie->data.dev_class, ev->dev_class, 3);
1801
1802 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
1803 if (!conn) {
1804 conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
1805 if (!conn) {
1806 BT_ERR("No memory for new connection");
1807 hci_dev_unlock(hdev);
1808 return;
1809 }
1810 }
1811
1812 memcpy(conn->dev_class, ev->dev_class, 3);
1813 conn->state = BT_CONNECT;
1814
1815 hci_dev_unlock(hdev);
1816
1817 if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
1818 struct hci_cp_accept_conn_req cp;
1819
1820 bacpy(&cp.bdaddr, &ev->bdaddr);
1821
1822 if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
1823 cp.role = 0x00; /* Become master */
1824 else
1825 cp.role = 0x01; /* Remain slave */
1826
1827 hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ,
1828 sizeof(cp), &cp);
1829 } else {
1830 struct hci_cp_accept_sync_conn_req cp;
1831
1832 bacpy(&cp.bdaddr, &ev->bdaddr);
1833 cp.pkt_type = cpu_to_le16(conn->pkt_type);
1834
1835 cp.tx_bandwidth = cpu_to_le32(0x00001f40);
1836 cp.rx_bandwidth = cpu_to_le32(0x00001f40);
1837 cp.max_latency = cpu_to_le16(0xffff);
1838 cp.content_format = cpu_to_le16(hdev->voice_setting);
1839 cp.retrans_effort = 0xff;
1840
1841 hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
1842 sizeof(cp), &cp);
1843 }
1844 } else {
1845 /* Connection rejected */
1846 struct hci_cp_reject_conn_req cp;
1847
1848 bacpy(&cp.bdaddr, &ev->bdaddr);
1849 cp.reason = HCI_ERROR_REJ_BAD_ADDR;
1850 hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
1851 }
1852}
1853
1854static inline void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1855{
1856 struct hci_ev_disconn_complete *ev = (void *) skb->data;
1857 struct hci_conn *conn;
1858
1859 BT_DBG("%s status %d", hdev->name, ev->status);
1860
1861 hci_dev_lock(hdev);
1862
1863 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1864 if (!conn)
1865 goto unlock;
1866
1867 if (ev->status == 0)
1868 conn->state = BT_CLOSED;
1869
1870 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
1871 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
1872 if (ev->status != 0)
1873 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1874 conn->dst_type, ev->status);
1875 else
1876 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
1877 conn->dst_type);
1878 }
1879
1880 if (ev->status == 0) {
1881 hci_proto_disconn_cfm(conn, ev->reason);
1882 hci_conn_del(conn);
1883 }
1884
1885unlock:
1886 hci_dev_unlock(hdev);
1887}
1888
1889static inline void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1890{
1891 struct hci_ev_auth_complete *ev = (void *) skb->data;
1892 struct hci_conn *conn;
1893
1894 BT_DBG("%s status %d", hdev->name, ev->status);
1895
1896 hci_dev_lock(hdev);
1897
1898 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
1899 if (!conn)
1900 goto unlock;
1901
1902 if (!ev->status) {
1903 if (!hci_conn_ssp_enabled(conn) &&
1904 test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
1905 BT_INFO("re-auth of legacy device is not possible.");
1906 } else {
1907 conn->link_mode |= HCI_LM_AUTH;
1908 conn->sec_level = conn->pending_sec_level;
1909 }
1910 } else {
1911 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
1912 ev->status);
1913 }
1914
1915 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
1916 clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
1917
1918 if (conn->state == BT_CONFIG) {
1919 if (!ev->status && hci_conn_ssp_enabled(conn)) {
1920 struct hci_cp_set_conn_encrypt cp;
1921 cp.handle = ev->handle;
1922 cp.encrypt = 0x01;
1923 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1924 &cp);
1925 } else {
1926 conn->state = BT_CONNECTED;
1927 hci_proto_connect_cfm(conn, ev->status);
1928 hci_conn_put(conn);
1929 }
1930 } else {
1931 hci_auth_cfm(conn, ev->status);
1932
1933 hci_conn_hold(conn);
1934 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
1935 hci_conn_put(conn);
1936 }
1937
1938 if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
1939 if (!ev->status) {
1940 struct hci_cp_set_conn_encrypt cp;
1941 cp.handle = ev->handle;
1942 cp.encrypt = 0x01;
1943 hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
1944 &cp);
1945 } else {
1946 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
1947 hci_encrypt_cfm(conn, ev->status, 0x00);
1948 }
1949 }
1950
1951unlock:
1952 hci_dev_unlock(hdev);
1953}
1954
1955static inline void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
1956{
1957 struct hci_ev_remote_name *ev = (void *) skb->data;
1958 struct hci_conn *conn;
1959
1960 BT_DBG("%s", hdev->name);
1961
1962 hci_conn_check_pending(hdev);
1963
1964 hci_dev_lock(hdev);
1965
1966 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
1967
1968 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1969 goto check_auth;
1970
1971 if (ev->status == 0)
1972 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
1973 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
1974 else
1975 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
1976
1977check_auth:
1978 if (!conn)
1979 goto unlock;
1980
1981 if (!hci_outgoing_auth_needed(hdev, conn))
1982 goto unlock;
1983
1984 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1985 struct hci_cp_auth_requested cp;
1986 cp.handle = __cpu_to_le16(conn->handle);
1987 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1988 }
1989
1990unlock:
1991 hci_dev_unlock(hdev);
1992}
1993
1994static inline void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
1995{
1996 struct hci_ev_encrypt_change *ev = (void *) skb->data;
1997 struct hci_conn *conn;
1998
1999 BT_DBG("%s status %d", hdev->name, ev->status);
2000
2001 hci_dev_lock(hdev);
2002
2003 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2004 if (conn) {
2005 if (!ev->status) {
2006 if (ev->encrypt) {
2007 /* Encryption implies authentication */
2008 conn->link_mode |= HCI_LM_AUTH;
2009 conn->link_mode |= HCI_LM_ENCRYPT;
2010 conn->sec_level = conn->pending_sec_level;
2011 } else
2012 conn->link_mode &= ~HCI_LM_ENCRYPT;
2013 }
2014
2015 clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
2016
2017 if (conn->state == BT_CONFIG) {
2018 if (!ev->status)
2019 conn->state = BT_CONNECTED;
2020
2021 hci_proto_connect_cfm(conn, ev->status);
2022 hci_conn_put(conn);
2023 } else
2024 hci_encrypt_cfm(conn, ev->status, ev->encrypt);
2025 }
2026
2027 hci_dev_unlock(hdev);
2028}
2029
2030static inline void hci_change_link_key_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2031{
2032 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2033 struct hci_conn *conn;
2034
2035 BT_DBG("%s status %d", hdev->name, ev->status);
2036
2037 hci_dev_lock(hdev);
2038
2039 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2040 if (conn) {
2041 if (!ev->status)
2042 conn->link_mode |= HCI_LM_SECURE;
2043
2044 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2045
2046 hci_key_change_cfm(conn, ev->status);
2047 }
2048
2049 hci_dev_unlock(hdev);
2050}
2051
2052static inline void hci_remote_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2053{
2054 struct hci_ev_remote_features *ev = (void *) skb->data;
2055 struct hci_conn *conn;
2056
2057 BT_DBG("%s status %d", hdev->name, ev->status);
2058
2059 hci_dev_lock(hdev);
2060
2061 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2062 if (!conn)
2063 goto unlock;
2064
2065 if (!ev->status)
2066 memcpy(conn->features, ev->features, 8);
2067
2068 if (conn->state != BT_CONFIG)
2069 goto unlock;
2070
2071 if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
2072 struct hci_cp_read_remote_ext_features cp;
2073 cp.handle = ev->handle;
2074 cp.page = 0x01;
2075 hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
2076 sizeof(cp), &cp);
2077 goto unlock;
2078 }
2079
2080 if (!ev->status) {
2081 struct hci_cp_remote_name_req cp;
2082 memset(&cp, 0, sizeof(cp));
2083 bacpy(&cp.bdaddr, &conn->dst);
2084 cp.pscan_rep_mode = 0x02;
2085 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2086 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2087 mgmt_device_connected(hdev, &conn->dst, conn->type,
2088 conn->dst_type, NULL, 0,
2089 conn->dev_class);
2090
2091 if (!hci_outgoing_auth_needed(hdev, conn)) {
2092 conn->state = BT_CONNECTED;
2093 hci_proto_connect_cfm(conn, ev->status);
2094 hci_conn_put(conn);
2095 }
2096
2097unlock:
2098 hci_dev_unlock(hdev);
2099}
2100
2101static inline void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
2102{
2103 BT_DBG("%s", hdev->name);
2104}
2105
2106static inline void hci_qos_setup_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2107{
2108 BT_DBG("%s", hdev->name);
2109}
2110
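/* Command Complete event: strip the event header and dispatch to the
 * matching hci_cc_* handler based on the opcode. */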
2111static inline void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2112{
2113 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2114 __u16 opcode;
2115
2116 skb_pull(skb, sizeof(*ev));
2117
2118 opcode = __le16_to_cpu(ev->opcode);
2119
2120 switch (opcode) {
2121 case HCI_OP_INQUIRY_CANCEL:
2122 hci_cc_inquiry_cancel(hdev, skb);
2123 break;
2124
2125 case HCI_OP_EXIT_PERIODIC_INQ:
2126 hci_cc_exit_periodic_inq(hdev, skb);
2127 break;
2128
2129 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2130 hci_cc_remote_name_req_cancel(hdev, skb);
2131 break;
2132
2133 case HCI_OP_ROLE_DISCOVERY:
2134 hci_cc_role_discovery(hdev, skb);
2135 break;
2136
2137 case HCI_OP_READ_LINK_POLICY:
2138 hci_cc_read_link_policy(hdev, skb);
2139 break;
2140
2141 case HCI_OP_WRITE_LINK_POLICY:
2142 hci_cc_write_link_policy(hdev, skb);
2143 break;
2144
2145 case HCI_OP_READ_DEF_LINK_POLICY:
2146 hci_cc_read_def_link_policy(hdev, skb);
2147 break;
2148
2149 case HCI_OP_WRITE_DEF_LINK_POLICY:
2150 hci_cc_write_def_link_policy(hdev, skb);
2151 break;
2152
2153 case HCI_OP_RESET:
2154 hci_cc_reset(hdev, skb);
2155 break;
2156
2157 case HCI_OP_WRITE_LOCAL_NAME:
2158 hci_cc_write_local_name(hdev, skb);
2159 break;
2160
2161 case HCI_OP_READ_LOCAL_NAME:
2162 hci_cc_read_local_name(hdev, skb);
2163 break;
2164
2165 case HCI_OP_WRITE_AUTH_ENABLE:
2166 hci_cc_write_auth_enable(hdev, skb);
2167 break;
2168
2169 case HCI_OP_WRITE_ENCRYPT_MODE:
2170 hci_cc_write_encrypt_mode(hdev, skb);
2171 break;
2172
2173 case HCI_OP_WRITE_SCAN_ENABLE:
2174 hci_cc_write_scan_enable(hdev, skb);
2175 break;
2176
2177 case HCI_OP_READ_CLASS_OF_DEV:
2178 hci_cc_read_class_of_dev(hdev, skb);
2179 break;
2180
2181 case HCI_OP_WRITE_CLASS_OF_DEV:
2182 hci_cc_write_class_of_dev(hdev, skb);
2183 break;
2184
2185 case HCI_OP_READ_VOICE_SETTING:
2186 hci_cc_read_voice_setting(hdev, skb);
2187 break;
2188
2189 case HCI_OP_WRITE_VOICE_SETTING:
2190 hci_cc_write_voice_setting(hdev, skb);
2191 break;
2192
2193 case HCI_OP_HOST_BUFFER_SIZE:
2194 hci_cc_host_buffer_size(hdev, skb);
2195 break;
2196
2197 case HCI_OP_READ_SSP_MODE:
2198 hci_cc_read_ssp_mode(hdev, skb);
2199 break;
2200
2201 case HCI_OP_WRITE_SSP_MODE:
2202 hci_cc_write_ssp_mode(hdev, skb);
2203 break;
2204
2205 case HCI_OP_READ_LOCAL_VERSION:
2206 hci_cc_read_local_version(hdev, skb);
2207 break;
2208
2209 case HCI_OP_READ_LOCAL_COMMANDS:
2210 hci_cc_read_local_commands(hdev, skb);
2211 break;
2212
2213 case HCI_OP_READ_LOCAL_FEATURES:
2214 hci_cc_read_local_features(hdev, skb);
2215 break;
2216
2217 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2218 hci_cc_read_local_ext_features(hdev, skb);
2219 break;
2220
2221 case HCI_OP_READ_BUFFER_SIZE:
2222 hci_cc_read_buffer_size(hdev, skb);
2223 break;
2224
2225 case HCI_OP_READ_BD_ADDR:
2226 hci_cc_read_bd_addr(hdev, skb);
2227 break;
2228
2229 case HCI_OP_READ_DATA_BLOCK_SIZE:
2230 hci_cc_read_data_block_size(hdev, skb);
2231 break;
2232
2233 case HCI_OP_WRITE_CA_TIMEOUT:
2234 hci_cc_write_ca_timeout(hdev, skb);
2235 break;
2236
2237 case HCI_OP_READ_FLOW_CONTROL_MODE:
2238 hci_cc_read_flow_control_mode(hdev, skb);
2239 break;
2240
2241 case HCI_OP_READ_LOCAL_AMP_INFO:
2242 hci_cc_read_local_amp_info(hdev, skb);
2243 break;
2244
2245 case HCI_OP_DELETE_STORED_LINK_KEY:
2246 hci_cc_delete_stored_link_key(hdev, skb);
2247 break;
2248
2249 case HCI_OP_SET_EVENT_MASK:
2250 hci_cc_set_event_mask(hdev, skb);
2251 break;
2252
2253 case HCI_OP_WRITE_INQUIRY_MODE:
2254 hci_cc_write_inquiry_mode(hdev, skb);
2255 break;
2256
2257 case HCI_OP_READ_INQ_RSP_TX_POWER:
2258 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2259 break;
2260
2261 case HCI_OP_SET_EVENT_FLT:
2262 hci_cc_set_event_flt(hdev, skb);
2263 break;
2264
2265 case HCI_OP_PIN_CODE_REPLY:
2266 hci_cc_pin_code_reply(hdev, skb);
2267 break;
2268
2269 case HCI_OP_PIN_CODE_NEG_REPLY:
2270 hci_cc_pin_code_neg_reply(hdev, skb);
2271 break;
2272
2273 case HCI_OP_READ_LOCAL_OOB_DATA:
2274 hci_cc_read_local_oob_data_reply(hdev, skb);
2275 break;
2276
2277 case HCI_OP_LE_READ_BUFFER_SIZE:
2278 hci_cc_le_read_buffer_size(hdev, skb);
2279 break;
2280
2281 case HCI_OP_USER_CONFIRM_REPLY:
2282 hci_cc_user_confirm_reply(hdev, skb);
2283 break;
2284
2285 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2286 hci_cc_user_confirm_neg_reply(hdev, skb);
2287 break;
2288
2289 case HCI_OP_USER_PASSKEY_REPLY:
2290 hci_cc_user_passkey_reply(hdev, skb);
2291 break;
2292
2293 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2294 hci_cc_user_passkey_neg_reply(hdev, skb);
2295
 2295		break;
2296 case HCI_OP_LE_SET_SCAN_PARAM:
2297 hci_cc_le_set_scan_param(hdev, skb);
2298 break;
2299
2300 case HCI_OP_LE_SET_SCAN_ENABLE:
2301 hci_cc_le_set_scan_enable(hdev, skb);
2302 break;
2303
2304 case HCI_OP_LE_LTK_REPLY:
2305 hci_cc_le_ltk_reply(hdev, skb);
2306 break;
2307
2308 case HCI_OP_LE_LTK_NEG_REPLY:
2309 hci_cc_le_ltk_neg_reply(hdev, skb);
2310 break;
2311
2312 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2313 hci_cc_write_le_host_supported(hdev, skb);
2314 break;
2315
2316 default:
2317 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2318 break;
2319 }
2320
2321 if (ev->opcode != HCI_OP_NOP)
2322 del_timer(&hdev->cmd_timer);
2323
2324 if (ev->ncmd) {
2325 atomic_set(&hdev->cmd_cnt, 1);
2326 if (!skb_queue_empty(&hdev->cmd_q))
2327 queue_work(hdev->workqueue, &hdev->cmd_work);
2328 }
2329}
2330
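/* Command Status event: like Command Complete, but for commands that only
 * report an early status (e.g. Inquiry, Create Connection). Dispatch to the
 * matching hci_cs_* handler and refill the command credit unless a reset is
 * in progress.
 */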
2331static inline void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2332{
2333 struct hci_ev_cmd_status *ev = (void *) skb->data;
2334 __u16 opcode;
2335
2336 skb_pull(skb, sizeof(*ev));
2337
2338 opcode = __le16_to_cpu(ev->opcode);
2339
2340 switch (opcode) {
2341 case HCI_OP_INQUIRY:
2342 hci_cs_inquiry(hdev, ev->status);
2343 break;
2344
2345 case HCI_OP_CREATE_CONN:
2346 hci_cs_create_conn(hdev, ev->status);
2347 break;
2348
2349 case HCI_OP_ADD_SCO:
2350 hci_cs_add_sco(hdev, ev->status);
2351 break;
2352
2353 case HCI_OP_AUTH_REQUESTED:
2354 hci_cs_auth_requested(hdev, ev->status);
2355 break;
2356
2357 case HCI_OP_SET_CONN_ENCRYPT:
2358 hci_cs_set_conn_encrypt(hdev, ev->status);
2359 break;
2360
2361 case HCI_OP_REMOTE_NAME_REQ:
2362 hci_cs_remote_name_req(hdev, ev->status);
2363 break;
2364
2365 case HCI_OP_READ_REMOTE_FEATURES:
2366 hci_cs_read_remote_features(hdev, ev->status);
2367 break;
2368
2369 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2370 hci_cs_read_remote_ext_features(hdev, ev->status);
2371 break;
2372
2373 case HCI_OP_SETUP_SYNC_CONN:
2374 hci_cs_setup_sync_conn(hdev, ev->status);
2375 break;
2376
2377 case HCI_OP_SNIFF_MODE:
2378 hci_cs_sniff_mode(hdev, ev->status);
2379 break;
2380
2381 case HCI_OP_EXIT_SNIFF_MODE:
2382 hci_cs_exit_sniff_mode(hdev, ev->status);
2383 break;
2384
2385 case HCI_OP_DISCONNECT:
2386 hci_cs_disconnect(hdev, ev->status);
2387 break;
2388
2389 case HCI_OP_LE_CREATE_CONN:
2390 hci_cs_le_create_conn(hdev, ev->status);
2391 break;
2392
2393 case HCI_OP_LE_START_ENC:
2394 hci_cs_le_start_enc(hdev, ev->status);
2395 break;
2396
2397 default:
2398 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2399 break;
2400 }
2401
2402 if (ev->opcode != HCI_OP_NOP)
2403 del_timer(&hdev->cmd_timer);
2404
2405 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2406 atomic_set(&hdev->cmd_cnt, 1);
2407 if (!skb_queue_empty(&hdev->cmd_q))
2408 queue_work(hdev->workqueue, &hdev->cmd_work);
2409 }
2410}
2411
2412static inline void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2413{
2414 struct hci_ev_role_change *ev = (void *) skb->data;
2415 struct hci_conn *conn;
2416
2417 BT_DBG("%s status %d", hdev->name, ev->status);
2418
2419 hci_dev_lock(hdev);
2420
2421 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2422 if (conn) {
2423 if (!ev->status) {
2424 if (ev->role)
2425 conn->link_mode &= ~HCI_LM_MASTER;
2426 else
2427 conn->link_mode |= HCI_LM_MASTER;
2428 }
2429
2430 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2431
2432 hci_role_switch_cfm(conn, ev->status, ev->role);
2433 }
2434
2435 hci_dev_unlock(hdev);
2436}
2437
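/* Number of Completed Packets event: packet-based controller-to-host flow
 * control. For each handle the completed count is returned to the per-type
 * quota (ACL, LE or SCO; LE falls back to the ACL pool when the controller
 * reports no dedicated LE buffers) before the TX work is kicked.
 */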
2438static inline void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
2439{
2440 struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
2441 int i;
2442
2443 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
2444 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2445 return;
2446 }
2447
2448 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2449 ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
2450 BT_DBG("%s bad parameters", hdev->name);
2451 return;
2452 }
2453
2454 BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);
2455
2456 for (i = 0; i < ev->num_hndl; i++) {
2457 struct hci_comp_pkts_info *info = &ev->handles[i];
2458 struct hci_conn *conn;
2459 __u16 handle, count;
2460
2461 handle = __le16_to_cpu(info->handle);
2462 count = __le16_to_cpu(info->count);
2463
2464 conn = hci_conn_hash_lookup_handle(hdev, handle);
2465 if (!conn)
2466 continue;
2467
2468 conn->sent -= count;
2469
2470 switch (conn->type) {
2471 case ACL_LINK:
2472 hdev->acl_cnt += count;
2473 if (hdev->acl_cnt > hdev->acl_pkts)
2474 hdev->acl_cnt = hdev->acl_pkts;
2475 break;
2476
2477 case LE_LINK:
2478 if (hdev->le_pkts) {
2479 hdev->le_cnt += count;
2480 if (hdev->le_cnt > hdev->le_pkts)
2481 hdev->le_cnt = hdev->le_pkts;
2482 } else {
2483 hdev->acl_cnt += count;
2484 if (hdev->acl_cnt > hdev->acl_pkts)
2485 hdev->acl_cnt = hdev->acl_pkts;
2486 }
2487 break;
2488
2489 case SCO_LINK:
2490 hdev->sco_cnt += count;
2491 if (hdev->sco_cnt > hdev->sco_pkts)
2492 hdev->sco_cnt = hdev->sco_pkts;
2493 break;
2494
2495 default:
2496 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2497 break;
2498 }
2499 }
2500
2501 queue_work(hdev->workqueue, &hdev->tx_work);
2502}
2503
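/* Number of Completed Data Blocks event: block-based flow control variant,
 * only valid when the controller is in block-based flow control mode.
 * Completed blocks are returned to the shared block pool.
 */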
2504static inline void hci_num_comp_blocks_evt(struct hci_dev *hdev,
2505 struct sk_buff *skb)
2506{
2507 struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
2508 int i;
2509
2510 if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
2511 BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
2512 return;
2513 }
2514
2515 if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
2516 ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
2517 BT_DBG("%s bad parameters", hdev->name);
2518 return;
2519 }
2520
2521 BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
2522 ev->num_hndl);
2523
2524 for (i = 0; i < ev->num_hndl; i++) {
2525 struct hci_comp_blocks_info *info = &ev->handles[i];
2526 struct hci_conn *conn;
2527 __u16 handle, block_count;
2528
2529 handle = __le16_to_cpu(info->handle);
2530 block_count = __le16_to_cpu(info->blocks);
2531
2532 conn = hci_conn_hash_lookup_handle(hdev, handle);
2533 if (!conn)
2534 continue;
2535
2536 conn->sent -= block_count;
2537
2538 switch (conn->type) {
2539 case ACL_LINK:
2540 hdev->block_cnt += block_count;
2541 if (hdev->block_cnt > hdev->num_blocks)
2542 hdev->block_cnt = hdev->num_blocks;
2543 break;
2544
2545 default:
2546 BT_ERR("Unknown type %d conn %p", conn->type, conn);
2547 break;
2548 }
2549 }
2550
2551 queue_work(hdev->workqueue, &hdev->tx_work);
2552}
2553
2554static inline void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2555{
2556 struct hci_ev_mode_change *ev = (void *) skb->data;
2557 struct hci_conn *conn;
2558
2559 BT_DBG("%s status %d", hdev->name, ev->status);
2560
2561 hci_dev_lock(hdev);
2562
2563 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2564 if (conn) {
2565 conn->mode = ev->mode;
2566 conn->interval = __le16_to_cpu(ev->interval);
2567
2568 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
2569 if (conn->mode == HCI_CM_ACTIVE)
2570 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2571 else
2572 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2573 }
2574
2575 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2576 hci_sco_setup(conn, ev->status);
2577 }
2578
2579 hci_dev_unlock(hdev);
2580}
2581
2582static inline void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2583{
2584 struct hci_ev_pin_code_req *ev = (void *) skb->data;
2585 struct hci_conn *conn;
2586
2587 BT_DBG("%s", hdev->name);
2588
2589 hci_dev_lock(hdev);
2590
2591 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2592 if (!conn)
2593 goto unlock;
2594
2595 if (conn->state == BT_CONNECTED) {
2596 hci_conn_hold(conn);
2597 conn->disc_timeout = HCI_PAIRING_TIMEOUT;
2598 hci_conn_put(conn);
2599 }
2600
2601 if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
2602 hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
2603 sizeof(ev->bdaddr), &ev->bdaddr);
2604 else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
2605 u8 secure;
2606
2607 if (conn->pending_sec_level == BT_SECURITY_HIGH)
2608 secure = 1;
2609 else
2610 secure = 0;
2611
2612 mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
2613 }
2614
2615unlock:
2616 hci_dev_unlock(hdev);
2617}
2618
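/* Link Key Request event: look up a stored key for the peer and reply with
 * it, unless policy forbids its use (debug keys while debug keys are
 * disabled, unauthenticated keys when MITM protection was requested, or
 * combination keys from a PIN shorter than 16 digits when high security is
 * required). Otherwise send a negative reply.
 */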
2619static inline void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
2620{
2621 struct hci_ev_link_key_req *ev = (void *) skb->data;
2622 struct hci_cp_link_key_reply cp;
2623 struct hci_conn *conn;
2624 struct link_key *key;
2625
2626 BT_DBG("%s", hdev->name);
2627
2628 if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2629 return;
2630
2631 hci_dev_lock(hdev);
2632
2633 key = hci_find_link_key(hdev, &ev->bdaddr);
2634 if (!key) {
2635 BT_DBG("%s link key not found for %s", hdev->name,
2636 batostr(&ev->bdaddr));
2637 goto not_found;
2638 }
2639
2640 BT_DBG("%s found key type %u for %s", hdev->name, key->type,
2641 batostr(&ev->bdaddr));
2642
2643 if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
2644 key->type == HCI_LK_DEBUG_COMBINATION) {
2645 BT_DBG("%s ignoring debug key", hdev->name);
2646 goto not_found;
2647 }
2648
2649 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2650 if (conn) {
2651 if (key->type == HCI_LK_UNAUTH_COMBINATION &&
2652 conn->auth_type != 0xff &&
2653 (conn->auth_type & 0x01)) {
2654 BT_DBG("%s ignoring unauthenticated key", hdev->name);
2655 goto not_found;
2656 }
2657
2658 if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
2659 conn->pending_sec_level == BT_SECURITY_HIGH) {
 2660			BT_DBG("%s ignoring key unauthenticated for high security",
 2661							hdev->name);
2662 goto not_found;
2663 }
2664
2665 conn->key_type = key->type;
2666 conn->pin_length = key->pin_len;
2667 }
2668
2669 bacpy(&cp.bdaddr, &ev->bdaddr);
2670 memcpy(cp.link_key, key->val, 16);
2671
2672 hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);
2673
2674 hci_dev_unlock(hdev);
2675
2676 return;
2677
2678not_found:
2679 hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
2680 hci_dev_unlock(hdev);
2681}
2682
2683static inline void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
2684{
2685 struct hci_ev_link_key_notify *ev = (void *) skb->data;
2686 struct hci_conn *conn;
2687 u8 pin_len = 0;
2688
2689 BT_DBG("%s", hdev->name);
2690
2691 hci_dev_lock(hdev);
2692
2693 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2694 if (conn) {
2695 hci_conn_hold(conn);
2696 conn->disc_timeout = HCI_DISCONN_TIMEOUT;
2697 pin_len = conn->pin_length;
2698
2699 if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
2700 conn->key_type = ev->key_type;
2701
2702 hci_conn_put(conn);
2703 }
2704
2705 if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
2706 hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
2707 ev->key_type, pin_len);
2708
2709 hci_dev_unlock(hdev);
2710}
2711
2712static inline void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2713{
2714 struct hci_ev_clock_offset *ev = (void *) skb->data;
2715 struct hci_conn *conn;
2716
2717 BT_DBG("%s status %d", hdev->name, ev->status);
2718
2719 hci_dev_lock(hdev);
2720
2721 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2722 if (conn && !ev->status) {
2723 struct inquiry_entry *ie;
2724
2725 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2726 if (ie) {
2727 ie->data.clock_offset = ev->clock_offset;
2728 ie->timestamp = jiffies;
2729 }
2730 }
2731
2732 hci_dev_unlock(hdev);
2733}
2734
2735static inline void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2736{
2737 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
2738 struct hci_conn *conn;
2739
2740 BT_DBG("%s status %d", hdev->name, ev->status);
2741
2742 hci_dev_lock(hdev);
2743
2744 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2745 if (conn && !ev->status)
2746 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
2747
2748 hci_dev_unlock(hdev);
2749}
2750
2751static inline void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
2752{
2753 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
2754 struct inquiry_entry *ie;
2755
2756 BT_DBG("%s", hdev->name);
2757
2758 hci_dev_lock(hdev);
2759
2760 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
2761 if (ie) {
2762 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
2763 ie->timestamp = jiffies;
2764 }
2765
2766 hci_dev_unlock(hdev);
2767}
2768
2769static inline void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, struct sk_buff *skb)
2770{
2771 struct inquiry_data data;
2772 int num_rsp = *((__u8 *) skb->data);
2773 bool name_known;
2774
2775 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2776
2777 if (!num_rsp)
2778 return;
2779
2780 hci_dev_lock(hdev);
2781
2782 if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
2783 struct inquiry_info_with_rssi_and_pscan_mode *info;
2784 info = (void *) (skb->data + 1);
2785
2786 for (; num_rsp; num_rsp--, info++) {
2787 bacpy(&data.bdaddr, &info->bdaddr);
2788 data.pscan_rep_mode = info->pscan_rep_mode;
2789 data.pscan_period_mode = info->pscan_period_mode;
2790 data.pscan_mode = info->pscan_mode;
2791 memcpy(data.dev_class, info->dev_class, 3);
2792 data.clock_offset = info->clock_offset;
2793 data.rssi = info->rssi;
2794 data.ssp_mode = 0x00;
2795
2796 name_known = hci_inquiry_cache_update(hdev, &data,
2797 false);
2798 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2799 info->dev_class, info->rssi,
2800 !name_known, NULL, 0);
2801 }
2802 } else {
2803 struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);
2804
2805 for (; num_rsp; num_rsp--, info++) {
2806 bacpy(&data.bdaddr, &info->bdaddr);
2807 data.pscan_rep_mode = info->pscan_rep_mode;
2808 data.pscan_period_mode = info->pscan_period_mode;
2809 data.pscan_mode = 0x00;
2810 memcpy(data.dev_class, info->dev_class, 3);
2811 data.clock_offset = info->clock_offset;
2812 data.rssi = info->rssi;
2813 data.ssp_mode = 0x00;
2814 name_known = hci_inquiry_cache_update(hdev, &data,
2815 false);
2816 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2817 info->dev_class, info->rssi,
2818 !name_known, NULL, 0);
2819 }
2820 }
2821
2822 hci_dev_unlock(hdev);
2823}
2824
2825static inline void hci_remote_ext_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
2826{
2827 struct hci_ev_remote_ext_features *ev = (void *) skb->data;
2828 struct hci_conn *conn;
2829
2830 BT_DBG("%s", hdev->name);
2831
2832 hci_dev_lock(hdev);
2833
2834 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2835 if (!conn)
2836 goto unlock;
2837
2838 if (!ev->status && ev->page == 0x01) {
2839 struct inquiry_entry *ie;
2840
2841 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
2842 if (ie)
2843 ie->data.ssp_mode = (ev->features[0] & 0x01);
2844
2845 if (ev->features[0] & 0x01)
2846 set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
2847 }
2848
2849 if (conn->state != BT_CONFIG)
2850 goto unlock;
2851
2852 if (!ev->status) {
2853 struct hci_cp_remote_name_req cp;
2854 memset(&cp, 0, sizeof(cp));
2855 bacpy(&cp.bdaddr, &conn->dst);
2856 cp.pscan_rep_mode = 0x02;
2857 hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
2858 } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2859 mgmt_device_connected(hdev, &conn->dst, conn->type,
2860 conn->dst_type, NULL, 0,
2861 conn->dev_class);
2862
2863 if (!hci_outgoing_auth_needed(hdev, conn)) {
2864 conn->state = BT_CONNECTED;
2865 hci_proto_connect_cfm(conn, ev->status);
2866 hci_conn_put(conn);
2867 }
2868
2869unlock:
2870 hci_dev_unlock(hdev);
2871}
2872
2873static inline void hci_sync_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2874{
2875 struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
2876 struct hci_conn *conn;
2877
2878 BT_DBG("%s status %d", hdev->name, ev->status);
2879
2880 hci_dev_lock(hdev);
2881
2882 conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
2883 if (!conn) {
2884 if (ev->link_type == ESCO_LINK)
2885 goto unlock;
2886
2887 conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
2888 if (!conn)
2889 goto unlock;
2890
2891 conn->type = SCO_LINK;
2892 }
2893
2894 switch (ev->status) {
2895 case 0x00:
2896 conn->handle = __le16_to_cpu(ev->handle);
2897 conn->state = BT_CONNECTED;
2898
2899 hci_conn_hold_device(conn);
2900 hci_conn_add_sysfs(conn);
2901 break;
2902
2903 case 0x11: /* Unsupported Feature or Parameter Value */
2904 case 0x1c: /* SCO interval rejected */
2905 case 0x1a: /* Unsupported Remote Feature */
2906 case 0x1f: /* Unspecified error */
2907 if (conn->out && conn->attempt < 2) {
2908 conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
2909 (hdev->esco_type & EDR_ESCO_MASK);
2910 hci_setup_sync(conn, conn->link->handle);
2911 goto unlock;
2912 }
2913 /* fall through */
2914
2915 default:
2916 conn->state = BT_CLOSED;
2917 break;
2918 }
2919
2920 hci_proto_connect_cfm(conn, ev->status);
2921 if (ev->status)
2922 hci_conn_del(conn);
2923
2924unlock:
2925 hci_dev_unlock(hdev);
2926}
2927
2928static inline void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
2929{
2930 BT_DBG("%s", hdev->name);
2931}
2932
2933static inline void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
2934{
2935 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
2936
2937 BT_DBG("%s status %d", hdev->name, ev->status);
2938}
2939
2940static inline void hci_extended_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
2941{
2942 struct inquiry_data data;
2943 struct extended_inquiry_info *info = (void *) (skb->data + 1);
2944 int num_rsp = *((__u8 *) skb->data);
2945
2946 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
2947
2948 if (!num_rsp)
2949 return;
2950
2951 hci_dev_lock(hdev);
2952
2953 for (; num_rsp; num_rsp--, info++) {
2954 bool name_known;
2955
2956 bacpy(&data.bdaddr, &info->bdaddr);
2957 data.pscan_rep_mode = info->pscan_rep_mode;
2958 data.pscan_period_mode = info->pscan_period_mode;
2959 data.pscan_mode = 0x00;
2960 memcpy(data.dev_class, info->dev_class, 3);
2961 data.clock_offset = info->clock_offset;
2962 data.rssi = info->rssi;
2963 data.ssp_mode = 0x01;
2964
2965 if (test_bit(HCI_MGMT, &hdev->dev_flags))
2966 name_known = eir_has_data_type(info->data,
2967 sizeof(info->data),
2968 EIR_NAME_COMPLETE);
2969 else
2970 name_known = true;
2971
2972 name_known = hci_inquiry_cache_update(hdev, &data, name_known);
2973 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
2974 info->dev_class, info->rssi,
2975 !name_known, info->data,
2976 sizeof(info->data));
2977 }
2978
2979 hci_dev_unlock(hdev);
2980}
2981
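/* Work out the authentication requirement to quote in an IO Capability
 * Reply: follow the remote's bonding request and only require MITM
 * protection when both sides' IO capabilities can actually support it.
 */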
2982static inline u8 hci_get_auth_req(struct hci_conn *conn)
2983{
 2984	/* If the remote requests dedicated bonding, follow that lead */
2985 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
2986 /* If both remote and local IO capabilities allow MITM
2987 * protection then require it, otherwise don't */
2988 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
2989 return 0x02;
2990 else
2991 return 0x03;
2992 }
2993
 2994	/* If the remote requests no-bonding, follow that lead */
2995 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
2996 return conn->remote_auth | (conn->auth_type & 0x01);
2997
2998 return conn->auth_type;
2999}
3000
3001static inline void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3002{
3003 struct hci_ev_io_capa_request *ev = (void *) skb->data;
3004 struct hci_conn *conn;
3005
3006 BT_DBG("%s", hdev->name);
3007
3008 hci_dev_lock(hdev);
3009
3010 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3011 if (!conn)
3012 goto unlock;
3013
3014 hci_conn_hold(conn);
3015
3016 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3017 goto unlock;
3018
3019 if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
3020 (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
3021 struct hci_cp_io_capability_reply cp;
3022
3023 bacpy(&cp.bdaddr, &ev->bdaddr);
 3024		/* Change the IO capability from KeyboardDisplay to
 3025		 * DisplayYesNo, as the former is not supported by the BT spec. */
3026 cp.capability = (conn->io_capability == 0x04) ?
3027 0x01 : conn->io_capability;
3028 conn->auth_type = hci_get_auth_req(conn);
3029 cp.authentication = conn->auth_type;
3030
3031 if ((conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)) &&
3032 hci_find_remote_oob_data(hdev, &conn->dst))
3033 cp.oob_data = 0x01;
3034 else
3035 cp.oob_data = 0x00;
3036
3037 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
3038 sizeof(cp), &cp);
3039 } else {
3040 struct hci_cp_io_capability_neg_reply cp;
3041
3042 bacpy(&cp.bdaddr, &ev->bdaddr);
3043 cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;
3044
3045 hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
3046 sizeof(cp), &cp);
3047 }
3048
3049unlock:
3050 hci_dev_unlock(hdev);
3051}
3052
3053static inline void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3054{
3055 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3056 struct hci_conn *conn;
3057
3058 BT_DBG("%s", hdev->name);
3059
3060 hci_dev_lock(hdev);
3061
3062 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3063 if (!conn)
3064 goto unlock;
3065
3066 conn->remote_cap = ev->capability;
3067 conn->remote_auth = ev->authentication;
3068 if (ev->oob_data)
3069 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3070
3071unlock:
3072 hci_dev_unlock(hdev);
3073}
3074
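/* User Confirmation Request event (SSP numeric comparison): reject when we
 * need MITM protection that the remote cannot provide, auto-accept
 * (optionally after a configurable delay) when neither side needs MITM, and
 * otherwise hand the decision to user space via mgmt.
 */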
3075static inline void hci_user_confirm_request_evt(struct hci_dev *hdev,
3076 struct sk_buff *skb)
3077{
3078 struct hci_ev_user_confirm_req *ev = (void *) skb->data;
3079 int loc_mitm, rem_mitm, confirm_hint = 0;
3080 struct hci_conn *conn;
3081
3082 BT_DBG("%s", hdev->name);
3083
3084 hci_dev_lock(hdev);
3085
3086 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3087 goto unlock;
3088
3089 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3090 if (!conn)
3091 goto unlock;
3092
3093 loc_mitm = (conn->auth_type & 0x01);
3094 rem_mitm = (conn->remote_auth & 0x01);
3095
3096 /* If we require MITM but the remote device can't provide that
3097 * (it has NoInputNoOutput) then reject the confirmation
3098 * request. The only exception is when we're dedicated bonding
3099 * initiators (connect_cfm_cb set) since then we always have the MITM
3100 * bit set. */
3101 if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
3102 BT_DBG("Rejecting request: remote device can't provide MITM");
3103 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
3104 sizeof(ev->bdaddr), &ev->bdaddr);
3105 goto unlock;
3106 }
3107
 3108	/* If neither side requires MITM protection, auto-accept */
3109 if ((!loc_mitm || conn->remote_cap == 0x03) &&
3110 (!rem_mitm || conn->io_capability == 0x03)) {
3111
 3112		/* If we're not the initiator, request authorization to
3113 * proceed from user space (mgmt_user_confirm with
3114 * confirm_hint set to 1). */
3115 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
3116 BT_DBG("Confirming auto-accept as acceptor");
3117 confirm_hint = 1;
3118 goto confirm;
3119 }
3120
3121 BT_DBG("Auto-accept of user confirmation with %ums delay",
3122 hdev->auto_accept_delay);
3123
3124 if (hdev->auto_accept_delay > 0) {
3125 int delay = msecs_to_jiffies(hdev->auto_accept_delay);
3126 mod_timer(&conn->auto_accept_timer, jiffies + delay);
3127 goto unlock;
3128 }
3129
3130 hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
3131 sizeof(ev->bdaddr), &ev->bdaddr);
3132 goto unlock;
3133 }
3134
3135confirm:
3136 mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
3137 confirm_hint);
3138
3139unlock:
3140 hci_dev_unlock(hdev);
3141}
3142
3143static inline void hci_user_passkey_request_evt(struct hci_dev *hdev,
3144 struct sk_buff *skb)
3145{
3146 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3147
3148 BT_DBG("%s", hdev->name);
3149
3150 hci_dev_lock(hdev);
3151
3152 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3153 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3154
3155 hci_dev_unlock(hdev);
3156}
3157
3158static inline void hci_simple_pair_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3159{
3160 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3161 struct hci_conn *conn;
3162
3163 BT_DBG("%s", hdev->name);
3164
3165 hci_dev_lock(hdev);
3166
3167 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3168 if (!conn)
3169 goto unlock;
3170
 3171	/* To avoid duplicate auth_failed events to user space we check
 3172	 * the HCI_CONN_AUTH_PEND flag, which will be set if we
 3173	 * initiated the authentication. As initiator we always get a
 3174	 * traditional auth_complete event, which is also mapped to
 3175	 * the mgmt_auth_failed event */
3176 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status != 0)
3177 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3178 ev->status);
3179
3180 hci_conn_put(conn);
3181
3182unlock:
3183 hci_dev_unlock(hdev);
3184}
3185
3186static inline void hci_remote_host_features_evt(struct hci_dev *hdev, struct sk_buff *skb)
3187{
3188 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3189 struct inquiry_entry *ie;
3190
3191 BT_DBG("%s", hdev->name);
3192
3193 hci_dev_lock(hdev);
3194
3195 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3196 if (ie)
3197 ie->data.ssp_mode = (ev->features[0] & 0x01);
3198
3199 hci_dev_unlock(hdev);
3200}
3201
3202static inline void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3203 struct sk_buff *skb)
3204{
3205 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3206 struct oob_data *data;
3207
3208 BT_DBG("%s", hdev->name);
3209
3210 hci_dev_lock(hdev);
3211
3212 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3213 goto unlock;
3214
3215 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3216 if (data) {
3217 struct hci_cp_remote_oob_data_reply cp;
3218
3219 bacpy(&cp.bdaddr, &ev->bdaddr);
3220 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3221 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3222
3223 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3224 &cp);
3225 } else {
3226 struct hci_cp_remote_oob_data_neg_reply cp;
3227
3228 bacpy(&cp.bdaddr, &ev->bdaddr);
3229 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3230 &cp);
3231 }
3232
3233unlock:
3234 hci_dev_unlock(hdev);
3235}
3236
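/* LE Connection Complete event: create or look up the LE hci_conn for the
 * peer, report success or failure to the management interface, and move the
 * connection to BT_CONNECTED on success.
 */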
3237static inline void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
3238{
3239 struct hci_ev_le_conn_complete *ev = (void *) skb->data;
3240 struct hci_conn *conn;
3241
3242 BT_DBG("%s status %d", hdev->name, ev->status);
3243
3244 hci_dev_lock(hdev);
3245
3246 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &ev->bdaddr);
3247 if (!conn) {
3248 conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
3249 if (!conn) {
3250 BT_ERR("No memory for new connection");
3251 hci_dev_unlock(hdev);
3252 return;
3253 }
3254
3255 conn->dst_type = ev->bdaddr_type;
3256 }
3257
3258 if (ev->status) {
3259 mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
3260 conn->dst_type, ev->status);
3261 hci_proto_connect_cfm(conn, ev->status);
3262 conn->state = BT_CLOSED;
3263 hci_conn_del(conn);
3264 goto unlock;
3265 }
3266
3267 if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
3268 mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
3269 conn->dst_type, NULL, 0, 0);
3270
3271 conn->sec_level = BT_SECURITY_LOW;
3272 conn->handle = __le16_to_cpu(ev->handle);
3273 conn->state = BT_CONNECTED;
3274
3275 hci_conn_hold_device(conn);
3276 hci_conn_add_sysfs(conn);
3277
3278 hci_proto_connect_cfm(conn, ev->status);
3279
3280unlock:
3281 hci_dev_unlock(hdev);
3282}
3283
3284static inline void hci_le_adv_report_evt(struct hci_dev *hdev,
3285 struct sk_buff *skb)
3286{
3287 u8 num_reports = skb->data[0];
3288 void *ptr = &skb->data[1];
3289 s8 rssi;
3290
3291 hci_dev_lock(hdev);
3292
3293 while (num_reports--) {
3294 struct hci_ev_le_advertising_info *ev = ptr;
3295
3296 hci_add_adv_entry(hdev, ev);
3297
3298 rssi = ev->data[ev->length];
3299 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3300 NULL, rssi, 0, ev->data, ev->length);
3301
3302 ptr += sizeof(*ev) + ev->length + 1;
3303 }
3304
3305 hci_dev_unlock(hdev);
3306}
3307
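/* LE Long Term Key Request event: look up the LTK matching the ediv/rand
 * values and hand it back to the controller; STKs are single-use and are
 * deleted after the reply. Without a matching key, send a negative reply.
 */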
3308static inline void hci_le_ltk_request_evt(struct hci_dev *hdev,
3309 struct sk_buff *skb)
3310{
3311 struct hci_ev_le_ltk_req *ev = (void *) skb->data;
3312 struct hci_cp_le_ltk_reply cp;
3313 struct hci_cp_le_ltk_neg_reply neg;
3314 struct hci_conn *conn;
3315 struct smp_ltk *ltk;
3316
 3317	BT_DBG("%s handle %d", hdev->name, __le16_to_cpu(ev->handle));
3318
3319 hci_dev_lock(hdev);
3320
3321 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3322 if (conn == NULL)
3323 goto not_found;
3324
3325 ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
3326 if (ltk == NULL)
3327 goto not_found;
3328
3329 memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
3330 cp.handle = cpu_to_le16(conn->handle);
3331
3332 if (ltk->authenticated)
3333 conn->sec_level = BT_SECURITY_HIGH;
3334
3335 hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);
3336
3337 if (ltk->type & HCI_SMP_STK) {
3338 list_del(&ltk->list);
3339 kfree(ltk);
3340 }
3341
3342 hci_dev_unlock(hdev);
3343
3344 return;
3345
3346not_found:
3347 neg.handle = ev->handle;
3348 hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
3349 hci_dev_unlock(hdev);
3350}
3351
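/* LE Meta event: strip the meta header and demultiplex on the subevent code
 * to the LE-specific handlers above.
 */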
3352static inline void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3353{
3354 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3355
3356 skb_pull(skb, sizeof(*le_ev));
3357
3358 switch (le_ev->subevent) {
3359 case HCI_EV_LE_CONN_COMPLETE:
3360 hci_le_conn_complete_evt(hdev, skb);
3361 break;
3362
3363 case HCI_EV_LE_ADVERTISING_REPORT:
3364 hci_le_adv_report_evt(hdev, skb);
3365 break;
3366
3367 case HCI_EV_LE_LTK_REQ:
3368 hci_le_ltk_request_evt(hdev, skb);
3369 break;
3370
3371 default:
3372 break;
3373 }
3374}
3375
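/* Main HCI event dispatcher: called for every incoming event packet, strips
 * the event header and routes the payload to the matching handler before
 * freeing the skb and updating the RX event counter.
 */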
3376void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
3377{
3378 struct hci_event_hdr *hdr = (void *) skb->data;
3379 __u8 event = hdr->evt;
3380
3381 skb_pull(skb, HCI_EVENT_HDR_SIZE);
3382
3383 switch (event) {
3384 case HCI_EV_INQUIRY_COMPLETE:
3385 hci_inquiry_complete_evt(hdev, skb);
3386 break;
3387
3388 case HCI_EV_INQUIRY_RESULT:
3389 hci_inquiry_result_evt(hdev, skb);
3390 break;
3391
3392 case HCI_EV_CONN_COMPLETE:
3393 hci_conn_complete_evt(hdev, skb);
3394 break;
3395
3396 case HCI_EV_CONN_REQUEST:
3397 hci_conn_request_evt(hdev, skb);
3398 break;
3399
3400 case HCI_EV_DISCONN_COMPLETE:
3401 hci_disconn_complete_evt(hdev, skb);
3402 break;
3403
3404 case HCI_EV_AUTH_COMPLETE:
3405 hci_auth_complete_evt(hdev, skb);
3406 break;
3407
3408 case HCI_EV_REMOTE_NAME:
3409 hci_remote_name_evt(hdev, skb);
3410 break;
3411
3412 case HCI_EV_ENCRYPT_CHANGE:
3413 hci_encrypt_change_evt(hdev, skb);
3414 break;
3415
3416 case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
3417 hci_change_link_key_complete_evt(hdev, skb);
3418 break;
3419
3420 case HCI_EV_REMOTE_FEATURES:
3421 hci_remote_features_evt(hdev, skb);
3422 break;
3423
3424 case HCI_EV_REMOTE_VERSION:
3425 hci_remote_version_evt(hdev, skb);
3426 break;
3427
3428 case HCI_EV_QOS_SETUP_COMPLETE:
3429 hci_qos_setup_complete_evt(hdev, skb);
3430 break;
3431
3432 case HCI_EV_CMD_COMPLETE:
3433 hci_cmd_complete_evt(hdev, skb);
3434 break;
3435
3436 case HCI_EV_CMD_STATUS:
3437 hci_cmd_status_evt(hdev, skb);
3438 break;
3439
3440 case HCI_EV_ROLE_CHANGE:
3441 hci_role_change_evt(hdev, skb);
3442 break;
3443
3444 case HCI_EV_NUM_COMP_PKTS:
3445 hci_num_comp_pkts_evt(hdev, skb);
3446 break;
3447
3448 case HCI_EV_MODE_CHANGE:
3449 hci_mode_change_evt(hdev, skb);
3450 break;
3451
3452 case HCI_EV_PIN_CODE_REQ:
3453 hci_pin_code_request_evt(hdev, skb);
3454 break;
3455
3456 case HCI_EV_LINK_KEY_REQ:
3457 hci_link_key_request_evt(hdev, skb);
3458 break;
3459
3460 case HCI_EV_LINK_KEY_NOTIFY:
3461 hci_link_key_notify_evt(hdev, skb);
3462 break;
3463
3464 case HCI_EV_CLOCK_OFFSET:
3465 hci_clock_offset_evt(hdev, skb);
3466 break;
3467
3468 case HCI_EV_PKT_TYPE_CHANGE:
3469 hci_pkt_type_change_evt(hdev, skb);
3470 break;
3471
3472 case HCI_EV_PSCAN_REP_MODE:
3473 hci_pscan_rep_mode_evt(hdev, skb);
3474 break;
3475
3476 case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
3477 hci_inquiry_result_with_rssi_evt(hdev, skb);
3478 break;
3479
3480 case HCI_EV_REMOTE_EXT_FEATURES:
3481 hci_remote_ext_features_evt(hdev, skb);
3482 break;
3483
3484 case HCI_EV_SYNC_CONN_COMPLETE:
3485 hci_sync_conn_complete_evt(hdev, skb);
3486 break;
3487
3488 case HCI_EV_SYNC_CONN_CHANGED:
3489 hci_sync_conn_changed_evt(hdev, skb);
3490 break;
3491
3492 case HCI_EV_SNIFF_SUBRATE:
3493 hci_sniff_subrate_evt(hdev, skb);
3494 break;
3495
3496 case HCI_EV_EXTENDED_INQUIRY_RESULT:
3497 hci_extended_inquiry_result_evt(hdev, skb);
3498 break;
3499
3500 case HCI_EV_IO_CAPA_REQUEST:
3501 hci_io_capa_request_evt(hdev, skb);
3502 break;
3503
3504 case HCI_EV_IO_CAPA_REPLY:
3505 hci_io_capa_reply_evt(hdev, skb);
3506 break;
3507
3508 case HCI_EV_USER_CONFIRM_REQUEST:
3509 hci_user_confirm_request_evt(hdev, skb);
3510 break;
3511
3512 case HCI_EV_USER_PASSKEY_REQUEST:
3513 hci_user_passkey_request_evt(hdev, skb);
3514 break;
3515
3516 case HCI_EV_SIMPLE_PAIR_COMPLETE:
3517 hci_simple_pair_complete_evt(hdev, skb);
3518 break;
3519
3520 case HCI_EV_REMOTE_HOST_FEATURES:
3521 hci_remote_host_features_evt(hdev, skb);
3522 break;
3523
3524 case HCI_EV_LE_META:
3525 hci_le_meta_evt(hdev, skb);
3526 break;
3527
3528 case HCI_EV_REMOTE_OOB_DATA_REQUEST:
3529 hci_remote_oob_data_request_evt(hdev, skb);
3530 break;
3531
3532 case HCI_EV_NUM_COMP_BLOCKS:
3533 hci_num_comp_blocks_evt(hdev, skb);
3534 break;
3535
3536 default:
3537 BT_DBG("%s event 0x%x", hdev->name, event);
3538 break;
3539 }
3540
3541 kfree_skb(skb);
3542 hdev->stat.evt_rx++;
3543}
3544
3545/* Generate internal stack event */
3546void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
3547{
3548 struct hci_event_hdr *hdr;
3549 struct hci_ev_stack_internal *ev;
3550 struct sk_buff *skb;
3551
3552 skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
3553 if (!skb)
3554 return;
3555
3556 hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
3557 hdr->evt = HCI_EV_STACK_INTERNAL;
3558 hdr->plen = sizeof(*ev) + dlen;
3559
3560 ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
3561 ev->type = type;
3562 memcpy(ev->data, data, dlen);
3563
3564 bt_cb(skb)->incoming = 1;
3565 __net_timestamp(skb);
3566
3567 bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
3568 skb->dev = (void *) hdev;
3569 hci_send_to_sock(hdev, skb, NULL);
3570 kfree_skb(skb);
3571}
3572
3573module_param(enable_le, bool, 0644);
3574MODULE_PARM_DESC(enable_le, "Enable LE support");