Bluetooth: Read advertising channel TX power during init sequence
[deliverable/linux.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/a2mp.h>
34 #include <net/bluetooth/amp.h>
35
36 /* Handle HCI Event packets */
37
38 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
39 {
40 __u8 status = *((__u8 *) skb->data);
41
42 BT_DBG("%s status 0x%2.2x", hdev->name, status);
43
44 if (status) {
45 hci_dev_lock(hdev);
46 mgmt_stop_discovery_failed(hdev, status);
47 hci_dev_unlock(hdev);
48 return;
49 }
50
51 clear_bit(HCI_INQUIRY, &hdev->flags);
52
53 hci_dev_lock(hdev);
54 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
55 hci_dev_unlock(hdev);
56
57 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
58
59 hci_conn_check_pending(hdev);
60 }
61
62 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
63 {
64 __u8 status = *((__u8 *) skb->data);
65
66 BT_DBG("%s status 0x%2.2x", hdev->name, status);
67
68 if (status)
69 return;
70
71 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
72 }
73
74 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
75 {
76 __u8 status = *((__u8 *) skb->data);
77
78 BT_DBG("%s status 0x%2.2x", hdev->name, status);
79
80 if (status)
81 return;
82
83 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
84
85 hci_conn_check_pending(hdev);
86 }
87
88 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
89 struct sk_buff *skb)
90 {
91 BT_DBG("%s", hdev->name);
92 }
93
94 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
95 {
96 struct hci_rp_role_discovery *rp = (void *) skb->data;
97 struct hci_conn *conn;
98
99 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
100
101 if (rp->status)
102 return;
103
104 hci_dev_lock(hdev);
105
106 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
107 if (conn) {
108 if (rp->role)
109 conn->link_mode &= ~HCI_LM_MASTER;
110 else
111 conn->link_mode |= HCI_LM_MASTER;
112 }
113
114 hci_dev_unlock(hdev);
115 }
116
117 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
118 {
119 struct hci_rp_read_link_policy *rp = (void *) skb->data;
120 struct hci_conn *conn;
121
122 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
123
124 if (rp->status)
125 return;
126
127 hci_dev_lock(hdev);
128
129 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
130 if (conn)
131 conn->link_policy = __le16_to_cpu(rp->policy);
132
133 hci_dev_unlock(hdev);
134 }
135
136 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
137 {
138 struct hci_rp_write_link_policy *rp = (void *) skb->data;
139 struct hci_conn *conn;
140 void *sent;
141
142 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
143
144 if (rp->status)
145 return;
146
147 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
148 if (!sent)
149 return;
150
151 hci_dev_lock(hdev);
152
153 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
154 if (conn)
155 conn->link_policy = get_unaligned_le16(sent + 2);
156
157 hci_dev_unlock(hdev);
158 }
159
160 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
161 struct sk_buff *skb)
162 {
163 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
164
165 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
166
167 if (rp->status)
168 return;
169
170 hdev->link_policy = __le16_to_cpu(rp->policy);
171 }
172
173 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
174 struct sk_buff *skb)
175 {
176 __u8 status = *((__u8 *) skb->data);
177 void *sent;
178
179 BT_DBG("%s status 0x%2.2x", hdev->name, status);
180
181 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
182 if (!sent)
183 return;
184
185 if (!status)
186 hdev->link_policy = get_unaligned_le16(sent);
187
188 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
189 }
190
191 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
192 {
193 __u8 status = *((__u8 *) skb->data);
194
195 BT_DBG("%s status 0x%2.2x", hdev->name, status);
196
197 clear_bit(HCI_RESET, &hdev->flags);
198
199 hci_req_complete(hdev, HCI_OP_RESET, status);
200
201 /* Reset all non-persistent flags */
202 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
203 BIT(HCI_PERIODIC_INQ));
204
205 hdev->discovery.state = DISCOVERY_STOPPED;
206 }
207
208 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
209 {
210 __u8 status = *((__u8 *) skb->data);
211 void *sent;
212
213 BT_DBG("%s status 0x%2.2x", hdev->name, status);
214
215 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
216 if (!sent)
217 return;
218
219 hci_dev_lock(hdev);
220
221 if (test_bit(HCI_MGMT, &hdev->dev_flags))
222 mgmt_set_local_name_complete(hdev, sent, status);
223 else if (!status)
224 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
225
226 hci_dev_unlock(hdev);
227
228 hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
229 }
230
231 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
232 {
233 struct hci_rp_read_local_name *rp = (void *) skb->data;
234
235 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
236
237 if (rp->status)
238 return;
239
240 if (test_bit(HCI_SETUP, &hdev->dev_flags))
241 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
242 }
243
244 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
245 {
246 __u8 status = *((__u8 *) skb->data);
247 void *sent;
248
249 BT_DBG("%s status 0x%2.2x", hdev->name, status);
250
251 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
252 if (!sent)
253 return;
254
255 if (!status) {
256 __u8 param = *((__u8 *) sent);
257
258 if (param == AUTH_ENABLED)
259 set_bit(HCI_AUTH, &hdev->flags);
260 else
261 clear_bit(HCI_AUTH, &hdev->flags);
262 }
263
264 if (test_bit(HCI_MGMT, &hdev->dev_flags))
265 mgmt_auth_enable_complete(hdev, status);
266
267 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
268 }
269
270 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
271 {
272 __u8 status = *((__u8 *) skb->data);
273 void *sent;
274
275 BT_DBG("%s status 0x%2.2x", hdev->name, status);
276
277 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
278 if (!sent)
279 return;
280
281 if (!status) {
282 __u8 param = *((__u8 *) sent);
283
284 if (param)
285 set_bit(HCI_ENCRYPT, &hdev->flags);
286 else
287 clear_bit(HCI_ENCRYPT, &hdev->flags);
288 }
289
290 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
291 }
292
293 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
294 {
295 __u8 param, status = *((__u8 *) skb->data);
296 int old_pscan, old_iscan;
297 void *sent;
298
299 BT_DBG("%s status 0x%2.2x", hdev->name, status);
300
301 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
302 if (!sent)
303 return;
304
305 param = *((__u8 *) sent);
306
307 hci_dev_lock(hdev);
308
309 if (status) {
310 mgmt_write_scan_failed(hdev, param, status);
311 hdev->discov_timeout = 0;
312 goto done;
313 }
314
315 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
316 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
317
318 if (param & SCAN_INQUIRY) {
319 set_bit(HCI_ISCAN, &hdev->flags);
320 if (!old_iscan)
321 mgmt_discoverable(hdev, 1);
322 if (hdev->discov_timeout > 0) {
323 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
324 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
325 to);
326 }
327 } else if (old_iscan)
328 mgmt_discoverable(hdev, 0);
329
330 if (param & SCAN_PAGE) {
331 set_bit(HCI_PSCAN, &hdev->flags);
332 if (!old_pscan)
333 mgmt_connectable(hdev, 1);
334 } else if (old_pscan)
335 mgmt_connectable(hdev, 0);
336
337 done:
338 hci_dev_unlock(hdev);
339 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
340 }
341
342 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
343 {
344 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
345
346 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
347
348 if (rp->status)
349 return;
350
351 memcpy(hdev->dev_class, rp->dev_class, 3);
352
353 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
354 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
355 }
356
357 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
358 {
359 __u8 status = *((__u8 *) skb->data);
360 void *sent;
361
362 BT_DBG("%s status 0x%2.2x", hdev->name, status);
363
364 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
365 if (!sent)
366 return;
367
368 hci_dev_lock(hdev);
369
370 if (status == 0)
371 memcpy(hdev->dev_class, sent, 3);
372
373 if (test_bit(HCI_MGMT, &hdev->dev_flags))
374 mgmt_set_class_of_dev_complete(hdev, sent, status);
375
376 hci_dev_unlock(hdev);
377 }
378
379 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
380 {
381 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
382 __u16 setting;
383
384 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
385
386 if (rp->status)
387 return;
388
389 setting = __le16_to_cpu(rp->voice_setting);
390
391 if (hdev->voice_setting == setting)
392 return;
393
394 hdev->voice_setting = setting;
395
396 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
397
398 if (hdev->notify)
399 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
400 }
401
402 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
403 struct sk_buff *skb)
404 {
405 __u8 status = *((__u8 *) skb->data);
406 __u16 setting;
407 void *sent;
408
409 BT_DBG("%s status 0x%2.2x", hdev->name, status);
410
411 if (status)
412 return;
413
414 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
415 if (!sent)
416 return;
417
418 setting = get_unaligned_le16(sent);
419
420 if (hdev->voice_setting == setting)
421 return;
422
423 hdev->voice_setting = setting;
424
425 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
426
427 if (hdev->notify)
428 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
429 }
430
431 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
432 {
433 __u8 status = *((__u8 *) skb->data);
434
435 BT_DBG("%s status 0x%2.2x", hdev->name, status);
436
437 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
438 }
439
440 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
441 {
442 __u8 status = *((__u8 *) skb->data);
443 void *sent;
444
445 BT_DBG("%s status 0x%2.2x", hdev->name, status);
446
447 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
448 if (!sent)
449 return;
450
451 if (test_bit(HCI_MGMT, &hdev->dev_flags))
452 mgmt_ssp_enable_complete(hdev, *((u8 *) sent), status);
453 else if (!status) {
454 if (*((u8 *) sent))
455 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
456 else
457 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
458 }
459 }
460
461 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
462 {
463 if (hdev->features[6] & LMP_EXT_INQ)
464 return 2;
465
466 if (hdev->features[3] & LMP_RSSI_INQ)
467 return 1;
468
469 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
470 hdev->lmp_subver == 0x0757)
471 return 1;
472
473 if (hdev->manufacturer == 15) {
474 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
475 return 1;
476 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
477 return 1;
478 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
479 return 1;
480 }
481
482 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
483 hdev->lmp_subver == 0x1805)
484 return 1;
485
486 return 0;
487 }
488
489 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
490 {
491 u8 mode;
492
493 mode = hci_get_inquiry_mode(hdev);
494
495 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
496 }
497
498 static void hci_setup_event_mask(struct hci_dev *hdev)
499 {
500 /* The second byte is 0xff instead of 0x9f (two reserved bits
501 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
502 * command otherwise */
503 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
504
505 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
506 * any event mask for pre 1.2 devices */
507 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
508 return;
509
510 if (lmp_bredr_capable(hdev)) {
511 events[4] |= 0x01; /* Flow Specification Complete */
512 events[4] |= 0x02; /* Inquiry Result with RSSI */
513 events[4] |= 0x04; /* Read Remote Extended Features Complete */
514 events[5] |= 0x08; /* Synchronous Connection Complete */
515 events[5] |= 0x10; /* Synchronous Connection Changed */
516 }
517
518 if (hdev->features[3] & LMP_RSSI_INQ)
519 events[4] |= 0x02; /* Inquiry Result with RSSI */
520
521 if (lmp_sniffsubr_capable(hdev))
522 events[5] |= 0x20; /* Sniff Subrating */
523
524 if (hdev->features[5] & LMP_PAUSE_ENC)
525 events[5] |= 0x80; /* Encryption Key Refresh Complete */
526
527 if (hdev->features[6] & LMP_EXT_INQ)
528 events[5] |= 0x40; /* Extended Inquiry Result */
529
530 if (lmp_no_flush_capable(hdev))
531 events[7] |= 0x01; /* Enhanced Flush Complete */
532
533 if (hdev->features[7] & LMP_LSTO)
534 events[6] |= 0x80; /* Link Supervision Timeout Changed */
535
536 if (lmp_ssp_capable(hdev)) {
537 events[6] |= 0x01; /* IO Capability Request */
538 events[6] |= 0x02; /* IO Capability Response */
539 events[6] |= 0x04; /* User Confirmation Request */
540 events[6] |= 0x08; /* User Passkey Request */
541 events[6] |= 0x10; /* Remote OOB Data Request */
542 events[6] |= 0x20; /* Simple Pairing Complete */
543 events[7] |= 0x04; /* User Passkey Notification */
544 events[7] |= 0x08; /* Keypress Notification */
545 events[7] |= 0x10; /* Remote Host Supported
546 * Features Notification */
547 }
548
549 if (lmp_le_capable(hdev))
550 events[7] |= 0x20; /* LE Meta-Event */
551
552 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
553
554 if (lmp_le_capable(hdev)) {
555 memset(events, 0, sizeof(events));
556 events[0] = 0x1f;
557 hci_send_cmd(hdev, HCI_OP_LE_SET_EVENT_MASK,
558 sizeof(events), events);
559 }
560 }
561
562 static void bredr_init(struct hci_dev *hdev)
563 {
564 struct hci_cp_delete_stored_link_key cp;
565 __le16 param;
566 __u8 flt_type;
567
568 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
569 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
570
571 /* Read Class of Device */
572 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
573
574 /* Read Local Name */
575 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
576
577 /* Read Voice Setting */
578 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
579
580 /* Clear Event Filters */
581 flt_type = HCI_FLT_CLEAR_ALL;
582 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
583
584 /* Connection accept timeout ~20 secs */
585 param = __constant_cpu_to_le16(0x7d00);
586 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
587
588 bacpy(&cp.bdaddr, BDADDR_ANY);
589 cp.delete_all = 1;
590 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
591 }
592
593 static void le_init(struct hci_dev *hdev)
594 {
595 /* Read LE Buffer Size */
596 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
597
598 /* Read LE Advertising Channel TX Power */
599 hci_send_cmd(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
600 }
601
602 static void hci_setup(struct hci_dev *hdev)
603 {
604 if (hdev->dev_type != HCI_BREDR)
605 return;
606
607 /* Read BD Address */
608 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
609
610 if (lmp_bredr_capable(hdev))
611 bredr_init(hdev);
612
613 if (lmp_le_capable(hdev))
614 le_init(hdev);
615
616 hci_setup_event_mask(hdev);
617
618 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
619 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
620
621 if (lmp_ssp_capable(hdev)) {
622 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
623 u8 mode = 0x01;
624 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
625 sizeof(mode), &mode);
626 } else {
627 struct hci_cp_write_eir cp;
628
629 memset(hdev->eir, 0, sizeof(hdev->eir));
630 memset(&cp, 0, sizeof(cp));
631
632 hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
633 }
634 }
635
636 if (hdev->features[3] & LMP_RSSI_INQ)
637 hci_setup_inquiry_mode(hdev);
638
639 if (hdev->features[7] & LMP_INQ_TX_PWR)
640 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
641
642 if (hdev->features[7] & LMP_EXTFEATURES) {
643 struct hci_cp_read_local_ext_features cp;
644
645 cp.page = 0x01;
646 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
647 &cp);
648 }
649
650 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
651 u8 enable = 1;
652 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
653 &enable);
654 }
655 }
656
657 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
658 {
659 struct hci_rp_read_local_version *rp = (void *) skb->data;
660
661 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
662
663 if (rp->status)
664 goto done;
665
666 hdev->hci_ver = rp->hci_ver;
667 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
668 hdev->lmp_ver = rp->lmp_ver;
669 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
670 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
671
672 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
673 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
674
675 if (test_bit(HCI_INIT, &hdev->flags))
676 hci_setup(hdev);
677
678 done:
679 hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
680 }
681
682 static void hci_setup_link_policy(struct hci_dev *hdev)
683 {
684 struct hci_cp_write_def_link_policy cp;
685 u16 link_policy = 0;
686
687 if (lmp_rswitch_capable(hdev))
688 link_policy |= HCI_LP_RSWITCH;
689 if (hdev->features[0] & LMP_HOLD)
690 link_policy |= HCI_LP_HOLD;
691 if (lmp_sniff_capable(hdev))
692 link_policy |= HCI_LP_SNIFF;
693 if (hdev->features[1] & LMP_PARK)
694 link_policy |= HCI_LP_PARK;
695
696 cp.policy = cpu_to_le16(link_policy);
697 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
698 }
699
700 static void hci_cc_read_local_commands(struct hci_dev *hdev,
701 struct sk_buff *skb)
702 {
703 struct hci_rp_read_local_commands *rp = (void *) skb->data;
704
705 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
706
707 if (rp->status)
708 goto done;
709
710 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
711
712 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
713 hci_setup_link_policy(hdev);
714
715 done:
716 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
717 }
718
719 static void hci_cc_read_local_features(struct hci_dev *hdev,
720 struct sk_buff *skb)
721 {
722 struct hci_rp_read_local_features *rp = (void *) skb->data;
723
724 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
725
726 if (rp->status)
727 return;
728
729 memcpy(hdev->features, rp->features, 8);
730
731 /* Adjust default settings according to features
732 * supported by device. */
733
734 if (hdev->features[0] & LMP_3SLOT)
735 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
736
737 if (hdev->features[0] & LMP_5SLOT)
738 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
739
740 if (hdev->features[1] & LMP_HV2) {
741 hdev->pkt_type |= (HCI_HV2);
742 hdev->esco_type |= (ESCO_HV2);
743 }
744
745 if (hdev->features[1] & LMP_HV3) {
746 hdev->pkt_type |= (HCI_HV3);
747 hdev->esco_type |= (ESCO_HV3);
748 }
749
750 if (lmp_esco_capable(hdev))
751 hdev->esco_type |= (ESCO_EV3);
752
753 if (hdev->features[4] & LMP_EV4)
754 hdev->esco_type |= (ESCO_EV4);
755
756 if (hdev->features[4] & LMP_EV5)
757 hdev->esco_type |= (ESCO_EV5);
758
759 if (hdev->features[5] & LMP_EDR_ESCO_2M)
760 hdev->esco_type |= (ESCO_2EV3);
761
762 if (hdev->features[5] & LMP_EDR_ESCO_3M)
763 hdev->esco_type |= (ESCO_3EV3);
764
765 if (hdev->features[5] & LMP_EDR_3S_ESCO)
766 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
767
768 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
769 hdev->features[0], hdev->features[1],
770 hdev->features[2], hdev->features[3],
771 hdev->features[4], hdev->features[5],
772 hdev->features[6], hdev->features[7]);
773 }
774
775 static void hci_set_le_support(struct hci_dev *hdev)
776 {
777 struct hci_cp_write_le_host_supported cp;
778
779 memset(&cp, 0, sizeof(cp));
780
781 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
782 cp.le = 1;
783 cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
784 }
785
786 if (cp.le != !!(hdev->host_features[0] & LMP_HOST_LE))
787 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
788 &cp);
789 }
790
791 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
792 struct sk_buff *skb)
793 {
794 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
795
796 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
797
798 if (rp->status)
799 goto done;
800
801 switch (rp->page) {
802 case 0:
803 memcpy(hdev->features, rp->features, 8);
804 break;
805 case 1:
806 memcpy(hdev->host_features, rp->features, 8);
807 break;
808 }
809
810 if (test_bit(HCI_INIT, &hdev->flags) && lmp_le_capable(hdev))
811 hci_set_le_support(hdev);
812
813 done:
814 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
815 }
816
817 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
818 struct sk_buff *skb)
819 {
820 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
821
822 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
823
824 if (rp->status)
825 return;
826
827 hdev->flow_ctl_mode = rp->mode;
828
829 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
830 }
831
832 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
833 {
834 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
835
836 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
837
838 if (rp->status)
839 return;
840
841 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
842 hdev->sco_mtu = rp->sco_mtu;
843 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
844 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
845
846 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
847 hdev->sco_mtu = 64;
848 hdev->sco_pkts = 8;
849 }
850
851 hdev->acl_cnt = hdev->acl_pkts;
852 hdev->sco_cnt = hdev->sco_pkts;
853
854 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
855 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
856 }
857
858 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
859 {
860 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
861
862 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
863
864 if (!rp->status)
865 bacpy(&hdev->bdaddr, &rp->bdaddr);
866
867 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
868 }
869
870 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
871 struct sk_buff *skb)
872 {
873 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
874
875 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
876
877 if (rp->status)
878 return;
879
880 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
881 hdev->block_len = __le16_to_cpu(rp->block_len);
882 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
883
884 hdev->block_cnt = hdev->num_blocks;
885
886 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
887 hdev->block_cnt, hdev->block_len);
888
889 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
890 }
891
892 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
893 {
894 __u8 status = *((__u8 *) skb->data);
895
896 BT_DBG("%s status 0x%2.2x", hdev->name, status);
897
898 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
899 }
900
901 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
902 struct sk_buff *skb)
903 {
904 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
905
906 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
907
908 if (rp->status)
909 goto a2mp_rsp;
910
911 hdev->amp_status = rp->amp_status;
912 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
913 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
914 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
915 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
916 hdev->amp_type = rp->amp_type;
917 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
918 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
919 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
920 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
921
922 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
923
924 a2mp_rsp:
925 a2mp_send_getinfo_rsp(hdev);
926 }
927
928 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
929 struct sk_buff *skb)
930 {
931 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
932 struct amp_assoc *assoc = &hdev->loc_assoc;
933 size_t rem_len, frag_len;
934
935 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
936
937 if (rp->status)
938 goto a2mp_rsp;
939
940 frag_len = skb->len - sizeof(*rp);
941 rem_len = __le16_to_cpu(rp->rem_len);
942
943 if (rem_len > frag_len) {
944 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
945
946 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
947 assoc->offset += frag_len;
948
949 /* Read other fragments */
950 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
951
952 return;
953 }
954
955 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
956 assoc->len = assoc->offset + rem_len;
957 assoc->offset = 0;
958
959 a2mp_rsp:
960 /* Send A2MP Rsp when all fragments are received */
961 a2mp_send_getampassoc_rsp(hdev, rp->status);
962 a2mp_send_create_phy_link_req(hdev, rp->status);
963 }
964
965 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
966 struct sk_buff *skb)
967 {
968 __u8 status = *((__u8 *) skb->data);
969
970 BT_DBG("%s status 0x%2.2x", hdev->name, status);
971
972 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
973 }
974
975 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
976 {
977 __u8 status = *((__u8 *) skb->data);
978
979 BT_DBG("%s status 0x%2.2x", hdev->name, status);
980
981 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
982 }
983
984 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
985 struct sk_buff *skb)
986 {
987 __u8 status = *((__u8 *) skb->data);
988
989 BT_DBG("%s status 0x%2.2x", hdev->name, status);
990
991 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
992 }
993
994 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
995 struct sk_buff *skb)
996 {
997 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
998
999 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1000
1001 if (!rp->status)
1002 hdev->inq_tx_power = rp->tx_power;
1003
1004 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
1005 }
1006
1007 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
1008 {
1009 __u8 status = *((__u8 *) skb->data);
1010
1011 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1012
1013 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
1014 }
1015
1016 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
1017 {
1018 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
1019 struct hci_cp_pin_code_reply *cp;
1020 struct hci_conn *conn;
1021
1022 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1023
1024 hci_dev_lock(hdev);
1025
1026 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1027 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1028
1029 if (rp->status)
1030 goto unlock;
1031
1032 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1033 if (!cp)
1034 goto unlock;
1035
1036 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1037 if (conn)
1038 conn->pin_length = cp->pin_len;
1039
1040 unlock:
1041 hci_dev_unlock(hdev);
1042 }
1043
1044 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1045 {
1046 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
1047
1048 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1049
1050 hci_dev_lock(hdev);
1051
1052 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1053 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1054 rp->status);
1055
1056 hci_dev_unlock(hdev);
1057 }
1058
1059 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1060 struct sk_buff *skb)
1061 {
1062 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1063
1064 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1065
1066 if (rp->status)
1067 return;
1068
1069 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1070 hdev->le_pkts = rp->le_max_pkt;
1071
1072 hdev->le_cnt = hdev->le_pkts;
1073
1074 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1075
1076 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
1077 }
1078
1079 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1080 struct sk_buff *skb)
1081 {
1082 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1083
1084 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1085
1086 if (!rp->status)
1087 hdev->adv_tx_power = rp->tx_power;
1088
1089 hci_req_complete(hdev, HCI_OP_LE_READ_ADV_TX_POWER, rp->status);
1090 }
1091
1092 static void hci_cc_le_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
1093 {
1094 __u8 status = *((__u8 *) skb->data);
1095
1096 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1097
1098 hci_req_complete(hdev, HCI_OP_LE_SET_EVENT_MASK, status);
1099 }
1100
1101 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1102 {
1103 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1104
1105 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1106
1107 hci_dev_lock(hdev);
1108
1109 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1110 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1111 rp->status);
1112
1113 hci_dev_unlock(hdev);
1114 }
1115
1116 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1117 struct sk_buff *skb)
1118 {
1119 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1120
1121 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1122
1123 hci_dev_lock(hdev);
1124
1125 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1126 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1127 ACL_LINK, 0, rp->status);
1128
1129 hci_dev_unlock(hdev);
1130 }
1131
/* Command Complete handler for User Passkey Request Reply.
 *
 * Reports the command status to the management interface when mgmt is
 * active.  Note the reply layout is shared with the user-confirm reply
 * (struct hci_rp_user_confirm_reply).
 */
static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
						 0, rp->status);

	hci_dev_unlock(hdev);
}
1146
/* Command Complete handler for User Passkey Request Negative Reply.
 *
 * Rejection-path counterpart of hci_cc_user_passkey_reply(); forwards
 * the command status to the management interface when mgmt is active.
 */
static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_user_confirm_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
						     ACL_LINK, 0, rp->status);

	hci_dev_unlock(hdev);
}
1162
/* Command Complete handler for Read Local OOB Data.
 *
 * Hands the controller's OOB hash and randomizer (plus the status) to
 * the management layer unconditionally — unlike the pairing-reply
 * handlers above, this does not gate on the HCI_MGMT flag.
 */
static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_rp_read_local_oob_data *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	hci_dev_lock(hdev);
	mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
						rp->randomizer, rp->status);
	hci_dev_unlock(hdev);
}
1175
1176 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1177 {
1178 __u8 status = *((__u8 *) skb->data);
1179
1180 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1181
1182 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1183
1184 if (status) {
1185 hci_dev_lock(hdev);
1186 mgmt_start_discovery_failed(hdev, status);
1187 hci_dev_unlock(hdev);
1188 return;
1189 }
1190 }
1191
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * The meaning of the status depends on whether we were enabling or
 * disabling the LE scan, so the originally sent command parameters are
 * recovered with hci_sent_cmd_data() and dispatched on cp->enable.
 * Updates the HCI_LE_SCAN device flag and the discovery state machine
 * accordingly.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCANNING_ENABLED:
		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);

		/* Enabling failed: tell mgmt that discovery could not start */
		if (status) {
			hci_dev_lock(hdev);
			mgmt_start_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		hci_dev_unlock(hdev);
		break;

	case LE_SCANNING_DISABLED:
		/* Disabling failed: tell mgmt that discovery could not stop */
		if (status) {
			hci_dev_lock(hdev);
			mgmt_stop_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* For interleaved discovery, the end of the LE scan phase
		 * hands over to the next phase instead of stopping.
		 */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    hdev->discovery.state == DISCOVERY_FINDING) {
			mgmt_interleaved_discovery(hdev);
		} else {
			hci_dev_lock(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			hci_dev_unlock(hdev);
		}

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1248
/* Command Complete handler for HCI_OP_LE_LTK_REPLY.
 *
 * Only completes the pending request on success; a non-zero status is
 * logged and otherwise ignored here.
 */
static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_ltk_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
}
1260
/* Command Complete handler for HCI_OP_LE_LTK_NEG_REPLY.
 *
 * Mirror of hci_cc_le_ltk_reply(): request completion is only signalled
 * for a successful status.
 */
static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);

	if (rp->status)
		return;

	hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
}
1272
/* Command Complete handler for HCI_OP_WRITE_LE_HOST_SUPPORTED.
 *
 * On success, mirror the LE host support bit that was written to the
 * controller into hdev->host_features.  Outside of the init sequence,
 * also report the result of the LE enable/disable to the management
 * interface.
 */
static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
					   struct sk_buff *skb)
{
	struct hci_cp_write_le_host_supported *sent;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Recover the parameters we sent to know what was requested */
	sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
	if (!sent)
		return;

	if (!status) {
		if (sent->le)
			hdev->host_features[0] |= LMP_HOST_LE;
		else
			hdev->host_features[0] &= ~LMP_HOST_LE;
	}

	/* Suppress the mgmt event during controller initialization */
	if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
	    !test_bit(HCI_INIT, &hdev->flags))
		mgmt_le_enable_complete(hdev, sent->le, status);

	hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
}
1298
/* Command Complete handler for Write Remote AMP ASSOC.
 *
 * On success, continue writing the next fragment of the AMP assoc data
 * for the given physical link handle.
 */
static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
					  struct sk_buff *skb)
{
	struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
	       hdev->name, rp->status, rp->phy_handle);

	if (rp->status)
		return;

	amp_write_rem_assoc_continue(hdev, rp->phy_handle);
}
1312
/* Command Status handler for HCI_OP_INQUIRY.
 *
 * On failure: complete the pending request, let any queued connection
 * attempts proceed, and report the discovery failure to mgmt.  On
 * success: mark the controller as inquiring and move the discovery
 * state machine to FINDING.
 */
static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_req_complete(hdev, HCI_OP_INQUIRY, status);
		hci_conn_check_pending(hdev);
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			mgmt_start_discovery_failed(hdev, status);
		hci_dev_unlock(hdev);
		return;
	}

	set_bit(HCI_INQUIRY, &hdev->flags);

	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_FINDING);
	hci_dev_unlock(hdev);
}
1333
/* Command Status handler for HCI_OP_CREATE_CONN.
 *
 * Looks up the connection object for the address we tried to connect
 * to.  On failure, either tears the connection down or (for status
 * 0x0c, up to two attempts) parks it in BT_CONNECT2 for a retry.  On
 * success, creates the connection object if it does not exist yet so
 * the later Connection Complete event has something to attach to.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* 0x0c = Command Disallowed: retry up to twice
			 * before giving up on the connection.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1373
/* Command Status handler for HCI_OP_ADD_SCO.
 *
 * Only a failure status needs handling: find the ACL link the SCO
 * setup was issued on, close the attached SCO connection, and notify
 * the protocol layer.
 */
static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_add_sco *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Success is handled by the SCO connection complete event */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1408
1409 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1410 {
1411 struct hci_cp_auth_requested *cp;
1412 struct hci_conn *conn;
1413
1414 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1415
1416 if (!status)
1417 return;
1418
1419 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1420 if (!cp)
1421 return;
1422
1423 hci_dev_lock(hdev);
1424
1425 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1426 if (conn) {
1427 if (conn->state == BT_CONFIG) {
1428 hci_proto_connect_cfm(conn, status);
1429 hci_conn_put(conn);
1430 }
1431 }
1432
1433 hci_dev_unlock(hdev);
1434 }
1435
1436 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1437 {
1438 struct hci_cp_set_conn_encrypt *cp;
1439 struct hci_conn *conn;
1440
1441 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1442
1443 if (!status)
1444 return;
1445
1446 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1447 if (!cp)
1448 return;
1449
1450 hci_dev_lock(hdev);
1451
1452 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1453 if (conn) {
1454 if (conn->state == BT_CONFIG) {
1455 hci_proto_connect_cfm(conn, status);
1456 hci_conn_put(conn);
1457 }
1458 }
1459
1460 hci_dev_unlock(hdev);
1461 }
1462
1463 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1464 struct hci_conn *conn)
1465 {
1466 if (conn->state != BT_CONFIG || !conn->out)
1467 return 0;
1468
1469 if (conn->pending_sec_level == BT_SECURITY_SDP)
1470 return 0;
1471
1472 /* Only request authentication for SSP connections or non-SSP
1473 * devices with sec_level HIGH or if MITM protection is requested */
1474 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1475 conn->pending_sec_level != BT_SECURITY_HIGH)
1476 return 0;
1477
1478 return 1;
1479 }
1480
/* Issue a Remote Name Request for an inquiry cache entry.
 *
 * Builds the request from the cached page scan parameters and clock
 * offset.  Returns the result of hci_send_cmd() (0 on success).
 */
static int hci_resolve_name(struct hci_dev *hdev,
			    struct inquiry_entry *e)
{
	struct hci_cp_remote_name_req cp;

	memset(&cp, 0, sizeof(cp));

	bacpy(&cp.bdaddr, &e->data.bdaddr);
	cp.pscan_rep_mode = e->data.pscan_rep_mode;
	cp.pscan_mode = e->data.pscan_mode;
	cp.clock_offset = e->data.clock_offset;

	return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
}
1495
1496 static bool hci_resolve_next_name(struct hci_dev *hdev)
1497 {
1498 struct discovery_state *discov = &hdev->discovery;
1499 struct inquiry_entry *e;
1500
1501 if (list_empty(&discov->resolve))
1502 return false;
1503
1504 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1505 if (!e)
1506 return false;
1507
1508 if (hci_resolve_name(hdev, e) == 0) {
1509 e->name_state = NAME_PENDING;
1510 return true;
1511 }
1512
1513 return false;
1514 }
1515
/* Process the outcome of a name resolution for one remote device.
 *
 * Reports the device as connected to mgmt if needed, then advances the
 * discovery state machine: updates the inquiry cache entry's name
 * state, forwards a resolved name to mgmt, and either resolves the
 * next pending name or stops discovery.  @name may be NULL when the
 * resolution failed; @conn may be NULL when no connection exists.
 * Called with the hdev lock held (callers take hci_dev_lock()).
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names pending: keep discovery in the RESOLVING state */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1558
/* Command Status handler for HCI_OP_REMOTE_NAME_REQ.
 *
 * Only a failure status is processed here (success waits for the
 * Remote Name Request Complete event).  On failure, the pending name
 * lookup is cleaned up via hci_check_pending_name() and, if the link
 * needs it, an Authentication Request is issued instead.
 */
static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_remote_name_req *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* If successful wait for the name req complete event before
	 * checking for the need to do authentication */
	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);

	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	/* Shadows the outer cp on purpose; only used for this request */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
1597
1598 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1599 {
1600 struct hci_cp_read_remote_features *cp;
1601 struct hci_conn *conn;
1602
1603 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1604
1605 if (!status)
1606 return;
1607
1608 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1609 if (!cp)
1610 return;
1611
1612 hci_dev_lock(hdev);
1613
1614 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1615 if (conn) {
1616 if (conn->state == BT_CONFIG) {
1617 hci_proto_connect_cfm(conn, status);
1618 hci_conn_put(conn);
1619 }
1620 }
1621
1622 hci_dev_unlock(hdev);
1623 }
1624
1625 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1626 {
1627 struct hci_cp_read_remote_ext_features *cp;
1628 struct hci_conn *conn;
1629
1630 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1631
1632 if (!status)
1633 return;
1634
1635 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1636 if (!cp)
1637 return;
1638
1639 hci_dev_lock(hdev);
1640
1641 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1642 if (conn) {
1643 if (conn->state == BT_CONFIG) {
1644 hci_proto_connect_cfm(conn, status);
1645 hci_conn_put(conn);
1646 }
1647 }
1648
1649 hci_dev_unlock(hdev);
1650 }
1651
/* Command Status handler for HCI_OP_SETUP_SYNC_CONN.
 *
 * Only a failure status needs handling: find the ACL link the sync
 * connection was requested on, close the attached SCO connection, and
 * notify the protocol layer (same pattern as hci_cs_add_sco()).
 */
static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_setup_sync_conn *cp;
	struct hci_conn *acl, *sco;
	__u16 handle;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
	if (!cp)
		return;

	handle = __le16_to_cpu(cp->handle);

	BT_DBG("%s handle 0x%4.4x", hdev->name, handle);

	hci_dev_lock(hdev);

	acl = hci_conn_hash_lookup_handle(hdev, handle);
	if (acl) {
		sco = acl->link;
		if (sco) {
			sco->state = BT_CLOSED;

			hci_proto_connect_cfm(sco, status);
			hci_conn_del(sco);
		}
	}

	hci_dev_unlock(hdev);
}
1686
/* Command Status handler for HCI_OP_SNIFF_MODE.
 *
 * Only a failure status needs handling: clear the pending mode-change
 * flag and, if a SCO setup was waiting on the mode change, run it now
 * with the error status.
 */
static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1713
/* Command Status handler for HCI_OP_EXIT_SNIFF_MODE.
 *
 * Mirror of hci_cs_sniff_mode(): on failure, clear the pending
 * mode-change flag and run any deferred SCO setup with the error.
 */
static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_exit_sniff_mode *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn) {
		clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);

		if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
			hci_sco_setup(conn, status);
	}

	hci_dev_unlock(hdev);
}
1740
/* Command Status handler for HCI_OP_DISCONNECT.
 *
 * Only a failure status needs handling: report the failed disconnect
 * attempt for the affected connection to the management interface.
 */
static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_disconnect *cp;
	struct hci_conn *conn;

	if (!status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
	if (conn)
		mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
				       conn->dst_type, status);

	hci_dev_unlock(hdev);
}
1762
/* Command Status handler for LE Create Connection.
 *
 * Only a failure status needs handling: find the LE connection that is
 * in the BT_CONNECT state, report the failure to mgmt and the protocol
 * layer, then delete the connection object.
 */
static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status) {
		hci_dev_lock(hdev);

		conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
		if (!conn) {
			hci_dev_unlock(hdev);
			return;
		}

		BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);

		conn->state = BT_CLOSED;
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, status);
		hci_proto_connect_cfm(conn, status);
		hci_conn_del(conn);

		hci_dev_unlock(hdev);
	}
}
1789
/* Command Status handler for LE Start Encryption: debug logging only. */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}
1794
1795 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1796 {
1797 struct hci_cp_create_phy_link *cp;
1798
1799 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1800
1801 if (status)
1802 return;
1803
1804 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1805 if (!cp)
1806 return;
1807
1808 amp_write_remote_assoc(hdev, cp->phy_handle);
1809 }
1810
/* Command Status handler for HCI_OP_ACCEPT_PHY_LINK.
 *
 * Mirror of hci_cs_create_phylink(): on success, start writing the
 * remote AMP assoc data for the accepted physical link.
 */
static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
{
	struct hci_cp_accept_phy_link *cp;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	if (status)
		return;

	cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
	if (!cp)
		return;

	amp_write_remote_assoc(hdev, cp->phy_handle);
}
1826
/* Handle the HCI Inquiry Complete event.
 *
 * Completes the pending inquiry request and clears HCI_INQUIRY.  When
 * mgmt is active and discovery was in the FINDING state, either start
 * resolving the remaining device names (moving to RESOLVING) or stop
 * discovery if there is nothing left to resolve.
 */
static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	__u8 status = *((__u8 *) skb->data);
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	hci_req_complete(hdev, HCI_OP_INQUIRY, status);

	hci_conn_check_pending(hdev);

	/* Nothing to do if we were not actually inquiring */
	if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
		return;

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	if (discov->state != DISCOVERY_FINDING)
		goto unlock;

	if (list_empty(&discov->resolve)) {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		goto unlock;
	}

	e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
	if (e && hci_resolve_name(hdev, e) == 0) {
		e->name_state = NAME_PENDING;
		hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
	} else {
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	}

unlock:
	hci_dev_unlock(hdev);
}
1866
1867 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1868 {
1869 struct inquiry_data data;
1870 struct inquiry_info *info = (void *) (skb->data + 1);
1871 int num_rsp = *((__u8 *) skb->data);
1872
1873 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1874
1875 if (!num_rsp)
1876 return;
1877
1878 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1879 return;
1880
1881 hci_dev_lock(hdev);
1882
1883 for (; num_rsp; num_rsp--, info++) {
1884 bool name_known, ssp;
1885
1886 bacpy(&data.bdaddr, &info->bdaddr);
1887 data.pscan_rep_mode = info->pscan_rep_mode;
1888 data.pscan_period_mode = info->pscan_period_mode;
1889 data.pscan_mode = info->pscan_mode;
1890 memcpy(data.dev_class, info->dev_class, 3);
1891 data.clock_offset = info->clock_offset;
1892 data.rssi = 0x00;
1893 data.ssp_mode = 0x00;
1894
1895 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1896 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1897 info->dev_class, 0, !name_known, ssp, NULL,
1898 0);
1899 }
1900
1901 hci_dev_unlock(hdev);
1902 }
1903
/* Handle the HCI Connection Complete event.
 *
 * Matches the event to an existing connection object (falling back
 * from SCO to ESCO lookup for SCO link types).  On success, moves the
 * connection to BT_CONFIG/BT_CONNECTED, registers it with sysfs,
 * records link-mode bits, and kicks off remote feature discovery and
 * packet type setup.  On failure, reports the failed connect and tears
 * the connection down.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		/* A SCO request may have been upgraded to ESCO locally */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming legacy link without a stored key gets the
			 * longer pairing timeout before disconnect.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
					    conn->dst_type, ev->status);
	}

	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
1986
/* Handle the HCI Connection Request event.
 *
 * Accepts the incoming connection when the link policy (plus the
 * protocol layer's verdict from hci_proto_connect_ind()) allows it and
 * the peer is not blacklisted; otherwise rejects it.  Accepted ACL
 * links get an Accept Connection Request (choosing master/slave role
 * from the role-switch capability), while eSCO-capable controllers get
 * an Accept Synchronous Connection Request with default audio
 * parameters.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached device class if we know this device */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);
		conn->state = BT_CONNECT;

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
			struct hci_cp_accept_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else {
			struct hci_cp_accept_sync_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
			cp.rx_bandwidth   = __constant_cpu_to_le32(0x00001f40);
			cp.max_latency    = __constant_cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
2061
2062 static u8 hci_to_mgmt_reason(u8 err)
2063 {
2064 switch (err) {
2065 case HCI_ERROR_CONNECTION_TIMEOUT:
2066 return MGMT_DEV_DISCONN_TIMEOUT;
2067 case HCI_ERROR_REMOTE_USER_TERM:
2068 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2069 case HCI_ERROR_REMOTE_POWER_OFF:
2070 return MGMT_DEV_DISCONN_REMOTE;
2071 case HCI_ERROR_LOCAL_HOST_TERM:
2072 return MGMT_DEV_DISCONN_LOCAL_HOST;
2073 default:
2074 return MGMT_DEV_DISCONN_UNKNOWN;
2075 }
2076 }
2077
/* Handle the HCI Disconnection Complete event.
 *
 * On success, marks the connection closed, notifies mgmt with a
 * translated disconnect reason, optionally removes the stored link
 * key, informs the protocol layer, and deletes the connection.  On
 * failure, reports the failed disconnect to mgmt instead.
 */
static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_disconn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (ev->status == 0)
		conn->state = BT_CLOSED;

	/* Only ACL/LE links that mgmt knew about generate mgmt events */
	if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
	    (conn->type == ACL_LINK || conn->type == LE_LINK)) {
		if (ev->status) {
			mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
					       conn->dst_type, ev->status);
		} else {
			u8 reason = hci_to_mgmt_reason(ev->reason);

			mgmt_device_disconnected(hdev, &conn->dst, conn->type,
						 conn->dst_type, reason);
		}
	}

	if (ev->status == 0) {
		/* Drop a non-persistent link key together with the link */
		if (conn->type == ACL_LINK && conn->flush_key)
			hci_remove_link_key(hdev, &conn->dst);
		hci_proto_disconn_cfm(conn, ev->reason);
		hci_conn_del(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2117
/* Handle the HCI Authentication Complete event.
 *
 * Updates the link's auth state and security level, reports failures
 * to mgmt, and drives the next step: for links still in BT_CONFIG,
 * either request encryption (SSP) or finish the connection setup; for
 * established links, notify the auth waiters.  Finally, if encryption
 * was pending on the auth result, either request it or report the
 * encrypt failure.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		/* Legacy (non-SSP) devices cannot be re-authenticated */
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle  = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2183
/* Handle the HCI Remote Name Request Complete event.
 *
 * Feeds the resolved (or failed) name into the pending-name machinery
 * when mgmt is active, then — on the associated connection, if any —
 * requests authentication when the link still needs it.
 */
static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_remote_name *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_conn_check_pending(hdev);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto check_auth;

	if (ev->status == 0)
		hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
				       strnlen(ev->name, HCI_MAX_NAME_LENGTH));
	else
		hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);

check_auth:
	if (!conn)
		goto unlock;

	if (!hci_outgoing_auth_needed(hdev, conn))
		goto unlock;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;
		cp.handle = __cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
2222
/* Handle the HCI Encryption Change event.
 *
 * Updates the link-mode encryption (and implied authentication) bits,
 * clears the pending-encrypt flag, disconnects established links on
 * encryption failure, and finishes connection setup for links still in
 * BT_CONFIG; otherwise notifies the encryption waiters.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		/* An encryption failure on a live link is fatal */
		if (ev->status && conn->state == BT_CONNECTED) {
			hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_put(conn);
			goto unlock;
		}

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2265
/* Handle the HCI Change Connection Link Key Complete event.
 *
 * On success, marks the link as secure; always clears the pending-auth
 * flag and notifies key-change waiters with the event status.
 */
static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status)
			conn->link_mode |= HCI_LM_SECURE;

		clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);

		hci_key_change_cfm(conn, ev->status);
	}

	hci_dev_unlock(hdev);
}
2288
/* Handle the HCI Read Remote Supported Features Complete event.
 *
 * Caches the remote feature page, then continues connection setup for
 * links in BT_CONFIG: reads the extended features page when both sides
 * are SSP-capable, otherwise resolves the remote name or reports the
 * device to mgmt, and finally completes the connection when no
 * authentication is needed.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	/* SSP on both sides: fetch the extended features page next */
	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2338
/* Handle the HCI Read Remote Version Complete event: debug logging only. */
static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2343
/* Handle the HCI QoS Setup Complete event: debug logging only. */
static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2349
/* Handle HCI Command Complete event.
 *
 * Strips the event header and dispatches the remaining payload to the
 * per-opcode completion handler. Afterwards the command timeout timer
 * is stopped (except for the NOP opcode) and, if the controller
 * signalled that it can accept more commands (ev->ncmd), the command
 * queue worker is kicked again.
 */
static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_complete *ev = (void *) skb->data;
	__u16 opcode;

	/* Handlers below expect skb->data to point at the command-specific
	 * return parameters, not at the event header.
	 */
	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY_CANCEL:
		hci_cc_inquiry_cancel(hdev, skb);
		break;

	case HCI_OP_PERIODIC_INQ:
		hci_cc_periodic_inq(hdev, skb);
		break;

	case HCI_OP_EXIT_PERIODIC_INQ:
		hci_cc_exit_periodic_inq(hdev, skb);
		break;

	case HCI_OP_REMOTE_NAME_REQ_CANCEL:
		hci_cc_remote_name_req_cancel(hdev, skb);
		break;

	case HCI_OP_ROLE_DISCOVERY:
		hci_cc_role_discovery(hdev, skb);
		break;

	case HCI_OP_READ_LINK_POLICY:
		hci_cc_read_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_LINK_POLICY:
		hci_cc_write_link_policy(hdev, skb);
		break;

	case HCI_OP_READ_DEF_LINK_POLICY:
		hci_cc_read_def_link_policy(hdev, skb);
		break;

	case HCI_OP_WRITE_DEF_LINK_POLICY:
		hci_cc_write_def_link_policy(hdev, skb);
		break;

	case HCI_OP_RESET:
		hci_cc_reset(hdev, skb);
		break;

	case HCI_OP_WRITE_LOCAL_NAME:
		hci_cc_write_local_name(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_NAME:
		hci_cc_read_local_name(hdev, skb);
		break;

	case HCI_OP_WRITE_AUTH_ENABLE:
		hci_cc_write_auth_enable(hdev, skb);
		break;

	case HCI_OP_WRITE_ENCRYPT_MODE:
		hci_cc_write_encrypt_mode(hdev, skb);
		break;

	case HCI_OP_WRITE_SCAN_ENABLE:
		hci_cc_write_scan_enable(hdev, skb);
		break;

	case HCI_OP_READ_CLASS_OF_DEV:
		hci_cc_read_class_of_dev(hdev, skb);
		break;

	case HCI_OP_WRITE_CLASS_OF_DEV:
		hci_cc_write_class_of_dev(hdev, skb);
		break;

	case HCI_OP_READ_VOICE_SETTING:
		hci_cc_read_voice_setting(hdev, skb);
		break;

	case HCI_OP_WRITE_VOICE_SETTING:
		hci_cc_write_voice_setting(hdev, skb);
		break;

	case HCI_OP_HOST_BUFFER_SIZE:
		hci_cc_host_buffer_size(hdev, skb);
		break;

	case HCI_OP_WRITE_SSP_MODE:
		hci_cc_write_ssp_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_VERSION:
		hci_cc_read_local_version(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_COMMANDS:
		hci_cc_read_local_commands(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_FEATURES:
		hci_cc_read_local_features(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_EXT_FEATURES:
		hci_cc_read_local_ext_features(hdev, skb);
		break;

	case HCI_OP_READ_BUFFER_SIZE:
		hci_cc_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_READ_BD_ADDR:
		hci_cc_read_bd_addr(hdev, skb);
		break;

	case HCI_OP_READ_DATA_BLOCK_SIZE:
		hci_cc_read_data_block_size(hdev, skb);
		break;

	case HCI_OP_WRITE_CA_TIMEOUT:
		hci_cc_write_ca_timeout(hdev, skb);
		break;

	case HCI_OP_READ_FLOW_CONTROL_MODE:
		hci_cc_read_flow_control_mode(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_INFO:
		hci_cc_read_local_amp_info(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_AMP_ASSOC:
		hci_cc_read_local_amp_assoc(hdev, skb);
		break;

	case HCI_OP_DELETE_STORED_LINK_KEY:
		hci_cc_delete_stored_link_key(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_MASK:
		hci_cc_set_event_mask(hdev, skb);
		break;

	case HCI_OP_WRITE_INQUIRY_MODE:
		hci_cc_write_inquiry_mode(hdev, skb);
		break;

	case HCI_OP_READ_INQ_RSP_TX_POWER:
		hci_cc_read_inq_rsp_tx_power(hdev, skb);
		break;

	case HCI_OP_SET_EVENT_FLT:
		hci_cc_set_event_flt(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_REPLY:
		hci_cc_pin_code_reply(hdev, skb);
		break;

	case HCI_OP_PIN_CODE_NEG_REPLY:
		hci_cc_pin_code_neg_reply(hdev, skb);
		break;

	case HCI_OP_READ_LOCAL_OOB_DATA:
		hci_cc_read_local_oob_data_reply(hdev, skb);
		break;

	case HCI_OP_LE_READ_BUFFER_SIZE:
		hci_cc_le_read_buffer_size(hdev, skb);
		break;

	case HCI_OP_LE_READ_ADV_TX_POWER:
		hci_cc_le_read_adv_tx_power(hdev, skb);
		break;

	case HCI_OP_LE_SET_EVENT_MASK:
		hci_cc_le_set_event_mask(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_REPLY:
		hci_cc_user_confirm_reply(hdev, skb);
		break;

	case HCI_OP_USER_CONFIRM_NEG_REPLY:
		hci_cc_user_confirm_neg_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_REPLY:
		hci_cc_user_passkey_reply(hdev, skb);
		break;

	case HCI_OP_USER_PASSKEY_NEG_REPLY:
		hci_cc_user_passkey_neg_reply(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_PARAM:
		hci_cc_le_set_scan_param(hdev, skb);
		break;

	case HCI_OP_LE_SET_SCAN_ENABLE:
		hci_cc_le_set_scan_enable(hdev, skb);
		break;

	case HCI_OP_LE_LTK_REPLY:
		hci_cc_le_ltk_reply(hdev, skb);
		break;

	case HCI_OP_LE_LTK_NEG_REPLY:
		hci_cc_le_ltk_neg_reply(hdev, skb);
		break;

	case HCI_OP_WRITE_LE_HOST_SUPPORTED:
		hci_cc_write_le_host_supported(hdev, skb);
		break;

	case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
		hci_cc_write_remote_amp_assoc(hdev, skb);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* A real command completed; stop the command timeout. NOP command
	 * complete events are only credit updates from the controller.
	 */
	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Controller can take more commands; resume the command queue */
	if (ev->ncmd) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2586
/* Handle HCI Command Status event.
 *
 * Dispatches the status to the per-opcode handler for commands that
 * complete asynchronously. Afterwards the command timeout timer is
 * stopped (except for the NOP opcode) and, unless a reset is in
 * progress, the command queue is kicked again if the controller has
 * room for more commands.
 */
static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_cmd_status *ev = (void *) skb->data;
	__u16 opcode;

	skb_pull(skb, sizeof(*ev));

	opcode = __le16_to_cpu(ev->opcode);

	switch (opcode) {
	case HCI_OP_INQUIRY:
		hci_cs_inquiry(hdev, ev->status);
		break;

	case HCI_OP_CREATE_CONN:
		hci_cs_create_conn(hdev, ev->status);
		break;

	case HCI_OP_ADD_SCO:
		hci_cs_add_sco(hdev, ev->status);
		break;

	case HCI_OP_AUTH_REQUESTED:
		hci_cs_auth_requested(hdev, ev->status);
		break;

	case HCI_OP_SET_CONN_ENCRYPT:
		hci_cs_set_conn_encrypt(hdev, ev->status);
		break;

	case HCI_OP_REMOTE_NAME_REQ:
		hci_cs_remote_name_req(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_FEATURES:
		hci_cs_read_remote_features(hdev, ev->status);
		break;

	case HCI_OP_READ_REMOTE_EXT_FEATURES:
		hci_cs_read_remote_ext_features(hdev, ev->status);
		break;

	case HCI_OP_SETUP_SYNC_CONN:
		hci_cs_setup_sync_conn(hdev, ev->status);
		break;

	case HCI_OP_SNIFF_MODE:
		hci_cs_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_EXIT_SNIFF_MODE:
		hci_cs_exit_sniff_mode(hdev, ev->status);
		break;

	case HCI_OP_DISCONNECT:
		hci_cs_disconnect(hdev, ev->status);
		break;

	case HCI_OP_LE_CREATE_CONN:
		hci_cs_le_create_conn(hdev, ev->status);
		break;

	case HCI_OP_LE_START_ENC:
		hci_cs_le_start_enc(hdev, ev->status);
		break;

	case HCI_OP_CREATE_PHY_LINK:
		hci_cs_create_phylink(hdev, ev->status);
		break;

	case HCI_OP_ACCEPT_PHY_LINK:
		hci_cs_accept_phylink(hdev, ev->status);
		break;

	default:
		BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
		break;
	}

	/* NOP status events are only credit updates; keep the timer for
	 * the command that is actually outstanding.
	 */
	if (ev->opcode != HCI_OP_NOP)
		del_timer(&hdev->cmd_timer);

	/* Do not resume the queue while HCI_RESET is pending */
	if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		if (!skb_queue_empty(&hdev->cmd_q))
			queue_work(hdev->workqueue, &hdev->cmd_work);
	}
}
2675
2676 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2677 {
2678 struct hci_ev_role_change *ev = (void *) skb->data;
2679 struct hci_conn *conn;
2680
2681 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2682
2683 hci_dev_lock(hdev);
2684
2685 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2686 if (conn) {
2687 if (!ev->status) {
2688 if (ev->role)
2689 conn->link_mode &= ~HCI_LM_MASTER;
2690 else
2691 conn->link_mode |= HCI_LM_MASTER;
2692 }
2693
2694 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2695
2696 hci_role_switch_cfm(conn, ev->status, ev->role);
2697 }
2698
2699 hci_dev_unlock(hdev);
2700 }
2701
/* Handle HCI Number of Completed Packets event (packet-based flow
 * control).
 *
 * For every reported connection handle, the completed packet count is
 * credited back to the matching per-link-type transmit budget, clamped
 * to the controller-reported maximum, and the TX worker is rescheduled.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only valid in packet-based flow control mode;
	 * block-based controllers use Number of Completed Data Blocks.
	 */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate that the skb actually holds num_hndl entries; the
	 * short-circuit keeps num_hndl from being read on a truncated
	 * event header.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without dedicated LE buffers
			 * (le_pkts == 0) share the ACL budget.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* New credits may allow queued frames to go out */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2767
2768 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2769 __u16 handle)
2770 {
2771 struct hci_chan *chan;
2772
2773 switch (hdev->dev_type) {
2774 case HCI_BREDR:
2775 return hci_conn_hash_lookup_handle(hdev, handle);
2776 case HCI_AMP:
2777 chan = hci_chan_lookup_handle(hdev, handle);
2778 if (chan)
2779 return chan->conn;
2780 break;
2781 default:
2782 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2783 break;
2784 }
2785
2786 return NULL;
2787 }
2788
/* Handle HCI Number of Completed Data Blocks event (block-based flow
 * control).
 *
 * For every reported handle, credit the completed blocks back to the
 * shared block budget (clamped to num_blocks) and reschedule the TX
 * worker.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* Only valid for block-based flow control controllers */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate the skb holds num_hndl entries; short-circuit keeps
	 * num_hndl from being read on a truncated event header.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle may refer to an AMP logical channel, hence the
		 * device-type-aware lookup.
		 */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	queue_work(hdev->workqueue, &hdev->tx_work);
}
2838
2839 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2840 {
2841 struct hci_ev_mode_change *ev = (void *) skb->data;
2842 struct hci_conn *conn;
2843
2844 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2845
2846 hci_dev_lock(hdev);
2847
2848 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2849 if (conn) {
2850 conn->mode = ev->mode;
2851 conn->interval = __le16_to_cpu(ev->interval);
2852
2853 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2854 &conn->flags)) {
2855 if (conn->mode == HCI_CM_ACTIVE)
2856 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2857 else
2858 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2859 }
2860
2861 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2862 hci_sco_setup(conn, ev->status);
2863 }
2864
2865 hci_dev_unlock(hdev);
2866 }
2867
/* Handle HCI PIN Code Request event.
 *
 * Rejects the request outright when the device is not pairable;
 * otherwise forwards it to user space via mgmt, flagging whether the
 * pending security level demands a secure (16-digit) PIN.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* hold+put with the new disc_timeout appears to refresh
		 * the idle-disconnect timer for the duration of pairing
		 * — NOTE(review): confirm against hci_conn_put().
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_put(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
2904
/* Handle HCI Link Key Request event.
 *
 * Looks up the stored link key for the peer and replies with it, unless
 * policy forbids its use: debug keys while debug keys are disabled, an
 * unauthenticated key for a connection that requires MITM protection,
 * or a combination key made from a short PIN when high security is
 * pending. In all refusal cases a negative reply is sent instead.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Without kernel-side key storage the request must be answered
	 * elsewhere (e.g. legacy user space).
	 */
	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* auth_type bit 0 set means MITM protection required;
		 * 0xff means the requirement is not yet known.
		 */
		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
2967
/* Handle HCI Link Key Notification event.
 *
 * Records the new key's type and PIN length on the connection (a
 * changed combination key keeps the previously stored type) and, when
 * kernel link key storage is enabled, persists the key.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* hold+put refreshes the disconnect timer back to the
		 * normal timeout now that pairing produced a key —
		 * NOTE(review): confirm against hci_conn_put().
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_put(conn);
	}

	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				 ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
2996
2997 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
2998 {
2999 struct hci_ev_clock_offset *ev = (void *) skb->data;
3000 struct hci_conn *conn;
3001
3002 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3003
3004 hci_dev_lock(hdev);
3005
3006 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3007 if (conn && !ev->status) {
3008 struct inquiry_entry *ie;
3009
3010 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3011 if (ie) {
3012 ie->data.clock_offset = ev->clock_offset;
3013 ie->timestamp = jiffies;
3014 }
3015 }
3016
3017 hci_dev_unlock(hdev);
3018 }
3019
3020 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3021 {
3022 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3023 struct hci_conn *conn;
3024
3025 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3026
3027 hci_dev_lock(hdev);
3028
3029 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3030 if (conn && !ev->status)
3031 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3032
3033 hci_dev_unlock(hdev);
3034 }
3035
3036 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3037 {
3038 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3039 struct inquiry_entry *ie;
3040
3041 BT_DBG("%s", hdev->name);
3042
3043 hci_dev_lock(hdev);
3044
3045 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3046 if (ie) {
3047 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3048 ie->timestamp = jiffies;
3049 }
3050
3051 hci_dev_unlock(hdev);
3052 }
3053
/* Handle HCI Inquiry Result with RSSI event.
 *
 * The event comes in two on-air layouts — with or without the legacy
 * pscan_mode field — distinguished here by the per-response record
 * size. Each response is added to the inquiry cache and forwarded to
 * mgmt as a found device. Responses arriving during a periodic inquiry
 * are dropped.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known, ssp;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Record size other than the RSSI-only layout implies the
	 * variant that also carries pscan_mode.
	 */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3113
/* Handle HCI Read Remote Extended Features Complete event.
 *
 * For features page 0x01, notes the remote host's SSP support on both
 * the inquiry cache entry and the connection. While the link is still
 * in BT_CONFIG, setup then continues exactly as in
 * hci_remote_features_evt: request the remote name or report the
 * connection to mgmt, and mark the link connected if no outgoing
 * authentication is needed.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		/* drop the setup-phase reference */
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3162
/* Handle HCI Synchronous Connection Complete event.
 *
 * The connection may have been attempted as eSCO and completed by the
 * controller as plain SCO; the lookup falls back to the eSCO entry and
 * fixes up the type. For a small set of parameter-rejection errors a
 * single retry is made with a reduced packet type before the link is
 * given up on.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		/* eSCO attempt completed as SCO; adopt the eSCO entry */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		/* Retry once with the SCO/EDR-masked packet types —
		 * presumably a more conservative parameter set; confirm
		 * against the esco_type mask definitions.
		 */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3218
/* Handle HCI Synchronous Connection Changed event. No action is taken;
 * the event is only logged.
 */
static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
3223
/* Handle HCI Sniff Subrating event. The negotiated subrating parameters
 * are not stored; only the status is logged.
 */
static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_sniff_subrate *ev = (void *) skb->data;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
}
3230
/* Handle HCI Extended Inquiry Result event.
 *
 * Each response carries an EIR blob; whether the remote name is already
 * known is derived from the presence of a complete-name EIR field (only
 * relevant when mgmt is in use). Responses are cached and forwarded to
 * mgmt together with the EIR data. Responses arriving during a periodic
 * inquiry are dropped.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		/* EIR responses imply an SSP-capable remote */
		data.ssp_mode = 0x01;

		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
						      &ssp);
		eir_len = eir_get_length(info->data, sizeof(info->data));
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi, !name_known,
				  ssp, info->data, eir_len);
	}

	hci_dev_unlock(hdev);
}
3278
/* Handle HCI Encryption Key Refresh Complete event.
 *
 * On success the pending security level becomes effective. A failure on
 * an established link tears the connection down with an authentication
 * failure; otherwise the result is propagated to the upper layers —
 * as a connect confirmation while still configuring, or as an auth
 * confirmation on an already established link.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	if (ev->status && conn->state == BT_CONNECTED) {
		hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_put(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* hold+put refreshes the disconnect timer with the
		 * normal timeout — NOTE(review): confirm against
		 * hci_conn_put().
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3322
/* Select the authentication requirements value to send in an IO
 * Capability Reply, based on what the remote requested.
 *
 * Values follow the HCI authentication-requirements encoding:
 * 0x00/0x01 = no bonding without/with MITM, 0x02/0x03 = dedicated
 * bonding without/with MITM; IO capability 0x03 is NoInputNoOutput
 * (cannot support MITM) — verify against the Bluetooth core spec
 * assigned numbers.
 */
static u8 hci_get_auth_req(struct hci_conn *conn)
{
	/* If remote requests dedicated bonding follow that lead */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
		/* Require MITM only when both sides' IO capabilities can
		 * actually provide it; a NoInputNoOutput (0x03) side on
		 * either end forces dedicated bonding without MITM.
		 */
		if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
			return 0x02;
		else
			return 0x03;
	}

	/* If remote requests no-bonding follow that lead, keeping our
	 * own MITM bit (bit 0 of auth_type).
	 */
	if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
		return conn->remote_auth | (conn->auth_type & 0x01);

	return conn->auth_type;
}
3341
/* Handle HCI IO Capability Request event.
 *
 * Replies with our IO capability and computed authentication
 * requirements when pairing is acceptable (device pairable, or the
 * remote only wants no-bonding); otherwise sends a negative reply with
 * "pairing not allowed". Only handled when mgmt is in use.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of pairing */
	hci_conn_hold(conn);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	/* remote_auth with the MITM bit masked off equals NO_BONDING
	 * when the remote is not asking for any bonding at all.
	 */
	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				0x01 : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3393
3394 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3395 {
3396 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3397 struct hci_conn *conn;
3398
3399 BT_DBG("%s", hdev->name);
3400
3401 hci_dev_lock(hdev);
3402
3403 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3404 if (!conn)
3405 goto unlock;
3406
3407 conn->remote_cap = ev->capability;
3408 conn->remote_auth = ev->authentication;
3409 if (ev->oob_data)
3410 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3411
3412 unlock:
3413 hci_dev_unlock(hdev);
3414 }
3415
/* Handle HCI User Confirmation Request event (SSP numeric comparison).
 *
 * Policy: reject when we require MITM but the remote cannot provide it
 * (unless we initiated dedicated bonding); auto-accept — immediately or
 * after a configurable delay — when neither side needs MITM and we
 * initiated the authentication; otherwise forward the request to user
 * space via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* bit 0 of the auth requirements is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
	    (!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
				  confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3483
3484 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3485 struct sk_buff *skb)
3486 {
3487 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3488
3489 BT_DBG("%s", hdev->name);
3490
3491 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3492 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3493 }
3494
3495 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3496 struct sk_buff *skb)
3497 {
3498 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3499 struct hci_conn *conn;
3500
3501 BT_DBG("%s", hdev->name);
3502
3503 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3504 if (!conn)
3505 return;
3506
3507 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3508 conn->passkey_entered = 0;
3509
3510 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3511 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3512 conn->dst_type, conn->passkey_notify,
3513 conn->passkey_entered);
3514 }
3515
/* Handle HCI Keypress Notification event (SSP passkey entry progress).
 *
 * Tracks how many digits the remote user has entered and forwards the
 * updated count to user space via mgmt. Start and completion events
 * return early without notifying; only entered/erased/cleared changes
 * are reported.
 */
static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		return;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
3553
/* Simple Pairing Complete event: reports the outcome of an SSP pairing
 * attempt on the ACL link identified by ev->bdaddr. On failure this may
 * surface an auth_failed event to user space (see comment below).
 */
static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* To avoid duplicate auth_failed events to user space we check
	 * the HCI_CONN_AUTH_PEND flag which will be set if we
	 * initiated the authentication. A traditional auth_complete
	 * event gets always produced as initiator and is also mapped to
	 * the mgmt_auth_failed event */
	if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);

	/* NOTE(review): this put presumably balances a reference taken
	 * earlier in the pairing flow (not visible in this section) —
	 * confirm against the IO capability / confirm request handlers. */
	hci_conn_put(conn);

unlock:
	hci_dev_unlock(hdev);
}
3582
3583 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3584 struct sk_buff *skb)
3585 {
3586 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3587 struct inquiry_entry *ie;
3588
3589 BT_DBG("%s", hdev->name);
3590
3591 hci_dev_lock(hdev);
3592
3593 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3594 if (ie)
3595 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3596
3597 hci_dev_unlock(hdev);
3598 }
3599
3600 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3601 struct sk_buff *skb)
3602 {
3603 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3604 struct oob_data *data;
3605
3606 BT_DBG("%s", hdev->name);
3607
3608 hci_dev_lock(hdev);
3609
3610 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3611 goto unlock;
3612
3613 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3614 if (data) {
3615 struct hci_cp_remote_oob_data_reply cp;
3616
3617 bacpy(&cp.bdaddr, &ev->bdaddr);
3618 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3619 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3620
3621 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3622 &cp);
3623 } else {
3624 struct hci_cp_remote_oob_data_neg_reply cp;
3625
3626 bacpy(&cp.bdaddr, &ev->bdaddr);
3627 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3628 &cp);
3629 }
3630
3631 unlock:
3632 hci_dev_unlock(hdev);
3633 }
3634
/* LE Connection Complete event: finalizes (or fails) an LE connection.
 * If no hci_conn in BT_CONNECT state exists (remote-initiated case),
 * one is created here before the status is examined.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	/* Locally-initiated connections have a pending hci_conn in
	 * BT_CONNECT state; otherwise allocate one for the remote peer. */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* Master role implies we are the outgoing side */
		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}
	}

	/* Non-zero status: report the failure, confirm to the upper
	 * protocol layer, then tear the connection object down. */
	if (ev->status) {
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	/* Announce to mgmt only once per connection */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3685
3686 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3687 {
3688 u8 num_reports = skb->data[0];
3689 void *ptr = &skb->data[1];
3690 s8 rssi;
3691
3692 hci_dev_lock(hdev);
3693
3694 while (num_reports--) {
3695 struct hci_ev_le_advertising_info *ev = ptr;
3696
3697 rssi = ev->data[ev->length];
3698 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3699 NULL, rssi, 0, 1, ev->data, ev->length);
3700
3701 ptr += sizeof(*ev) + ev->length + 1;
3702 }
3703
3704 hci_dev_unlock(hdev);
3705 }
3706
/* LE Long Term Key Request event: the controller asks for the LTK that
 * matches the given ediv/random values for an encrypted connection.
 * Reply with the stored key, or send a negative reply when either the
 * connection or the key cannot be found.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	if (ltk->authenticated)
		conn->sec_level = BT_SECURITY_HIGH;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* STK entries are removed after a single use — presumably
	 * because the short term key is only valid for the current
	 * pairing session (NOTE(review): confirm against smp.c). */
	if (ltk->type & HCI_SMP_STK) {
		list_del(&ltk->list);
		kfree(ltk);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	/* ev->handle is already little-endian; pass it through unchanged */
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
3749
3750 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3751 {
3752 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3753
3754 skb_pull(skb, sizeof(*le_ev));
3755
3756 switch (le_ev->subevent) {
3757 case HCI_EV_LE_CONN_COMPLETE:
3758 hci_le_conn_complete_evt(hdev, skb);
3759 break;
3760
3761 case HCI_EV_LE_ADVERTISING_REPORT:
3762 hci_le_adv_report_evt(hdev, skb);
3763 break;
3764
3765 case HCI_EV_LE_LTK_REQ:
3766 hci_le_ltk_request_evt(hdev, skb);
3767 break;
3768
3769 default:
3770 break;
3771 }
3772 }
3773
3774 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3775 {
3776 struct hci_ev_channel_selected *ev = (void *) skb->data;
3777 struct hci_conn *hcon;
3778
3779 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3780
3781 skb_pull(skb, sizeof(*ev));
3782
3783 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3784 if (!hcon)
3785 return;
3786
3787 amp_read_loc_assoc_final_data(hdev, hcon);
3788 }
3789
/* Main HCI event demultiplexer: strips the event header and dispatches
 * the packet to the matching handler. Consumes the skb in all cases and
 * counts it in the device's receive statistics.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_VERSION:
		hci_remote_version_evt(hdev, skb);
		break;

	case HCI_EV_QOS_SETUP_COMPLETE:
		hci_qos_setup_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_CHANGED:
		hci_sync_conn_changed_evt(hdev, skb);
		break;

	case HCI_EV_SNIFF_SUBRATE:
		hci_sniff_subrate_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		/* Unknown events are logged at debug level and dropped */
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
This page took 0.143369 seconds and 5 git commands to generate.