Bluetooth: AMP: Process Physical Link Complete evt
[deliverable/linux.git] / net / bluetooth / hci_event.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI event handling. */
26
27 #include <linux/export.h>
28 #include <asm/unaligned.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/mgmt.h>
33 #include <net/bluetooth/a2mp.h>
34 #include <net/bluetooth/amp.h>
35
36 /* Handle HCI Event packets */
37
38 static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
39 {
40 __u8 status = *((__u8 *) skb->data);
41
42 BT_DBG("%s status 0x%2.2x", hdev->name, status);
43
44 if (status) {
45 hci_dev_lock(hdev);
46 mgmt_stop_discovery_failed(hdev, status);
47 hci_dev_unlock(hdev);
48 return;
49 }
50
51 clear_bit(HCI_INQUIRY, &hdev->flags);
52
53 hci_dev_lock(hdev);
54 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
55 hci_dev_unlock(hdev);
56
57 hci_req_complete(hdev, HCI_OP_INQUIRY_CANCEL, status);
58
59 hci_conn_check_pending(hdev);
60 }
61
62 static void hci_cc_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
63 {
64 __u8 status = *((__u8 *) skb->data);
65
66 BT_DBG("%s status 0x%2.2x", hdev->name, status);
67
68 if (status)
69 return;
70
71 set_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
72 }
73
74 static void hci_cc_exit_periodic_inq(struct hci_dev *hdev, struct sk_buff *skb)
75 {
76 __u8 status = *((__u8 *) skb->data);
77
78 BT_DBG("%s status 0x%2.2x", hdev->name, status);
79
80 if (status)
81 return;
82
83 clear_bit(HCI_PERIODIC_INQ, &hdev->dev_flags);
84
85 hci_conn_check_pending(hdev);
86 }
87
88 static void hci_cc_remote_name_req_cancel(struct hci_dev *hdev,
89 struct sk_buff *skb)
90 {
91 BT_DBG("%s", hdev->name);
92 }
93
94 static void hci_cc_role_discovery(struct hci_dev *hdev, struct sk_buff *skb)
95 {
96 struct hci_rp_role_discovery *rp = (void *) skb->data;
97 struct hci_conn *conn;
98
99 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
100
101 if (rp->status)
102 return;
103
104 hci_dev_lock(hdev);
105
106 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
107 if (conn) {
108 if (rp->role)
109 conn->link_mode &= ~HCI_LM_MASTER;
110 else
111 conn->link_mode |= HCI_LM_MASTER;
112 }
113
114 hci_dev_unlock(hdev);
115 }
116
117 static void hci_cc_read_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
118 {
119 struct hci_rp_read_link_policy *rp = (void *) skb->data;
120 struct hci_conn *conn;
121
122 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
123
124 if (rp->status)
125 return;
126
127 hci_dev_lock(hdev);
128
129 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
130 if (conn)
131 conn->link_policy = __le16_to_cpu(rp->policy);
132
133 hci_dev_unlock(hdev);
134 }
135
136 static void hci_cc_write_link_policy(struct hci_dev *hdev, struct sk_buff *skb)
137 {
138 struct hci_rp_write_link_policy *rp = (void *) skb->data;
139 struct hci_conn *conn;
140 void *sent;
141
142 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
143
144 if (rp->status)
145 return;
146
147 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY);
148 if (!sent)
149 return;
150
151 hci_dev_lock(hdev);
152
153 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle));
154 if (conn)
155 conn->link_policy = get_unaligned_le16(sent + 2);
156
157 hci_dev_unlock(hdev);
158 }
159
160 static void hci_cc_read_def_link_policy(struct hci_dev *hdev,
161 struct sk_buff *skb)
162 {
163 struct hci_rp_read_def_link_policy *rp = (void *) skb->data;
164
165 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
166
167 if (rp->status)
168 return;
169
170 hdev->link_policy = __le16_to_cpu(rp->policy);
171 }
172
173 static void hci_cc_write_def_link_policy(struct hci_dev *hdev,
174 struct sk_buff *skb)
175 {
176 __u8 status = *((__u8 *) skb->data);
177 void *sent;
178
179 BT_DBG("%s status 0x%2.2x", hdev->name, status);
180
181 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY);
182 if (!sent)
183 return;
184
185 if (!status)
186 hdev->link_policy = get_unaligned_le16(sent);
187
188 hci_req_complete(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, status);
189 }
190
191 static void hci_cc_reset(struct hci_dev *hdev, struct sk_buff *skb)
192 {
193 __u8 status = *((__u8 *) skb->data);
194
195 BT_DBG("%s status 0x%2.2x", hdev->name, status);
196
197 clear_bit(HCI_RESET, &hdev->flags);
198
199 hci_req_complete(hdev, HCI_OP_RESET, status);
200
201 /* Reset all non-persistent flags */
202 hdev->dev_flags &= ~(BIT(HCI_LE_SCAN) | BIT(HCI_PENDING_CLASS) |
203 BIT(HCI_PERIODIC_INQ));
204
205 hdev->discovery.state = DISCOVERY_STOPPED;
206 }
207
208 static void hci_cc_write_local_name(struct hci_dev *hdev, struct sk_buff *skb)
209 {
210 __u8 status = *((__u8 *) skb->data);
211 void *sent;
212
213 BT_DBG("%s status 0x%2.2x", hdev->name, status);
214
215 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME);
216 if (!sent)
217 return;
218
219 hci_dev_lock(hdev);
220
221 if (test_bit(HCI_MGMT, &hdev->dev_flags))
222 mgmt_set_local_name_complete(hdev, sent, status);
223 else if (!status)
224 memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH);
225
226 hci_dev_unlock(hdev);
227
228 hci_req_complete(hdev, HCI_OP_WRITE_LOCAL_NAME, status);
229 }
230
231 static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
232 {
233 struct hci_rp_read_local_name *rp = (void *) skb->data;
234
235 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
236
237 if (rp->status)
238 return;
239
240 if (test_bit(HCI_SETUP, &hdev->dev_flags))
241 memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
242 }
243
244 static void hci_cc_write_auth_enable(struct hci_dev *hdev, struct sk_buff *skb)
245 {
246 __u8 status = *((__u8 *) skb->data);
247 void *sent;
248
249 BT_DBG("%s status 0x%2.2x", hdev->name, status);
250
251 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE);
252 if (!sent)
253 return;
254
255 if (!status) {
256 __u8 param = *((__u8 *) sent);
257
258 if (param == AUTH_ENABLED)
259 set_bit(HCI_AUTH, &hdev->flags);
260 else
261 clear_bit(HCI_AUTH, &hdev->flags);
262 }
263
264 if (test_bit(HCI_MGMT, &hdev->dev_flags))
265 mgmt_auth_enable_complete(hdev, status);
266
267 hci_req_complete(hdev, HCI_OP_WRITE_AUTH_ENABLE, status);
268 }
269
270 static void hci_cc_write_encrypt_mode(struct hci_dev *hdev, struct sk_buff *skb)
271 {
272 __u8 status = *((__u8 *) skb->data);
273 void *sent;
274
275 BT_DBG("%s status 0x%2.2x", hdev->name, status);
276
277 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE);
278 if (!sent)
279 return;
280
281 if (!status) {
282 __u8 param = *((__u8 *) sent);
283
284 if (param)
285 set_bit(HCI_ENCRYPT, &hdev->flags);
286 else
287 clear_bit(HCI_ENCRYPT, &hdev->flags);
288 }
289
290 hci_req_complete(hdev, HCI_OP_WRITE_ENCRYPT_MODE, status);
291 }
292
293 static void hci_cc_write_scan_enable(struct hci_dev *hdev, struct sk_buff *skb)
294 {
295 __u8 param, status = *((__u8 *) skb->data);
296 int old_pscan, old_iscan;
297 void *sent;
298
299 BT_DBG("%s status 0x%2.2x", hdev->name, status);
300
301 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
302 if (!sent)
303 return;
304
305 param = *((__u8 *) sent);
306
307 hci_dev_lock(hdev);
308
309 if (status) {
310 mgmt_write_scan_failed(hdev, param, status);
311 hdev->discov_timeout = 0;
312 goto done;
313 }
314
315 old_pscan = test_and_clear_bit(HCI_PSCAN, &hdev->flags);
316 old_iscan = test_and_clear_bit(HCI_ISCAN, &hdev->flags);
317
318 if (param & SCAN_INQUIRY) {
319 set_bit(HCI_ISCAN, &hdev->flags);
320 if (!old_iscan)
321 mgmt_discoverable(hdev, 1);
322 if (hdev->discov_timeout > 0) {
323 int to = msecs_to_jiffies(hdev->discov_timeout * 1000);
324 queue_delayed_work(hdev->workqueue, &hdev->discov_off,
325 to);
326 }
327 } else if (old_iscan)
328 mgmt_discoverable(hdev, 0);
329
330 if (param & SCAN_PAGE) {
331 set_bit(HCI_PSCAN, &hdev->flags);
332 if (!old_pscan)
333 mgmt_connectable(hdev, 1);
334 } else if (old_pscan)
335 mgmt_connectable(hdev, 0);
336
337 done:
338 hci_dev_unlock(hdev);
339 hci_req_complete(hdev, HCI_OP_WRITE_SCAN_ENABLE, status);
340 }
341
342 static void hci_cc_read_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
343 {
344 struct hci_rp_read_class_of_dev *rp = (void *) skb->data;
345
346 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
347
348 if (rp->status)
349 return;
350
351 memcpy(hdev->dev_class, rp->dev_class, 3);
352
353 BT_DBG("%s class 0x%.2x%.2x%.2x", hdev->name,
354 hdev->dev_class[2], hdev->dev_class[1], hdev->dev_class[0]);
355 }
356
357 static void hci_cc_write_class_of_dev(struct hci_dev *hdev, struct sk_buff *skb)
358 {
359 __u8 status = *((__u8 *) skb->data);
360 void *sent;
361
362 BT_DBG("%s status 0x%2.2x", hdev->name, status);
363
364 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV);
365 if (!sent)
366 return;
367
368 hci_dev_lock(hdev);
369
370 if (status == 0)
371 memcpy(hdev->dev_class, sent, 3);
372
373 if (test_bit(HCI_MGMT, &hdev->dev_flags))
374 mgmt_set_class_of_dev_complete(hdev, sent, status);
375
376 hci_dev_unlock(hdev);
377 }
378
379 static void hci_cc_read_voice_setting(struct hci_dev *hdev, struct sk_buff *skb)
380 {
381 struct hci_rp_read_voice_setting *rp = (void *) skb->data;
382 __u16 setting;
383
384 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
385
386 if (rp->status)
387 return;
388
389 setting = __le16_to_cpu(rp->voice_setting);
390
391 if (hdev->voice_setting == setting)
392 return;
393
394 hdev->voice_setting = setting;
395
396 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
397
398 if (hdev->notify)
399 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
400 }
401
402 static void hci_cc_write_voice_setting(struct hci_dev *hdev,
403 struct sk_buff *skb)
404 {
405 __u8 status = *((__u8 *) skb->data);
406 __u16 setting;
407 void *sent;
408
409 BT_DBG("%s status 0x%2.2x", hdev->name, status);
410
411 if (status)
412 return;
413
414 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING);
415 if (!sent)
416 return;
417
418 setting = get_unaligned_le16(sent);
419
420 if (hdev->voice_setting == setting)
421 return;
422
423 hdev->voice_setting = setting;
424
425 BT_DBG("%s voice setting 0x%4.4x", hdev->name, setting);
426
427 if (hdev->notify)
428 hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING);
429 }
430
431 static void hci_cc_host_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
432 {
433 __u8 status = *((__u8 *) skb->data);
434
435 BT_DBG("%s status 0x%2.2x", hdev->name, status);
436
437 hci_req_complete(hdev, HCI_OP_HOST_BUFFER_SIZE, status);
438 }
439
440 static void hci_cc_write_ssp_mode(struct hci_dev *hdev, struct sk_buff *skb)
441 {
442 __u8 status = *((__u8 *) skb->data);
443 struct hci_cp_write_ssp_mode *sent;
444
445 BT_DBG("%s status 0x%2.2x", hdev->name, status);
446
447 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE);
448 if (!sent)
449 return;
450
451 if (!status) {
452 if (sent->mode)
453 hdev->host_features[0] |= LMP_HOST_SSP;
454 else
455 hdev->host_features[0] &= ~LMP_HOST_SSP;
456 }
457
458 if (test_bit(HCI_MGMT, &hdev->dev_flags))
459 mgmt_ssp_enable_complete(hdev, sent->mode, status);
460 else if (!status) {
461 if (sent->mode)
462 set_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
463 else
464 clear_bit(HCI_SSP_ENABLED, &hdev->dev_flags);
465 }
466 }
467
468 static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
469 {
470 if (lmp_ext_inq_capable(hdev))
471 return 2;
472
473 if (lmp_inq_rssi_capable(hdev))
474 return 1;
475
476 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
477 hdev->lmp_subver == 0x0757)
478 return 1;
479
480 if (hdev->manufacturer == 15) {
481 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
482 return 1;
483 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
484 return 1;
485 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
486 return 1;
487 }
488
489 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
490 hdev->lmp_subver == 0x1805)
491 return 1;
492
493 return 0;
494 }
495
496 static void hci_setup_inquiry_mode(struct hci_dev *hdev)
497 {
498 u8 mode;
499
500 mode = hci_get_inquiry_mode(hdev);
501
502 hci_send_cmd(hdev, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
503 }
504
505 static void hci_setup_event_mask(struct hci_dev *hdev)
506 {
507 /* The second byte is 0xff instead of 0x9f (two reserved bits
508 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
509 * command otherwise */
510 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
511
512 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
513 * any event mask for pre 1.2 devices */
514 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
515 return;
516
517 if (lmp_bredr_capable(hdev)) {
518 events[4] |= 0x01; /* Flow Specification Complete */
519 events[4] |= 0x02; /* Inquiry Result with RSSI */
520 events[4] |= 0x04; /* Read Remote Extended Features Complete */
521 events[5] |= 0x08; /* Synchronous Connection Complete */
522 events[5] |= 0x10; /* Synchronous Connection Changed */
523 }
524
525 if (lmp_inq_rssi_capable(hdev))
526 events[4] |= 0x02; /* Inquiry Result with RSSI */
527
528 if (lmp_sniffsubr_capable(hdev))
529 events[5] |= 0x20; /* Sniff Subrating */
530
531 if (lmp_pause_enc_capable(hdev))
532 events[5] |= 0x80; /* Encryption Key Refresh Complete */
533
534 if (lmp_ext_inq_capable(hdev))
535 events[5] |= 0x40; /* Extended Inquiry Result */
536
537 if (lmp_no_flush_capable(hdev))
538 events[7] |= 0x01; /* Enhanced Flush Complete */
539
540 if (lmp_lsto_capable(hdev))
541 events[6] |= 0x80; /* Link Supervision Timeout Changed */
542
543 if (lmp_ssp_capable(hdev)) {
544 events[6] |= 0x01; /* IO Capability Request */
545 events[6] |= 0x02; /* IO Capability Response */
546 events[6] |= 0x04; /* User Confirmation Request */
547 events[6] |= 0x08; /* User Passkey Request */
548 events[6] |= 0x10; /* Remote OOB Data Request */
549 events[6] |= 0x20; /* Simple Pairing Complete */
550 events[7] |= 0x04; /* User Passkey Notification */
551 events[7] |= 0x08; /* Keypress Notification */
552 events[7] |= 0x10; /* Remote Host Supported
553 * Features Notification */
554 }
555
556 if (lmp_le_capable(hdev))
557 events[7] |= 0x20; /* LE Meta-Event */
558
559 hci_send_cmd(hdev, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
560
561 if (lmp_le_capable(hdev)) {
562 memset(events, 0, sizeof(events));
563 events[0] = 0x1f;
564 hci_send_cmd(hdev, HCI_OP_LE_SET_EVENT_MASK,
565 sizeof(events), events);
566 }
567 }
568
569 static void bredr_setup(struct hci_dev *hdev)
570 {
571 struct hci_cp_delete_stored_link_key cp;
572 __le16 param;
573 __u8 flt_type;
574
575 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
576 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
577
578 /* Read Class of Device */
579 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
580
581 /* Read Local Name */
582 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
583
584 /* Read Voice Setting */
585 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
586
587 /* Clear Event Filters */
588 flt_type = HCI_FLT_CLEAR_ALL;
589 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
590
591 /* Connection accept timeout ~20 secs */
592 param = __constant_cpu_to_le16(0x7d00);
593 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
594
595 bacpy(&cp.bdaddr, BDADDR_ANY);
596 cp.delete_all = 1;
597 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
598 }
599
600 static void le_setup(struct hci_dev *hdev)
601 {
602 /* Read LE Buffer Size */
603 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
604
605 /* Read LE Advertising Channel TX Power */
606 hci_send_cmd(hdev, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
607 }
608
609 static void hci_setup(struct hci_dev *hdev)
610 {
611 if (hdev->dev_type != HCI_BREDR)
612 return;
613
614 /* Read BD Address */
615 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
616
617 if (lmp_bredr_capable(hdev))
618 bredr_setup(hdev);
619
620 if (lmp_le_capable(hdev))
621 le_setup(hdev);
622
623 hci_setup_event_mask(hdev);
624
625 if (hdev->hci_ver > BLUETOOTH_VER_1_1)
626 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
627
628 if (lmp_ssp_capable(hdev)) {
629 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
630 u8 mode = 0x01;
631 hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE,
632 sizeof(mode), &mode);
633 } else {
634 struct hci_cp_write_eir cp;
635
636 memset(hdev->eir, 0, sizeof(hdev->eir));
637 memset(&cp, 0, sizeof(cp));
638
639 hci_send_cmd(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
640 }
641 }
642
643 if (lmp_inq_rssi_capable(hdev))
644 hci_setup_inquiry_mode(hdev);
645
646 if (lmp_inq_tx_pwr_capable(hdev))
647 hci_send_cmd(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
648
649 if (lmp_ext_feat_capable(hdev)) {
650 struct hci_cp_read_local_ext_features cp;
651
652 cp.page = 0x01;
653 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, sizeof(cp),
654 &cp);
655 }
656
657 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
658 u8 enable = 1;
659 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
660 &enable);
661 }
662 }
663
664 static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
665 {
666 struct hci_rp_read_local_version *rp = (void *) skb->data;
667
668 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
669
670 if (rp->status)
671 goto done;
672
673 hdev->hci_ver = rp->hci_ver;
674 hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
675 hdev->lmp_ver = rp->lmp_ver;
676 hdev->manufacturer = __le16_to_cpu(rp->manufacturer);
677 hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver);
678
679 BT_DBG("%s manufacturer 0x%4.4x hci ver %d:%d", hdev->name,
680 hdev->manufacturer, hdev->hci_ver, hdev->hci_rev);
681
682 if (test_bit(HCI_INIT, &hdev->flags))
683 hci_setup(hdev);
684
685 done:
686 hci_req_complete(hdev, HCI_OP_READ_LOCAL_VERSION, rp->status);
687 }
688
689 static void hci_setup_link_policy(struct hci_dev *hdev)
690 {
691 struct hci_cp_write_def_link_policy cp;
692 u16 link_policy = 0;
693
694 if (lmp_rswitch_capable(hdev))
695 link_policy |= HCI_LP_RSWITCH;
696 if (lmp_hold_capable(hdev))
697 link_policy |= HCI_LP_HOLD;
698 if (lmp_sniff_capable(hdev))
699 link_policy |= HCI_LP_SNIFF;
700 if (lmp_park_capable(hdev))
701 link_policy |= HCI_LP_PARK;
702
703 cp.policy = cpu_to_le16(link_policy);
704 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
705 }
706
707 static void hci_cc_read_local_commands(struct hci_dev *hdev,
708 struct sk_buff *skb)
709 {
710 struct hci_rp_read_local_commands *rp = (void *) skb->data;
711
712 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
713
714 if (rp->status)
715 goto done;
716
717 memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
718
719 if (test_bit(HCI_INIT, &hdev->flags) && (hdev->commands[5] & 0x10))
720 hci_setup_link_policy(hdev);
721
722 done:
723 hci_req_complete(hdev, HCI_OP_READ_LOCAL_COMMANDS, rp->status);
724 }
725
726 static void hci_cc_read_local_features(struct hci_dev *hdev,
727 struct sk_buff *skb)
728 {
729 struct hci_rp_read_local_features *rp = (void *) skb->data;
730
731 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
732
733 if (rp->status)
734 return;
735
736 memcpy(hdev->features, rp->features, 8);
737
738 /* Adjust default settings according to features
739 * supported by device. */
740
741 if (hdev->features[0] & LMP_3SLOT)
742 hdev->pkt_type |= (HCI_DM3 | HCI_DH3);
743
744 if (hdev->features[0] & LMP_5SLOT)
745 hdev->pkt_type |= (HCI_DM5 | HCI_DH5);
746
747 if (hdev->features[1] & LMP_HV2) {
748 hdev->pkt_type |= (HCI_HV2);
749 hdev->esco_type |= (ESCO_HV2);
750 }
751
752 if (hdev->features[1] & LMP_HV3) {
753 hdev->pkt_type |= (HCI_HV3);
754 hdev->esco_type |= (ESCO_HV3);
755 }
756
757 if (lmp_esco_capable(hdev))
758 hdev->esco_type |= (ESCO_EV3);
759
760 if (hdev->features[4] & LMP_EV4)
761 hdev->esco_type |= (ESCO_EV4);
762
763 if (hdev->features[4] & LMP_EV5)
764 hdev->esco_type |= (ESCO_EV5);
765
766 if (hdev->features[5] & LMP_EDR_ESCO_2M)
767 hdev->esco_type |= (ESCO_2EV3);
768
769 if (hdev->features[5] & LMP_EDR_ESCO_3M)
770 hdev->esco_type |= (ESCO_3EV3);
771
772 if (hdev->features[5] & LMP_EDR_3S_ESCO)
773 hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5);
774
775 BT_DBG("%s features 0x%.2x%.2x%.2x%.2x%.2x%.2x%.2x%.2x", hdev->name,
776 hdev->features[0], hdev->features[1],
777 hdev->features[2], hdev->features[3],
778 hdev->features[4], hdev->features[5],
779 hdev->features[6], hdev->features[7]);
780 }
781
782 static void hci_set_le_support(struct hci_dev *hdev)
783 {
784 struct hci_cp_write_le_host_supported cp;
785
786 memset(&cp, 0, sizeof(cp));
787
788 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
789 cp.le = 1;
790 cp.simul = !!lmp_le_br_capable(hdev);
791 }
792
793 if (cp.le != !!lmp_host_le_capable(hdev))
794 hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
795 &cp);
796 }
797
798 static void hci_cc_read_local_ext_features(struct hci_dev *hdev,
799 struct sk_buff *skb)
800 {
801 struct hci_rp_read_local_ext_features *rp = (void *) skb->data;
802
803 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
804
805 if (rp->status)
806 goto done;
807
808 switch (rp->page) {
809 case 0:
810 memcpy(hdev->features, rp->features, 8);
811 break;
812 case 1:
813 memcpy(hdev->host_features, rp->features, 8);
814 break;
815 }
816
817 if (test_bit(HCI_INIT, &hdev->flags) && lmp_le_capable(hdev))
818 hci_set_le_support(hdev);
819
820 done:
821 hci_req_complete(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, rp->status);
822 }
823
824 static void hci_cc_read_flow_control_mode(struct hci_dev *hdev,
825 struct sk_buff *skb)
826 {
827 struct hci_rp_read_flow_control_mode *rp = (void *) skb->data;
828
829 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
830
831 if (rp->status)
832 return;
833
834 hdev->flow_ctl_mode = rp->mode;
835
836 hci_req_complete(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, rp->status);
837 }
838
839 static void hci_cc_read_buffer_size(struct hci_dev *hdev, struct sk_buff *skb)
840 {
841 struct hci_rp_read_buffer_size *rp = (void *) skb->data;
842
843 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
844
845 if (rp->status)
846 return;
847
848 hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu);
849 hdev->sco_mtu = rp->sco_mtu;
850 hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt);
851 hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt);
852
853 if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) {
854 hdev->sco_mtu = 64;
855 hdev->sco_pkts = 8;
856 }
857
858 hdev->acl_cnt = hdev->acl_pkts;
859 hdev->sco_cnt = hdev->sco_pkts;
860
861 BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu,
862 hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts);
863 }
864
865 static void hci_cc_read_bd_addr(struct hci_dev *hdev, struct sk_buff *skb)
866 {
867 struct hci_rp_read_bd_addr *rp = (void *) skb->data;
868
869 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
870
871 if (!rp->status)
872 bacpy(&hdev->bdaddr, &rp->bdaddr);
873
874 hci_req_complete(hdev, HCI_OP_READ_BD_ADDR, rp->status);
875 }
876
877 static void hci_cc_read_data_block_size(struct hci_dev *hdev,
878 struct sk_buff *skb)
879 {
880 struct hci_rp_read_data_block_size *rp = (void *) skb->data;
881
882 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
883
884 if (rp->status)
885 return;
886
887 hdev->block_mtu = __le16_to_cpu(rp->max_acl_len);
888 hdev->block_len = __le16_to_cpu(rp->block_len);
889 hdev->num_blocks = __le16_to_cpu(rp->num_blocks);
890
891 hdev->block_cnt = hdev->num_blocks;
892
893 BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu,
894 hdev->block_cnt, hdev->block_len);
895
896 hci_req_complete(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, rp->status);
897 }
898
899 static void hci_cc_write_ca_timeout(struct hci_dev *hdev, struct sk_buff *skb)
900 {
901 __u8 status = *((__u8 *) skb->data);
902
903 BT_DBG("%s status 0x%2.2x", hdev->name, status);
904
905 hci_req_complete(hdev, HCI_OP_WRITE_CA_TIMEOUT, status);
906 }
907
908 static void hci_cc_read_local_amp_info(struct hci_dev *hdev,
909 struct sk_buff *skb)
910 {
911 struct hci_rp_read_local_amp_info *rp = (void *) skb->data;
912
913 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
914
915 if (rp->status)
916 goto a2mp_rsp;
917
918 hdev->amp_status = rp->amp_status;
919 hdev->amp_total_bw = __le32_to_cpu(rp->total_bw);
920 hdev->amp_max_bw = __le32_to_cpu(rp->max_bw);
921 hdev->amp_min_latency = __le32_to_cpu(rp->min_latency);
922 hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu);
923 hdev->amp_type = rp->amp_type;
924 hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap);
925 hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size);
926 hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to);
927 hdev->amp_max_flush_to = __le32_to_cpu(rp->max_flush_to);
928
929 hci_req_complete(hdev, HCI_OP_READ_LOCAL_AMP_INFO, rp->status);
930
931 a2mp_rsp:
932 a2mp_send_getinfo_rsp(hdev);
933 }
934
935 static void hci_cc_read_local_amp_assoc(struct hci_dev *hdev,
936 struct sk_buff *skb)
937 {
938 struct hci_rp_read_local_amp_assoc *rp = (void *) skb->data;
939 struct amp_assoc *assoc = &hdev->loc_assoc;
940 size_t rem_len, frag_len;
941
942 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
943
944 if (rp->status)
945 goto a2mp_rsp;
946
947 frag_len = skb->len - sizeof(*rp);
948 rem_len = __le16_to_cpu(rp->rem_len);
949
950 if (rem_len > frag_len) {
951 BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len);
952
953 memcpy(assoc->data + assoc->offset, rp->frag, frag_len);
954 assoc->offset += frag_len;
955
956 /* Read other fragments */
957 amp_read_loc_assoc_frag(hdev, rp->phy_handle);
958
959 return;
960 }
961
962 memcpy(assoc->data + assoc->offset, rp->frag, rem_len);
963 assoc->len = assoc->offset + rem_len;
964 assoc->offset = 0;
965
966 a2mp_rsp:
967 /* Send A2MP Rsp when all fragments are received */
968 a2mp_send_getampassoc_rsp(hdev, rp->status);
969 a2mp_send_create_phy_link_req(hdev, rp->status);
970 }
971
972 static void hci_cc_delete_stored_link_key(struct hci_dev *hdev,
973 struct sk_buff *skb)
974 {
975 __u8 status = *((__u8 *) skb->data);
976
977 BT_DBG("%s status 0x%2.2x", hdev->name, status);
978
979 hci_req_complete(hdev, HCI_OP_DELETE_STORED_LINK_KEY, status);
980 }
981
982 static void hci_cc_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
983 {
984 __u8 status = *((__u8 *) skb->data);
985
986 BT_DBG("%s status 0x%2.2x", hdev->name, status);
987
988 hci_req_complete(hdev, HCI_OP_SET_EVENT_MASK, status);
989 }
990
991 static void hci_cc_write_inquiry_mode(struct hci_dev *hdev,
992 struct sk_buff *skb)
993 {
994 __u8 status = *((__u8 *) skb->data);
995
996 BT_DBG("%s status 0x%2.2x", hdev->name, status);
997
998 hci_req_complete(hdev, HCI_OP_WRITE_INQUIRY_MODE, status);
999 }
1000
1001 static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
1002 struct sk_buff *skb)
1003 {
1004 struct hci_rp_read_inq_rsp_tx_power *rp = (void *) skb->data;
1005
1006 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1007
1008 if (!rp->status)
1009 hdev->inq_tx_power = rp->tx_power;
1010
1011 hci_req_complete(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, rp->status);
1012 }
1013
1014 static void hci_cc_set_event_flt(struct hci_dev *hdev, struct sk_buff *skb)
1015 {
1016 __u8 status = *((__u8 *) skb->data);
1017
1018 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1019
1020 hci_req_complete(hdev, HCI_OP_SET_EVENT_FLT, status);
1021 }
1022
1023 static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
1024 {
1025 struct hci_rp_pin_code_reply *rp = (void *) skb->data;
1026 struct hci_cp_pin_code_reply *cp;
1027 struct hci_conn *conn;
1028
1029 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1030
1031 hci_dev_lock(hdev);
1032
1033 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1034 mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status);
1035
1036 if (rp->status)
1037 goto unlock;
1038
1039 cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY);
1040 if (!cp)
1041 goto unlock;
1042
1043 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1044 if (conn)
1045 conn->pin_length = cp->pin_len;
1046
1047 unlock:
1048 hci_dev_unlock(hdev);
1049 }
1050
1051 static void hci_cc_pin_code_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1052 {
1053 struct hci_rp_pin_code_neg_reply *rp = (void *) skb->data;
1054
1055 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1056
1057 hci_dev_lock(hdev);
1058
1059 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1060 mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr,
1061 rp->status);
1062
1063 hci_dev_unlock(hdev);
1064 }
1065
1066 static void hci_cc_le_read_buffer_size(struct hci_dev *hdev,
1067 struct sk_buff *skb)
1068 {
1069 struct hci_rp_le_read_buffer_size *rp = (void *) skb->data;
1070
1071 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1072
1073 if (rp->status)
1074 return;
1075
1076 hdev->le_mtu = __le16_to_cpu(rp->le_mtu);
1077 hdev->le_pkts = rp->le_max_pkt;
1078
1079 hdev->le_cnt = hdev->le_pkts;
1080
1081 BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts);
1082
1083 hci_req_complete(hdev, HCI_OP_LE_READ_BUFFER_SIZE, rp->status);
1084 }
1085
1086 static void hci_cc_le_read_adv_tx_power(struct hci_dev *hdev,
1087 struct sk_buff *skb)
1088 {
1089 struct hci_rp_le_read_adv_tx_power *rp = (void *) skb->data;
1090
1091 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1092
1093 if (!rp->status)
1094 hdev->adv_tx_power = rp->tx_power;
1095
1096 hci_req_complete(hdev, HCI_OP_LE_READ_ADV_TX_POWER, rp->status);
1097 }
1098
1099 static void hci_cc_le_set_event_mask(struct hci_dev *hdev, struct sk_buff *skb)
1100 {
1101 __u8 status = *((__u8 *) skb->data);
1102
1103 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1104
1105 hci_req_complete(hdev, HCI_OP_LE_SET_EVENT_MASK, status);
1106 }
1107
1108 static void hci_cc_user_confirm_reply(struct hci_dev *hdev, struct sk_buff *skb)
1109 {
1110 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1111
1112 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1113
1114 hci_dev_lock(hdev);
1115
1116 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1117 mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0,
1118 rp->status);
1119
1120 hci_dev_unlock(hdev);
1121 }
1122
1123 static void hci_cc_user_confirm_neg_reply(struct hci_dev *hdev,
1124 struct sk_buff *skb)
1125 {
1126 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1127
1128 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1129
1130 hci_dev_lock(hdev);
1131
1132 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1133 mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr,
1134 ACL_LINK, 0, rp->status);
1135
1136 hci_dev_unlock(hdev);
1137 }
1138
1139 static void hci_cc_user_passkey_reply(struct hci_dev *hdev, struct sk_buff *skb)
1140 {
1141 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1142
1143 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1144
1145 hci_dev_lock(hdev);
1146
1147 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1148 mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK,
1149 0, rp->status);
1150
1151 hci_dev_unlock(hdev);
1152 }
1153
1154 static void hci_cc_user_passkey_neg_reply(struct hci_dev *hdev,
1155 struct sk_buff *skb)
1156 {
1157 struct hci_rp_user_confirm_reply *rp = (void *) skb->data;
1158
1159 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1160
1161 hci_dev_lock(hdev);
1162
1163 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1164 mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr,
1165 ACL_LINK, 0, rp->status);
1166
1167 hci_dev_unlock(hdev);
1168 }
1169
1170 static void hci_cc_read_local_oob_data_reply(struct hci_dev *hdev,
1171 struct sk_buff *skb)
1172 {
1173 struct hci_rp_read_local_oob_data *rp = (void *) skb->data;
1174
1175 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1176
1177 hci_dev_lock(hdev);
1178 mgmt_read_local_oob_data_reply_complete(hdev, rp->hash,
1179 rp->randomizer, rp->status);
1180 hci_dev_unlock(hdev);
1181 }
1182
1183 static void hci_cc_le_set_scan_param(struct hci_dev *hdev, struct sk_buff *skb)
1184 {
1185 __u8 status = *((__u8 *) skb->data);
1186
1187 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1188
1189 hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_PARAM, status);
1190
1191 if (status) {
1192 hci_dev_lock(hdev);
1193 mgmt_start_discovery_failed(hdev, status);
1194 hci_dev_unlock(hdev);
1195 return;
1196 }
1197 }
1198
/* Command Complete handler for HCI_OP_LE_SET_SCAN_ENABLE.
 *
 * The status byte alone does not say whether scanning was being turned
 * on or off, so the originally sent command parameters are looked up to
 * decide how to drive the discovery state machine.
 */
static void hci_cc_le_set_scan_enable(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_cp_le_set_scan_enable *cp;
	__u8 status = *((__u8 *) skb->data);

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE);
	if (!cp)
		return;

	switch (cp->enable) {
	case LE_SCANNING_ENABLED:
		hci_req_complete(hdev, HCI_OP_LE_SET_SCAN_ENABLE, status);

		if (status) {
			/* Enabling failed: the discovery never started */
			hci_dev_lock(hdev);
			mgmt_start_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		set_bit(HCI_LE_SCAN, &hdev->dev_flags);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		hci_dev_unlock(hdev);
		break;

	case LE_SCANNING_DISABLED:
		if (status) {
			/* Disabling failed: the stop request failed */
			hci_dev_lock(hdev);
			mgmt_stop_discovery_failed(hdev, status);
			hci_dev_unlock(hdev);
			return;
		}

		clear_bit(HCI_LE_SCAN, &hdev->dev_flags);

		/* For interleaved discovery still in the finding phase,
		 * hand over to the next (BR/EDR) phase instead of
		 * stopping; otherwise discovery is done.
		 */
		if (hdev->discovery.type == DISCOV_TYPE_INTERLEAVED &&
		    hdev->discovery.state == DISCOVERY_FINDING) {
			mgmt_interleaved_discovery(hdev);
		} else {
			hci_dev_lock(hdev);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
			hci_dev_unlock(hdev);
		}

		break;

	default:
		BT_ERR("Used reserved LE_Scan_Enable param %d", cp->enable);
		break;
	}
}
1255
1256 static void hci_cc_le_ltk_reply(struct hci_dev *hdev, struct sk_buff *skb)
1257 {
1258 struct hci_rp_le_ltk_reply *rp = (void *) skb->data;
1259
1260 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1261
1262 if (rp->status)
1263 return;
1264
1265 hci_req_complete(hdev, HCI_OP_LE_LTK_REPLY, rp->status);
1266 }
1267
1268 static void hci_cc_le_ltk_neg_reply(struct hci_dev *hdev, struct sk_buff *skb)
1269 {
1270 struct hci_rp_le_ltk_neg_reply *rp = (void *) skb->data;
1271
1272 BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
1273
1274 if (rp->status)
1275 return;
1276
1277 hci_req_complete(hdev, HCI_OP_LE_LTK_NEG_REPLY, rp->status);
1278 }
1279
1280 static void hci_cc_write_le_host_supported(struct hci_dev *hdev,
1281 struct sk_buff *skb)
1282 {
1283 struct hci_cp_write_le_host_supported *sent;
1284 __u8 status = *((__u8 *) skb->data);
1285
1286 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1287
1288 sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED);
1289 if (!sent)
1290 return;
1291
1292 if (!status) {
1293 if (sent->le)
1294 hdev->host_features[0] |= LMP_HOST_LE;
1295 else
1296 hdev->host_features[0] &= ~LMP_HOST_LE;
1297
1298 if (sent->simul)
1299 hdev->host_features[0] |= LMP_HOST_LE_BREDR;
1300 else
1301 hdev->host_features[0] &= ~LMP_HOST_LE_BREDR;
1302 }
1303
1304 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
1305 !test_bit(HCI_INIT, &hdev->flags))
1306 mgmt_le_enable_complete(hdev, sent->le, status);
1307
1308 hci_req_complete(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, status);
1309 }
1310
1311 static void hci_cc_write_remote_amp_assoc(struct hci_dev *hdev,
1312 struct sk_buff *skb)
1313 {
1314 struct hci_rp_write_remote_amp_assoc *rp = (void *) skb->data;
1315
1316 BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x",
1317 hdev->name, rp->status, rp->phy_handle);
1318
1319 if (rp->status)
1320 return;
1321
1322 amp_write_rem_assoc_continue(hdev, rp->phy_handle);
1323 }
1324
1325 static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status)
1326 {
1327 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1328
1329 if (status) {
1330 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1331 hci_conn_check_pending(hdev);
1332 hci_dev_lock(hdev);
1333 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1334 mgmt_start_discovery_failed(hdev, status);
1335 hci_dev_unlock(hdev);
1336 return;
1337 }
1338
1339 set_bit(HCI_INQUIRY, &hdev->flags);
1340
1341 hci_dev_lock(hdev);
1342 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
1343 hci_dev_unlock(hdev);
1344 }
1345
/* Command Status handler for HCI_OP_CREATE_CONN.
 *
 * On failure the pending connection is torn down, unless the error is
 * 0x0c and retries remain, in which case the connection is parked in
 * BT_CONNECT2 for a later attempt.  On success an hci_conn is created
 * if none is tracking this bdaddr yet.
 */
static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status)
{
	struct hci_cp_create_conn *cp;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, status);

	/* Recover the sent parameters to learn which bdaddr this
	 * status refers to.
	 */
	cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN);
	if (!cp)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);

	BT_DBG("%s bdaddr %pMR hcon %p", hdev->name, &cp->bdaddr, conn);

	if (status) {
		if (conn && conn->state == BT_CONNECT) {
			/* Error 0x0c is retried up to two attempts;
			 * anything else ends the connection.
			 */
			if (status != 0x0c || conn->attempt > 2) {
				conn->state = BT_CLOSED;
				hci_proto_connect_cfm(conn, status);
				hci_conn_del(conn);
			} else
				conn->state = BT_CONNECT2;
		}
	} else {
		if (!conn) {
			/* Command was issued without a tracking hci_conn;
			 * create one as the outgoing master side.
			 */
			conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr);
			if (conn) {
				conn->out = true;
				conn->link_mode |= HCI_LM_MASTER;
			} else
				BT_ERR("No memory for new connection");
		}
	}

	hci_dev_unlock(hdev);
}
1385
1386 static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status)
1387 {
1388 struct hci_cp_add_sco *cp;
1389 struct hci_conn *acl, *sco;
1390 __u16 handle;
1391
1392 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1393
1394 if (!status)
1395 return;
1396
1397 cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO);
1398 if (!cp)
1399 return;
1400
1401 handle = __le16_to_cpu(cp->handle);
1402
1403 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1404
1405 hci_dev_lock(hdev);
1406
1407 acl = hci_conn_hash_lookup_handle(hdev, handle);
1408 if (acl) {
1409 sco = acl->link;
1410 if (sco) {
1411 sco->state = BT_CLOSED;
1412
1413 hci_proto_connect_cfm(sco, status);
1414 hci_conn_del(sco);
1415 }
1416 }
1417
1418 hci_dev_unlock(hdev);
1419 }
1420
1421 static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status)
1422 {
1423 struct hci_cp_auth_requested *cp;
1424 struct hci_conn *conn;
1425
1426 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1427
1428 if (!status)
1429 return;
1430
1431 cp = hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED);
1432 if (!cp)
1433 return;
1434
1435 hci_dev_lock(hdev);
1436
1437 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1438 if (conn) {
1439 if (conn->state == BT_CONFIG) {
1440 hci_proto_connect_cfm(conn, status);
1441 hci_conn_put(conn);
1442 }
1443 }
1444
1445 hci_dev_unlock(hdev);
1446 }
1447
1448 static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status)
1449 {
1450 struct hci_cp_set_conn_encrypt *cp;
1451 struct hci_conn *conn;
1452
1453 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1454
1455 if (!status)
1456 return;
1457
1458 cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT);
1459 if (!cp)
1460 return;
1461
1462 hci_dev_lock(hdev);
1463
1464 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1465 if (conn) {
1466 if (conn->state == BT_CONFIG) {
1467 hci_proto_connect_cfm(conn, status);
1468 hci_conn_put(conn);
1469 }
1470 }
1471
1472 hci_dev_unlock(hdev);
1473 }
1474
1475 static int hci_outgoing_auth_needed(struct hci_dev *hdev,
1476 struct hci_conn *conn)
1477 {
1478 if (conn->state != BT_CONFIG || !conn->out)
1479 return 0;
1480
1481 if (conn->pending_sec_level == BT_SECURITY_SDP)
1482 return 0;
1483
1484 /* Only request authentication for SSP connections or non-SSP
1485 * devices with sec_level HIGH or if MITM protection is requested */
1486 if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) &&
1487 conn->pending_sec_level != BT_SECURITY_HIGH)
1488 return 0;
1489
1490 return 1;
1491 }
1492
1493 static int hci_resolve_name(struct hci_dev *hdev,
1494 struct inquiry_entry *e)
1495 {
1496 struct hci_cp_remote_name_req cp;
1497
1498 memset(&cp, 0, sizeof(cp));
1499
1500 bacpy(&cp.bdaddr, &e->data.bdaddr);
1501 cp.pscan_rep_mode = e->data.pscan_rep_mode;
1502 cp.pscan_mode = e->data.pscan_mode;
1503 cp.clock_offset = e->data.clock_offset;
1504
1505 return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
1506 }
1507
1508 static bool hci_resolve_next_name(struct hci_dev *hdev)
1509 {
1510 struct discovery_state *discov = &hdev->discovery;
1511 struct inquiry_entry *e;
1512
1513 if (list_empty(&discov->resolve))
1514 return false;
1515
1516 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1517 if (!e)
1518 return false;
1519
1520 if (hci_resolve_name(hdev, e) == 0) {
1521 e->name_state = NAME_PENDING;
1522 return true;
1523 }
1524
1525 return false;
1526 }
1527
/* Deliver a resolved (or failed) remote name and advance the name
 * resolution phase of discovery.
 *
 * @conn may be NULL.  When present and not yet announced, the
 * connection is reported to mgmt together with the name.  @name is
 * NULL when resolution failed; the entry is then marked
 * NAME_NOT_KNOWN.
 */
static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn,
				   bdaddr_t *bdaddr, u8 *name, u8 name_len)
{
	struct discovery_state *discov = &hdev->discovery;
	struct inquiry_entry *e;

	if (conn && !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, bdaddr, ACL_LINK, 0x00, 0, name,
				      name_len, conn->dev_class);

	if (discov->state == DISCOVERY_STOPPED)
		return;

	/* A stop request is pending: finish discovery right away */
	if (discov->state == DISCOVERY_STOPPING)
		goto discov_complete;

	if (discov->state != DISCOVERY_RESOLVING)
		return;

	e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING);
	/* If the device was not found in a list of found devices names of which
	 * are pending. there is no need to continue resolving a next name as it
	 * will be done upon receiving another Remote Name Request Complete
	 * Event */
	if (!e)
		return;

	list_del(&e->list);
	if (name) {
		e->name_state = NAME_KNOWN;
		mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00,
				 e->data.rssi, name, name_len);
	} else {
		e->name_state = NAME_NOT_KNOWN;
	}

	/* More names pending: keep the resolving phase going */
	if (hci_resolve_next_name(hdev))
		return;

discov_complete:
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
}
1570
1571 static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status)
1572 {
1573 struct hci_cp_remote_name_req *cp;
1574 struct hci_conn *conn;
1575
1576 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1577
1578 /* If successful wait for the name req complete event before
1579 * checking for the need to do authentication */
1580 if (!status)
1581 return;
1582
1583 cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ);
1584 if (!cp)
1585 return;
1586
1587 hci_dev_lock(hdev);
1588
1589 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr);
1590
1591 if (test_bit(HCI_MGMT, &hdev->dev_flags))
1592 hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0);
1593
1594 if (!conn)
1595 goto unlock;
1596
1597 if (!hci_outgoing_auth_needed(hdev, conn))
1598 goto unlock;
1599
1600 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1601 struct hci_cp_auth_requested cp;
1602 cp.handle = __cpu_to_le16(conn->handle);
1603 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
1604 }
1605
1606 unlock:
1607 hci_dev_unlock(hdev);
1608 }
1609
1610 static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status)
1611 {
1612 struct hci_cp_read_remote_features *cp;
1613 struct hci_conn *conn;
1614
1615 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1616
1617 if (!status)
1618 return;
1619
1620 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES);
1621 if (!cp)
1622 return;
1623
1624 hci_dev_lock(hdev);
1625
1626 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1627 if (conn) {
1628 if (conn->state == BT_CONFIG) {
1629 hci_proto_connect_cfm(conn, status);
1630 hci_conn_put(conn);
1631 }
1632 }
1633
1634 hci_dev_unlock(hdev);
1635 }
1636
1637 static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status)
1638 {
1639 struct hci_cp_read_remote_ext_features *cp;
1640 struct hci_conn *conn;
1641
1642 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1643
1644 if (!status)
1645 return;
1646
1647 cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES);
1648 if (!cp)
1649 return;
1650
1651 hci_dev_lock(hdev);
1652
1653 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1654 if (conn) {
1655 if (conn->state == BT_CONFIG) {
1656 hci_proto_connect_cfm(conn, status);
1657 hci_conn_put(conn);
1658 }
1659 }
1660
1661 hci_dev_unlock(hdev);
1662 }
1663
1664 static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status)
1665 {
1666 struct hci_cp_setup_sync_conn *cp;
1667 struct hci_conn *acl, *sco;
1668 __u16 handle;
1669
1670 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1671
1672 if (!status)
1673 return;
1674
1675 cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN);
1676 if (!cp)
1677 return;
1678
1679 handle = __le16_to_cpu(cp->handle);
1680
1681 BT_DBG("%s handle 0x%4.4x", hdev->name, handle);
1682
1683 hci_dev_lock(hdev);
1684
1685 acl = hci_conn_hash_lookup_handle(hdev, handle);
1686 if (acl) {
1687 sco = acl->link;
1688 if (sco) {
1689 sco->state = BT_CLOSED;
1690
1691 hci_proto_connect_cfm(sco, status);
1692 hci_conn_del(sco);
1693 }
1694 }
1695
1696 hci_dev_unlock(hdev);
1697 }
1698
1699 static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status)
1700 {
1701 struct hci_cp_sniff_mode *cp;
1702 struct hci_conn *conn;
1703
1704 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1705
1706 if (!status)
1707 return;
1708
1709 cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE);
1710 if (!cp)
1711 return;
1712
1713 hci_dev_lock(hdev);
1714
1715 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1716 if (conn) {
1717 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1718
1719 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1720 hci_sco_setup(conn, status);
1721 }
1722
1723 hci_dev_unlock(hdev);
1724 }
1725
1726 static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status)
1727 {
1728 struct hci_cp_exit_sniff_mode *cp;
1729 struct hci_conn *conn;
1730
1731 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1732
1733 if (!status)
1734 return;
1735
1736 cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE);
1737 if (!cp)
1738 return;
1739
1740 hci_dev_lock(hdev);
1741
1742 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1743 if (conn) {
1744 clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags);
1745
1746 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
1747 hci_sco_setup(conn, status);
1748 }
1749
1750 hci_dev_unlock(hdev);
1751 }
1752
1753 static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
1754 {
1755 struct hci_cp_disconnect *cp;
1756 struct hci_conn *conn;
1757
1758 if (!status)
1759 return;
1760
1761 cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT);
1762 if (!cp)
1763 return;
1764
1765 hci_dev_lock(hdev);
1766
1767 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
1768 if (conn)
1769 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
1770 conn->dst_type, status);
1771
1772 hci_dev_unlock(hdev);
1773 }
1774
1775 static void hci_cs_le_create_conn(struct hci_dev *hdev, __u8 status)
1776 {
1777 struct hci_conn *conn;
1778
1779 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1780
1781 if (status) {
1782 hci_dev_lock(hdev);
1783
1784 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1785 if (!conn) {
1786 hci_dev_unlock(hdev);
1787 return;
1788 }
1789
1790 BT_DBG("%s bdaddr %pMR conn %p", hdev->name, &conn->dst, conn);
1791
1792 conn->state = BT_CLOSED;
1793 mgmt_connect_failed(hdev, &conn->dst, conn->type,
1794 conn->dst_type, status);
1795 hci_proto_connect_cfm(conn, status);
1796 hci_conn_del(conn);
1797
1798 hci_dev_unlock(hdev);
1799 }
1800 }
1801
/* Command Status handler for HCI_OP_LE_START_ENC.
 * No state is tracked here; the status is only logged.
 */
static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}
1806
1807 static void hci_cs_create_phylink(struct hci_dev *hdev, u8 status)
1808 {
1809 struct hci_cp_create_phy_link *cp;
1810
1811 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1812
1813 if (status)
1814 return;
1815
1816 cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK);
1817 if (!cp)
1818 return;
1819
1820 amp_write_remote_assoc(hdev, cp->phy_handle);
1821 }
1822
1823 static void hci_cs_accept_phylink(struct hci_dev *hdev, u8 status)
1824 {
1825 struct hci_cp_accept_phy_link *cp;
1826
1827 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1828
1829 if (status)
1830 return;
1831
1832 cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK);
1833 if (!cp)
1834 return;
1835
1836 amp_write_remote_assoc(hdev, cp->phy_handle);
1837 }
1838
1839 static void hci_inquiry_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
1840 {
1841 __u8 status = *((__u8 *) skb->data);
1842 struct discovery_state *discov = &hdev->discovery;
1843 struct inquiry_entry *e;
1844
1845 BT_DBG("%s status 0x%2.2x", hdev->name, status);
1846
1847 hci_req_complete(hdev, HCI_OP_INQUIRY, status);
1848
1849 hci_conn_check_pending(hdev);
1850
1851 if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags))
1852 return;
1853
1854 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1855 return;
1856
1857 hci_dev_lock(hdev);
1858
1859 if (discov->state != DISCOVERY_FINDING)
1860 goto unlock;
1861
1862 if (list_empty(&discov->resolve)) {
1863 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1864 goto unlock;
1865 }
1866
1867 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED);
1868 if (e && hci_resolve_name(hdev, e) == 0) {
1869 e->name_state = NAME_PENDING;
1870 hci_discovery_set_state(hdev, DISCOVERY_RESOLVING);
1871 } else {
1872 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1873 }
1874
1875 unlock:
1876 hci_dev_unlock(hdev);
1877 }
1878
1879 static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
1880 {
1881 struct inquiry_data data;
1882 struct inquiry_info *info = (void *) (skb->data + 1);
1883 int num_rsp = *((__u8 *) skb->data);
1884
1885 BT_DBG("%s num_rsp %d", hdev->name, num_rsp);
1886
1887 if (!num_rsp)
1888 return;
1889
1890 if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
1891 return;
1892
1893 hci_dev_lock(hdev);
1894
1895 for (; num_rsp; num_rsp--, info++) {
1896 bool name_known, ssp;
1897
1898 bacpy(&data.bdaddr, &info->bdaddr);
1899 data.pscan_rep_mode = info->pscan_rep_mode;
1900 data.pscan_period_mode = info->pscan_period_mode;
1901 data.pscan_mode = info->pscan_mode;
1902 memcpy(data.dev_class, info->dev_class, 3);
1903 data.clock_offset = info->clock_offset;
1904 data.rssi = 0x00;
1905 data.ssp_mode = 0x00;
1906
1907 name_known = hci_inquiry_cache_update(hdev, &data, false, &ssp);
1908 mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
1909 info->dev_class, 0, !name_known, ssp, NULL,
1910 0);
1911 }
1912
1913 hci_dev_unlock(hdev);
1914 }
1915
/* Connection Complete event handler.
 *
 * Finalizes an outgoing or incoming connection: on success the handle
 * is recorded, timeouts and link-mode bits are set up, and follow-up
 * commands (remote features, packet type) are issued; on failure the
 * connection object is reported failed and destroyed.
 */
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		if (ev->link_type != SCO_LINK)
			goto unlock;

		/* A SCO completion may match a connection tracked as
		 * eSCO; fall back to that lookup and retag the link.
		 */
		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	if (!ev->status) {
		conn->handle = __le16_to_cpu(ev->handle);

		if (conn->type == ACL_LINK) {
			conn->state = BT_CONFIG;
			hci_conn_hold(conn);

			/* Incoming non-SSP links without a stored key get
			 * the longer pairing timeout.
			 */
			if (!conn->out && !hci_conn_ssp_enabled(conn) &&
			    !hci_find_link_key(hdev, &ev->bdaddr))
				conn->disc_timeout = HCI_PAIRING_TIMEOUT;
			else
				conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		} else
			conn->state = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);

		if (test_bit(HCI_AUTH, &hdev->flags))
			conn->link_mode |= HCI_LM_AUTH;

		if (test_bit(HCI_ENCRYPT, &hdev->flags))
			conn->link_mode |= HCI_LM_ENCRYPT;

		/* Get remote features */
		if (conn->type == ACL_LINK) {
			struct hci_cp_read_remote_features cp;
			cp.handle = ev->handle;
			hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES,
				     sizeof(cp), &cp);
		}

		/* Set packet type for incoming connection */
		if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) {
			struct hci_cp_change_conn_ptype cp;
			cp.handle = ev->handle;
			cp.pkt_type = cpu_to_le16(conn->pkt_type);
			hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp),
				     &cp);
		}
	} else {
		conn->state = BT_CLOSED;
		if (conn->type == ACL_LINK)
			mgmt_connect_failed(hdev, &ev->bdaddr, conn->type,
					    conn->dst_type, ev->status);
	}

	/* Resume any SCO setup that was waiting on this ACL link */
	if (conn->type == ACL_LINK)
		hci_sco_setup(conn, ev->status);

	if (ev->status) {
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_del(conn);
	} else if (ev->link_type != ACL_LINK)
		hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);

	hci_conn_check_pending(hdev);
}
1998
/* Connection Request event handler.
 *
 * Accepts the incoming request when the local link policy (possibly
 * extended by the protocol layers) allows it and the peer is not
 * blacklisted; otherwise rejects it.
 */
static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_conn_request *ev = (void *) skb->data;
	int mask = hdev->link_mode;

	BT_DBG("%s bdaddr %pMR type 0x%x", hdev->name, &ev->bdaddr,
	       ev->link_type);

	/* Let the protocol layers extend the accept mask */
	mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type);

	if ((mask & HCI_LM_ACCEPT) &&
	    !hci_blacklist_lookup(hdev, &ev->bdaddr)) {
		/* Connection accepted */
		struct inquiry_entry *ie;
		struct hci_conn *conn;

		hci_dev_lock(hdev);

		/* Refresh the cached device class if we saw this peer
		 * during inquiry.
		 */
		ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
		if (ie)
			memcpy(ie->data.dev_class, ev->dev_class, 3);

		conn = hci_conn_hash_lookup_ba(hdev, ev->link_type,
					       &ev->bdaddr);
		if (!conn) {
			conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr);
			if (!conn) {
				BT_ERR("No memory for new connection");
				hci_dev_unlock(hdev);
				return;
			}
		}

		memcpy(conn->dev_class, ev->dev_class, 3);
		conn->state = BT_CONNECT;

		hci_dev_unlock(hdev);

		if (ev->link_type == ACL_LINK || !lmp_esco_capable(hdev)) {
			struct hci_cp_accept_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);

			if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER))
				cp.role = 0x00; /* Become master */
			else
				cp.role = 0x01; /* Remain slave */

			hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp),
				     &cp);
		} else {
			/* eSCO-capable controller: accept with explicit
			 * synchronous connection parameters.
			 */
			struct hci_cp_accept_sync_conn_req cp;

			bacpy(&cp.bdaddr, &ev->bdaddr);
			cp.pkt_type = cpu_to_le16(conn->pkt_type);

			cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
			cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
			cp.max_latency = __constant_cpu_to_le16(0xffff);
			cp.content_format = cpu_to_le16(hdev->voice_setting);
			cp.retrans_effort = 0xff;

			hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ,
				     sizeof(cp), &cp);
		}
	} else {
		/* Connection rejected */
		struct hci_cp_reject_conn_req cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_REJ_BAD_ADDR;
		hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp);
	}
}
2073
2074 static u8 hci_to_mgmt_reason(u8 err)
2075 {
2076 switch (err) {
2077 case HCI_ERROR_CONNECTION_TIMEOUT:
2078 return MGMT_DEV_DISCONN_TIMEOUT;
2079 case HCI_ERROR_REMOTE_USER_TERM:
2080 case HCI_ERROR_REMOTE_LOW_RESOURCES:
2081 case HCI_ERROR_REMOTE_POWER_OFF:
2082 return MGMT_DEV_DISCONN_REMOTE;
2083 case HCI_ERROR_LOCAL_HOST_TERM:
2084 return MGMT_DEV_DISCONN_LOCAL_HOST;
2085 default:
2086 return MGMT_DEV_DISCONN_UNKNOWN;
2087 }
2088 }
2089
2090 static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2091 {
2092 struct hci_ev_disconn_complete *ev = (void *) skb->data;
2093 struct hci_conn *conn;
2094
2095 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2096
2097 hci_dev_lock(hdev);
2098
2099 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2100 if (!conn)
2101 goto unlock;
2102
2103 if (ev->status == 0)
2104 conn->state = BT_CLOSED;
2105
2106 if (test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags) &&
2107 (conn->type == ACL_LINK || conn->type == LE_LINK)) {
2108 if (ev->status) {
2109 mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
2110 conn->dst_type, ev->status);
2111 } else {
2112 u8 reason = hci_to_mgmt_reason(ev->reason);
2113
2114 mgmt_device_disconnected(hdev, &conn->dst, conn->type,
2115 conn->dst_type, reason);
2116 }
2117 }
2118
2119 if (ev->status == 0) {
2120 if (conn->type == ACL_LINK && conn->flush_key)
2121 hci_remove_link_key(hdev, &conn->dst);
2122 hci_proto_disconn_cfm(conn, ev->reason);
2123 hci_conn_del(conn);
2124 }
2125
2126 unlock:
2127 hci_dev_unlock(hdev);
2128 }
2129
/* Authentication Complete event handler.
 *
 * Updates link mode and security level on success, notifies mgmt on
 * failure, and then resumes whatever the authentication was blocking:
 * connection setup (BT_CONFIG) or a pending encryption request.
 */
static void hci_auth_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_auth_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status) {
		if (!hci_conn_ssp_enabled(conn) &&
		    test_bit(HCI_CONN_REAUTH_PEND, &conn->flags)) {
			BT_INFO("re-auth of legacy device is not possible.");
		} else {
			conn->link_mode |= HCI_LM_AUTH;
			conn->sec_level = conn->pending_sec_level;
		}
	} else {
		mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
				 ev->status);
	}

	clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
	clear_bit(HCI_CONN_REAUTH_PEND, &conn->flags);

	if (conn->state == BT_CONFIG) {
		if (!ev->status && hci_conn_ssp_enabled(conn)) {
			/* SSP links go on to request encryption before
			 * setup is considered complete.
			 */
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			conn->state = BT_CONNECTED;
			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		}
	} else {
		hci_auth_cfm(conn, ev->status);

		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		if (!ev->status) {
			struct hci_cp_set_conn_encrypt cp;
			cp.handle = ev->handle;
			cp.encrypt = 0x01;
			hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
				     &cp);
		} else {
			/* Auth failed, so pending encryption cannot proceed */
			clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
			hci_encrypt_cfm(conn, ev->status, 0x00);
		}
	}

unlock:
	hci_dev_unlock(hdev);
}
2195
2196 static void hci_remote_name_evt(struct hci_dev *hdev, struct sk_buff *skb)
2197 {
2198 struct hci_ev_remote_name *ev = (void *) skb->data;
2199 struct hci_conn *conn;
2200
2201 BT_DBG("%s", hdev->name);
2202
2203 hci_conn_check_pending(hdev);
2204
2205 hci_dev_lock(hdev);
2206
2207 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2208
2209 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
2210 goto check_auth;
2211
2212 if (ev->status == 0)
2213 hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name,
2214 strnlen(ev->name, HCI_MAX_NAME_LENGTH));
2215 else
2216 hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0);
2217
2218 check_auth:
2219 if (!conn)
2220 goto unlock;
2221
2222 if (!hci_outgoing_auth_needed(hdev, conn))
2223 goto unlock;
2224
2225 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
2226 struct hci_cp_auth_requested cp;
2227 cp.handle = __cpu_to_le16(conn->handle);
2228 hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp);
2229 }
2230
2231 unlock:
2232 hci_dev_unlock(hdev);
2233 }
2234
/* Encryption Change event handler.
 *
 * Mirrors the new encryption state into the link mode bits.  A failed
 * change on an established link is treated as fatal: the link is
 * disconnected with an authentication failure reason.
 */
static void hci_encrypt_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_encrypt_change *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn) {
		if (!ev->status) {
			if (ev->encrypt) {
				/* Encryption implies authentication */
				conn->link_mode |= HCI_LM_AUTH;
				conn->link_mode |= HCI_LM_ENCRYPT;
				conn->sec_level = conn->pending_sec_level;
			} else
				conn->link_mode &= ~HCI_LM_ENCRYPT;
		}

		clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		if (ev->status && conn->state == BT_CONNECTED) {
			hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
			hci_conn_put(conn);
			goto unlock;
		}

		if (conn->state == BT_CONFIG) {
			if (!ev->status)
				conn->state = BT_CONNECTED;

			hci_proto_connect_cfm(conn, ev->status);
			hci_conn_put(conn);
		} else
			hci_encrypt_cfm(conn, ev->status, ev->encrypt);
	}

unlock:
	hci_dev_unlock(hdev);
}
2277
2278 static void hci_change_link_key_complete_evt(struct hci_dev *hdev,
2279 struct sk_buff *skb)
2280 {
2281 struct hci_ev_change_link_key_complete *ev = (void *) skb->data;
2282 struct hci_conn *conn;
2283
2284 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2285
2286 hci_dev_lock(hdev);
2287
2288 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2289 if (conn) {
2290 if (!ev->status)
2291 conn->link_mode |= HCI_LM_SECURE;
2292
2293 clear_bit(HCI_CONN_AUTH_PEND, &conn->flags);
2294
2295 hci_key_change_cfm(conn, ev->status);
2296 }
2297
2298 hci_dev_unlock(hdev);
2299 }
2300
/* Remote Features event handler.
 *
 * Caches the peer's feature bits and continues connection setup: when
 * both sides are SSP capable the extended feature page is fetched;
 * otherwise the remote name is requested or the connection announced
 * to mgmt, and finally setup completes unless authentication is still
 * needed.
 */
static void hci_remote_features_evt(struct hci_dev *hdev,
				    struct sk_buff *skb)
{
	struct hci_ev_remote_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		memcpy(conn->features, ev->features, 8);

	if (conn->state != BT_CONFIG)
		goto unlock;

	if (!ev->status && lmp_ssp_capable(hdev) && lmp_ssp_capable(conn)) {
		/* Request extended feature page 0x01 as well */
		struct hci_cp_read_remote_ext_features cp;
		cp.handle = ev->handle;
		cp.page = 0x01;
		hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES,
			     sizeof(cp), &cp);
		goto unlock;
	}

	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
2350
/* Remote Version Information event handler.
 * Currently no state is tracked from this event; it is only logged.
 */
static void hci_remote_version_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s", hdev->name);
}
2355
2356 static void hci_qos_setup_complete_evt(struct hci_dev *hdev,
2357 struct sk_buff *skb)
2358 {
2359 BT_DBG("%s", hdev->name);
2360 }
2361
2362 static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
2363 {
2364 struct hci_ev_cmd_complete *ev = (void *) skb->data;
2365 __u16 opcode;
2366
2367 skb_pull(skb, sizeof(*ev));
2368
2369 opcode = __le16_to_cpu(ev->opcode);
2370
2371 switch (opcode) {
2372 case HCI_OP_INQUIRY_CANCEL:
2373 hci_cc_inquiry_cancel(hdev, skb);
2374 break;
2375
2376 case HCI_OP_PERIODIC_INQ:
2377 hci_cc_periodic_inq(hdev, skb);
2378 break;
2379
2380 case HCI_OP_EXIT_PERIODIC_INQ:
2381 hci_cc_exit_periodic_inq(hdev, skb);
2382 break;
2383
2384 case HCI_OP_REMOTE_NAME_REQ_CANCEL:
2385 hci_cc_remote_name_req_cancel(hdev, skb);
2386 break;
2387
2388 case HCI_OP_ROLE_DISCOVERY:
2389 hci_cc_role_discovery(hdev, skb);
2390 break;
2391
2392 case HCI_OP_READ_LINK_POLICY:
2393 hci_cc_read_link_policy(hdev, skb);
2394 break;
2395
2396 case HCI_OP_WRITE_LINK_POLICY:
2397 hci_cc_write_link_policy(hdev, skb);
2398 break;
2399
2400 case HCI_OP_READ_DEF_LINK_POLICY:
2401 hci_cc_read_def_link_policy(hdev, skb);
2402 break;
2403
2404 case HCI_OP_WRITE_DEF_LINK_POLICY:
2405 hci_cc_write_def_link_policy(hdev, skb);
2406 break;
2407
2408 case HCI_OP_RESET:
2409 hci_cc_reset(hdev, skb);
2410 break;
2411
2412 case HCI_OP_WRITE_LOCAL_NAME:
2413 hci_cc_write_local_name(hdev, skb);
2414 break;
2415
2416 case HCI_OP_READ_LOCAL_NAME:
2417 hci_cc_read_local_name(hdev, skb);
2418 break;
2419
2420 case HCI_OP_WRITE_AUTH_ENABLE:
2421 hci_cc_write_auth_enable(hdev, skb);
2422 break;
2423
2424 case HCI_OP_WRITE_ENCRYPT_MODE:
2425 hci_cc_write_encrypt_mode(hdev, skb);
2426 break;
2427
2428 case HCI_OP_WRITE_SCAN_ENABLE:
2429 hci_cc_write_scan_enable(hdev, skb);
2430 break;
2431
2432 case HCI_OP_READ_CLASS_OF_DEV:
2433 hci_cc_read_class_of_dev(hdev, skb);
2434 break;
2435
2436 case HCI_OP_WRITE_CLASS_OF_DEV:
2437 hci_cc_write_class_of_dev(hdev, skb);
2438 break;
2439
2440 case HCI_OP_READ_VOICE_SETTING:
2441 hci_cc_read_voice_setting(hdev, skb);
2442 break;
2443
2444 case HCI_OP_WRITE_VOICE_SETTING:
2445 hci_cc_write_voice_setting(hdev, skb);
2446 break;
2447
2448 case HCI_OP_HOST_BUFFER_SIZE:
2449 hci_cc_host_buffer_size(hdev, skb);
2450 break;
2451
2452 case HCI_OP_WRITE_SSP_MODE:
2453 hci_cc_write_ssp_mode(hdev, skb);
2454 break;
2455
2456 case HCI_OP_READ_LOCAL_VERSION:
2457 hci_cc_read_local_version(hdev, skb);
2458 break;
2459
2460 case HCI_OP_READ_LOCAL_COMMANDS:
2461 hci_cc_read_local_commands(hdev, skb);
2462 break;
2463
2464 case HCI_OP_READ_LOCAL_FEATURES:
2465 hci_cc_read_local_features(hdev, skb);
2466 break;
2467
2468 case HCI_OP_READ_LOCAL_EXT_FEATURES:
2469 hci_cc_read_local_ext_features(hdev, skb);
2470 break;
2471
2472 case HCI_OP_READ_BUFFER_SIZE:
2473 hci_cc_read_buffer_size(hdev, skb);
2474 break;
2475
2476 case HCI_OP_READ_BD_ADDR:
2477 hci_cc_read_bd_addr(hdev, skb);
2478 break;
2479
2480 case HCI_OP_READ_DATA_BLOCK_SIZE:
2481 hci_cc_read_data_block_size(hdev, skb);
2482 break;
2483
2484 case HCI_OP_WRITE_CA_TIMEOUT:
2485 hci_cc_write_ca_timeout(hdev, skb);
2486 break;
2487
2488 case HCI_OP_READ_FLOW_CONTROL_MODE:
2489 hci_cc_read_flow_control_mode(hdev, skb);
2490 break;
2491
2492 case HCI_OP_READ_LOCAL_AMP_INFO:
2493 hci_cc_read_local_amp_info(hdev, skb);
2494 break;
2495
2496 case HCI_OP_READ_LOCAL_AMP_ASSOC:
2497 hci_cc_read_local_amp_assoc(hdev, skb);
2498 break;
2499
2500 case HCI_OP_DELETE_STORED_LINK_KEY:
2501 hci_cc_delete_stored_link_key(hdev, skb);
2502 break;
2503
2504 case HCI_OP_SET_EVENT_MASK:
2505 hci_cc_set_event_mask(hdev, skb);
2506 break;
2507
2508 case HCI_OP_WRITE_INQUIRY_MODE:
2509 hci_cc_write_inquiry_mode(hdev, skb);
2510 break;
2511
2512 case HCI_OP_READ_INQ_RSP_TX_POWER:
2513 hci_cc_read_inq_rsp_tx_power(hdev, skb);
2514 break;
2515
2516 case HCI_OP_SET_EVENT_FLT:
2517 hci_cc_set_event_flt(hdev, skb);
2518 break;
2519
2520 case HCI_OP_PIN_CODE_REPLY:
2521 hci_cc_pin_code_reply(hdev, skb);
2522 break;
2523
2524 case HCI_OP_PIN_CODE_NEG_REPLY:
2525 hci_cc_pin_code_neg_reply(hdev, skb);
2526 break;
2527
2528 case HCI_OP_READ_LOCAL_OOB_DATA:
2529 hci_cc_read_local_oob_data_reply(hdev, skb);
2530 break;
2531
2532 case HCI_OP_LE_READ_BUFFER_SIZE:
2533 hci_cc_le_read_buffer_size(hdev, skb);
2534 break;
2535
2536 case HCI_OP_LE_READ_ADV_TX_POWER:
2537 hci_cc_le_read_adv_tx_power(hdev, skb);
2538 break;
2539
2540 case HCI_OP_LE_SET_EVENT_MASK:
2541 hci_cc_le_set_event_mask(hdev, skb);
2542 break;
2543
2544 case HCI_OP_USER_CONFIRM_REPLY:
2545 hci_cc_user_confirm_reply(hdev, skb);
2546 break;
2547
2548 case HCI_OP_USER_CONFIRM_NEG_REPLY:
2549 hci_cc_user_confirm_neg_reply(hdev, skb);
2550 break;
2551
2552 case HCI_OP_USER_PASSKEY_REPLY:
2553 hci_cc_user_passkey_reply(hdev, skb);
2554 break;
2555
2556 case HCI_OP_USER_PASSKEY_NEG_REPLY:
2557 hci_cc_user_passkey_neg_reply(hdev, skb);
2558 break;
2559
2560 case HCI_OP_LE_SET_SCAN_PARAM:
2561 hci_cc_le_set_scan_param(hdev, skb);
2562 break;
2563
2564 case HCI_OP_LE_SET_SCAN_ENABLE:
2565 hci_cc_le_set_scan_enable(hdev, skb);
2566 break;
2567
2568 case HCI_OP_LE_LTK_REPLY:
2569 hci_cc_le_ltk_reply(hdev, skb);
2570 break;
2571
2572 case HCI_OP_LE_LTK_NEG_REPLY:
2573 hci_cc_le_ltk_neg_reply(hdev, skb);
2574 break;
2575
2576 case HCI_OP_WRITE_LE_HOST_SUPPORTED:
2577 hci_cc_write_le_host_supported(hdev, skb);
2578 break;
2579
2580 case HCI_OP_WRITE_REMOTE_AMP_ASSOC:
2581 hci_cc_write_remote_amp_assoc(hdev, skb);
2582 break;
2583
2584 default:
2585 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2586 break;
2587 }
2588
2589 if (ev->opcode != HCI_OP_NOP)
2590 del_timer(&hdev->cmd_timer);
2591
2592 if (ev->ncmd) {
2593 atomic_set(&hdev->cmd_cnt, 1);
2594 if (!skb_queue_empty(&hdev->cmd_q))
2595 queue_work(hdev->workqueue, &hdev->cmd_work);
2596 }
2597 }
2598
2599 static void hci_cmd_status_evt(struct hci_dev *hdev, struct sk_buff *skb)
2600 {
2601 struct hci_ev_cmd_status *ev = (void *) skb->data;
2602 __u16 opcode;
2603
2604 skb_pull(skb, sizeof(*ev));
2605
2606 opcode = __le16_to_cpu(ev->opcode);
2607
2608 switch (opcode) {
2609 case HCI_OP_INQUIRY:
2610 hci_cs_inquiry(hdev, ev->status);
2611 break;
2612
2613 case HCI_OP_CREATE_CONN:
2614 hci_cs_create_conn(hdev, ev->status);
2615 break;
2616
2617 case HCI_OP_ADD_SCO:
2618 hci_cs_add_sco(hdev, ev->status);
2619 break;
2620
2621 case HCI_OP_AUTH_REQUESTED:
2622 hci_cs_auth_requested(hdev, ev->status);
2623 break;
2624
2625 case HCI_OP_SET_CONN_ENCRYPT:
2626 hci_cs_set_conn_encrypt(hdev, ev->status);
2627 break;
2628
2629 case HCI_OP_REMOTE_NAME_REQ:
2630 hci_cs_remote_name_req(hdev, ev->status);
2631 break;
2632
2633 case HCI_OP_READ_REMOTE_FEATURES:
2634 hci_cs_read_remote_features(hdev, ev->status);
2635 break;
2636
2637 case HCI_OP_READ_REMOTE_EXT_FEATURES:
2638 hci_cs_read_remote_ext_features(hdev, ev->status);
2639 break;
2640
2641 case HCI_OP_SETUP_SYNC_CONN:
2642 hci_cs_setup_sync_conn(hdev, ev->status);
2643 break;
2644
2645 case HCI_OP_SNIFF_MODE:
2646 hci_cs_sniff_mode(hdev, ev->status);
2647 break;
2648
2649 case HCI_OP_EXIT_SNIFF_MODE:
2650 hci_cs_exit_sniff_mode(hdev, ev->status);
2651 break;
2652
2653 case HCI_OP_DISCONNECT:
2654 hci_cs_disconnect(hdev, ev->status);
2655 break;
2656
2657 case HCI_OP_LE_CREATE_CONN:
2658 hci_cs_le_create_conn(hdev, ev->status);
2659 break;
2660
2661 case HCI_OP_LE_START_ENC:
2662 hci_cs_le_start_enc(hdev, ev->status);
2663 break;
2664
2665 case HCI_OP_CREATE_PHY_LINK:
2666 hci_cs_create_phylink(hdev, ev->status);
2667 break;
2668
2669 case HCI_OP_ACCEPT_PHY_LINK:
2670 hci_cs_accept_phylink(hdev, ev->status);
2671 break;
2672
2673 default:
2674 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
2675 break;
2676 }
2677
2678 if (ev->opcode != HCI_OP_NOP)
2679 del_timer(&hdev->cmd_timer);
2680
2681 if (ev->ncmd && !test_bit(HCI_RESET, &hdev->flags)) {
2682 atomic_set(&hdev->cmd_cnt, 1);
2683 if (!skb_queue_empty(&hdev->cmd_q))
2684 queue_work(hdev->workqueue, &hdev->cmd_work);
2685 }
2686 }
2687
2688 static void hci_role_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2689 {
2690 struct hci_ev_role_change *ev = (void *) skb->data;
2691 struct hci_conn *conn;
2692
2693 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2694
2695 hci_dev_lock(hdev);
2696
2697 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
2698 if (conn) {
2699 if (!ev->status) {
2700 if (ev->role)
2701 conn->link_mode &= ~HCI_LM_MASTER;
2702 else
2703 conn->link_mode |= HCI_LM_MASTER;
2704 }
2705
2706 clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags);
2707
2708 hci_role_switch_cfm(conn, ev->status, ev->role);
2709 }
2710
2711 hci_dev_unlock(hdev);
2712 }
2713
/* Number of Completed Packets event: the controller reports, per handle,
 * how many queued data packets it has finished sending.  Used for
 * packet-based flow control: return the credits to the matching per-type
 * counter (clamped to the controller-advertised maximum) and kick the TX
 * work queue so queued traffic can flow again.
 */
static void hci_num_comp_pkts_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_pkts *ev = (void *) skb->data;
	int i;

	/* This event is only meaningful in packet-based flow-control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate the claimed handle count against the actual skb length
	 * before walking the variable-length handle array.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_pkts_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_hndl %d", hdev->name, ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_pkts_info *info = &ev->handles[i];
		struct hci_conn *conn;
		__u16 handle, count;

		handle = __le16_to_cpu(info->handle);
		count = __le16_to_cpu(info->count);

		conn = hci_conn_hash_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= count;

		switch (conn->type) {
		case ACL_LINK:
			hdev->acl_cnt += count;
			if (hdev->acl_cnt > hdev->acl_pkts)
				hdev->acl_cnt = hdev->acl_pkts;
			break;

		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * (le_pkts == 0) share the ACL credits instead.
			 */
			if (hdev->le_pkts) {
				hdev->le_cnt += count;
				if (hdev->le_cnt > hdev->le_pkts)
					hdev->le_cnt = hdev->le_pkts;
			} else {
				hdev->acl_cnt += count;
				if (hdev->acl_cnt > hdev->acl_pkts)
					hdev->acl_cnt = hdev->acl_pkts;
			}
			break;

		case SCO_LINK:
			hdev->sco_cnt += count;
			if (hdev->sco_cnt > hdev->sco_pkts)
				hdev->sco_cnt = hdev->sco_pkts;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* Freed-up credits may allow queued packets to go out */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2779
2780 static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev,
2781 __u16 handle)
2782 {
2783 struct hci_chan *chan;
2784
2785 switch (hdev->dev_type) {
2786 case HCI_BREDR:
2787 return hci_conn_hash_lookup_handle(hdev, handle);
2788 case HCI_AMP:
2789 chan = hci_chan_lookup_handle(hdev, handle);
2790 if (chan)
2791 return chan->conn;
2792 break;
2793 default:
2794 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2795 break;
2796 }
2797
2798 return NULL;
2799 }
2800
/* Number of Completed Data Blocks event: block-based flow-control
 * counterpart of Number of Completed Packets.  Return the freed blocks to
 * the shared block counter (clamped to the controller maximum) and kick
 * the TX work queue.
 */
static void hci_num_comp_blocks_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_num_comp_blocks *ev = (void *) skb->data;
	int i;

	/* This event is only meaningful in block-based flow-control mode */
	if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) {
		BT_ERR("Wrong event for mode %d", hdev->flow_ctl_mode);
		return;
	}

	/* Validate the claimed handle count against the actual skb length
	 * before walking the variable-length handle array.
	 */
	if (skb->len < sizeof(*ev) || skb->len < sizeof(*ev) +
	    ev->num_hndl * sizeof(struct hci_comp_blocks_info)) {
		BT_DBG("%s bad parameters", hdev->name);
		return;
	}

	BT_DBG("%s num_blocks %d num_hndl %d", hdev->name, ev->num_blocks,
	       ev->num_hndl);

	for (i = 0; i < ev->num_hndl; i++) {
		struct hci_comp_blocks_info *info = &ev->handles[i];
		struct hci_conn *conn = NULL;
		__u16 handle, block_count;

		handle = __le16_to_cpu(info->handle);
		block_count = __le16_to_cpu(info->blocks);

		/* Handle meaning depends on dev_type (conn vs. chan) */
		conn = __hci_conn_lookup_handle(hdev, handle);
		if (!conn)
			continue;

		conn->sent -= block_count;

		switch (conn->type) {
		case ACL_LINK:
		case AMP_LINK:
			/* ACL and AMP links share one block pool */
			hdev->block_cnt += block_count;
			if (hdev->block_cnt > hdev->num_blocks)
				hdev->block_cnt = hdev->num_blocks;
			break;

		default:
			BT_ERR("Unknown type %d conn %p", conn->type, conn);
			break;
		}
	}

	/* Freed-up blocks may allow queued packets to go out */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
2850
2851 static void hci_mode_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
2852 {
2853 struct hci_ev_mode_change *ev = (void *) skb->data;
2854 struct hci_conn *conn;
2855
2856 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
2857
2858 hci_dev_lock(hdev);
2859
2860 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
2861 if (conn) {
2862 conn->mode = ev->mode;
2863 conn->interval = __le16_to_cpu(ev->interval);
2864
2865 if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND,
2866 &conn->flags)) {
2867 if (conn->mode == HCI_CM_ACTIVE)
2868 set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2869 else
2870 clear_bit(HCI_CONN_POWER_SAVE, &conn->flags);
2871 }
2872
2873 if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags))
2874 hci_sco_setup(conn, ev->status);
2875 }
2876
2877 hci_dev_unlock(hdev);
2878 }
2879
/* PIN Code Request event: the controller needs a PIN for legacy pairing.
 * Reject outright when the device is not pairable; otherwise forward the
 * request to user space via mgmt, flagging whether a 16-digit (secure)
 * PIN is required.
 */
static void hci_pin_code_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_pin_code_req *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	if (conn->state == BT_CONNECTED) {
		/* Extend the disconnect timeout while pairing runs; the
		 * hold/put pair leaves the refcount unchanged but re-arms
		 * the timer.
		 */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_PAIRING_TIMEOUT;
		hci_conn_put(conn);
	}

	if (!test_bit(HCI_PAIRABLE, &hdev->dev_flags))
		hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
	else if (test_bit(HCI_MGMT, &hdev->dev_flags)) {
		u8 secure;

		/* High security mandates a full 16-digit PIN */
		if (conn->pending_sec_level == BT_SECURITY_HIGH)
			secure = 1;
		else
			secure = 0;

		mgmt_pin_code_request(hdev, &ev->bdaddr, secure);
	}

unlock:
	hci_dev_unlock(hdev);
}
2916
/* Link Key Request event: the controller asks for a stored link key.
 * Look the key up and apply security policy (reject debug keys unless
 * enabled, unauthenticated keys when MITM is required, and short-PIN
 * combination keys for high security) before replying with the key or a
 * negative reply.
 */
static void hci_link_key_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_req *ev = (void *) skb->data;
	struct hci_cp_link_key_reply cp;
	struct hci_conn *conn;
	struct link_key *key;

	BT_DBG("%s", hdev->name);

	/* Key storage is only handled when mgmt has enabled it */
	if (!test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	key = hci_find_link_key(hdev, &ev->bdaddr);
	if (!key) {
		BT_DBG("%s link key not found for %pMR", hdev->name,
		       &ev->bdaddr);
		goto not_found;
	}

	BT_DBG("%s found key type %u for %pMR", hdev->name, key->type,
	       &ev->bdaddr);

	if (!test_bit(HCI_DEBUG_KEYS, &hdev->dev_flags) &&
	    key->type == HCI_LK_DEBUG_COMBINATION) {
		BT_DBG("%s ignoring debug key", hdev->name);
		goto not_found;
	}

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* An unauthenticated key can't satisfy a MITM requirement
		 * (auth_type bit 0); 0xff means auth_type is unset.
		 */
		if (key->type == HCI_LK_UNAUTH_COMBINATION &&
		    conn->auth_type != 0xff && (conn->auth_type & 0x01)) {
			BT_DBG("%s ignoring unauthenticated key", hdev->name);
			goto not_found;
		}

		/* High security requires a combination key derived from a
		 * full 16-character PIN.
		 */
		if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 &&
		    conn->pending_sec_level == BT_SECURITY_HIGH) {
			BT_DBG("%s ignoring key unauthenticated for high security",
			       hdev->name);
			goto not_found;
		}

		conn->key_type = key->type;
		conn->pin_length = key->pin_len;
	}

	bacpy(&cp.bdaddr, &ev->bdaddr);
	memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE);

	hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp);

	hci_dev_unlock(hdev);

	return;

not_found:
	hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr);
	hci_dev_unlock(hdev);
}
2979
/* Link Key Notification event: a new link key was generated during
 * pairing.  Update the connection's key bookkeeping and persist the key
 * when link-key storage is enabled.
 */
static void hci_link_key_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_link_key_notify *ev = (void *) skb->data;
	struct hci_conn *conn;
	u8 pin_len = 0;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (conn) {
		/* Pairing done: fall back to the normal disconnect timeout */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		pin_len = conn->pin_length;

		/* A changed-combination key keeps the original key's type */
		if (ev->key_type != HCI_LK_CHANGED_COMBINATION)
			conn->key_type = ev->key_type;

		hci_conn_put(conn);
	}

	if (test_bit(HCI_LINK_KEYS, &hdev->dev_flags))
		hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
				 ev->key_type, pin_len);

	hci_dev_unlock(hdev);
}
3008
3009 static void hci_clock_offset_evt(struct hci_dev *hdev, struct sk_buff *skb)
3010 {
3011 struct hci_ev_clock_offset *ev = (void *) skb->data;
3012 struct hci_conn *conn;
3013
3014 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3015
3016 hci_dev_lock(hdev);
3017
3018 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3019 if (conn && !ev->status) {
3020 struct inquiry_entry *ie;
3021
3022 ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
3023 if (ie) {
3024 ie->data.clock_offset = ev->clock_offset;
3025 ie->timestamp = jiffies;
3026 }
3027 }
3028
3029 hci_dev_unlock(hdev);
3030 }
3031
3032 static void hci_pkt_type_change_evt(struct hci_dev *hdev, struct sk_buff *skb)
3033 {
3034 struct hci_ev_pkt_type_change *ev = (void *) skb->data;
3035 struct hci_conn *conn;
3036
3037 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3038
3039 hci_dev_lock(hdev);
3040
3041 conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
3042 if (conn && !ev->status)
3043 conn->pkt_type = __le16_to_cpu(ev->pkt_type);
3044
3045 hci_dev_unlock(hdev);
3046 }
3047
3048 static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, struct sk_buff *skb)
3049 {
3050 struct hci_ev_pscan_rep_mode *ev = (void *) skb->data;
3051 struct inquiry_entry *ie;
3052
3053 BT_DBG("%s", hdev->name);
3054
3055 hci_dev_lock(hdev);
3056
3057 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3058 if (ie) {
3059 ie->data.pscan_rep_mode = ev->pscan_rep_mode;
3060 ie->timestamp = jiffies;
3061 }
3062
3063 hci_dev_unlock(hdev);
3064 }
3065
/* Inquiry Result with RSSI event: parse one of the two possible wire
 * formats (with or without the page-scan-mode byte), update the inquiry
 * cache for each response and report the devices to mgmt.
 */
static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev,
					     struct sk_buff *skb)
{
	struct inquiry_data data;
	int num_rsp = *((__u8 *) skb->data);
	bool name_known, ssp;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not reported to user space */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	/* Distinguish the two formats by the per-response record size */
	if ((skb->len - 1) / num_rsp != sizeof(struct inquiry_info_with_rssi)) {
		struct inquiry_info_with_rssi_and_pscan_mode *info;
		info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = info->pscan_mode;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;

			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	} else {
		struct inquiry_info_with_rssi *info = (void *) (skb->data + 1);

		for (; num_rsp; num_rsp--, info++) {
			bacpy(&data.bdaddr, &info->bdaddr);
			data.pscan_rep_mode = info->pscan_rep_mode;
			data.pscan_period_mode = info->pscan_period_mode;
			data.pscan_mode = 0x00;
			memcpy(data.dev_class, info->dev_class, 3);
			data.clock_offset = info->clock_offset;
			data.rssi = info->rssi;
			data.ssp_mode = 0x00;
			name_known = hci_inquiry_cache_update(hdev, &data,
							      false, &ssp);
			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
					  info->dev_class, info->rssi,
					  !name_known, ssp, NULL, 0);
		}
	}

	hci_dev_unlock(hdev);
}
3125
/* Remote Extended Features event: page 1 carries the remote host's SSP
 * support bit.  Record it, then continue connection setup the same way
 * hci_remote_features_evt() does: name request or mgmt notification,
 * then the connected transition if no outgoing authentication is needed.
 */
static void hci_remote_ext_features_evt(struct hci_dev *hdev,
					struct sk_buff *skb)
{
	struct hci_ev_remote_ext_features *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status && ev->page == 0x01) {
		struct inquiry_entry *ie;

		/* Mirror the host SSP capability into the inquiry cache */
		ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
		if (ie)
			ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);

		if (ev->features[0] & LMP_HOST_SSP)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	/* Only continue the setup sequence while still configuring */
	if (conn->state != BT_CONFIG)
		goto unlock;

	/* Resolve the remote name before announcing the device to mgmt;
	 * otherwise announce it now (once).
	 */
	if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) {
		struct hci_cp_remote_name_req cp;
		memset(&cp, 0, sizeof(cp));
		bacpy(&cp.bdaddr, &conn->dst);
		cp.pscan_rep_mode = 0x02;
		hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
	} else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &conn->dst, conn->type,
				      conn->dst_type, 0, NULL, 0,
				      conn->dev_class);

	if (!hci_outgoing_auth_needed(hdev, conn)) {
		conn->state = BT_CONNECTED;
		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3174
/* Synchronous Connection Complete event: an (e)SCO link setup finished.
 * On success register the connection; on specific negotiation failures
 * retry once with a downgraded packet-type mask; anything else closes
 * the connection.
 */
static void hci_sync_conn_complete_evt(struct hci_dev *hdev,
				       struct sk_buff *skb)
{
	struct hci_ev_sync_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
	if (!conn) {
		/* The controller may have fallen back from eSCO to SCO;
		 * look for the eSCO connection we originally requested
		 * and retype it.
		 */
		if (ev->link_type == ESCO_LINK)
			goto unlock;

		conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr);
		if (!conn)
			goto unlock;

		conn->type = SCO_LINK;
	}

	switch (ev->status) {
	case 0x00:
		conn->handle = __le16_to_cpu(ev->handle);
		conn->state  = BT_CONNECTED;

		hci_conn_hold_device(conn);
		hci_conn_add_sysfs(conn);
		break;

	case 0x11:	/* Unsupported Feature or Parameter Value */
	case 0x1c:	/* SCO interval rejected */
	case 0x1a:	/* Unsupported Remote Feature */
	case 0x1f:	/* Unspecified error */
		/* Retry once with a less demanding packet-type selection */
		if (conn->out && conn->attempt < 2) {
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
			hci_setup_sync(conn, conn->link->handle);
			goto unlock;
		}
		/* fall through */

	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_proto_connect_cfm(conn, ev->status);
	if (ev->status)
		hci_conn_del(conn);

unlock:
	hci_dev_unlock(hdev);
}
3230
3231 static void hci_sync_conn_changed_evt(struct hci_dev *hdev, struct sk_buff *skb)
3232 {
3233 BT_DBG("%s", hdev->name);
3234 }
3235
3236 static void hci_sniff_subrate_evt(struct hci_dev *hdev, struct sk_buff *skb)
3237 {
3238 struct hci_ev_sniff_subrate *ev = (void *) skb->data;
3239
3240 BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);
3241 }
3242
/* Extended Inquiry Result event: like inquiry-with-RSSI but each
 * response also carries EIR data.  Update the inquiry cache and report
 * each device to mgmt together with its EIR payload; a complete name in
 * the EIR data spares a later remote-name request.
 */
static void hci_extended_inquiry_result_evt(struct hci_dev *hdev,
					    struct sk_buff *skb)
{
	struct inquiry_data data;
	struct extended_inquiry_info *info = (void *) (skb->data + 1);
	int num_rsp = *((__u8 *) skb->data);
	size_t eir_len;

	BT_DBG("%s num_rsp %d", hdev->name, num_rsp);

	if (!num_rsp)
		return;

	/* Periodic inquiry results are not reported to user space */
	if (test_bit(HCI_PERIODIC_INQ, &hdev->dev_flags))
		return;

	hci_dev_lock(hdev);

	for (; num_rsp; num_rsp--, info++) {
		bool name_known, ssp;

		bacpy(&data.bdaddr, &info->bdaddr);
		data.pscan_rep_mode = info->pscan_rep_mode;
		data.pscan_period_mode = info->pscan_period_mode;
		data.pscan_mode = 0x00;
		memcpy(data.dev_class, info->dev_class, 3);
		data.clock_offset = info->clock_offset;
		data.rssi = info->rssi;
		data.ssp_mode = 0x01;	/* EIR implies SSP support */

		/* With mgmt the name counts as known only if the EIR data
		 * contains a complete name.
		 */
		if (test_bit(HCI_MGMT, &hdev->dev_flags))
			name_known = eir_has_data_type(info->data,
						       sizeof(info->data),
						       EIR_NAME_COMPLETE);
		else
			name_known = true;

		name_known = hci_inquiry_cache_update(hdev, &data, name_known,
						      &ssp);
		eir_len = eir_get_length(info->data, sizeof(info->data));
		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,
				  info->dev_class, info->rssi, !name_known,
				  ssp, info->data, eir_len);
	}

	hci_dev_unlock(hdev);
}
3290
/* Encryption Key Refresh Complete event: the link key was refreshed (or
 * the refresh failed).  Promote the pending security level on success,
 * disconnect on failure of an established link, and complete any pending
 * connect/auth confirmation.
 */
static void hci_key_refresh_complete_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_key_refresh_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x handle 0x%4.4x", hdev->name, ev->status,
	       __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (!conn)
		goto unlock;

	if (!ev->status)
		conn->sec_level = conn->pending_sec_level;

	clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

	/* A failed refresh on a live link is treated as an auth failure */
	if (ev->status && conn->state == BT_CONNECTED) {
		hci_acl_disconn(conn, HCI_ERROR_AUTH_FAILURE);
		hci_conn_put(conn);
		goto unlock;
	}

	if (conn->state == BT_CONFIG) {
		if (!ev->status)
			conn->state = BT_CONNECTED;

		hci_proto_connect_cfm(conn, ev->status);
		hci_conn_put(conn);
	} else {
		hci_auth_cfm(conn, ev->status);

		/* Re-arm the disconnect timer (refcount unchanged) */
		hci_conn_hold(conn);
		conn->disc_timeout = HCI_DISCONN_TIMEOUT;
		hci_conn_put(conn);
	}

unlock:
	hci_dev_unlock(hdev);
}
3334
3335 static u8 hci_get_auth_req(struct hci_conn *conn)
3336 {
3337 /* If remote requests dedicated bonding follow that lead */
3338 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) {
3339 /* If both remote and local IO capabilities allow MITM
3340 * protection then require it, otherwise don't */
3341 if (conn->remote_cap == 0x03 || conn->io_capability == 0x03)
3342 return 0x02;
3343 else
3344 return 0x03;
3345 }
3346
3347 /* If remote requests no-bonding follow that lead */
3348 if (conn->remote_auth == 0x00 || conn->remote_auth == 0x01)
3349 return conn->remote_auth | (conn->auth_type & 0x01);
3350
3351 return conn->auth_type;
3352 }
3353
/* IO Capability Request event: SSP pairing started and the controller
 * wants our IO capabilities.  Reply with our capabilities when pairing is
 * acceptable (device pairable, or the remote only wants no-bonding),
 * otherwise send a negative reply.
 */
static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_io_capa_request *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Keep the connection alive for the duration of pairing */
	hci_conn_hold(conn);

	/* Without mgmt there is no one to answer the request */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	if (test_bit(HCI_PAIRABLE, &hdev->dev_flags) ||
	    (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) {
		struct hci_cp_io_capability_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		/* Change the IO capability from KeyboardDisplay
		 * to DisplayYesNo as it is not supported by BT spec. */
		cp.capability = (conn->io_capability == 0x04) ?
				0x01 : conn->io_capability;
		conn->auth_type = hci_get_auth_req(conn);
		cp.authentication = conn->auth_type;

		/* Advertise OOB data only when we have it and it is usable
		 * for this pairing direction.
		 */
		if (hci_find_remote_oob_data(hdev, &conn->dst) &&
		    (conn->out || test_bit(HCI_CONN_REMOTE_OOB, &conn->flags)))
			cp.oob_data = 0x01;
		else
			cp.oob_data = 0x00;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY,
			     sizeof(cp), &cp);
	} else {
		struct hci_cp_io_capability_neg_reply cp;

		bacpy(&cp.bdaddr, &ev->bdaddr);
		cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED;

		hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY,
			     sizeof(cp), &cp);
	}

unlock:
	hci_dev_unlock(hdev);
}
3405
3406 static void hci_io_capa_reply_evt(struct hci_dev *hdev, struct sk_buff *skb)
3407 {
3408 struct hci_ev_io_capa_reply *ev = (void *) skb->data;
3409 struct hci_conn *conn;
3410
3411 BT_DBG("%s", hdev->name);
3412
3413 hci_dev_lock(hdev);
3414
3415 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3416 if (!conn)
3417 goto unlock;
3418
3419 conn->remote_cap = ev->capability;
3420 conn->remote_auth = ev->authentication;
3421 if (ev->oob_data)
3422 set_bit(HCI_CONN_REMOTE_OOB, &conn->flags);
3423
3424 unlock:
3425 hci_dev_unlock(hdev);
3426 }
3427
/* User Confirmation Request event: SSP numeric comparison.  Decide
 * whether to reject, auto-accept (possibly after a configurable delay),
 * or forward the confirmation to user space via mgmt.
 */
static void hci_user_confirm_request_evt(struct hci_dev *hdev,
					 struct sk_buff *skb)
{
	struct hci_ev_user_confirm_req *ev = (void *) skb->data;
	int loc_mitm, rem_mitm, confirm_hint = 0;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		goto unlock;

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		goto unlock;

	/* Bit 0 of the auth requirements is the MITM flag */
	loc_mitm = (conn->auth_type & 0x01);
	rem_mitm = (conn->remote_auth & 0x01);

	/* If we require MITM but the remote device can't provide that
	 * (it has NoInputNoOutput) then reject the confirmation
	 * request. The only exception is when we're dedicated bonding
	 * initiators (connect_cfm_cb set) since then we always have the MITM
	 * bit set. */
	if (!conn->connect_cfm_cb && loc_mitm && conn->remote_cap == 0x03) {
		BT_DBG("Rejecting request: remote device can't provide MITM");
		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

	/* If no side requires MITM protection; auto-accept */
	if ((!loc_mitm || conn->remote_cap == 0x03) &&
	    (!rem_mitm || conn->io_capability == 0x03)) {

		/* If we're not the initiators request authorization to
		 * proceed from user space (mgmt_user_confirm with
		 * confirm_hint set to 1). */
		if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
			BT_DBG("Confirming auto-accept as acceptor");
			confirm_hint = 1;
			goto confirm;
		}

		BT_DBG("Auto-accept of user confirmation with %ums delay",
		       hdev->auto_accept_delay);

		/* Optional delay gives user space a chance to intervene */
		if (hdev->auto_accept_delay > 0) {
			int delay = msecs_to_jiffies(hdev->auto_accept_delay);
			mod_timer(&conn->auto_accept_timer, jiffies + delay);
			goto unlock;
		}

		hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY,
			     sizeof(ev->bdaddr), &ev->bdaddr);
		goto unlock;
	}

confirm:
	mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, ev->passkey,
				  confirm_hint);

unlock:
	hci_dev_unlock(hdev);
}
3495
3496 static void hci_user_passkey_request_evt(struct hci_dev *hdev,
3497 struct sk_buff *skb)
3498 {
3499 struct hci_ev_user_passkey_req *ev = (void *) skb->data;
3500
3501 BT_DBG("%s", hdev->name);
3502
3503 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3504 mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0);
3505 }
3506
3507 static void hci_user_passkey_notify_evt(struct hci_dev *hdev,
3508 struct sk_buff *skb)
3509 {
3510 struct hci_ev_user_passkey_notify *ev = (void *) skb->data;
3511 struct hci_conn *conn;
3512
3513 BT_DBG("%s", hdev->name);
3514
3515 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3516 if (!conn)
3517 return;
3518
3519 conn->passkey_notify = __le32_to_cpu(ev->passkey);
3520 conn->passkey_entered = 0;
3521
3522 if (test_bit(HCI_MGMT, &hdev->dev_flags))
3523 mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
3524 conn->dst_type, conn->passkey_notify,
3525 conn->passkey_entered);
3526 }
3527
/* Keypress Notification event: track how many passkey digits the remote
 * side has entered so far and relay the progress to user space.  Start
 * and completion notifications return early without a mgmt update for
 * the digit count change (start resets the counter first).
 */
static void hci_keypress_notify_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_keypress_notify *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s", hdev->name);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	if (!conn)
		return;

	switch (ev->type) {
	case HCI_KEYPRESS_STARTED:
		conn->passkey_entered = 0;
		return;

	case HCI_KEYPRESS_ENTERED:
		conn->passkey_entered++;
		break;

	case HCI_KEYPRESS_ERASED:
		conn->passkey_entered--;
		break;

	case HCI_KEYPRESS_CLEARED:
		conn->passkey_entered = 0;
		break;

	case HCI_KEYPRESS_COMPLETED:
		return;
	}

	if (test_bit(HCI_MGMT, &hdev->dev_flags))
		mgmt_user_passkey_notify(hdev, &conn->dst, conn->type,
					 conn->dst_type, conn->passkey_notify,
					 conn->passkey_entered);
}
3565
3566 static void hci_simple_pair_complete_evt(struct hci_dev *hdev,
3567 struct sk_buff *skb)
3568 {
3569 struct hci_ev_simple_pair_complete *ev = (void *) skb->data;
3570 struct hci_conn *conn;
3571
3572 BT_DBG("%s", hdev->name);
3573
3574 hci_dev_lock(hdev);
3575
3576 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
3577 if (!conn)
3578 goto unlock;
3579
3580 /* To avoid duplicate auth_failed events to user space we check
3581 * the HCI_CONN_AUTH_PEND flag which will be set if we
3582 * initiated the authentication. A traditional auth_complete
3583 * event gets always produced as initiator and is also mapped to
3584 * the mgmt_auth_failed event */
3585 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status)
3586 mgmt_auth_failed(hdev, &conn->dst, conn->type, conn->dst_type,
3587 ev->status);
3588
3589 hci_conn_put(conn);
3590
3591 unlock:
3592 hci_dev_unlock(hdev);
3593 }
3594
3595 static void hci_remote_host_features_evt(struct hci_dev *hdev,
3596 struct sk_buff *skb)
3597 {
3598 struct hci_ev_remote_host_features *ev = (void *) skb->data;
3599 struct inquiry_entry *ie;
3600
3601 BT_DBG("%s", hdev->name);
3602
3603 hci_dev_lock(hdev);
3604
3605 ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
3606 if (ie)
3607 ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP);
3608
3609 hci_dev_unlock(hdev);
3610 }
3611
3612 static void hci_remote_oob_data_request_evt(struct hci_dev *hdev,
3613 struct sk_buff *skb)
3614 {
3615 struct hci_ev_remote_oob_data_request *ev = (void *) skb->data;
3616 struct oob_data *data;
3617
3618 BT_DBG("%s", hdev->name);
3619
3620 hci_dev_lock(hdev);
3621
3622 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
3623 goto unlock;
3624
3625 data = hci_find_remote_oob_data(hdev, &ev->bdaddr);
3626 if (data) {
3627 struct hci_cp_remote_oob_data_reply cp;
3628
3629 bacpy(&cp.bdaddr, &ev->bdaddr);
3630 memcpy(cp.hash, data->hash, sizeof(cp.hash));
3631 memcpy(cp.randomizer, data->randomizer, sizeof(cp.randomizer));
3632
3633 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, sizeof(cp),
3634 &cp);
3635 } else {
3636 struct hci_cp_remote_oob_data_neg_reply cp;
3637
3638 bacpy(&cp.bdaddr, &ev->bdaddr);
3639 hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, sizeof(cp),
3640 &cp);
3641 }
3642
3643 unlock:
3644 hci_dev_unlock(hdev);
3645 }
3646
/* Handle Physical Link Complete event (AMP).
 *
 * Looks up the AMP physical link by its handle; on controller-reported
 * failure the link is deleted, on success it is marked connected and
 * inherits the peer address of the BR/EDR ACL link the AMP manager's
 * L2CAP connection runs over.
 */
static void hci_phy_link_complete_evt(struct hci_dev *hdev,
				      struct sk_buff *skb)
{
	struct hci_ev_phy_link_complete *ev = (void *) skb->data;
	struct hci_conn *hcon, *bredr_hcon;

	BT_DBG("%s handle 0x%2.2x status 0x%2.2x", hdev->name, ev->phy_handle,
	       ev->status);

	hci_dev_lock(hdev);

	hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
	if (!hcon) {
		hci_dev_unlock(hdev);
		return;
	}

	if (ev->status) {
		/* Link setup failed: remove the half-created connection */
		hci_conn_del(hcon);
		hci_dev_unlock(hdev);
		return;
	}

	/* The BR/EDR ACL connection carrying the A2MP signalling */
	bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon;

	hcon->state = BT_CONNECTED;
	bacpy(&hcon->dst, &bredr_hcon->dst);

	/* hold/put pair around the timeout update -- NOTE(review):
	 * presumably this (re)arms the idle disconnect timer with the
	 * new disc_timeout via hci_conn_put; confirm against
	 * hci_conn_hold/hci_conn_put in hci_core.h.
	 */
	hci_conn_hold(hcon);
	hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
	hci_conn_put(hcon);

	hci_conn_hold_device(hcon);
	hci_conn_add_sysfs(hcon);

	hci_dev_unlock(hdev);

	if (hcon->out) {
		struct hci_dev *bredr_hdev = hci_dev_hold(bredr_hcon->hdev);

		if (!bredr_hdev)
			return;

		/* Placeholder - create chan req
		l2cap_chan_create_cfm(bredr_hcon, hcon->remote_id);
		*/

		hci_dev_put(bredr_hdev);
	}
}
3697
/* Handle LE Connection Complete event (LE meta sub-event).
 *
 * An outgoing connection attempt leaves a conn in BT_CONNECT state, so
 * it is found by the state lookup; otherwise (incoming connection) a
 * new hci_conn is created for the peer address from the event.  On
 * failure the connection is reported to mgmt and deleted; on success
 * it is moved to BT_CONNECTED and announced to mgmt and the protocol
 * layers.
 */
static void hci_le_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_conn_complete *ev = (void *) skb->data;
	struct hci_conn *conn;

	BT_DBG("%s status 0x%2.2x", hdev->name, ev->status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn) {
		conn = hci_conn_add(hdev, LE_LINK, &ev->bdaddr);
		if (!conn) {
			BT_ERR("No memory for new connection");
			goto unlock;
		}

		conn->dst_type = ev->bdaddr_type;

		/* Master role implies we initiated this connection */
		if (ev->role == LE_CONN_ROLE_MASTER) {
			conn->out = true;
			conn->link_mode |= HCI_LM_MASTER;
		}
	}

	if (ev->status) {
		mgmt_connect_failed(hdev, &conn->dst, conn->type,
				    conn->dst_type, ev->status);
		hci_proto_connect_cfm(conn, ev->status);
		conn->state = BT_CLOSED;
		hci_conn_del(conn);
		goto unlock;
	}

	/* Announce the device to mgmt only once per connection */
	if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
		mgmt_device_connected(hdev, &ev->bdaddr, conn->type,
				      conn->dst_type, 0, NULL, 0, NULL);

	conn->sec_level = BT_SECURITY_LOW;
	conn->handle = __le16_to_cpu(ev->handle);
	conn->state = BT_CONNECTED;

	hci_conn_hold_device(conn);
	hci_conn_add_sysfs(conn);

	hci_proto_connect_cfm(conn, ev->status);

unlock:
	hci_dev_unlock(hdev);
}
3748
3749 static void hci_le_adv_report_evt(struct hci_dev *hdev, struct sk_buff *skb)
3750 {
3751 u8 num_reports = skb->data[0];
3752 void *ptr = &skb->data[1];
3753 s8 rssi;
3754
3755 hci_dev_lock(hdev);
3756
3757 while (num_reports--) {
3758 struct hci_ev_le_advertising_info *ev = ptr;
3759
3760 rssi = ev->data[ev->length];
3761 mgmt_device_found(hdev, &ev->bdaddr, LE_LINK, ev->bdaddr_type,
3762 NULL, rssi, 0, 1, ev->data, ev->length);
3763
3764 ptr += sizeof(*ev) + ev->length + 1;
3765 }
3766
3767 hci_dev_unlock(hdev);
3768 }
3769
/* Handle LE Long Term Key Request event (LE meta sub-event).
 *
 * The controller asks for the LTK needed to encrypt an LE connection.
 * The key is looked up by the EDIV/Rand pair from the event; if found
 * it is returned with an LTK Reply, otherwise an LTK Negative Reply is
 * sent so encryption fails cleanly on the remote side.
 */
static void hci_le_ltk_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_ev_le_ltk_req *ev = (void *) skb->data;
	struct hci_cp_le_ltk_reply cp;
	struct hci_cp_le_ltk_neg_reply neg;
	struct hci_conn *conn;
	struct smp_ltk *ltk;

	BT_DBG("%s handle 0x%4.4x", hdev->name, __le16_to_cpu(ev->handle));

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle));
	if (conn == NULL)
		goto not_found;

	ltk = hci_find_ltk(hdev, ev->ediv, ev->random);
	if (ltk == NULL)
		goto not_found;

	memcpy(cp.ltk, ltk->val, sizeof(ltk->val));
	cp.handle = cpu_to_le16(conn->handle);

	/* Reflect an authenticated key in the connection security level */
	if (ltk->authenticated)
		conn->sec_level = BT_SECURITY_HIGH;

	hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp);

	/* STKs protect only the current connection and must not be
	 * reused, so drop them from the list once handed out.
	 */
	if (ltk->type & HCI_SMP_STK) {
		list_del(&ltk->list);
		kfree(ltk);
	}

	hci_dev_unlock(hdev);

	return;

not_found:
	neg.handle = ev->handle;
	hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg);
	hci_dev_unlock(hdev);
}
3812
3813 static void hci_le_meta_evt(struct hci_dev *hdev, struct sk_buff *skb)
3814 {
3815 struct hci_ev_le_meta *le_ev = (void *) skb->data;
3816
3817 skb_pull(skb, sizeof(*le_ev));
3818
3819 switch (le_ev->subevent) {
3820 case HCI_EV_LE_CONN_COMPLETE:
3821 hci_le_conn_complete_evt(hdev, skb);
3822 break;
3823
3824 case HCI_EV_LE_ADVERTISING_REPORT:
3825 hci_le_adv_report_evt(hdev, skb);
3826 break;
3827
3828 case HCI_EV_LE_LTK_REQ:
3829 hci_le_ltk_request_evt(hdev, skb);
3830 break;
3831
3832 default:
3833 break;
3834 }
3835 }
3836
3837 static void hci_chan_selected_evt(struct hci_dev *hdev, struct sk_buff *skb)
3838 {
3839 struct hci_ev_channel_selected *ev = (void *) skb->data;
3840 struct hci_conn *hcon;
3841
3842 BT_DBG("%s handle 0x%2.2x", hdev->name, ev->phy_handle);
3843
3844 skb_pull(skb, sizeof(*ev));
3845
3846 hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle);
3847 if (!hcon)
3848 return;
3849
3850 amp_read_loc_assoc_final_data(hdev, hcon);
3851 }
3852
/* Main HCI event demultiplexer.
 *
 * Strips the event header and dispatches the packet to the handler for
 * the given event code.  Unknown events are only logged.  The skb is
 * always consumed here, and the device's event-receive counter is
 * bumped for every packet.
 */
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_event_hdr *hdr = (void *) skb->data;
	__u8 event = hdr->evt;

	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	switch (event) {
	case HCI_EV_INQUIRY_COMPLETE:
		hci_inquiry_complete_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT:
		hci_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_CONN_COMPLETE:
		hci_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_CONN_REQUEST:
		hci_conn_request_evt(hdev, skb);
		break;

	case HCI_EV_DISCONN_COMPLETE:
		hci_disconn_complete_evt(hdev, skb);
		break;

	case HCI_EV_AUTH_COMPLETE:
		hci_auth_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_NAME:
		hci_remote_name_evt(hdev, skb);
		break;

	case HCI_EV_ENCRYPT_CHANGE:
		hci_encrypt_change_evt(hdev, skb);
		break;

	case HCI_EV_CHANGE_LINK_KEY_COMPLETE:
		hci_change_link_key_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_FEATURES:
		hci_remote_features_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_VERSION:
		hci_remote_version_evt(hdev, skb);
		break;

	case HCI_EV_QOS_SETUP_COMPLETE:
		hci_qos_setup_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_COMPLETE:
		hci_cmd_complete_evt(hdev, skb);
		break;

	case HCI_EV_CMD_STATUS:
		hci_cmd_status_evt(hdev, skb);
		break;

	case HCI_EV_ROLE_CHANGE:
		hci_role_change_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_PKTS:
		hci_num_comp_pkts_evt(hdev, skb);
		break;

	case HCI_EV_MODE_CHANGE:
		hci_mode_change_evt(hdev, skb);
		break;

	case HCI_EV_PIN_CODE_REQ:
		hci_pin_code_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_REQ:
		hci_link_key_request_evt(hdev, skb);
		break;

	case HCI_EV_LINK_KEY_NOTIFY:
		hci_link_key_notify_evt(hdev, skb);
		break;

	case HCI_EV_CLOCK_OFFSET:
		hci_clock_offset_evt(hdev, skb);
		break;

	case HCI_EV_PKT_TYPE_CHANGE:
		hci_pkt_type_change_evt(hdev, skb);
		break;

	case HCI_EV_PSCAN_REP_MODE:
		hci_pscan_rep_mode_evt(hdev, skb);
		break;

	case HCI_EV_INQUIRY_RESULT_WITH_RSSI:
		hci_inquiry_result_with_rssi_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_EXT_FEATURES:
		hci_remote_ext_features_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_COMPLETE:
		hci_sync_conn_complete_evt(hdev, skb);
		break;

	case HCI_EV_SYNC_CONN_CHANGED:
		hci_sync_conn_changed_evt(hdev, skb);
		break;

	case HCI_EV_SNIFF_SUBRATE:
		hci_sniff_subrate_evt(hdev, skb);
		break;

	case HCI_EV_EXTENDED_INQUIRY_RESULT:
		hci_extended_inquiry_result_evt(hdev, skb);
		break;

	case HCI_EV_KEY_REFRESH_COMPLETE:
		hci_key_refresh_complete_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REQUEST:
		hci_io_capa_request_evt(hdev, skb);
		break;

	case HCI_EV_IO_CAPA_REPLY:
		hci_io_capa_reply_evt(hdev, skb);
		break;

	case HCI_EV_USER_CONFIRM_REQUEST:
		hci_user_confirm_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_REQUEST:
		hci_user_passkey_request_evt(hdev, skb);
		break;

	case HCI_EV_USER_PASSKEY_NOTIFY:
		hci_user_passkey_notify_evt(hdev, skb);
		break;

	case HCI_EV_KEYPRESS_NOTIFY:
		hci_keypress_notify_evt(hdev, skb);
		break;

	case HCI_EV_SIMPLE_PAIR_COMPLETE:
		hci_simple_pair_complete_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_HOST_FEATURES:
		hci_remote_host_features_evt(hdev, skb);
		break;

	case HCI_EV_LE_META:
		hci_le_meta_evt(hdev, skb);
		break;

	case HCI_EV_CHANNEL_SELECTED:
		hci_chan_selected_evt(hdev, skb);
		break;

	case HCI_EV_REMOTE_OOB_DATA_REQUEST:
		hci_remote_oob_data_request_evt(hdev, skb);
		break;

	case HCI_EV_PHY_LINK_COMPLETE:
		hci_phy_link_complete_evt(hdev, skb);
		break;

	case HCI_EV_NUM_COMP_BLOCKS:
		hci_num_comp_blocks_evt(hdev, skb);
		break;

	default:
		BT_DBG("%s event 0x%2.2x", hdev->name, event);
		break;
	}

	kfree_skb(skb);
	hdev->stat.evt_rx++;
}
This page took 0.116045 seconds and 5 git commands to generate.