Bluetooth: Remove auth_type parameter from hci_connect_le()
[deliverable/linux.git] / net / bluetooth / hci_conn.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI connection handling. */
26
27 #include <linux/export.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/l2cap.h>
32
33 #include "smp.h"
34 #include "a2mp.h"
35
/* Parameters for one synchronous (SCO/eSCO) connection attempt.
 *
 * @pkt_type:    allowed (e)SCO packet types in HCI encoding
 * @max_latency: maximum allowed latency for the link
 */
struct sco_param {
	u16 pkt_type;
	u16 max_latency;
};

/* CVSD air-mode parameter sets, indexed by (attempt - 1) in
 * hci_setup_sync() so each retry falls back to a more conservative
 * set.  Labels S3..D0 refer to the standard HFP parameter sets.
 */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff }, /* D0 */
};

/* Wideband (transparent) air-mode parameter sets, tried in order on
 * successive attempts (T2 then T1).
 */
static const struct sco_param sco_param_wideband[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008 }, /* T1 */
};
53
54 static void hci_le_create_connection_cancel(struct hci_conn *conn)
55 {
56 hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
57 }
58
/* Send HCI Create Connection for an outgoing BR/EDR ACL link.
 *
 * Seeds the command with page-scan and clock-offset hints from the
 * inquiry cache when a sufficiently recent entry for the peer exists,
 * and allows a role switch unless the local link policy requires
 * staying master.
 */
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	/* The initiating side of a BR/EDR connection starts as master */
	set_bit(HCI_CONN_MASTER, &conn->flags);

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			/* High bit marks the clock offset as valid */
			cp.clock_offset = ie->data.clock_offset |
					  cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		if (ie->data.ssp_mode > 0)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
102
103 static void hci_acl_create_connection_cancel(struct hci_conn *conn)
104 {
105 struct hci_cp_create_conn_cancel cp;
106
107 BT_DBG("hcon %p", conn);
108
109 if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
110 return;
111
112 bacpy(&cp.bdaddr, &conn->dst);
113 hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
114 }
115
116 static void hci_reject_sco(struct hci_conn *conn)
117 {
118 struct hci_cp_reject_sync_conn_req cp;
119
120 cp.reason = HCI_ERROR_REMOTE_USER_TERM;
121 bacpy(&cp.bdaddr, &conn->dst);
122
123 hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
124 }
125
126 void hci_disconnect(struct hci_conn *conn, __u8 reason)
127 {
128 struct hci_cp_disconnect cp;
129
130 BT_DBG("hcon %p", conn);
131
132 conn->state = BT_DISCONN;
133
134 cp.handle = cpu_to_le16(conn->handle);
135 cp.reason = reason;
136 hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
137 }
138
/* Tear down the AMP physical link backing this connection.  The
 * disconnect reason is obtained from the upper protocol layer via
 * hci_proto_disconn_ind().
 */
static void hci_amp_disconn(struct hci_conn *conn)
{
	struct hci_cp_disconn_phy_link cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_DISCONN;

	cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
	cp.reason = hci_proto_disconn_ind(conn);
	hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
		     sizeof(cp), &cp);
}
152
/* Set up a legacy SCO link on top of the existing ACL connection
 * identified by @handle.  Used when the controller does not support
 * eSCO (see hci_sco_setup()).
 */
static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
170
/* Issue HCI Setup Synchronous Connection for an eSCO link on top of
 * the ACL connection identified by @handle.
 *
 * Packet type and max latency are taken from the attempt-indexed
 * parameter table for the negotiated air mode, so every retry falls
 * back to a more conservative parameter set.  Returns false when the
 * parameter sets are exhausted, the air mode is unknown, or the
 * command could not be sent.
 */
bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);

	/* 0x1f40 = 8000 bytes/s, i.e. 64 kbit/s in each direction */
	cp.tx_bandwidth = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth = cpu_to_le32(0x00001f40);
	cp.voice_setting = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (conn->attempt > ARRAY_SIZE(sco_param_wideband))
			return false;
		cp.retrans_effort = 0x02;
		param = &sco_param_wideband[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
			return false;
		cp.retrans_effort = 0x01;
		param = &sco_param_cvsd[conn->attempt - 1];
		break;
	default:
		return false;
	}

	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
215
/* Send HCI LE Connection Update with the given parameters and, when a
 * stored connection-parameter entry exists for the peer, keep that
 * entry in sync as well.
 *
 * Returns 0x01 when a stored parameter entry was updated, 0x00
 * otherwise.
 */
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	hci_dev_lock(hdev);

	/* Mirror the new parameters into the stored entry, if any */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle = cpu_to_le16(conn->handle);
	cp.conn_interval_min = cpu_to_le16(min);
	cp.conn_interval_max = cpu_to_le16(max);
	cp.conn_latency = cpu_to_le16(latency);
	cp.supervision_timeout = cpu_to_le16(to_multiplier);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}
251
252 void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
253 __u8 ltk[16])
254 {
255 struct hci_dev *hdev = conn->hdev;
256 struct hci_cp_le_start_enc cp;
257
258 BT_DBG("hcon %p", conn);
259
260 memset(&cp, 0, sizeof(cp));
261
262 cp.handle = cpu_to_le16(conn->handle);
263 cp.rand = rand;
264 cp.ediv = ediv;
265 memcpy(cp.ltk, ltk, sizeof(cp.ltk));
266
267 hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
268 }
269
/* Continue or abort SCO setup once the underlying ACL connection has
 * completed with @status.  On success the (e)SCO link is created; on
 * failure the pending SCO connection is notified and deleted.
 *
 * Device _must_ be locked.
 */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	if (!sco)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		/* Prefer eSCO when the controller supports it */
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}
290
/* Delayed-work handler that tears down a connection whose reference
 * count has dropped to zero: cancel in-progress connection attempts,
 * reject pending SCO requests, or disconnect established links.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		/* Outgoing attempts are cancelled; incoming SCO/eSCO
		 * requests are rejected.
		 */
		if (conn->out) {
			if (conn->type == ACL_LINK)
				hci_acl_create_connection_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_create_connection_cancel(conn);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			hci_reject_sco(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		if (conn->type == AMP_LINK) {
			hci_amp_disconn(conn);
		} else {
			__u8 reason = hci_proto_disconn_ind(conn);

			/* When we are master of an established connection
			 * and it enters the disconnect timeout, then go
			 * ahead and try to read the current clock offset.
			 *
			 * Processing of the result is done within the
			 * event handling and hci_clock_offset_evt function.
			 */
			if (conn->type == ACL_LINK &&
			    test_bit(HCI_CONN_MASTER, &conn->flags)) {
				struct hci_dev *hdev = conn->hdev;
				struct hci_cp_read_clock_offset cp;

				cp.handle = cpu_to_le16(conn->handle);

				hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET,
					     sizeof(cp), &cp);
			}

			hci_disconnect(conn, reason);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}
356
/* Enter sniff mode.
 *
 * Delayed-work handler that puts an idle ACL link into sniff mode
 * (and requests sniff subrating first when both sides support it),
 * provided both sides are sniff-capable and the link policy allows it.
 */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_latency = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	/* Only one mode change may be pending at a time */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt = cpu_to_le16(4);
		cp.timeout = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
391
392 static void hci_conn_auto_accept(struct work_struct *work)
393 {
394 struct hci_conn *conn = container_of(work, struct hci_conn,
395 auto_accept_work.work);
396
397 hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
398 &conn->dst);
399 }
400
/* Delayed-work handler for LE connection-attempt timeouts: either
 * disable directed advertising (when we were waiting as peripheral)
 * or cancel the outgoing LE connection attempt.
 */
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
		u8 enable = 0x00;
		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			     &enable);
		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		return;
	}

	hci_le_create_connection_cancel(conn);
}
424
/* Allocate and initialise a new hci_conn of the given link @type to
 * @dst, register it in the connection hash and sysfs, and return it.
 * Returns NULL on allocation failure.
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	/* 0xff marks remote auth and key type as "not yet known" */
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Per-type packet type / address initialisation */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);

	atomic_set(&conn->refcnt, 0);

	/* The connection holds a reference on the device */
	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	hci_conn_init_sysfs(conn);

	return conn;
}
492
/* Unregister and free a connection: cancel its delayed work, unlink
 * any associated SCO/ACL peer, return unacked packet credits to the
 * device, and drop the references taken in hci_conn_add().  Always
 * returns 0.
 */
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work_sync(&conn->le_conn_timeout);

		/* LE links share the ACL quota when the controller has
		 * no dedicated LE buffers.
		 */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* SCO/eSCO: detach from and release the parent ACL */
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_drop(acl);
		}
	}

	hci_chan_list_flush(conn);

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	skb_queue_purge(&conn->data_q);

	hci_conn_del_sysfs(conn);

	hci_dev_put(hdev);

	hci_conn_put(conn);

	return 0;
}
544
/* Pick the BR/EDR device to use for a connection from @src to @dst.
 *
 * With a specific @src, the device whose address matches it is chosen;
 * with BDADDR_ANY, the first usable device whose own address differs
 * from @dst is chosen.  Returns the device with a reference held, or
 * NULL when no suitable device exists.
 */
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		/* Skip devices that are down, claimed by a user channel,
		 * or not BR/EDR capable.
		 */
		if (!test_bit(HCI_UP, &d->flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
		    d->dev_type != HCI_BREDR)
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);
583
/* Handle a failed LE connection attempt: notify management and upper
 * layers, delete the connection, and restore background scanning and
 * advertising that may have been paused for the attempt.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;

	conn->state = BT_CLOSED;

	mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
			    status);

	hci_proto_connect_cfm(conn, status);

	hci_conn_del(conn);

	/* Since we may have temporarily stopped the background scanning in
	 * favor of connection establishment, we should restart it.
	 */
	hci_update_background_scan(hdev);

	/* Re-enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	mgmt_reenable_advertising(hdev);
}
608
609 static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
610 {
611 struct hci_conn *conn;
612
613 if (status == 0)
614 return;
615
616 BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
617 status);
618
619 hci_dev_lock(hdev);
620
621 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
622 if (!conn)
623 goto done;
624
625 hci_le_conn_failed(conn, status);
626
627 done:
628 hci_dev_unlock(hdev);
629 }
630
/* Append an HCI LE Create Connection command for @conn to @req and
 * move the connection to BT_CONNECT.  Bails out without queueing
 * anything if a suitable own address cannot be set up.
 */
static void hci_req_add_le_create_conn(struct hci_request *req,
				       struct hci_conn *conn)
{
	struct hci_cp_le_create_conn cp;
	struct hci_dev *hdev = conn->hdev;
	u8 own_addr_type;

	memset(&cp, 0, sizeof(cp));

	/* Update random address, but set require_privacy to false so
	 * that we never connect with an unresolvable address.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.own_address_type = own_addr_type;
	/* Connection parameters were chosen by the caller (stored
	 * per-device values or controller defaults).
	 */
	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);

	conn->state = BT_CONNECT;
}
662
/* Append commands to @req that start directed advertising towards
 * @conn's peer (used instead of initiating when we act as peripheral)
 * and move the connection to BT_CONNECT.
 */
static void hci_req_directed_advertising(struct hci_request *req,
					 struct hci_conn *conn)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type;
	u8 enable;

	/* Advertising must be off while changing its parameters */
	enable = 0x00;
	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);

	/* Clear the HCI_ADVERTISING bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.type = LE_ADV_DIRECT_IND;
	cp.own_address_type = own_addr_type;
	cp.direct_addr_type = conn->dst_type;
	bacpy(&cp.direct_addr, &conn->dst);
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	enable = 0x01;
	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);

	conn->state = BT_CONNECT;
}
701
/* Create (or reuse) an LE connection to @dst/@dst_type with the given
 * security level and connection timeout.
 *
 * Returns the connection with a reference held, or an ERR_PTR:
 * -EBUSY when another LE connection attempt is already in progress,
 * -ENOMEM on allocation failure, or the error from hci_req_run().
 */
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, u8 sec_level, u16 conn_timeout)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	struct hci_request req;
	int err;

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update the pending_sec_level field and return
	 * the object found.
	 */
	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
	if (conn) {
		conn->pending_sec_level = sec_level;
		goto done;
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		return ERR_PTR(-EBUSY);

	/* When given an identity address with existing identity
	 * resolving key, the connection needs to be established
	 * to a resolvable random address.
	 *
	 * This uses the cached random resolvable address from
	 * a previous scan. When no cached address is available,
	 * try connecting to the identity address instead.
	 *
	 * Storing the resolvable random address is required here
	 * to handle connection failures. The address will later
	 * be resolved back into the original identity address
	 * from the connect request.
	 */
	irk = hci_find_irk_by_addr(hdev, dst, dst_type);
	if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
		dst = &irk->rpa;
		dst_type = ADDR_LE_DEV_RANDOM;
	}

	conn = hci_conn_add(hdev, LE_LINK, dst);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;

	hci_req_init(&req, hdev);

	/* While advertising, wait for the peer to connect to us via
	 * directed advertising rather than initiating ourselves.
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
		hci_req_directed_advertising(&req, conn);
		goto create_conn;
	}

	conn->out = true;
	set_bit(HCI_CONN_MASTER, &conn->flags);

	/* Use per-device stored connection parameters when available,
	 * otherwise the controller-wide defaults.
	 */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		conn->le_conn_min_interval = params->conn_min_interval;
		conn->le_conn_max_interval = params->conn_max_interval;
		conn->le_conn_latency = params->conn_latency;
		conn->le_supv_timeout = params->supervision_timeout;
	} else {
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
		conn->le_conn_latency = hdev->le_conn_latency;
		conn->le_supv_timeout = hdev->le_supv_timeout;
	}

	/* If controller is scanning, we stop it since some controllers are
	 * not able to scan and connect at the same time. Also set the
	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
	 * handler for scan disabling knows to set the correct discovery
	 * state.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
		hci_req_add_le_scan_disable(&req);
		set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
	}

	hci_req_add_le_create_conn(&req, conn);

create_conn:
	err = hci_req_run(&req, create_le_conn_complete);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

done:
	hci_conn_hold(conn);
	return conn;
}
808
/* Look up or create an outgoing BR/EDR ACL connection to @dst and make
 * sure a connection attempt is running when the link is not yet up.
 *
 * Returns the connection with a reference held, ERR_PTR(-ENOTSUPP)
 * when BR/EDR is disabled on the device, or ERR_PTR(-ENOMEM) on
 * allocation failure.
 */
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type)
{
	struct hci_conn *acl;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return ERR_PTR(-ENOTSUPP);

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	/* Only kick off a new attempt for fresh or closed connections */
	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}
835
/* Establish a SCO/eSCO connection of the given @type to @dst.
 *
 * First brings up (or reuses) the underlying ACL link, then links a
 * SCO hci_conn to it.  If the ACL is still being established or a mode
 * change is pending, actual SCO setup is deferred to hci_sco_setup().
 * Returns the SCO connection with a reference held, or an ERR_PTR.
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst);
		if (!sco) {
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* Cross-link the ACL and SCO connections */
	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	sco->setting = setting;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
878
879 /* Check link security requirement */
880 int hci_conn_check_link_mode(struct hci_conn *conn)
881 {
882 BT_DBG("hcon %p", conn);
883
884 /* In Secure Connections Only mode, it is required that Secure
885 * Connections is used and the link is encrypted with AES-CCM
886 * using a P-256 authenticated combination key.
887 */
888 if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) {
889 if (!hci_conn_sc_enabled(conn) ||
890 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
891 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
892 return 0;
893 }
894
895 if (hci_conn_ssp_enabled(conn) &&
896 !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
897 return 0;
898
899 return 1;
900 }
901
/* Authenticate remote device.
 *
 * Raises the pending security level as needed and, unless
 * authentication is already complete or pending, issues an HCI
 * Authentication Requested command.  Returns 1 when the link is
 * already authenticated at a sufficient level, 0 when authentication
 * is (now) pending.
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
938
939 /* Encrypt the the link */
940 static void hci_conn_encrypt(struct hci_conn *conn)
941 {
942 BT_DBG("hcon %p", conn);
943
944 if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
945 struct hci_cp_set_conn_encrypt cp;
946 cp.handle = cpu_to_le16(conn->handle);
947 cp.encrypt = 0x01;
948 hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
949 &cp);
950 }
951 }
952
/* Enable security.
 *
 * Make sure @conn meets @sec_level, authenticating and/or encrypting
 * the link as necessary.  Returns 1 when the requirement is already
 * satisfied, 0 when authentication or encryption is now pending (the
 * result arrives via HCI events).  LE links delegate to SMP.
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	/* Don't start authentication while encryption is changing */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
1018
1019 /* Check secure link requirement */
1020 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1021 {
1022 BT_DBG("hcon %p", conn);
1023
1024 /* Accept if non-secure or higher security level is required */
1025 if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1026 return 1;
1027
1028 /* Accept if secure or higher security level is already present */
1029 if (conn->sec_level == BT_SECURITY_HIGH ||
1030 conn->sec_level == BT_SECURITY_FIPS)
1031 return 1;
1032
1033 /* Reject not secure link */
1034 return 0;
1035 }
1036 EXPORT_SYMBOL(hci_conn_check_secure);
1037
1038 /* Change link key */
1039 int hci_conn_change_link_key(struct hci_conn *conn)
1040 {
1041 BT_DBG("hcon %p", conn);
1042
1043 if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
1044 struct hci_cp_change_conn_link_key cp;
1045 cp.handle = cpu_to_le16(conn->handle);
1046 hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
1047 sizeof(cp), &cp);
1048 }
1049
1050 return 0;
1051 }
1052
1053 /* Switch role */
1054 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1055 {
1056 BT_DBG("hcon %p", conn);
1057
1058 if (!role && test_bit(HCI_CONN_MASTER, &conn->flags))
1059 return 1;
1060
1061 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1062 struct hci_cp_switch_role cp;
1063 bacpy(&cp.bdaddr, &conn->dst);
1064 cp.role = role;
1065 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1066 }
1067
1068 return 0;
1069 }
1070 EXPORT_SYMBOL(hci_conn_switch_role);
1071
/* Enter active mode.
 *
 * Exit sniff mode if the connection is sniffing (and either power
 * save is enabled or @force_active is set), then re-arm the idle
 * timer that will put the link back into sniff mode later.
 */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	/* Only one mode change may be pending at a time */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		queue_delayed_work(hdev->workqueue, &conn->idle_work,
				   msecs_to_jiffies(hdev->idle_timeout));
}
1096
/* Drop all connection on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	/* _safe variant: each connection is deleted while iterating. */
	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		/* Notify the protocol layers of the local-host-terminated
		 * disconnect before the connection object is torn down.
		 */
		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(c);
	}
}
1112
/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	/* Look for an ACL connection parked in BT_CONNECT2 (presumably
	 * queued while the controller was busy — confirm against the
	 * state machine in hci_event.c) and kick off its creation.
	 */
	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_create_connection(conn);

	hci_dev_unlock(hdev);
}
1128
1129 static u32 get_link_mode(struct hci_conn *conn)
1130 {
1131 u32 link_mode = 0;
1132
1133 if (test_bit(HCI_CONN_MASTER, &conn->flags))
1134 link_mode |= HCI_LM_MASTER;
1135
1136 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1137 link_mode |= HCI_LM_ENCRYPT;
1138
1139 if (test_bit(HCI_CONN_AUTH, &conn->flags))
1140 link_mode |= HCI_LM_AUTH;
1141
1142 if (test_bit(HCI_CONN_SECURE, &conn->flags))
1143 link_mode |= HCI_LM_SECURE;
1144
1145 if (test_bit(HCI_CONN_FIPS, &conn->flags))
1146 link_mode |= HCI_LM_FIPS;
1147
1148 return link_mode;
1149 }
1150
1151 int hci_get_conn_list(void __user *arg)
1152 {
1153 struct hci_conn *c;
1154 struct hci_conn_list_req req, *cl;
1155 struct hci_conn_info *ci;
1156 struct hci_dev *hdev;
1157 int n = 0, size, err;
1158
1159 if (copy_from_user(&req, arg, sizeof(req)))
1160 return -EFAULT;
1161
1162 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1163 return -EINVAL;
1164
1165 size = sizeof(req) + req.conn_num * sizeof(*ci);
1166
1167 cl = kmalloc(size, GFP_KERNEL);
1168 if (!cl)
1169 return -ENOMEM;
1170
1171 hdev = hci_dev_get(req.dev_id);
1172 if (!hdev) {
1173 kfree(cl);
1174 return -ENODEV;
1175 }
1176
1177 ci = cl->conn_info;
1178
1179 hci_dev_lock(hdev);
1180 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1181 bacpy(&(ci + n)->bdaddr, &c->dst);
1182 (ci + n)->handle = c->handle;
1183 (ci + n)->type = c->type;
1184 (ci + n)->out = c->out;
1185 (ci + n)->state = c->state;
1186 (ci + n)->link_mode = get_link_mode(c);
1187 if (++n >= req.conn_num)
1188 break;
1189 }
1190 hci_dev_unlock(hdev);
1191
1192 cl->dev_id = hdev->id;
1193 cl->conn_num = n;
1194 size = sizeof(req) + n * sizeof(*ci);
1195
1196 hci_dev_put(hdev);
1197
1198 err = copy_to_user(arg, cl, size);
1199 kfree(cl);
1200
1201 return err ? -EFAULT : 0;
1202 }
1203
1204 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1205 {
1206 struct hci_conn_info_req req;
1207 struct hci_conn_info ci;
1208 struct hci_conn *conn;
1209 char __user *ptr = arg + sizeof(req);
1210
1211 if (copy_from_user(&req, arg, sizeof(req)))
1212 return -EFAULT;
1213
1214 hci_dev_lock(hdev);
1215 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1216 if (conn) {
1217 bacpy(&ci.bdaddr, &conn->dst);
1218 ci.handle = conn->handle;
1219 ci.type = conn->type;
1220 ci.out = conn->out;
1221 ci.state = conn->state;
1222 ci.link_mode = get_link_mode(conn);
1223 }
1224 hci_dev_unlock(hdev);
1225
1226 if (!conn)
1227 return -ENOENT;
1228
1229 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1230 }
1231
1232 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1233 {
1234 struct hci_auth_info_req req;
1235 struct hci_conn *conn;
1236
1237 if (copy_from_user(&req, arg, sizeof(req)))
1238 return -EFAULT;
1239
1240 hci_dev_lock(hdev);
1241 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1242 if (conn)
1243 req.type = conn->auth_type;
1244 hci_dev_unlock(hdev);
1245
1246 if (!conn)
1247 return -ENOENT;
1248
1249 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1250 }
1251
1252 struct hci_chan *hci_chan_create(struct hci_conn *conn)
1253 {
1254 struct hci_dev *hdev = conn->hdev;
1255 struct hci_chan *chan;
1256
1257 BT_DBG("%s hcon %p", hdev->name, conn);
1258
1259 chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
1260 if (!chan)
1261 return NULL;
1262
1263 chan->conn = conn;
1264 skb_queue_head_init(&chan->data_q);
1265 chan->state = BT_CONNECTED;
1266
1267 list_add_rcu(&chan->list, &conn->chan_list);
1268
1269 return chan;
1270 }
1271
/* Unlink and free a channel. Must only be called once per channel. */
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	/* Remove from the RCU-protected list first ... */
	list_del_rcu(&chan->list);

	/* ... then wait for all RCU readers (e.g. the lookup in
	 * hci_chan_lookup_handle()) to finish before the channel
	 * memory is released below.
	 */
	synchronize_rcu();

	/* Drop a reference on the parent connection. */
	hci_conn_drop(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
1288
1289 void hci_chan_list_flush(struct hci_conn *conn)
1290 {
1291 struct hci_chan *chan, *n;
1292
1293 BT_DBG("hcon %p", conn);
1294
1295 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1296 hci_chan_del(chan);
1297 }
1298
1299 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1300 __u16 handle)
1301 {
1302 struct hci_chan *hchan;
1303
1304 list_for_each_entry(hchan, &hcon->chan_list, list) {
1305 if (hchan->handle == handle)
1306 return hchan;
1307 }
1308
1309 return NULL;
1310 }
1311
1312 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1313 {
1314 struct hci_conn_hash *h = &hdev->conn_hash;
1315 struct hci_conn *hcon;
1316 struct hci_chan *hchan = NULL;
1317
1318 rcu_read_lock();
1319
1320 list_for_each_entry_rcu(hcon, &h->list, list) {
1321 hchan = __hci_chan_lookup_handle(hcon, handle);
1322 if (hchan)
1323 break;
1324 }
1325
1326 rcu_read_unlock();
1327
1328 return hchan;
1329 }