Merge branch 'strscpy' of git://git.kernel.org/pub/scm/linux/kernel/git/cmetcalf...
[deliverable/linux.git] / net / bluetooth / hci_conn.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI connection handling. */
26
27 #include <linux/export.h>
28 #include <linux/debugfs.h>
29
30 #include <net/bluetooth/bluetooth.h>
31 #include <net/bluetooth/hci_core.h>
32 #include <net/bluetooth/l2cap.h>
33
34 #include "hci_request.h"
35 #include "smp.h"
36 #include "a2mp.h"
37
/* One (e)SCO parameter set; the tables below list these in fallback
 * order and are indexed by conn->attempt - 1 in hci_setup_sync().
 */
struct sco_param {
	u16 pkt_type;		/* packet type bitmask for the HCI command */
	u16 max_latency;	/* maximum latency (0xffff = don't care) */
	u8 retrans_effort;	/* retransmission effort (0xff = don't care) */
};
43
/* eSCO fallback parameters for CVSD air mode, tried in order (S3 first,
 * D0 last) as successive connection attempts fail.
 */
static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007, 0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff, 0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff, 0x01 }, /* D0 */
};
51
/* Plain SCO fallback parameters for CVSD, used when the peer is not
 * eSCO capable.
 */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff, 0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff, 0xff }, /* D0 */
};
56
/* eSCO fallback parameters for transparent (mSBC) air mode. */
static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008, 0x02 }, /* T1 */
};
61
/* Abort an outstanding LE Create Connection attempt; the result is
 * delivered asynchronously via the command/connection complete events.
 */
static void hci_le_create_connection_cancel(struct hci_conn *conn)
{
	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}
66
/* Clean up the explicit-connect state that hci_connect_le_scan() set up
 * for this connection's address.
 *
 * This function requires the caller holds hdev->lock.
 */
static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
{
	struct hci_conn_params *params;
	struct smp_irk *irk;
	bdaddr_t *bdaddr;
	u8 bdaddr_type;

	bdaddr = &conn->dst;
	bdaddr_type = conn->dst_type;

	/* Check if we need to convert to identity address */
	irk = hci_get_irk(conn->hdev, bdaddr, bdaddr_type);
	if (irk) {
		bdaddr = &irk->bdaddr;
		bdaddr_type = irk->addr_type;
	}

	params = hci_explicit_connect_lookup(conn->hdev, bdaddr, bdaddr_type);
	if (!params)
		return;

	/* The connection attempt was doing scan for new RPA, and is
	 * in scan phase. If params are not associated with any other
	 * autoconnect action, remove them completely. If they are, just unmark
	 * them as waiting for connection, by clearing explicit_connect field.
	 */
	if (params->auto_connect == HCI_AUTO_CONN_EXPLICIT)
		hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type);
	else
		params->explicit_connect = false;
}
99
/* Tear down a connection that is still in the scanning phase: drop its
 * explicit-connect state, remove it from the connection hash and let
 * background scanning resume.
 *
 * This function requires the caller holds hdev->lock.
 */
static void hci_connect_le_scan_remove(struct hci_conn *conn)
{
	hci_connect_le_scan_cleanup(conn);

	hci_conn_hash_del(conn->hdev, conn);
	hci_update_background_scan(conn->hdev);
}
108
/* Issue an HCI Create Connection for an outgoing BR/EDR ACL link.
 * Page-scan parameters and clock offset are seeded from the inquiry
 * cache when a sufficiently fresh entry for the peer exists.
 */
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			/* Bit 15 set marks the clock offset as valid */
			cp.clock_offset = ie->data.clock_offset |
					  cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		if (ie->data.ssp_mode > 0)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	/* Allow the peer to become master only if we don't insist on it */
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
151
/* Abort an outgoing BR/EDR connection attempt. Create Connection Cancel
 * only exists from Bluetooth 1.2 on, so do nothing on older controllers.
 */
static void hci_acl_create_connection_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("hcon %p", conn);

	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}
164
165 static void hci_reject_sco(struct hci_conn *conn)
166 {
167 struct hci_cp_reject_sync_conn_req cp;
168
169 cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
170 bacpy(&cp.bdaddr, &conn->dst);
171
172 hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
173 }
174
/* Start disconnecting @conn with the given HCI @reason.
 *
 * Returns the result of queueing the HCI Disconnect command (negative
 * errno on failure). Completion is reported via the disconnect event.
 */
int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("hcon %p", conn);

	/* When we are master of an established connection and it enters
	 * the disconnect timeout, then go ahead and try to read the
	 * current clock offset. Processing of the result is done
	 * within the event handling and hci_clock_offset_evt function.
	 */
	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) {
		struct hci_dev *hdev = conn->hdev;
		struct hci_cp_read_clock_offset clkoff_cp;

		clkoff_cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
			     &clkoff_cp);
	}

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	return hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}
201
/* Disconnect an AMP physical link; the reason is taken from the
 * protocol layer via hci_proto_disconn_ind().
 */
static void hci_amp_disconn(struct hci_conn *conn)
{
	struct hci_cp_disconn_phy_link cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_DISCONN;

	cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
	cp.reason = hci_proto_disconn_ind(conn);
	hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
		     sizeof(cp), &cp);
}
215
/* Set up a legacy SCO link on top of the ACL connection identified by
 * @handle. Used when the controller is not eSCO capable (see
 * hci_sco_setup()).
 */
static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
233
/* Set up an eSCO link on top of the ACL connection identified by
 * @handle, choosing air-mode-specific parameters from the fallback
 * tables based on how many attempts have been made so far.
 *
 * Returns false when the fallback table is exhausted, the air mode is
 * unsupported, or sending the HCI command fails; true otherwise.
 */
bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	/* Each retry moves one step down the relevant parameter table */
	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		if (lmp_esco_capable(conn->link)) {
			if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
283
/* Request new LE connection parameters from the controller and, if
 * stored parameters exist for the peer, update them to match.
 *
 * Returns 0x01 when stored parameters were found and updated, 0x00
 * otherwise (callers use this as the L2CAP conn-param response store
 * indication).
 */
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}
319
/* Start LE link-layer encryption on @conn using the given EDIV/Rand and
 * long term key. Only the first @key_size bytes of @ltk are used; the
 * rest of the key field stays zeroed by the memset.
 */
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16], __u8 key_size)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.rand = rand;
	cp.ediv = ediv;
	memcpy(cp.ltk, ltk, key_size);

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
337
/* Continue setting up a SCO/eSCO link that was waiting for its parent
 * ACL connection: on success (@status == 0) start the synchronous
 * connection, otherwise report the failure and delete the SCO conn.
 *
 * Device _must_ be locked.
 */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	if (!sco)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}
358
/* Delayed work run when a connection's disconnect timer expires: cancel
 * a still-pending connection attempt, reject a pending SCO request, or
 * disconnect an established link, depending on the conn state.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->out) {
			if (conn->type == ACL_LINK)
				hci_acl_create_connection_cancel(conn);
			else if (conn->type == LE_LINK) {
				/* Scanning-phase LE conns have no pending
				 * Create Connection to cancel; just remove.
				 */
				if (test_bit(HCI_CONN_SCANNING, &conn->flags))
					hci_connect_le_scan_remove(conn);
				else
					hci_le_create_connection_cancel(conn);
			}
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			hci_reject_sco(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		if (conn->type == AMP_LINK) {
			hci_amp_disconn(conn);
		} else {
			__u8 reason = hci_proto_disconn_ind(conn);
			hci_disconnect(conn, reason);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}
409
/* Enter sniff mode.
 *
 * Delayed work run when an ACL connection has been idle: if both sides
 * are sniff capable and the link policy allows it, optionally configure
 * sniff subrating and then request sniff mode.
 */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
444
/* Delayed work that auto-confirms a pending user-confirmation request
 * for this connection's peer address.
 */
static void hci_conn_auto_accept(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     auto_accept_work.work);

	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}
453
/* Delayed work run when an LE connection attempt times out: stop
 * directed advertising (slave role) or cancel the pending Create
 * Connection (master role).
 */
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		u8 enable = 0x00;
		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			     &enable);
		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		return;
	}

	hci_le_create_connection_cancel(conn);
}
477
/* Allocate and initialize a new hci_conn of the given link @type to
 * @dst, register it in the device's connection hash and sysfs, and take
 * a reference on @hdev.
 *
 * Returns the new connection or NULL on allocation failure. The conn
 * starts with refcnt 0; callers take their own hold via hci_conn_hold().
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	/* Per-link-type packet type / address setup */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	hci_conn_init_sysfs(conn);

	return conn;
}
551
/* Tear down @conn: cancel its work items, unlink any sibling SCO/ACL
 * connection, return unacked packet credits to the device, flush
 * channels and queued data, remove it from hash and sysfs, and drop the
 * references taken in hci_conn_add(). Always returns 0.
 */
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work(&conn->le_conn_timeout);

		/* LE traffic may share the ACL buffer pool */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* SCO/eSCO: detach from and release the parent ACL */
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_drop(acl);
		}
	}

	hci_chan_list_flush(conn);

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	skb_queue_purge(&conn->data_q);

	hci_conn_del_sysfs(conn);

	debugfs_remove_recursive(conn->debugfs);

	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	hci_dev_put(hdev);

	hci_conn_put(conn);

	return 0;
}
608
609 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
610 {
611 int use_src = bacmp(src, BDADDR_ANY);
612 struct hci_dev *hdev = NULL, *d;
613
614 BT_DBG("%pMR -> %pMR", src, dst);
615
616 read_lock(&hci_dev_list_lock);
617
618 list_for_each_entry(d, &hci_dev_list, list) {
619 if (!test_bit(HCI_UP, &d->flags) ||
620 hci_dev_test_flag(d, HCI_USER_CHANNEL) ||
621 d->dev_type != HCI_BREDR)
622 continue;
623
624 /* Simple routing:
625 * No source address - find interface with bdaddr != dst
626 * Source address - find interface with bdaddr == src
627 */
628
629 if (use_src) {
630 if (!bacmp(&d->bdaddr, src)) {
631 hdev = d; break;
632 }
633 } else {
634 if (bacmp(&d->bdaddr, dst)) {
635 hdev = d; break;
636 }
637 }
638 }
639
640 if (hdev)
641 hdev = hci_dev_hold(hdev);
642
643 read_unlock(&hci_dev_list_lock);
644 return hdev;
645 }
646 EXPORT_SYMBOL(hci_get_route);
647
/* Handle a failed LE connection attempt: release any pending-connection
 * reference held in the conn params, notify mgmt and the protocol
 * layer, delete the conn, and restore scanning/advertising state.
 *
 * This function requires the caller holds hdev->lock.
 */
void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params && params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	conn->state = BT_CLOSED;

	mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
			    status);

	hci_connect_cfm(conn, status);

	hci_conn_del(conn);

	/* Since we may have temporarily stopped the background scanning in
	 * favor of connection establishment, we should restart it.
	 */
	hci_update_background_scan(hdev);

	/* Re-enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	mgmt_reenable_advertising(hdev);
}
681
682 static void create_le_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
683 {
684 struct hci_conn *conn;
685
686 hci_dev_lock(hdev);
687
688 conn = hci_lookup_le_connect(hdev);
689
690 if (!status) {
691 hci_connect_le_scan_cleanup(conn);
692 goto done;
693 }
694
695 BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
696 status);
697
698 if (!conn)
699 goto done;
700
701 hci_le_conn_failed(conn, status);
702
703 done:
704 hci_dev_unlock(hdev);
705 }
706
/* Append an LE Create Connection command for @conn to @req, using the
 * device's scan parameters and the connection's negotiated LE
 * parameters. Also transitions the conn to BT_CONNECT and clears its
 * scanning flag.
 */
static void hci_req_add_le_create_conn(struct hci_request *req,
				       struct hci_conn *conn)
{
	struct hci_cp_le_create_conn cp;
	struct hci_dev *hdev = conn->hdev;
	u8 own_addr_type;

	memset(&cp, 0, sizeof(cp));

	/* Update random address, but set require_privacy to false so
	 * that we never connect with an non-resolvable address.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.own_address_type = own_addr_type;
	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);

	conn->state = BT_CONNECT;
	clear_bit(HCI_CONN_SCANNING, &conn->flags);
}
739
/* Append the commands that start directed advertising towards @conn's
 * peer (used for slave-role LE connections). Sets the conn state to
 * BT_CONNECT on success.
 */
static void hci_req_directed_advertising(struct hci_request *req,
					 struct hci_conn *conn)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type;
	u8 enable;

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.type = LE_ADV_DIRECT_IND;
	cp.own_address_type = own_addr_type;
	cp.direct_addr_type = conn->dst_type;
	bacpy(&cp.direct_addr, &conn->dst);
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	enable = 0x01;
	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);

	conn->state = BT_CONNECT;
}
775
/* Initiate (or continue) an LE connection to @dst.
 *
 * Depending on @role this either issues an LE Create Connection
 * (master) or starts directed advertising (slave). An existing conn in
 * the scanning phase is taken over and completed; an existing conn in
 * any other state just gets its pending security level raised.
 *
 * Returns the conn with a hold taken (unless continuing an unfinished
 * scan connect, which already holds one), or an ERR_PTR on failure.
 * Caller must hold hdev->lock.
 */
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, u8 sec_level, u16 conn_timeout,
				u8 role)
{
	struct hci_conn_params *params;
	struct hci_conn *conn, *conn_unfinished;
	struct smp_irk *irk;
	struct hci_request req;
	int err;

	/* Let's make sure that le is enabled.*/
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		if (lmp_le_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
	conn_unfinished = NULL;
	if (conn) {
		if (conn->state == BT_CONNECT &&
		    test_bit(HCI_CONN_SCANNING, &conn->flags)) {
			BT_DBG("will continue unfinished conn %pMR", dst);
			conn_unfinished = conn;
		} else {
			if (conn->pending_sec_level < sec_level)
				conn->pending_sec_level = sec_level;
			goto done;
		}
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	if (hci_lookup_le_connect(hdev))
		return ERR_PTR(-EBUSY);

	/* When given an identity address with existing identity
	 * resolving key, the connection needs to be established
	 * to a resolvable random address.
	 *
	 * Storing the resolvable random address is required here
	 * to handle connection failures. The address will later
	 * be resolved back into the original identity address
	 * from the connect request.
	 */
	irk = hci_find_irk_by_addr(hdev, dst, dst_type);
	if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
		dst = &irk->rpa;
		dst_type = ADDR_LE_DEV_RANDOM;
	}

	if (conn_unfinished) {
		conn = conn_unfinished;
		bacpy(&conn->dst, dst);
	} else {
		conn = hci_conn_add(hdev, LE_LINK, dst, role);
	}

	if (!conn)
		return ERR_PTR(-ENOMEM);

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->conn_timeout = conn_timeout;

	if (!conn_unfinished)
		conn->pending_sec_level = sec_level;

	hci_req_init(&req, hdev);

	/* Disable advertising if we're active. For master role
	 * connections most controllers will refuse to connect if
	 * advertising is enabled, and for slave role connections we
	 * anyway have to disable it in order to start directed
	 * advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		u8 enable = 0x00;
		hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			    &enable);
	}

	/* If requested to connect as slave use directed advertising */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* If we're active scanning most controllers are unable
		 * to initiate advertising. Simply reject the attempt.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&
		    hdev->le_scan_type == LE_SCAN_ACTIVE) {
			skb_queue_purge(&req.cmd_q);
			hci_conn_del(conn);
			return ERR_PTR(-EBUSY);
		}

		hci_req_directed_advertising(&req, conn);
		goto create_conn;
	}

	/* Prefer stored per-device connection parameters over defaults */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		conn->le_conn_min_interval = params->conn_min_interval;
		conn->le_conn_max_interval = params->conn_max_interval;
		conn->le_conn_latency = params->conn_latency;
		conn->le_supv_timeout = params->supervision_timeout;
	} else {
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
		conn->le_conn_latency = hdev->le_conn_latency;
		conn->le_supv_timeout = hdev->le_supv_timeout;
	}

	/* If controller is scanning, we stop it since some controllers are
	 * not able to scan and connect at the same time. Also set the
	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
	 * handler for scan disabling knows to set the correct discovery
	 * state.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
		hci_req_add_le_scan_disable(&req);
		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
	}

	hci_req_add_le_create_conn(&req, conn);

create_conn:
	err = hci_req_run(&req, create_le_conn_complete);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

done:
	/* If this is continuation of connect started by hci_connect_le_scan,
	 * it already called hci_conn_hold and calling it again would mess the
	 * counter.
	 */
	if (!conn_unfinished)
		hci_conn_hold(conn);

	return conn;
}
928
/* Completion callback for the hci_connect_le_scan() request: on error,
 * fail the pending LE connection that the request was set up for.
 */
static void hci_connect_le_scan_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode)
{
	struct hci_conn *conn;

	if (!status)
		return;

	BT_ERR("Failed to add device to auto conn whitelist: status 0x%2.2x",
	       status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}
948
949 static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
950 {
951 struct hci_conn *conn;
952
953 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
954 if (!conn)
955 return false;
956
957 if (conn->dst_type != type)
958 return false;
959
960 if (conn->state != BT_CONNECTED)
961 return false;
962
963 return true;
964 }
965
/* Mark @addr for an explicit (one-shot) connection attempt and schedule
 * a background-scan update on @req so the controller starts looking for
 * the device.
 *
 * Returns 0 on success, -EISCONN if already connected, -EIO if conn
 * params could not be allocated.
 *
 * This function requires the caller holds hdev->lock.
 */
static int hci_explicit_conn_params_set(struct hci_request *req,
					bdaddr_t *addr, u8 addr_type)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;

	if (is_connected(hdev, addr, addr_type))
		return -EISCONN;

	params = hci_conn_params_add(hdev, addr, addr_type);
	if (!params)
		return -EIO;

	/* If we created new params, or existing params were marked as disabled,
	 * mark them to be used just once to connect.
	 */
	if (params->auto_connect == HCI_AUTO_CONN_DISABLED) {
		params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
		list_del_init(&params->action);
		list_add(&params->action, &hdev->pend_le_conns);
	}

	params->explicit_connect = true;
	__hci_update_background_scan(req);

	BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type,
	       params->auto_connect);

	return 0;
}
997
998 /* This function requires the caller holds hdev->lock */
999 struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
1000 u8 dst_type, u8 sec_level,
1001 u16 conn_timeout, u8 role)
1002 {
1003 struct hci_conn *conn;
1004 struct hci_request req;
1005 int err;
1006
1007 /* Let's make sure that le is enabled.*/
1008 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
1009 if (lmp_le_capable(hdev))
1010 return ERR_PTR(-ECONNREFUSED);
1011
1012 return ERR_PTR(-EOPNOTSUPP);
1013 }
1014
1015 /* Some devices send ATT messages as soon as the physical link is
1016 * established. To be able to handle these ATT messages, the user-
1017 * space first establishes the connection and then starts the pairing
1018 * process.
1019 *
1020 * So if a hci_conn object already exists for the following connection
1021 * attempt, we simply update pending_sec_level and auth_type fields
1022 * and return the object found.
1023 */
1024 conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
1025 if (conn) {
1026 if (conn->pending_sec_level < sec_level)
1027 conn->pending_sec_level = sec_level;
1028 goto done;
1029 }
1030
1031 BT_DBG("requesting refresh of dst_addr");
1032
1033 conn = hci_conn_add(hdev, LE_LINK, dst, role);
1034 if (!conn)
1035 return ERR_PTR(-ENOMEM);
1036
1037 hci_req_init(&req, hdev);
1038
1039 if (hci_explicit_conn_params_set(&req, dst, dst_type) < 0)
1040 return ERR_PTR(-EBUSY);
1041
1042 conn->state = BT_CONNECT;
1043 set_bit(HCI_CONN_SCANNING, &conn->flags);
1044
1045 err = hci_req_run(&req, hci_connect_le_scan_complete);
1046 if (err && err != -ENODATA) {
1047 hci_conn_del(conn);
1048 return ERR_PTR(err);
1049 }
1050
1051 conn->dst_type = dst_type;
1052 conn->sec_level = BT_SECURITY_LOW;
1053 conn->pending_sec_level = sec_level;
1054 conn->conn_timeout = conn_timeout;
1055
1056 done:
1057 hci_conn_hold(conn);
1058 return conn;
1059 }
1060
/* Get or create a BR/EDR ACL connection to @dst and, if it is not
 * already connecting/connected, start the connection attempt with the
 * requested security level and authentication type.
 *
 * Returns the ACL conn with a hold taken, or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type)
{
	struct hci_conn *acl;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		if (lmp_bredr_capable(hdev))
			return ERR_PTR(-ECONNREFUSED);

		return ERR_PTR(-EOPNOTSUPP);
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}
1091
/* Establish a SCO/eSCO connection of the given @type to @dst, creating
 * or reusing the underlying ACL link first. If the ACL is still in a
 * mode change, SCO setup is deferred until the mode change completes.
 *
 * Returns the SCO conn with a hold taken, or an ERR_PTR on failure.
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
		if (!sco) {
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	/* Cross-link the ACL and SCO conns (see hci_conn_del unlink) */
	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	sco->setting = setting;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
1134
1135 /* Check link security requirement */
1136 int hci_conn_check_link_mode(struct hci_conn *conn)
1137 {
1138 BT_DBG("hcon %p", conn);
1139
1140 /* In Secure Connections Only mode, it is required that Secure
1141 * Connections is used and the link is encrypted with AES-CCM
1142 * using a P-256 authenticated combination key.
1143 */
1144 if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) {
1145 if (!hci_conn_sc_enabled(conn) ||
1146 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
1147 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
1148 return 0;
1149 }
1150
1151 if (hci_conn_ssp_enabled(conn) &&
1152 !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1153 return 0;
1154
1155 return 1;
1156 }
1157
/* Authenticate remote device.
 *
 * Returns 1 if the connection is already authenticated at a
 * sufficient level, 0 if an authentication request was issued (or is
 * already pending) and completion will arrive via an HCI event.
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	/* Never lower a security level that is already pending */
	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement*/
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	/* Only send the command once while authentication is pending */
	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
1194
/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	/* Only issue the command once while an encryption change is
	 * pending; completion arrives via an HCI event.
	 */
	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}
1208
/* Enable security.
 *
 * Raise the security of the link to at least @sec_level. Returns 1
 * when the link already satisfies the requested level, 0 when an
 * authentication and/or encryption procedure had to be started
 * (completion is signalled asynchronously via HCI events).
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	/* LE links use SMP rather than BR/EDR link keys */
	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	/* Do not start authentication while an encryption change is
	 * already in flight.
	 */
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
1278
1279 /* Check secure link requirement */
1280 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
1281 {
1282 BT_DBG("hcon %p", conn);
1283
1284 /* Accept if non-secure or higher security level is required */
1285 if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
1286 return 1;
1287
1288 /* Accept if secure or higher security level is already present */
1289 if (conn->sec_level == BT_SECURITY_HIGH ||
1290 conn->sec_level == BT_SECURITY_FIPS)
1291 return 1;
1292
1293 /* Reject not secure link */
1294 return 0;
1295 }
1296 EXPORT_SYMBOL(hci_conn_check_secure);
1297
1298 /* Switch role */
1299 int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
1300 {
1301 BT_DBG("hcon %p", conn);
1302
1303 if (role == conn->role)
1304 return 1;
1305
1306 if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
1307 struct hci_cp_switch_role cp;
1308 bacpy(&cp.bdaddr, &conn->dst);
1309 cp.role = role;
1310 hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
1311 }
1312
1313 return 0;
1314 }
1315 EXPORT_SYMBOL(hci_conn_switch_role);
1316
/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	/* Nothing to exit if the link is not in sniff mode */
	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	/* Stay in sniff mode unless power save is enabled for this
	 * connection or the caller explicitly forces active mode.
	 */
	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	/* Issue the exit-sniff command only once per mode change */
	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	/* (Re)arm the per-connection idle work if an idle timeout is
	 * configured for this device.
	 */
	if (hdev->idle_timeout > 0)
		queue_delayed_work(hdev->workqueue, &conn->idle_work,
				   msecs_to_jiffies(hdev->idle_timeout));
}
1341
1342 /* Drop all connection on the device */
1343 void hci_conn_hash_flush(struct hci_dev *hdev)
1344 {
1345 struct hci_conn_hash *h = &hdev->conn_hash;
1346 struct hci_conn *c, *n;
1347
1348 BT_DBG("hdev %s", hdev->name);
1349
1350 list_for_each_entry_safe(c, n, &h->list, list) {
1351 c->state = BT_CLOSED;
1352
1353 hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
1354 hci_conn_del(c);
1355 }
1356 }
1357
1358 /* Check pending connect attempts */
1359 void hci_conn_check_pending(struct hci_dev *hdev)
1360 {
1361 struct hci_conn *conn;
1362
1363 BT_DBG("hdev %s", hdev->name);
1364
1365 hci_dev_lock(hdev);
1366
1367 conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
1368 if (conn)
1369 hci_acl_create_connection(conn);
1370
1371 hci_dev_unlock(hdev);
1372 }
1373
1374 static u32 get_link_mode(struct hci_conn *conn)
1375 {
1376 u32 link_mode = 0;
1377
1378 if (conn->role == HCI_ROLE_MASTER)
1379 link_mode |= HCI_LM_MASTER;
1380
1381 if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
1382 link_mode |= HCI_LM_ENCRYPT;
1383
1384 if (test_bit(HCI_CONN_AUTH, &conn->flags))
1385 link_mode |= HCI_LM_AUTH;
1386
1387 if (test_bit(HCI_CONN_SECURE, &conn->flags))
1388 link_mode |= HCI_LM_SECURE;
1389
1390 if (test_bit(HCI_CONN_FIPS, &conn->flags))
1391 link_mode |= HCI_LM_FIPS;
1392
1393 return link_mode;
1394 }
1395
1396 int hci_get_conn_list(void __user *arg)
1397 {
1398 struct hci_conn *c;
1399 struct hci_conn_list_req req, *cl;
1400 struct hci_conn_info *ci;
1401 struct hci_dev *hdev;
1402 int n = 0, size, err;
1403
1404 if (copy_from_user(&req, arg, sizeof(req)))
1405 return -EFAULT;
1406
1407 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1408 return -EINVAL;
1409
1410 size = sizeof(req) + req.conn_num * sizeof(*ci);
1411
1412 cl = kmalloc(size, GFP_KERNEL);
1413 if (!cl)
1414 return -ENOMEM;
1415
1416 hdev = hci_dev_get(req.dev_id);
1417 if (!hdev) {
1418 kfree(cl);
1419 return -ENODEV;
1420 }
1421
1422 ci = cl->conn_info;
1423
1424 hci_dev_lock(hdev);
1425 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1426 bacpy(&(ci + n)->bdaddr, &c->dst);
1427 (ci + n)->handle = c->handle;
1428 (ci + n)->type = c->type;
1429 (ci + n)->out = c->out;
1430 (ci + n)->state = c->state;
1431 (ci + n)->link_mode = get_link_mode(c);
1432 if (++n >= req.conn_num)
1433 break;
1434 }
1435 hci_dev_unlock(hdev);
1436
1437 cl->dev_id = hdev->id;
1438 cl->conn_num = n;
1439 size = sizeof(req) + n * sizeof(*ci);
1440
1441 hci_dev_put(hdev);
1442
1443 err = copy_to_user(arg, cl, size);
1444 kfree(cl);
1445
1446 return err ? -EFAULT : 0;
1447 }
1448
1449 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1450 {
1451 struct hci_conn_info_req req;
1452 struct hci_conn_info ci;
1453 struct hci_conn *conn;
1454 char __user *ptr = arg + sizeof(req);
1455
1456 if (copy_from_user(&req, arg, sizeof(req)))
1457 return -EFAULT;
1458
1459 hci_dev_lock(hdev);
1460 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1461 if (conn) {
1462 bacpy(&ci.bdaddr, &conn->dst);
1463 ci.handle = conn->handle;
1464 ci.type = conn->type;
1465 ci.out = conn->out;
1466 ci.state = conn->state;
1467 ci.link_mode = get_link_mode(conn);
1468 }
1469 hci_dev_unlock(hdev);
1470
1471 if (!conn)
1472 return -ENOENT;
1473
1474 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1475 }
1476
1477 int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
1478 {
1479 struct hci_auth_info_req req;
1480 struct hci_conn *conn;
1481
1482 if (copy_from_user(&req, arg, sizeof(req)))
1483 return -EFAULT;
1484
1485 hci_dev_lock(hdev);
1486 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
1487 if (conn)
1488 req.type = conn->auth_type;
1489 hci_dev_unlock(hdev);
1490
1491 if (!conn)
1492 return -ENOENT;
1493
1494 return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
1495 }
1496
/* Allocate a new hci_chan attached to @conn. The channel holds a
 * reference on the connection (released in hci_chan_del()). Returns
 * NULL on allocation failure or when the connection is being dropped.
 */
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s hcon %p", hdev->name, conn);

	/* Once HCI_CONN_DROP is set (see hci_chan_del()), no further
	 * channels may be attached to this connection.
	 */
	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
		BT_DBG("Refusing to create new hci_chan");
		return NULL;
	}

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	/* Take a reference on the connection for the channel's lifetime */
	chan->conn = hci_conn_get(conn);
	skb_queue_head_init(&chan->data_q);
	chan->state = BT_CONNECTED;

	/* chan_list is traversed under RCU (see hci_chan_lookup_handle) */
	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}
1521
/* Unlink and free @chan, dropping its reference on the connection. */
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	/* Wait for concurrent RCU readers of chan_list to finish before
	 * the channel is freed below.
	 */
	synchronize_rcu();

	/* Prevent new hci_chan's to be created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	/* Drop the reference taken in hci_chan_create() */
	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
1541
1542 void hci_chan_list_flush(struct hci_conn *conn)
1543 {
1544 struct hci_chan *chan, *n;
1545
1546 BT_DBG("hcon %p", conn);
1547
1548 list_for_each_entry_safe(chan, n, &conn->chan_list, list)
1549 hci_chan_del(chan);
1550 }
1551
1552 static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
1553 __u16 handle)
1554 {
1555 struct hci_chan *hchan;
1556
1557 list_for_each_entry(hchan, &hcon->chan_list, list) {
1558 if (hchan->handle == handle)
1559 return hchan;
1560 }
1561
1562 return NULL;
1563 }
1564
1565 struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
1566 {
1567 struct hci_conn_hash *h = &hdev->conn_hash;
1568 struct hci_conn *hcon;
1569 struct hci_chan *hchan = NULL;
1570
1571 rcu_read_lock();
1572
1573 list_for_each_entry_rcu(hcon, &h->list, list) {
1574 hchan = __hci_chan_lookup_handle(hcon, handle);
1575 if (hchan)
1576 break;
1577 }
1578
1579 rcu_read_unlock();
1580
1581 return hchan;
1582 }
This page took 0.094873 seconds and 6 git commands to generate.