Merge tag 'urgent-slab-fix' of git://git.kernel.org/pub/scm/linux/kernel/git/device...
[deliverable/linux.git] / net / bluetooth / hci_conn.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI connection handling. */
26
27 #include <linux/export.h>
28
29 #include <net/bluetooth/bluetooth.h>
30 #include <net/bluetooth/hci_core.h>
31 #include <net/bluetooth/l2cap.h>
32
33 #include "smp.h"
34 #include "a2mp.h"
35
/* One (e)SCO negotiation attempt: allowed packet types plus the
 * maximum latency to request for that attempt.
 */
struct sco_param {
	u16 pkt_type;
	u16 max_latency;
};

/* CVSD air mode: parameter sets tried in order, one entry per connection
 * attempt (S3 first, falling back towards the basic D0 set).
 */
static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3, 0x0007 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3, 0xffff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1, 0xffff }, /* D0 */
};

/* Transparent (wideband speech) air mode: T2 tried first, then T1. */
static const struct sco_param sco_param_wideband[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3, 0x0008 }, /* T1 */
};
53
54 static void hci_le_create_connection_cancel(struct hci_conn *conn)
55 {
56 hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
57 }
58
/* Issue HCI Create Connection for an outgoing BR/EDR ACL link.  Cached
 * inquiry data (page scan mode, clock offset, device class), when fresh
 * enough, is copied into the command to speed up paging.
 */
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->link_mode = HCI_LM_MASTER;

	/* Retry counter; event handlers may re-issue on failure */
	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			/* Setting bit 15 marks the clock offset as valid */
			cp.clock_offset = ie->data.clock_offset |
					  cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		if (ie->data.ssp_mode > 0)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	/* Allow the remote to become master only if we don't insist on it */
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}
102
/* Abort an in-progress BR/EDR connection attempt.  Create Connection
 * Cancel only exists from Bluetooth 1.2 on, so do nothing for older
 * controllers.
 */
static void hci_acl_create_connection_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("hcon %p", conn);

	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}
115
116 static void hci_reject_sco(struct hci_conn *conn)
117 {
118 struct hci_cp_reject_sync_conn_req cp;
119
120 cp.reason = HCI_ERROR_REMOTE_USER_TERM;
121 bacpy(&cp.bdaddr, &conn->dst);
122
123 hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
124 }
125
/* Send HCI Disconnect for the connection with the given HCI error code
 * as reason.  The state is moved to BT_DISCONN before the command is
 * queued; the disconnect-complete event finishes the teardown.
 */
void hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}
138
/* Disconnect an AMP physical link.  The physical-link handle is derived
 * from the connection handle via HCI_PHY_HANDLE.
 */
static void hci_amp_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconn_phy_link cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_DISCONN;

	cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
		     sizeof(cp), &cp);
}
152
/* Set up a legacy SCO link (pre-eSCO controllers) on top of the ACL
 * connection identified by @handle.
 */
static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}
170
/* Set up an eSCO link on top of the ACL connection identified by
 * @handle.  Each retry (conn->attempt) walks one step further down the
 * air-mode-specific parameter table; returns false once the table is
 * exhausted, the air mode is unsupported, or the command cannot be
 * sent.  Note: cp needs no memset — every field is assigned below.
 */
bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (conn->attempt > ARRAY_SIZE(sco_param_wideband))
			return false;
		/* retrans_effort 0x02: retransmit, optimize for quality */
		cp.retrans_effort = 0x02;
		param = &sco_param_wideband[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
			return false;
		/* retrans_effort 0x01: retransmit, optimize for power */
		cp.retrans_effort = 0x01;
		param = &sco_param_cvsd[conn->attempt - 1];
		break;
	default:
		return false;
	}

	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}
215
/* Request new LE connection parameters (interval range, slave latency,
 * supervision timeout multiplier) for an established connection.
 */
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
			u16 latency, u16 to_multiplier)
{
	struct hci_cp_le_conn_update cp;
	struct hci_dev *hdev = conn->hdev;

	memset(&cp, 0, sizeof(cp));

	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}
234
/* Start LE link-layer encryption with the given long term key.  ediv
 * and rand are already little-endian as received from SMP.
 */
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16])
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.rand = rand;
	cp.ediv = ediv;
	memcpy(cp.ltk, ltk, sizeof(cp.ltk));

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}
252
/* Device _must_ be locked.
 *
 * Kick off setup of the SCO/eSCO link chained to this ACL connection
 * once the ACL setup has finished with @status.  On ACL failure the
 * pending SCO connection is failed and deleted instead.
 */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	if (!sco)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		/* Prefer eSCO when the controller supports it */
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}
273
274 static void hci_conn_disconnect(struct hci_conn *conn)
275 {
276 __u8 reason = hci_proto_disconn_ind(conn);
277
278 switch (conn->type) {
279 case AMP_LINK:
280 hci_amp_disconn(conn, reason);
281 break;
282 default:
283 hci_disconnect(conn, reason);
284 break;
285 }
286 }
287
/* Delayed work run when a connection has been idle (refcnt dropped to
 * zero) for its disconnect timeout: cancel a pending outgoing attempt,
 * reject a pending incoming SCO request, or disconnect an established
 * link, depending on the current state.
 */
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in pairing failed scenario, refcnt
	 * drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that ACL is alive if refcnt is higher then 0,
	 * otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->out) {
			/* Outgoing attempt still pending: cancel it */
			if (conn->type == ACL_LINK)
				hci_acl_create_connection_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_create_connection_cancel(conn);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			/* Incoming SCO request never accepted: reject it */
			hci_reject_sco(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		hci_conn_disconnect(conn);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}
329
/* Enter sniff mode.
 *
 * Idle-timer work: if both sides support sniff mode and the link policy
 * allows it, put an active ACL link into sniff (optionally configuring
 * sniff subrating first) to save power.
 */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle	     = cpu_to_le16(conn->handle);
		cp.max_latency	     = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}
367
/* Delayed work that confirms a deferred user-confirmation request by
 * sending User Confirmation Reply for the remote address.
 */
static void hci_conn_auto_accept(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     auto_accept_work.work);

	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}
376
/* Delayed work run when an LE connection attempt takes too long: either
 * stop directed advertising (connection as peripheral) or cancel the
 * outgoing create-connection request (connection as central).
 */
static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
		u8 enable = 0x00;
		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			     &enable);
		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		return;
	}

	hci_le_create_connection_cancel(conn);
}
400
/* Allocate and initialize a new hci_conn object of the given link type
 * towards @dst, register it in the device's connection hash and take a
 * reference on hdev.  Returns NULL on allocation failure.  The new
 * connection starts in BT_OPEN with refcnt 0.
 */
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	/* 0xff marks remote auth requirement and key type as unknown */
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	/* Per-transport default packet types / source address */
	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	hci_conn_init_sysfs(conn);

	return conn;
}
468
/* Tear down and free a hci_conn: cancel its delayed work, return any
 * unacked packet credits to the device, unlink a chained SCO/ACL peer,
 * flush channels and queues, remove it from the hash and drop the
 * references taken in hci_conn_add.  Always returns 0.
 */
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work_sync(&conn->le_conn_timeout);

		/* LE traffic may share the ACL credit pool */
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		/* SCO/eSCO: detach from and release the parent ACL */
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_drop(acl);
		}
	}

	hci_chan_list_flush(conn);

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	skb_queue_purge(&conn->data_q);

	hci_conn_del_sysfs(conn);

	hci_dev_put(hdev);

	/* Releases the final reference; may free conn */
	hci_conn_put(conn);

	return 0;
}
520
521 struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
522 {
523 int use_src = bacmp(src, BDADDR_ANY);
524 struct hci_dev *hdev = NULL, *d;
525
526 BT_DBG("%pMR -> %pMR", src, dst);
527
528 read_lock(&hci_dev_list_lock);
529
530 list_for_each_entry(d, &hci_dev_list, list) {
531 if (!test_bit(HCI_UP, &d->flags) ||
532 test_bit(HCI_RAW, &d->flags) ||
533 test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
534 d->dev_type != HCI_BREDR)
535 continue;
536
537 /* Simple routing:
538 * No source address - find interface with bdaddr != dst
539 * Source address - find interface with bdaddr == src
540 */
541
542 if (use_src) {
543 if (!bacmp(&d->bdaddr, src)) {
544 hdev = d; break;
545 }
546 } else {
547 if (bacmp(&d->bdaddr, dst)) {
548 hdev = d; break;
549 }
550 }
551 }
552
553 if (hdev)
554 hdev = hci_dev_hold(hdev);
555
556 read_unlock(&hci_dev_list_lock);
557 return hdev;
558 }
559 EXPORT_SYMBOL(hci_get_route);
560
/* This function requires the caller holds hdev->lock.
 *
 * Report a failed LE connection attempt to mgmt and the protocol layer,
 * delete the connection and restore background scanning/advertising
 * that may have been paused for the attempt.
 */
void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;

	conn->state = BT_CLOSED;

	mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
			    status);

	hci_proto_connect_cfm(conn, status);

	hci_conn_del(conn);

	/* Since we may have temporarily stopped the background scanning in
	 * favor of connection establishment, we should restart it.
	 */
	hci_update_background_scan(hdev);

	/* Re-enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	mgmt_reenable_advertising(hdev);
}
585
/* Completion callback for the LE create-connection request: on error,
 * look up the connection still in BT_CONNECT and fail it.  Success is
 * handled by the connection-complete event instead.
 */
static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_conn *conn;

	if (status == 0)
		return;

	BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
	       status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn)
		goto done;

	hci_le_conn_failed(conn, status);

done:
	hci_dev_unlock(hdev);
}
607
/* Append an LE Create Connection command for @conn to the request.
 * Silently does nothing if a suitable own address cannot be set up;
 * on success the connection is moved to BT_CONNECT.
 */
static void hci_req_add_le_create_conn(struct hci_request *req,
				       struct hci_conn *conn)
{
	struct hci_cp_le_create_conn cp;
	struct hci_dev *hdev = conn->hdev;
	u8 own_addr_type;

	memset(&cp, 0, sizeof(cp));

	/* Update random address, but set require_privacy to false so
	 * that we never connect with an unresolvable address.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.own_address_type = own_addr_type;
	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	cp.supervision_timeout = cpu_to_le16(0x002a);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);

	conn->state = BT_CONNECT;
}
638
/* Append commands that switch advertising to directed (LE_ADV_DIRECT_IND)
 * towards conn->dst, so the remote can initiate the connection to us.
 * Advertising is disabled, reconfigured and re-enabled within the same
 * request; on success the connection is moved to BT_CONNECT.
 */
static void hci_req_directed_advertising(struct hci_request *req,
					 struct hci_conn *conn)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type;
	u8 enable;

	enable = 0x00;
	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);

	/* Clear the HCI_ADVERTISING bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_ADVERTISING, &hdev->dev_flags);

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.type = LE_ADV_DIRECT_IND;
	cp.own_address_type = own_addr_type;
	cp.direct_addr_type = conn->dst_type;
	bacpy(&cp.direct_addr, &conn->dst);
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	enable = 0x01;
	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);

	conn->state = BT_CONNECT;
}
677
/* Establish (or reuse) an LE connection to @dst.  Returns a referenced
 * hci_conn on success (caller must hci_conn_drop), -EBUSY while another
 * LE connection attempt is running, -ENOMEM on allocation failure, or
 * the error from running the HCI request.  When the local device is
 * advertising, directed advertising is used instead of initiating.
 */
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, u8 sec_level, u8 auth_type)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	struct hci_request req;
	int err;

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
	if (conn) {
		conn->pending_sec_level = sec_level;
		conn->auth_type = auth_type;
		goto done;
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		return ERR_PTR(-EBUSY);

	/* When given an identity address with existing identity
	 * resolving key, the connection needs to be established
	 * to a resolvable random address.
	 *
	 * This uses the cached random resolvable address from
	 * a previous scan. When no cached address is available,
	 * try connecting to the identity address instead.
	 *
	 * Storing the resolvable random address is required here
	 * to handle connection failures. The address will later
	 * be resolved back into the original identity address
	 * from the connect request.
	 */
	irk = hci_find_irk_by_addr(hdev, dst, dst_type);
	if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
		dst = &irk->rpa;
		dst_type = ADDR_LE_DEV_RANDOM;
	}

	conn = hci_conn_add(hdev, LE_LINK, dst);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->auth_type = auth_type;

	hci_req_init(&req, hdev);

	/* If we're advertising, let the remote connect to us instead */
	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) {
		hci_req_directed_advertising(&req, conn);
		goto create_conn;
	}

	conn->out = true;
	conn->link_mode |= HCI_LM_MASTER;

	/* Use per-device connection parameters when stored */
	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		conn->le_conn_min_interval = params->conn_min_interval;
		conn->le_conn_max_interval = params->conn_max_interval;
	} else {
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
	}

	/* If controller is scanning, we stop it since some controllers are
	 * not able to scan and connect at the same time. Also set the
	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
	 * handler for scan disabling knows to set the correct discovery
	 * state.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
		hci_req_add_le_scan_disable(&req);
		set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
	}

	hci_req_add_le_create_conn(&req, conn);

create_conn:
	err = hci_req_run(&req, create_le_conn_complete);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

done:
	hci_conn_hold(conn);
	return conn;
}
781
/* Establish (or reuse) a BR/EDR ACL connection to @dst.  Returns a
 * referenced hci_conn (caller must hci_conn_drop), -ENOTSUPP when
 * BR/EDR is disabled on the device, or -ENOMEM on allocation failure.
 * The connection attempt is only started for fresh (OPEN/CLOSED)
 * connection objects.
 */
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type)
{
	struct hci_conn *acl;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return ERR_PTR(-ENOTSUPP);

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}
808
/* Establish a SCO/eSCO connection to @dst, first ensuring the
 * underlying ACL link exists.  The SCO and ACL connections are chained
 * via their ->link pointers.  SCO setup is started immediately when the
 * ACL is connected, or deferred until a pending mode change completes.
 * Returns a referenced SCO hci_conn or an ERR_PTR.
 */
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst);
		if (!sco) {
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	sco->setting = setting;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		/* SCO needs the ACL in active (non-sniff) mode */
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
851
852 /* Check link security requirement */
853 int hci_conn_check_link_mode(struct hci_conn *conn)
854 {
855 BT_DBG("hcon %p", conn);
856
857 /* In Secure Connections Only mode, it is required that Secure
858 * Connections is used and the link is encrypted with AES-CCM
859 * using a P-256 authenticated combination key.
860 */
861 if (test_bit(HCI_SC_ONLY, &conn->hdev->flags)) {
862 if (!hci_conn_sc_enabled(conn) ||
863 !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
864 conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
865 return 0;
866 }
867
868 if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
869 return 0;
870
871 return 1;
872 }
873
/* Authenticate remote device.
 *
 * Returns 1 when the link is already authenticated at a sufficient
 * security level, 0 when an authentication request has been (or already
 * was) issued and the caller must wait for completion.
 */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	/* Make sure we preserve an existing MITM requirement*/
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (conn->link_mode & HCI_LM_ENCRYPT)
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}
910
/* Encrypt the link.
 *
 * Issues Set Connection Encryption unless an encryption change is
 * already pending for this connection.
 */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}
924
/* Enable security.
 *
 * Raise the connection's security to @sec_level, authenticating and/or
 * encrypting as needed.  Returns 1 when the requirement is already met,
 * 0 when an authentication or encryption procedure has been started
 * (caller waits for the corresponding event).  LE links delegate to SMP.
 */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!(conn->link_mode & HCI_LM_AUTH))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (conn->link_mode & HCI_LM_ENCRYPT)
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);
990
991 /* Check secure link requirement */
992 int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
993 {
994 BT_DBG("hcon %p", conn);
995
996 /* Accept if non-secure or higher security level is required */
997 if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
998 return 1;
999
1000 /* Accept if secure or higher security level is already present */
1001 if (conn->sec_level == BT_SECURITY_HIGH ||
1002 conn->sec_level == BT_SECURITY_FIPS)
1003 return 1;
1004
1005 /* Reject not secure link */
1006 return 0;
1007 }
1008 EXPORT_SYMBOL(hci_conn_check_secure);
1009
/* Change link key.
 *
 * Requests a new link key for the connection unless an authentication
 * procedure is already pending.  Always returns 0.
 */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
			     sizeof(cp), &cp);
	}

	return 0;
}
1024
/* Switch role.
 *
 * Request a master/slave role switch (@role 0x00 = master).  Returns 1
 * when we already hold the requested master role, 0 when the switch
 * command has been issued or a switch is already pending.
 */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("hcon %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);
1043
/* Enter active mode.
 *
 * Exit sniff mode if necessary (respecting the power-save flag unless
 * @force_active is set) and re-arm the idle timer that will put the
 * link back into sniff after hdev->idle_timeout.
 */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		queue_delayed_work(hdev->workqueue, &conn->idle_work,
				   msecs_to_jiffies(hdev->idle_timeout));
}
1071
/* Drop all connections on the device.
 *
 * Marks every connection closed, notifies the protocol layer with a
 * local-host-terminated reason and deletes each connection.
 */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	/* _safe variant: hci_conn_del unlinks c from the list */
	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(c);
	}
}
1087
/* Check pending connect attempts.
 *
 * Starts the next queued outgoing ACL connection (state BT_CONNECT2),
 * if any — typically called once the controller becomes free again.
 */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_create_connection(conn);

	hci_dev_unlock(hdev);
}
1103
1104 int hci_get_conn_list(void __user *arg)
1105 {
1106 struct hci_conn *c;
1107 struct hci_conn_list_req req, *cl;
1108 struct hci_conn_info *ci;
1109 struct hci_dev *hdev;
1110 int n = 0, size, err;
1111
1112 if (copy_from_user(&req, arg, sizeof(req)))
1113 return -EFAULT;
1114
1115 if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
1116 return -EINVAL;
1117
1118 size = sizeof(req) + req.conn_num * sizeof(*ci);
1119
1120 cl = kmalloc(size, GFP_KERNEL);
1121 if (!cl)
1122 return -ENOMEM;
1123
1124 hdev = hci_dev_get(req.dev_id);
1125 if (!hdev) {
1126 kfree(cl);
1127 return -ENODEV;
1128 }
1129
1130 ci = cl->conn_info;
1131
1132 hci_dev_lock(hdev);
1133 list_for_each_entry(c, &hdev->conn_hash.list, list) {
1134 bacpy(&(ci + n)->bdaddr, &c->dst);
1135 (ci + n)->handle = c->handle;
1136 (ci + n)->type = c->type;
1137 (ci + n)->out = c->out;
1138 (ci + n)->state = c->state;
1139 (ci + n)->link_mode = c->link_mode;
1140 if (++n >= req.conn_num)
1141 break;
1142 }
1143 hci_dev_unlock(hdev);
1144
1145 cl->dev_id = hdev->id;
1146 cl->conn_num = n;
1147 size = sizeof(req) + n * sizeof(*ci);
1148
1149 hci_dev_put(hdev);
1150
1151 err = copy_to_user(arg, cl, size);
1152 kfree(cl);
1153
1154 return err ? -EFAULT : 0;
1155 }
1156
1157 int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
1158 {
1159 struct hci_conn_info_req req;
1160 struct hci_conn_info ci;
1161 struct hci_conn *conn;
1162 char __user *ptr = arg + sizeof(req);
1163
1164 if (copy_from_user(&req, arg, sizeof(req)))
1165 return -EFAULT;
1166
1167 hci_dev_lock(hdev);
1168 conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
1169 if (conn) {
1170 bacpy(&ci.bdaddr, &conn->dst);
1171 ci.handle = conn->handle;
1172 ci.type = conn->type;
1173 ci.out = conn->out;
1174 ci.state = conn->state;
1175 ci.link_mode = conn->link_mode;
1176 }
1177 hci_dev_unlock(hdev);
1178
1179 if (!conn)
1180 return -ENOENT;
1181
1182 return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
1183 }
1184
/* ioctl(HCIGETAUTHINFO): report the auth_type of the ACL connection to
 * the requested address.  The request struct is reused as the reply, so
 * it is fully initialized by copy_from_user.  Returns 0, -ENOENT when
 * no such connection exists, or -EFAULT.
 */
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}
1204
/* Allocate a new logical channel on @conn and add it to the
 * connection's channel list (RCU).  Returns NULL on allocation
 * failure.  Note: does not take a reference on conn here; hci_chan_del
 * drops one via hci_conn_drop, so callers pair this with hci_conn_hold
 * — TODO confirm against callers.
 */
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s hcon %p", hdev->name, conn);

	chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->conn = conn;
	skb_queue_head_init(&chan->data_q);
	chan->state = BT_CONNECTED;

	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}
1224
/* Remove a logical channel from its connection, wait for concurrent
 * RCU readers to finish, drop the connection reference and free the
 * channel together with its queued data.
 */
void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	/* Make sure no RCU reader still sees chan before freeing it */
	synchronize_rcu();

	hci_conn_drop(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}
1241
/* Delete every logical channel belonging to @conn.  Uses the _safe
 * iterator because hci_chan_del unlinks the entry being visited.
 */
void hci_chan_list_flush(struct hci_conn *conn)
{
	struct hci_chan *chan, *n;

	BT_DBG("hcon %p", conn);

	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
		hci_chan_del(chan);
}
1251
/* Find the channel with @handle on one connection, or NULL.  Caller is
 * responsible for the required locking (see hci_chan_lookup_handle).
 */
static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
						 __u16 handle)
{
	struct hci_chan *hchan;

	list_for_each_entry(hchan, &hcon->chan_list, list) {
		if (hchan->handle == handle)
			return hchan;
	}

	return NULL;
}
1264
/* Find the channel with @handle across all connections of the device,
 * or NULL.  Walks the connection hash under rcu_read_lock.
 */
struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *hcon;
	struct hci_chan *hchan = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(hcon, &h->list, list) {
		hchan = __hci_chan_lookup_handle(hcon, handle);
		if (hchan)
			break;
	}

	rcu_read_unlock();

	return hchan;
}
This page took 0.057936 seconds and 5 git commands to generate.