net/bluetooth/hci_conn.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/export.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/a2mp.h>
#include <net/bluetooth/smp.h>

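/* Queue an HCI LE Create Connection command for an outgoing LE link, using
 * the default scan and connection interval parameters, marking the local
 * side as master and starting at the low security level.
 */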
static void hci_le_create_connection(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_create_conn cp;

        conn->state = BT_CONNECT;
        conn->out = true;
        conn->link_mode |= HCI_LM_MASTER;
        conn->sec_level = BT_SECURITY_LOW;

        memset(&cp, 0, sizeof(cp));
        cp.scan_interval = __constant_cpu_to_le16(0x0060);
        cp.scan_window = __constant_cpu_to_le16(0x0030);
        bacpy(&cp.peer_addr, &conn->dst);
        cp.peer_addr_type = conn->dst_type;
        cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
        cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
        cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
        cp.min_ce_len = __constant_cpu_to_le16(0x0000);
        cp.max_ce_len = __constant_cpu_to_le16(0x0000);

        hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_le_create_connection_cancel(struct hci_conn *conn)
{
        hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

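/* Issue an HCI Create Connection request for an outgoing ACL link.  Page
 * scan parameters and the clock offset are seeded from the inquiry cache
 * when a sufficiently recent entry for the peer exists, and role switch is
 * allowed when the controller supports it and we are not required to stay
 * master.
 */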
static void hci_acl_create_connection(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct inquiry_entry *ie;
        struct hci_cp_create_conn cp;

        BT_DBG("hcon %p", conn);

        conn->state = BT_CONNECT;
        conn->out = true;

        conn->link_mode = HCI_LM_MASTER;

        conn->attempt++;

        conn->link_policy = hdev->link_policy;

        memset(&cp, 0, sizeof(cp));
        bacpy(&cp.bdaddr, &conn->dst);
        cp.pscan_rep_mode = 0x02;

        ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
        if (ie) {
                if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
                        cp.pscan_rep_mode = ie->data.pscan_rep_mode;
                        cp.pscan_mode = ie->data.pscan_mode;
                        cp.clock_offset = ie->data.clock_offset |
                                          __constant_cpu_to_le16(0x8000);
                }

                memcpy(conn->dev_class, ie->data.dev_class, 3);
                if (ie->data.ssp_mode > 0)
                        set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
        }

        cp.pkt_type = cpu_to_le16(conn->pkt_type);
        if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
                cp.role_switch = 0x01;
        else
                cp.role_switch = 0x00;

        hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_acl_create_connection_cancel(struct hci_conn *conn)
{
        struct hci_cp_create_conn_cancel cp;

        BT_DBG("hcon %p", conn);

        if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        bacpy(&cp.bdaddr, &conn->dst);
        hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

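/* Mark the connection as disconnecting and send an HCI Disconnect command
 * with the given reason code.
 */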
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
        struct hci_cp_disconnect cp;

        BT_DBG("hcon %p", conn);

        conn->state = BT_DISCONN;

        cp.handle = cpu_to_le16(conn->handle);
        cp.reason = reason;
        hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_add_sco cp;

        BT_DBG("hcon %p", conn);

        conn->state = BT_CONNECT;
        conn->out = true;

        conn->attempt++;

        cp.handle = cpu_to_le16(handle);
        cp.pkt_type = cpu_to_le16(conn->pkt_type);

        hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

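/* Request an eSCO (synchronous) connection on top of the ACL link
 * identified by @handle, using 64 kbit/s in both directions, the device's
 * current voice setting and no constraint on latency or retransmission
 * effort.
 */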
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_setup_sync_conn cp;

        BT_DBG("hcon %p", conn);

        conn->state = BT_CONNECT;
        conn->out = true;

        conn->attempt++;

        cp.handle = cpu_to_le16(handle);
        cp.pkt_type = cpu_to_le16(conn->pkt_type);

        cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
        cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
        cp.max_latency = __constant_cpu_to_le16(0xffff);
        cp.voice_setting = cpu_to_le16(hdev->voice_setting);
        cp.retrans_effort = 0xff;

        hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
                        u16 latency, u16 to_multiplier)
{
        struct hci_cp_le_conn_update cp;
        struct hci_dev *hdev = conn->hdev;

        memset(&cp, 0, sizeof(cp));

        cp.handle = cpu_to_le16(conn->handle);
        cp.conn_interval_min = cpu_to_le16(min);
        cp.conn_interval_max = cpu_to_le16(max);
        cp.conn_latency = cpu_to_le16(latency);
        cp.supervision_timeout = cpu_to_le16(to_multiplier);
        cp.min_ce_len = __constant_cpu_to_le16(0x0001);
        cp.max_ce_len = __constant_cpu_to_le16(0x0001);

        hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}

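/* Start (or refresh) encryption on an LE link by handing the controller
 * the long term key together with the EDIV and random value that identify
 * it.
 */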
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
                      __u8 ltk[16])
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_cp_le_start_enc cp;

        BT_DBG("hcon %p", conn);

        memset(&cp, 0, sizeof(cp));

        cp.handle = cpu_to_le16(conn->handle);
        memcpy(cp.ltk, ltk, sizeof(cp.ltk));
        cp.ediv = ediv;
        memcpy(cp.rand, rand, sizeof(cp.rand));

        hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
        struct hci_conn *sco = conn->link;

        if (!sco)
                return;

        BT_DBG("hcon %p", conn);

        if (!status) {
                if (lmp_esco_capable(conn->hdev))
                        hci_setup_sync(sco, conn->handle);
                else
                        hci_add_sco(sco, conn->handle);
        } else {
                hci_proto_connect_cfm(sco, status);
                hci_conn_del(sco);
        }
}

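/* Delayed work run when a connection's disconnect timeout expires.  If
 * nobody holds a reference any more, cancel a pending connection attempt
 * or disconnect an established link, depending on the current state.
 */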
static void hci_conn_timeout(struct work_struct *work)
{
        struct hci_conn *conn = container_of(work, struct hci_conn,
                                             disc_work.work);
        __u8 reason;

        BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

        if (atomic_read(&conn->refcnt))
                return;

        switch (conn->state) {
        case BT_CONNECT:
        case BT_CONNECT2:
                if (conn->out) {
                        if (conn->type == ACL_LINK)
                                hci_acl_create_connection_cancel(conn);
                        else if (conn->type == LE_LINK)
                                hci_le_create_connection_cancel(conn);
                }
                break;
        case BT_CONFIG:
        case BT_CONNECTED:
                reason = hci_proto_disconn_ind(conn);
                hci_acl_disconn(conn, reason);
                break;
        default:
                conn->state = BT_CLOSED;
                break;
        }
}

/* Enter sniff mode */
static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("hcon %p mode %d", conn, conn->mode);

        if (test_bit(HCI_RAW, &hdev->flags))
                return;

        if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
                return;

        if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
                return;

        if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
                struct hci_cp_sniff_subrate cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.max_latency = __constant_cpu_to_le16(0);
                cp.min_remote_timeout = __constant_cpu_to_le16(0);
                cp.min_local_timeout = __constant_cpu_to_le16(0);
                hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
        }

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
                struct hci_cp_sniff_mode cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
                cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
                cp.attempt = __constant_cpu_to_le16(4);
                cp.timeout = __constant_cpu_to_le16(1);
                hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
        }
}

static void hci_conn_idle(unsigned long arg)
{
        struct hci_conn *conn = (void *) arg;

        BT_DBG("hcon %p mode %d", conn, conn->mode);

        hci_conn_enter_sniff_mode(conn);
}

static void hci_conn_auto_accept(unsigned long arg)
{
        struct hci_conn *conn = (void *) arg;
        struct hci_dev *hdev = conn->hdev;

        hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
                     &conn->dst);
}

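/* Allocate and initialise a new hci_conn of the given type for the given
 * destination address, set up its packet types, timers and delayed work,
 * and add it to the device's connection hash.  Returns NULL on allocation
 * failure.
 */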
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
        struct hci_conn *conn;

        BT_DBG("%s dst %s", hdev->name, batostr(dst));

        conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
        if (!conn)
                return NULL;

        bacpy(&conn->dst, dst);
        conn->hdev = hdev;
        conn->type = type;
        conn->mode = HCI_CM_ACTIVE;
        conn->state = BT_OPEN;
        conn->auth_type = HCI_AT_GENERAL_BONDING;
        conn->io_capability = hdev->io_capability;
        conn->remote_auth = 0xff;
        conn->key_type = 0xff;

        set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
        conn->disc_timeout = HCI_DISCONN_TIMEOUT;

        switch (type) {
        case ACL_LINK:
                conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
                break;
        case SCO_LINK:
                if (lmp_esco_capable(hdev))
                        conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
                                         (hdev->esco_type & EDR_ESCO_MASK);
                else
                        conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
                break;
        case ESCO_LINK:
                conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
                break;
        }

        skb_queue_head_init(&conn->data_q);

        INIT_LIST_HEAD(&conn->chan_list);

        INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
        setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
        setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
                    (unsigned long) conn);

        atomic_set(&conn->refcnt, 0);

        hci_dev_hold(hdev);

        hci_conn_hash_add(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

        atomic_set(&conn->devref, 0);

        hci_conn_init_sysfs(conn);

        return conn;
}

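/* Tear down a connection: stop its timers and delayed work, unlink any
 * attached SCO/ACL peer, return unacknowledged packet credits to the
 * controller counters, flush its channels and remove it from the
 * connection hash.
 */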
int hci_conn_del(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

        del_timer(&conn->idle_timer);

        cancel_delayed_work_sync(&conn->disc_work);

        del_timer(&conn->auto_accept_timer);

        if (conn->type == ACL_LINK) {
                struct hci_conn *sco = conn->link;
                if (sco)
                        sco->link = NULL;

                /* Unacked frames */
                hdev->acl_cnt += conn->sent;
        } else if (conn->type == LE_LINK) {
                if (hdev->le_pkts)
                        hdev->le_cnt += conn->sent;
                else
                        hdev->acl_cnt += conn->sent;
        } else {
                struct hci_conn *acl = conn->link;
                if (acl) {
                        acl->link = NULL;
                        hci_conn_put(acl);
                }
        }

        hci_chan_list_flush(conn);

        if (conn->amp_mgr)
                amp_mgr_put(conn->amp_mgr);

        hci_conn_hash_del(hdev, conn);
        if (hdev->notify)
                hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

        skb_queue_purge(&conn->data_q);

        hci_conn_put_device(conn);

        hci_dev_put(hdev);

        if (conn->handle == 0)
                kfree(conn);

        return 0;
}

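/* Pick the local BR/EDR controller to use for a connection to @dst.  If a
 * specific source address is given, the controller with that address is
 * chosen; otherwise the first usable controller whose address differs from
 * the destination is used.  A reference is taken on the returned device.
 */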
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
        int use_src = bacmp(src, BDADDR_ANY);
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%s -> %s", batostr(src), batostr(dst));

        read_lock(&hci_dev_list_lock);

        list_for_each_entry(d, &hci_dev_list, list) {
                if (!test_bit(HCI_UP, &d->flags) ||
                    test_bit(HCI_RAW, &d->flags) ||
                    d->dev_type != HCI_BREDR)
                        continue;

                /* Simple routing:
                 *   No source address - find interface with bdaddr != dst
                 *   Source address - find interface with bdaddr == src
                 */

                if (use_src) {
                        if (!bacmp(&d->bdaddr, src)) {
                                hdev = d; break;
                        }
                } else {
                        if (bacmp(&d->bdaddr, dst)) {
                                hdev = d; break;
                        }
                }
        }

        if (hdev)
                hdev = hci_dev_hold(hdev);

        read_unlock(&hci_dev_list_lock);
        return hdev;
}
EXPORT_SYMBOL(hci_get_route);

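/* Look up or create an LE connection to @dst.  Only one outgoing LE
 * connection attempt may be in progress at a time, so -EBUSY is returned
 * while another LE connection is still being established.
 */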
static struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
                                       u8 dst_type, u8 sec_level, u8 auth_type)
{
        struct hci_conn *le;

        le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
        if (!le) {
                le = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
                if (le)
                        return ERR_PTR(-EBUSY);

                le = hci_conn_add(hdev, LE_LINK, dst);
                if (!le)
                        return ERR_PTR(-ENOMEM);

                le->dst_type = bdaddr_to_le(dst_type);
                hci_le_create_connection(le);
        }

        le->pending_sec_level = sec_level;
        le->auth_type = auth_type;

        hci_conn_hold(le);

        return le;
}

static struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
                                        u8 sec_level, u8 auth_type)
{
        struct hci_conn *acl;

        acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
        if (!acl) {
                acl = hci_conn_add(hdev, ACL_LINK, dst);
                if (!acl)
                        return ERR_PTR(-ENOMEM);
        }

        hci_conn_hold(acl);

        if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
                acl->sec_level = BT_SECURITY_LOW;
                acl->pending_sec_level = sec_level;
                acl->auth_type = auth_type;
                hci_acl_create_connection(acl);
        }

        return acl;
}

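/* Set up a SCO or eSCO connection to @dst.  The underlying ACL link is
 * created (or reused) first; the synchronous link itself is established
 * once the ACL link is connected, possibly deferred until a pending mode
 * change has completed.
 */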
static struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type,
                                        bdaddr_t *dst, u8 sec_level,
                                        u8 auth_type)
{
        struct hci_conn *acl;
        struct hci_conn *sco;

        acl = hci_connect_acl(hdev, dst, sec_level, auth_type);
        if (IS_ERR(acl))
                return acl;

        sco = hci_conn_hash_lookup_ba(hdev, type, dst);
        if (!sco) {
                sco = hci_conn_add(hdev, type, dst);
                if (!sco) {
                        hci_conn_put(acl);
                        return ERR_PTR(-ENOMEM);
                }
        }

        acl->link = sco;
        sco->link = acl;

        hci_conn_hold(sco);

        if (acl->state == BT_CONNECTED &&
            (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
                set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
                hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

                if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
                        /* defer SCO setup until mode change completed */
                        set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
                        return sco;
                }

                hci_sco_setup(acl, 0x00);
        }

        return sco;
}

/* Create SCO, ACL or LE connection. */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
                             __u8 dst_type, __u8 sec_level, __u8 auth_type)
{
        BT_DBG("%s dst %s type 0x%x", hdev->name, batostr(dst), type);

        switch (type) {
        case LE_LINK:
                return hci_connect_le(hdev, dst, dst_type, sec_level, auth_type);
        case ACL_LINK:
                return hci_connect_acl(hdev, dst, sec_level, auth_type);
        case SCO_LINK:
        case ESCO_LINK:
                return hci_connect_sco(hdev, type, dst, sec_level, auth_type);
        }

        return ERR_PTR(-EINVAL);
}

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
        BT_DBG("hcon %p", conn);

        if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
                return 0;

        return 1;
}

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
        BT_DBG("hcon %p", conn);

        if (conn->pending_sec_level > sec_level)
                sec_level = conn->pending_sec_level;

        if (sec_level > conn->sec_level)
                conn->pending_sec_level = sec_level;
        else if (conn->link_mode & HCI_LM_AUTH)
                return 1;

        /* Make sure we preserve an existing MITM requirement */
        auth_type |= (conn->auth_type & 0x01);

        conn->auth_type = auth_type;

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
                struct hci_cp_auth_requested cp;

                /* encrypt must be pending if auth is also pending */
                set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
                             sizeof(cp), &cp);
                if (conn->key_type != 0xff)
                        set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
        }

        return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
        BT_DBG("hcon %p", conn);

        if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
                struct hci_cp_set_conn_encrypt cp;
                cp.handle = cpu_to_le16(conn->handle);
                cp.encrypt = 0x01;
                hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
                             &cp);
        }
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
        BT_DBG("hcon %p", conn);

        if (conn->type == LE_LINK)
                return smp_conn_security(conn, sec_level);

        /* For SDP we don't need the link key. */
        if (sec_level == BT_SECURITY_SDP)
                return 1;

        /* For non 2.1 devices and low security level we don't need the link
           key. */
        if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
                return 1;

        /* For other security levels we need the link key. */
        if (!(conn->link_mode & HCI_LM_AUTH))
                goto auth;

        /* An authenticated combination key has sufficient security for any
           security level. */
        if (conn->key_type == HCI_LK_AUTH_COMBINATION)
                goto encrypt;

        /* An unauthenticated combination key has sufficient security for
           security levels 1 and 2. */
        if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
            (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
                goto encrypt;

        /* A combination key always has sufficient security for security
           levels 1 and 2.  A high security level requires that the
           combination key was generated using the maximum PIN code length
           (16).  For pre 2.1 units. */
        if (conn->key_type == HCI_LK_COMBINATION &&
            (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16))
                goto encrypt;

auth:
        if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
                return 0;

        if (!hci_conn_auth(conn, sec_level, auth_type))
                return 0;

encrypt:
        if (conn->link_mode & HCI_LM_ENCRYPT)
                return 1;

        hci_conn_encrypt(conn);
        return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
        BT_DBG("hcon %p", conn);

        if (sec_level != BT_SECURITY_HIGH)
                return 1; /* Accept if non-secure is required */

        if (conn->sec_level == BT_SECURITY_HIGH)
                return 1;

        return 0; /* Reject non-secure link */
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
        BT_DBG("hcon %p", conn);

        if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
                struct hci_cp_change_conn_link_key cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
                             sizeof(cp), &cp);
        }

        return 0;
}

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
        BT_DBG("hcon %p", conn);

        if (!role && conn->link_mode & HCI_LM_MASTER)
                return 1;

        if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
                struct hci_cp_switch_role cp;
                bacpy(&cp.bdaddr, &conn->dst);
                cp.role = role;
                hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
        }

        return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("hcon %p mode %d", conn, conn->mode);

        if (test_bit(HCI_RAW, &hdev->flags))
                return;

        if (conn->mode != HCI_CM_SNIFF)
                goto timer;

        if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
                goto timer;

        if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
                struct hci_cp_exit_sniff_mode cp;
                cp.handle = cpu_to_le16(conn->handle);
                hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
        }

timer:
        if (hdev->idle_timeout > 0)
                mod_timer(&conn->idle_timer,
                          jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *c, *n;

        BT_DBG("hdev %s", hdev->name);

        list_for_each_entry_safe(c, n, &h->list, list) {
                c->state = BT_CLOSED;

                hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
                hci_conn_del(c);
        }
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
        struct hci_conn *conn;

        BT_DBG("hdev %s", hdev->name);

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
        if (conn)
                hci_acl_create_connection(conn);

        hci_dev_unlock(hdev);
}

void hci_conn_hold_device(struct hci_conn *conn)
{
        atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

void hci_conn_put_device(struct hci_conn *conn)
{
        if (atomic_dec_and_test(&conn->devref))
                hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);

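/* Copy a snapshot of up to req.conn_num of the device's active connections
 * back to the userspace buffer at @arg (used by the HCIGETCONNLIST ioctl).
 */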
int hci_get_conn_list(void __user *arg)
{
        struct hci_conn *c;
        struct hci_conn_list_req req, *cl;
        struct hci_conn_info *ci;
        struct hci_dev *hdev;
        int n = 0, size, err;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
                return -EINVAL;

        size = sizeof(req) + req.conn_num * sizeof(*ci);

        cl = kmalloc(size, GFP_KERNEL);
        if (!cl)
                return -ENOMEM;

        hdev = hci_dev_get(req.dev_id);
        if (!hdev) {
                kfree(cl);
                return -ENODEV;
        }

        ci = cl->conn_info;

        hci_dev_lock(hdev);
        list_for_each_entry(c, &hdev->conn_hash.list, list) {
                bacpy(&(ci + n)->bdaddr, &c->dst);
                (ci + n)->handle = c->handle;
                (ci + n)->type = c->type;
                (ci + n)->out = c->out;
                (ci + n)->state = c->state;
                (ci + n)->link_mode = c->link_mode;
                if (++n >= req.conn_num)
                        break;
        }
        hci_dev_unlock(hdev);

        cl->dev_id = hdev->id;
        cl->conn_num = n;
        size = sizeof(req) + n * sizeof(*ci);

        hci_dev_put(hdev);

        err = copy_to_user(arg, cl, size);
        kfree(cl);

        return err ? -EFAULT : 0;
}

int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
        struct hci_conn_info_req req;
        struct hci_conn_info ci;
        struct hci_conn *conn;
        char __user *ptr = arg + sizeof(req);

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
        if (conn) {
                bacpy(&ci.bdaddr, &conn->dst);
                ci.handle = conn->handle;
                ci.type = conn->type;
                ci.out = conn->out;
                ci.state = conn->state;
                ci.link_mode = conn->link_mode;
        }
        hci_dev_unlock(hdev);

        if (!conn)
                return -ENOENT;

        return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
        struct hci_auth_info_req req;
        struct hci_conn *conn;

        if (copy_from_user(&req, arg, sizeof(req)))
                return -EFAULT;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
        if (conn)
                req.type = conn->auth_type;
        hci_dev_unlock(hdev);

        if (!conn)
                return -ENOENT;

        return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}

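/* Allocate a new hci_chan for the given connection and add it to the
 * connection's RCU-protected channel list.  Returns NULL on allocation
 * failure.
 */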
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_chan *chan;

        BT_DBG("%s hcon %p", hdev->name, conn);

        chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
        if (!chan)
                return NULL;

        chan->conn = conn;
        skb_queue_head_init(&chan->data_q);

        list_add_rcu(&chan->list, &conn->chan_list);

        return chan;
}

void hci_chan_del(struct hci_chan *chan)
{
        struct hci_conn *conn = chan->conn;
        struct hci_dev *hdev = conn->hdev;

        BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

        list_del_rcu(&chan->list);

        synchronize_rcu();

        skb_queue_purge(&chan->data_q);
        kfree(chan);
}

void hci_chan_list_flush(struct hci_conn *conn)
{
        struct hci_chan *chan, *n;

        BT_DBG("hcon %p", conn);

        list_for_each_entry_safe(chan, n, &conn->chan_list, list)
                hci_chan_del(chan);
}