/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/export.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/a2mp.h>
#include <net/bluetooth/smp.h>

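/* Start an outgoing LE connection attempt: mark the connection as the
 * outgoing master and send HCI LE Create Connection with the default
 * scan and connection parameters set below.
 */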
static void hci_le_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_create_conn cp;

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->link_mode |= HCI_LM_MASTER;
	conn->sec_level = BT_SECURITY_LOW;

	memset(&cp, 0, sizeof(cp));
	cp.scan_interval = __constant_cpu_to_le16(0x0060);
	cp.scan_window = __constant_cpu_to_le16(0x0030);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.conn_interval_min = __constant_cpu_to_le16(0x0028);
	cp.conn_interval_max = __constant_cpu_to_le16(0x0038);
	cp.supervision_timeout = __constant_cpu_to_le16(0x002a);
	cp.min_ce_len = __constant_cpu_to_le16(0x0000);
	cp.max_ce_len = __constant_cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_le_connect_cancel(struct hci_conn *conn)
{
	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

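/* Start an outgoing ACL (BR/EDR) connection attempt, seeding the Create
 * Connection parameters from the inquiry cache entry when one is available.
 */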
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			cp.clock_offset = ie->data.clock_offset |
					  __constant_cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		if (ie->data.ssp_mode > 0)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

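/* Abort a pending outgoing ACL connection attempt (only supported by
 * controllers that are Bluetooth 1.2 or later).
 */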
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("hcon %p", conn);

	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

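/* Tear down an established connection by sending HCI Disconnect for its
 * handle with the given reason code.
 */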
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

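/* Request a legacy SCO link on top of the ACL connection identified by
 * handle.
 */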
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

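/* Request a synchronous (eSCO) link on top of the ACL connection
 * identified by handle, using fixed bandwidth and latency defaults.
 */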
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth = __constant_cpu_to_le32(0x00001f40);
	cp.rx_bandwidth = __constant_cpu_to_le32(0x00001f40);
	cp.max_latency = __constant_cpu_to_le16(0xffff);
	cp.voice_setting = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

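/* Ask the controller to update the LE connection parameters (interval,
 * connection latency and supervision timeout) of an existing connection.
 */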
void hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max,
			u16 latency, u16 to_multiplier)
{
	struct hci_cp_le_conn_update cp;
	struct hci_dev *hdev = conn->hdev;

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.conn_interval_min = cpu_to_le16(min);
	cp.conn_interval_max = cpu_to_le16(max);
	cp.conn_latency = cpu_to_le16(latency);
	cp.supervision_timeout = cpu_to_le16(to_multiplier);
	cp.min_ce_len = __constant_cpu_to_le16(0x0001);
	cp.max_ce_len = __constant_cpu_to_le16(0x0001);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);
}

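/* Start LE link-layer encryption using the given EDIV, random value and
 * long term key (LTK).
 */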
void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __u8 rand[8],
		      __u8 ltk[16])
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	memcpy(cp.ltk, ltk, sizeof(cp.ltk));
	cp.ediv = ediv;
	memcpy(cp.rand, rand, sizeof(cp.rand));

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	if (!sco)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}

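/* Delayed-work handler that cancels or disconnects a connection whose
 * reference count has dropped to zero, based on its current state.
 */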
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	__u8 reason;

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	if (atomic_read(&conn->refcnt))
		return;

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->out) {
			if (conn->type == ACL_LINK)
				hci_acl_connect_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_connect_cancel(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		reason = hci_proto_disconn_ind(conn);
		hci_acl_disconn(conn, reason);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

/* Enter sniff mode */
static void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_latency = __constant_cpu_to_le16(0);
		cp.min_remote_timeout = __constant_cpu_to_le16(0);
		cp.min_local_timeout = __constant_cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt = __constant_cpu_to_le16(4);
		cp.timeout = __constant_cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

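/* Idle timer callback: once the connection has been idle for the device's
 * idle timeout, try to put it into sniff mode.
 */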
static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}

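/* Auto-accept timer callback: confirm a pending user confirmation request
 * for the remote address without user interaction.
 */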
static void hci_conn_auto_accept(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;

	hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}

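/* Allocate and initialise a new hci_conn for the given link type and
 * destination address, set up its timers and work, and add it to the
 * device's connection hash.
 */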
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev = hdev;
	conn->type = type;
	conn->mode = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					 (hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);
	setup_timer(&conn->auto_accept_timer, hci_conn_auto_accept,
		    (unsigned long) conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	atomic_set(&conn->devref, 0);

	hci_conn_init_sysfs(conn);

	return conn;
}

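/* Remove a connection from the device: stop its timers, return any unacked
 * packet credits, unlink the paired SCO/ACL connection and drop the device
 * reference.
 */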
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	cancel_delayed_work_sync(&conn->disc_work);

	del_timer(&conn->auto_accept_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	hci_chan_list_flush(conn);

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	skb_queue_purge(&conn->data_q);

	hci_conn_put_device(conn);

	hci_dev_put(hdev);

	if (conn->handle == 0)
		kfree(conn);

	return 0;
}

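/* Pick the local BR/EDR controller to use for a connection to dst. If a
 * source address is given, the controller with that address is selected;
 * otherwise the first usable controller whose address differs from dst is
 * used. A reference is taken on the returned hci_dev.
 */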
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) ||
		    test_bit(HCI_RAW, &d->flags) ||
		    d->dev_type != HCI_BREDR)
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO, ACL or LE connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst,
			     __u8 dst_type, __u8 sec_level, __u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;
	struct hci_conn *le;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	if (type == LE_LINK) {
		le = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
		if (!le) {
			le = hci_conn_hash_lookup_state(hdev, LE_LINK,
							BT_CONNECT);
			if (le)
				return ERR_PTR(-EBUSY);

			le = hci_conn_add(hdev, LE_LINK, dst);
			if (!le)
				return ERR_PTR(-ENOMEM);

			le->dst_type = bdaddr_to_le(dst_type);
			hci_le_connect(le);
		}

		le->pending_sec_level = sec_level;
		le->auth_type = auth_type;

		hci_conn_hold(le);

		return le;
	}

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_connect(acl);
	}

	if (type == ACL_LINK)
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst);
		if (!sco) {
			hci_conn_put(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (hci_conn_ssp_enabled(conn) && !(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		/* encrypt must be pending if auth is also pending */
		set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);
		if (conn->key_type != 0xff)
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
	}

	return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!(conn->link_mode & HCI_LM_AUTH))
		goto auth;

	/* An authenticated combination key has sufficient security for any
	   security level. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if (conn->key_type == HCI_LK_UNAUTH_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key has always sufficient security for the security
	   levels 1 or 2. High security level requires the combination key
	   is generated using maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level != BT_SECURITY_HIGH || conn->pin_length == 16))
		goto encrypt;

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (conn->link_mode & HCI_LM_ENCRYPT)
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
	BT_DBG("hcon %p", conn);

	if (sec_level != BT_SECURITY_HIGH)
		return 1; /* Accept if non-secure is required */

	if (conn->sec_level == BT_SECURITY_HIGH)
		return 1;

	return 0; /* Reject not secure link */
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
			     sizeof(cp), &cp);
	}

	return 0;
}

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("hcon %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
			  jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_connect(conn);

	hci_dev_unlock(hdev);
}

void hci_conn_hold_device(struct hci_conn *conn)
{
	atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

void hci_conn_put_device(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->devref))
		hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);

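/* ioctl helper (HCIGETCONNLIST): copy the list of active connections on a
 * device to user space.
 */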
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn *c;
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type = c->type;
		(ci + n)->out = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type = conn->type;
		ci.out = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}

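/* Allocate an hci_chan (logical channel) on the given connection and add
 * it to the connection's channel list.
 */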
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s hcon %p", hdev->name, conn);

	chan = kzalloc(sizeof(struct hci_chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->conn = conn;
	skb_queue_head_init(&chan->data_q);

	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}

int hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	synchronize_rcu();

	skb_queue_purge(&chan->data_q);
	kfree(chan);

	return 0;
}

void hci_chan_list_flush(struct hci_conn *conn)
{
	struct hci_chan *chan, *n;

	BT_DBG("hcon %p", conn);

	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
		hci_chan_del(chan);
}