net/bluetooth/hci_conn.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/module.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

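/* Start an outgoing ACL connection: build a Create Connection command,
 * seeding page scan mode and clock offset from the inquiry cache when a
 * fresh entry is available, and hand it to the controller. */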
void hci_acl_connect(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->link_mode = HCI_LM_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode = ie->data.pscan_mode;
			cp.clock_offset = ie->data.clock_offset |
						cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		conn->ssp_mode = ie->data.ssp_mode;
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

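/* Cancel a pending outgoing ACL connection attempt. Controllers older
 * than Bluetooth 1.2 (hci_ver < 2) do not implement Create Connection
 * Cancel, so this is a no-op for them. */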
static void hci_acl_connect_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("%p", conn);

	if (conn->hdev->hci_ver < 2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

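/* Request disconnection of an established link with the given reason code */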
void hci_acl_disconn(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("%p", conn);

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

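/* Set up a legacy SCO link on top of an existing ACL connection */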
void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

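/* Set up a synchronous (eSCO) link on top of an existing ACL connection.
 * Bandwidth is requested as 8000 bytes/s (64 kbit/s) in each direction;
 * max latency and retransmission effort are left as "don't care". */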
void hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;

	BT_DBG("%p", conn);

	conn->state = BT_CONNECT;
	conn->out = 1;

	conn->attempt++;

	cp.handle = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	cp.tx_bandwidth = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth = cpu_to_le32(0x00001f40);
	cp.max_latency = cpu_to_le16(0xffff);
	cp.voice_setting = cpu_to_le16(hdev->voice_setting);
	cp.retrans_effort = 0xff;

	hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	BT_DBG("%p", conn);

	if (!sco)
		return;

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}

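/* Disconnect timer callback: if no one holds a reference to the connection
 * any more, cancel a pending connect attempt or disconnect an established
 * link, depending on the current state. */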
static void hci_conn_timeout(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;
	struct hci_dev *hdev = conn->hdev;
	__u8 reason;

	BT_DBG("conn %p state %d", conn, conn->state);

	if (atomic_read(&conn->refcnt))
		return;

	hci_dev_lock(hdev);

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->type == ACL_LINK && conn->out)
			hci_acl_connect_cancel(conn);
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		reason = hci_proto_disconn_ind(conn);
		hci_acl_disconn(conn, reason);
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}

	hci_dev_unlock(hdev);
}

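/* Idle timer callback: the link has seen no activity for hdev->idle_timeout,
 * so try to put it into sniff mode. */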
static void hci_conn_idle(unsigned long arg)
{
	struct hci_conn *conn = (void *) arg;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	hci_conn_enter_sniff_mode(conn);
}

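/* Allocate and initialise a new connection object, choose the packet types
 * allowed for the link type and add it to the device's connection hash. */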
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	conn = kzalloc(sizeof(struct hci_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	conn->hdev = hdev;
	conn->type = type;
	conn->mode = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;

	conn->power_save = 1;
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	setup_timer(&conn->disc_timer, hci_conn_timeout, (unsigned long)conn);
	setup_timer(&conn->idle_timer, hci_conn_idle, (unsigned long)conn);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	atomic_set(&conn->devref, 0);

	hci_conn_init_sysfs(conn);

	tasklet_enable(&hdev->tx_task);

	return conn;
}

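/* Tear down a connection: stop its timers, return unacked ACL credits to
 * the device, unlink any attached SCO/ACL peer and drop the references
 * taken in hci_conn_add(). */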
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s conn %p handle %d", hdev->name, conn, conn->handle);

	del_timer(&conn->idle_timer);

	del_timer(&conn->disc_timer);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_put(acl);
		}
	}

	tasklet_disable(&hdev->tx_task);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	tasklet_enable(&hdev->tx_task);

	skb_queue_purge(&conn->data_q);

	hci_conn_put_device(conn);

	hci_dev_put(hdev);

	return 0;
}

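/* Pick the local device to use for a connection between src and dst: with
 * a source address the device must own that address, otherwise any powered
 * up device whose address differs from dst is acceptable. */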
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL;
	struct list_head *p;

	BT_DBG("%s -> %s", batostr(src), batostr(dst));

	read_lock_bh(&hci_dev_list_lock);

	list_for_each(p, &hci_dev_list) {
		struct hci_dev *d = list_entry(p, struct hci_dev, list);

		if (!test_bit(HCI_UP, &d->flags) || test_bit(HCI_RAW, &d->flags))
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock_bh(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* Create SCO or ACL connection.
 * Device _must_ be locked */
struct hci_conn *hci_connect(struct hci_dev *hdev, int type, bdaddr_t *dst, __u8 sec_level, __u8 auth_type)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	BT_DBG("%s dst %s", hdev->name, batostr(dst));

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst);
		if (!acl)
			return NULL;
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_connect(acl);
	}

	if (type == ACL_LINK)
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst);
		if (!sco) {
			hci_conn_put(acl);
			return NULL;
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	if (acl->state == BT_CONNECTED &&
			(sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		acl->power_save = 1;
		hci_conn_enter_active_mode(acl);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->pend)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->pend);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}
EXPORT_SYMBOL(hci_connect);

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (conn->ssp_mode > 0 && conn->hdev->ssp_mode > 0 &&
				!(conn->link_mode & HCI_LM_ENCRYPT))
		return 0;

	return 1;
}
EXPORT_SYMBOL(hci_conn_check_link_mode);

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (conn->link_mode & HCI_LM_AUTH)
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_auth_requested cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
						sizeof(cp), &cp);
	}

	return 0;
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("conn %p", conn);

	if (sec_level == BT_SECURITY_SDP)
		return 1;

	if (sec_level == BT_SECURITY_LOW &&
			(!conn->ssp_mode || !conn->hdev->ssp_mode))
		return 1;

	if (conn->link_mode & HCI_LM_ENCRYPT)
		return hci_conn_auth(conn, sec_level, auth_type);

	if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->pend))
		return 0;

	if (hci_conn_auth(conn, sec_level, auth_type)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.encrypt = 1;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT,
						sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("conn %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->pend)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
						sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_change_link_key);

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("conn %p", conn);

	if (!role && conn->link_mode & HCI_LM_MASTER)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->pend)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (conn->mode != HCI_CM_SNIFF || !conn->power_save)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		mod_timer(&conn->idle_timer,
				jiffies + msecs_to_jiffies(hdev->idle_timeout));
}

/* Enter sniff mode */
void hci_conn_enter_sniff_mode(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("conn %p mode %d", conn, conn->mode);

	if (test_bit(HCI_RAW, &hdev->flags))
		return;

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_latency = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->pend)) {
		struct hci_cp_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt = cpu_to_le16(4);
		cp.timeout = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct list_head *p;

	BT_DBG("hdev %s", hdev->name);

	p = h->list.next;
	while (p != &h->list) {
		struct hci_conn *c;

		c = list_entry(p, struct hci_conn, list);
		p = p->next;

		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, 0x16);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_connect(conn);

	hci_dev_unlock(hdev);
}

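/* Reference counting for the sysfs device backing a connection: the sysfs
 * entry is removed when the last holder drops its reference. */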
void hci_conn_hold_device(struct hci_conn *conn)
{
	atomic_inc(&conn->devref);
}
EXPORT_SYMBOL(hci_conn_hold_device);

void hci_conn_put_device(struct hci_conn *conn)
{
	if (atomic_dec_and_test(&conn->devref))
		hci_conn_del_sysfs(conn);
}
EXPORT_SYMBOL(hci_conn_put_device);

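/* ioctl helper: copy a snapshot of the device's connection list to user space */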
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	struct list_head *p;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock_bh(hdev);
	list_for_each(p, &hdev->conn_hash.list) {
		register struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type = c->type;
		(ci + n)->out = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = c->link_mode;
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock_bh(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

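/* ioctl helper: copy information about a single connection to user space */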
int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type = conn->type;
		ci.out = conn->out;
		ci.state = conn->state;
		ci.link_mode = conn->link_mode;
	}
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

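/* ioctl helper: report the authentication type of the ACL link to a peer */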
int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock_bh(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock_bh(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}