net/bluetooth/l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <net/sock.h>
44
45 #include <asm/system.h>
46 #include <asm/uaccess.h>
47 #include <asm/unaligned.h>
48
49 #include <net/bluetooth/bluetooth.h>
50 #include <net/bluetooth/hci_core.h>
51 #include <net/bluetooth/l2cap.h>
52
53 #ifndef CONFIG_BT_L2CAP_DEBUG
54 #undef BT_DBG
55 #define BT_DBG(D...)
56 #endif
57
58 #define VERSION "2.11"
59
60 static u32 l2cap_feat_mask = 0x0000;
61
62 static const struct proto_ops l2cap_sock_ops;
63
64 static struct bt_sock_list l2cap_sk_list = {
65 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
66 };
67
68 static void __l2cap_sock_close(struct sock *sk, int reason);
69 static void l2cap_sock_close(struct sock *sk);
70 static void l2cap_sock_kill(struct sock *sk);
71
72 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
73 u8 code, u8 ident, u16 dlen, void *data);
74
75 /* ---- L2CAP timers ---- */
76 static void l2cap_sock_timeout(unsigned long arg)
77 {
78 struct sock *sk = (struct sock *) arg;
79 int reason;
80
81 BT_DBG("sock %p state %d", sk, sk->sk_state);
82
83 bh_lock_sock(sk);
84
85 if (sk->sk_state == BT_CONNECT &&
86 (l2cap_pi(sk)->link_mode & (L2CAP_LM_AUTH |
87 L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)))
88 reason = ECONNREFUSED;
89 else
90 reason = ETIMEDOUT;
91
92 __l2cap_sock_close(sk, reason);
93
94 bh_unlock_sock(sk);
95
96 l2cap_sock_kill(sk);
97 sock_put(sk);
98 }
99
100 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
101 {
102 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
103 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
104 }
105
106 static void l2cap_sock_clear_timer(struct sock *sk)
107 {
108 BT_DBG("sock %p state %d", sk, sk->sk_state);
109 sk_stop_timer(sk, &sk->sk_timer);
110 }
111
112 /* ---- L2CAP channels ---- */
113 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
114 {
115 struct sock *s;
116 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
117 if (l2cap_pi(s)->dcid == cid)
118 break;
119 }
120 return s;
121 }
122
123 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
124 {
125 struct sock *s;
126 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
127 if (l2cap_pi(s)->scid == cid)
128 break;
129 }
130 return s;
131 }
132
133 /* Find channel with given SCID.
134 * Returns locked socket */
135 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
136 {
137 struct sock *s;
138 read_lock(&l->lock);
139 s = __l2cap_get_chan_by_scid(l, cid);
140 if (s) bh_lock_sock(s);
141 read_unlock(&l->lock);
142 return s;
143 }
144
145 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
146 {
147 struct sock *s;
148 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
149 if (l2cap_pi(s)->ident == ident)
150 break;
151 }
152 return s;
153 }
154
155 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
156 {
157 struct sock *s;
158 read_lock(&l->lock);
159 s = __l2cap_get_chan_by_ident(l, ident);
160 if (s) bh_lock_sock(s);
161 read_unlock(&l->lock);
162 return s;
163 }
164
165 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
166 {
167 u16 cid = 0x0040;
168
169 for (; cid < 0xffff; cid++) {
 170         if (!__l2cap_get_chan_by_scid(l, cid))
171 return cid;
172 }
173
174 return 0;
175 }
176
177 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
178 {
179 sock_hold(sk);
180
181 if (l->head)
182 l2cap_pi(l->head)->prev_c = sk;
183
184 l2cap_pi(sk)->next_c = l->head;
185 l2cap_pi(sk)->prev_c = NULL;
186 l->head = sk;
187 }
188
189 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
190 {
191 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
192
193 write_lock_bh(&l->lock);
194 if (sk == l->head)
195 l->head = next;
196
197 if (next)
198 l2cap_pi(next)->prev_c = prev;
199 if (prev)
200 l2cap_pi(prev)->next_c = next;
201 write_unlock_bh(&l->lock);
202
203 __sock_put(sk);
204 }
205
206 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
207 {
208 struct l2cap_chan_list *l = &conn->chan_list;
209
210 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
211
212 l2cap_pi(sk)->conn = conn;
213
214 if (sk->sk_type == SOCK_SEQPACKET) {
215 /* Alloc CID for connection-oriented socket */
216 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
217 } else if (sk->sk_type == SOCK_DGRAM) {
218 /* Connectionless socket */
219 l2cap_pi(sk)->scid = 0x0002;
220 l2cap_pi(sk)->dcid = 0x0002;
221 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
222 } else {
223 /* Raw socket can send/recv signalling messages only */
224 l2cap_pi(sk)->scid = 0x0001;
225 l2cap_pi(sk)->dcid = 0x0001;
226 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
227 }
228
229 __l2cap_chan_link(l, sk);
230
231 if (parent)
232 bt_accept_enqueue(parent, sk);
233 }
234
235 /* Delete channel.
236 * Must be called on the locked socket. */
237 static void l2cap_chan_del(struct sock *sk, int err)
238 {
239 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
240 struct sock *parent = bt_sk(sk)->parent;
241
242 l2cap_sock_clear_timer(sk);
243
244 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
245
246 if (conn) {
247 /* Unlink from channel list */
248 l2cap_chan_unlink(&conn->chan_list, sk);
249 l2cap_pi(sk)->conn = NULL;
250 hci_conn_put(conn->hcon);
251 }
252
253 sk->sk_state = BT_CLOSED;
254 sock_set_flag(sk, SOCK_ZAPPED);
255
256 if (err)
257 sk->sk_err = err;
258
259 if (parent) {
260 bt_accept_unlink(sk);
261 parent->sk_data_ready(parent, 0);
262 } else
263 sk->sk_state_change(sk);
264 }
265
266 /* Service level security */
267 static inline int l2cap_check_link_mode(struct sock *sk)
268 {
269 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
270
271 if ((l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT) ||
272 (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE))
273 return hci_conn_encrypt(conn->hcon);
274
275 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH)
276 return hci_conn_auth(conn->hcon);
277
278 return 1;
279 }
280
281 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
282 {
283 u8 id;
284
 285         /* Get next available identifier.
 286          *    1 - 128 are used by the kernel.
287 * 129 - 199 are reserved.
288 * 200 - 254 are used by utilities like l2ping, etc.
289 */
290
291 spin_lock_bh(&conn->lock);
292
293 if (++conn->tx_ident > 128)
294 conn->tx_ident = 1;
295
296 id = conn->tx_ident;
297
298 spin_unlock_bh(&conn->lock);
299
300 return id;
301 }
302
303 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
304 {
305 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
306
307 BT_DBG("code 0x%2.2x", code);
308
309 if (!skb)
310 return -ENOMEM;
311
312 return hci_send_acl(conn->hcon, skb, 0);
313 }
314
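/* Kick off channel establishment over the connected ACL link: send a
 * Connection Request once the security requirements are satisfied, or
 * query the remote feature mask first if that has not been done yet. */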
315 static void l2cap_do_start(struct sock *sk)
316 {
317 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
318
319 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
320 if (l2cap_check_link_mode(sk)) {
321 struct l2cap_conn_req req;
322 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
323 req.psm = l2cap_pi(sk)->psm;
324
325 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
326
327 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
328 L2CAP_CONN_REQ, sizeof(req), &req);
329 }
330 } else {
331 struct l2cap_info_req req;
332 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
333
334 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
335 conn->info_ident = l2cap_get_ident(conn);
336
337 mod_timer(&conn->info_timer, jiffies +
338 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
339
340 l2cap_send_cmd(conn, conn->info_ident,
341 L2CAP_INFO_REQ, sizeof(req), &req);
342 }
343 }
344
345 /* ---- L2CAP connections ---- */
346 static void l2cap_conn_start(struct l2cap_conn *conn)
347 {
348 struct l2cap_chan_list *l = &conn->chan_list;
349 struct sock *sk;
350
351 BT_DBG("conn %p", conn);
352
353 read_lock(&l->lock);
354
355 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
356 bh_lock_sock(sk);
357
358 if (sk->sk_type != SOCK_SEQPACKET) {
359 bh_unlock_sock(sk);
360 continue;
361 }
362
363 if (sk->sk_state == BT_CONNECT) {
364 if (l2cap_check_link_mode(sk)) {
365 struct l2cap_conn_req req;
366 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
367 req.psm = l2cap_pi(sk)->psm;
368
369 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
370
371 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
372 L2CAP_CONN_REQ, sizeof(req), &req);
373 }
374 } else if (sk->sk_state == BT_CONNECT2) {
375 struct l2cap_conn_rsp rsp;
376 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
377 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
378
379 if (l2cap_check_link_mode(sk)) {
380 sk->sk_state = BT_CONFIG;
381 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
382 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
383 } else {
384 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
385 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
386 }
387
388 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
389 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
390 }
391
392 bh_unlock_sock(sk);
393 }
394
395 read_unlock(&l->lock);
396 }
397
398 static void l2cap_conn_ready(struct l2cap_conn *conn)
399 {
400 struct l2cap_chan_list *l = &conn->chan_list;
401 struct sock *sk;
402
403 BT_DBG("conn %p", conn);
404
405 read_lock(&l->lock);
406
407 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
408 bh_lock_sock(sk);
409
410 if (sk->sk_type != SOCK_SEQPACKET) {
411 l2cap_sock_clear_timer(sk);
412 sk->sk_state = BT_CONNECTED;
413 sk->sk_state_change(sk);
414 } else if (sk->sk_state == BT_CONNECT)
415 l2cap_do_start(sk);
416
417 bh_unlock_sock(sk);
418 }
419
420 read_unlock(&l->lock);
421 }
422
 423 /* Notify sockets that we cannot guarantee reliability anymore */
424 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
425 {
426 struct l2cap_chan_list *l = &conn->chan_list;
427 struct sock *sk;
428
429 BT_DBG("conn %p", conn);
430
431 read_lock(&l->lock);
432
433 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
434 if (l2cap_pi(sk)->link_mode & L2CAP_LM_RELIABLE)
435 sk->sk_err = err;
436 }
437
438 read_unlock(&l->lock);
439 }
440
441 static void l2cap_info_timeout(unsigned long arg)
442 {
443 struct l2cap_conn *conn = (void *) arg;
444
445 conn->info_ident = 0;
446
447 l2cap_conn_start(conn);
448 }
449
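/* Allocate per-ACL-link L2CAP connection state on first use. If the link
 * already has L2CAP data, or the HCI connection failed (non-zero status),
 * the existing pointer is returned unchanged. */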
450 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
451 {
452 struct l2cap_conn *conn = hcon->l2cap_data;
453
454 if (conn || status)
455 return conn;
456
457 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
458 if (!conn)
459 return NULL;
460
461 hcon->l2cap_data = conn;
462 conn->hcon = hcon;
463
464 BT_DBG("hcon %p conn %p", hcon, conn);
465
466 conn->mtu = hcon->hdev->acl_mtu;
467 conn->src = &hcon->hdev->bdaddr;
468 conn->dst = &hcon->dst;
469
470 conn->feat_mask = 0;
471
472 setup_timer(&conn->info_timer, l2cap_info_timeout,
473 (unsigned long) conn);
474
475 spin_lock_init(&conn->lock);
476 rwlock_init(&conn->chan_list.lock);
477
478 return conn;
479 }
480
481 static void l2cap_conn_del(struct hci_conn *hcon, int err)
482 {
483 struct l2cap_conn *conn = hcon->l2cap_data;
484 struct sock *sk;
485
486 if (!conn)
487 return;
488
489 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
490
491 if (conn->rx_skb)
492 kfree_skb(conn->rx_skb);
493
494 /* Kill channels */
495 while ((sk = conn->chan_list.head)) {
496 bh_lock_sock(sk);
497 l2cap_chan_del(sk, err);
498 bh_unlock_sock(sk);
499 l2cap_sock_kill(sk);
500 }
501
502 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
503 del_timer_sync(&conn->info_timer);
504
505 hcon->l2cap_data = NULL;
506 kfree(conn);
507 }
508
509 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
510 {
511 struct l2cap_chan_list *l = &conn->chan_list;
512 write_lock_bh(&l->lock);
513 __l2cap_chan_add(conn, sk, parent);
514 write_unlock_bh(&l->lock);
515 }
516
517 /* ---- Socket interface ---- */
518 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
519 {
520 struct sock *sk;
521 struct hlist_node *node;
522 sk_for_each(sk, node, &l2cap_sk_list.head)
523 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
524 goto found;
525 sk = NULL;
526 found:
527 return sk;
528 }
529
530 /* Find socket with psm and source bdaddr.
531 * Returns closest match.
532 */
533 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
534 {
535 struct sock *sk = NULL, *sk1 = NULL;
536 struct hlist_node *node;
537
538 sk_for_each(sk, node, &l2cap_sk_list.head) {
539 if (state && sk->sk_state != state)
540 continue;
541
542 if (l2cap_pi(sk)->psm == psm) {
543 /* Exact match. */
544 if (!bacmp(&bt_sk(sk)->src, src))
545 break;
546
547 /* Closest match */
548 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
549 sk1 = sk;
550 }
551 }
552 return node ? sk : sk1;
553 }
554
555 /* Find socket with given address (psm, src).
556 * Returns locked socket */
557 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
558 {
559 struct sock *s;
560 read_lock(&l2cap_sk_list.lock);
561 s = __l2cap_get_sock_by_psm(state, psm, src);
562 if (s) bh_lock_sock(s);
563 read_unlock(&l2cap_sk_list.lock);
564 return s;
565 }
566
567 static void l2cap_sock_destruct(struct sock *sk)
568 {
569 BT_DBG("sk %p", sk);
570
571 skb_queue_purge(&sk->sk_receive_queue);
572 skb_queue_purge(&sk->sk_write_queue);
573 }
574
575 static void l2cap_sock_cleanup_listen(struct sock *parent)
576 {
577 struct sock *sk;
578
579 BT_DBG("parent %p", parent);
580
581 /* Close not yet accepted channels */
582 while ((sk = bt_accept_dequeue(parent, NULL)))
583 l2cap_sock_close(sk);
584
585 parent->sk_state = BT_CLOSED;
586 sock_set_flag(parent, SOCK_ZAPPED);
587 }
588
589 /* Kill socket (only if zapped and orphan)
590 * Must be called on unlocked socket.
591 */
592 static void l2cap_sock_kill(struct sock *sk)
593 {
594 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
595 return;
596
597 BT_DBG("sk %p state %d", sk, sk->sk_state);
598
599 /* Kill poor orphan */
600 bt_sock_unlink(&l2cap_sk_list, sk);
601 sock_set_flag(sk, SOCK_DEAD);
602 sock_put(sk);
603 }
604
605 static void __l2cap_sock_close(struct sock *sk, int reason)
606 {
607 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
608
609 switch (sk->sk_state) {
610 case BT_LISTEN:
611 l2cap_sock_cleanup_listen(sk);
612 break;
613
614 case BT_CONNECTED:
615 case BT_CONFIG:
616 case BT_CONNECT2:
617 if (sk->sk_type == SOCK_SEQPACKET) {
618 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
619 struct l2cap_disconn_req req;
620
621 sk->sk_state = BT_DISCONN;
622 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
623
624 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
625 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
626 l2cap_send_cmd(conn, l2cap_get_ident(conn),
627 L2CAP_DISCONN_REQ, sizeof(req), &req);
628 } else
629 l2cap_chan_del(sk, reason);
630 break;
631
632 case BT_CONNECT:
633 case BT_DISCONN:
634 l2cap_chan_del(sk, reason);
635 break;
636
637 default:
638 sock_set_flag(sk, SOCK_ZAPPED);
639 break;
640 }
641 }
642
643 /* Must be called on unlocked socket. */
644 static void l2cap_sock_close(struct sock *sk)
645 {
646 l2cap_sock_clear_timer(sk);
647 lock_sock(sk);
648 __l2cap_sock_close(sk, ECONNRESET);
649 release_sock(sk);
650 l2cap_sock_kill(sk);
651 }
652
653 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
654 {
655 struct l2cap_pinfo *pi = l2cap_pi(sk);
656
657 BT_DBG("sk %p", sk);
658
659 if (parent) {
660 sk->sk_type = parent->sk_type;
661 pi->imtu = l2cap_pi(parent)->imtu;
662 pi->omtu = l2cap_pi(parent)->omtu;
663 pi->link_mode = l2cap_pi(parent)->link_mode;
664 } else {
665 pi->imtu = L2CAP_DEFAULT_MTU;
666 pi->omtu = 0;
667 pi->link_mode = 0;
668 }
669
670 /* Default config options */
671 pi->conf_len = 0;
672 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
673 }
674
675 static struct proto l2cap_proto = {
676 .name = "L2CAP",
677 .owner = THIS_MODULE,
678 .obj_size = sizeof(struct l2cap_pinfo)
679 };
680
681 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
682 {
683 struct sock *sk;
684
685 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
686 if (!sk)
687 return NULL;
688
689 sock_init_data(sock, sk);
690 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
691
692 sk->sk_destruct = l2cap_sock_destruct;
693 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
694
695 sock_reset_flag(sk, SOCK_ZAPPED);
696
697 sk->sk_protocol = proto;
698 sk->sk_state = BT_OPEN;
699
700 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
701
702 bt_sock_link(&l2cap_sk_list, sk);
703 return sk;
704 }
705
706 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol)
707 {
708 struct sock *sk;
709
710 BT_DBG("sock %p", sock);
711
712 sock->state = SS_UNCONNECTED;
713
714 if (sock->type != SOCK_SEQPACKET &&
715 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
716 return -ESOCKTNOSUPPORT;
717
718 if (sock->type == SOCK_RAW && !capable(CAP_NET_RAW))
719 return -EPERM;
720
721 sock->ops = &l2cap_sock_ops;
722
723 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
724 if (!sk)
725 return -ENOMEM;
726
727 l2cap_sock_init(sk, NULL);
728 return 0;
729 }
730
731 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
732 {
733 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
734 struct sock *sk = sock->sk;
735 int err = 0;
736
737 BT_DBG("sk %p, %s %d", sk, batostr(&la->l2_bdaddr), la->l2_psm);
738
739 if (!addr || addr->sa_family != AF_BLUETOOTH)
740 return -EINVAL;
741
742 lock_sock(sk);
743
744 if (sk->sk_state != BT_OPEN) {
745 err = -EBADFD;
746 goto done;
747 }
748
749 if (la->l2_psm && btohs(la->l2_psm) < 0x1001 &&
750 !capable(CAP_NET_BIND_SERVICE)) {
751 err = -EACCES;
752 goto done;
753 }
754
755 write_lock_bh(&l2cap_sk_list.lock);
756
757 if (la->l2_psm && __l2cap_get_sock_by_addr(la->l2_psm, &la->l2_bdaddr)) {
758 err = -EADDRINUSE;
759 } else {
760 /* Save source address */
761 bacpy(&bt_sk(sk)->src, &la->l2_bdaddr);
762 l2cap_pi(sk)->psm = la->l2_psm;
763 l2cap_pi(sk)->sport = la->l2_psm;
764 sk->sk_state = BT_BOUND;
765 }
766
767 write_unlock_bh(&l2cap_sk_list.lock);
768
769 done:
770 release_sock(sk);
771 return err;
772 }
773
774 static int l2cap_do_connect(struct sock *sk)
775 {
776 bdaddr_t *src = &bt_sk(sk)->src;
777 bdaddr_t *dst = &bt_sk(sk)->dst;
778 struct l2cap_conn *conn;
779 struct hci_conn *hcon;
780 struct hci_dev *hdev;
781 __u8 auth_type;
782 int err = 0;
783
784 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst), l2cap_pi(sk)->psm);
785
786 if (!(hdev = hci_get_route(dst, src)))
787 return -EHOSTUNREACH;
788
789 hci_dev_lock_bh(hdev);
790
791 err = -ENOMEM;
792
793 if (l2cap_pi(sk)->link_mode & L2CAP_LM_AUTH ||
794 l2cap_pi(sk)->link_mode & L2CAP_LM_ENCRYPT ||
795 l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
796 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
797 auth_type = HCI_AT_NO_BONDING_MITM;
798 else
799 auth_type = HCI_AT_GENERAL_BONDING_MITM;
800 } else {
801 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001))
802 auth_type = HCI_AT_NO_BONDING;
803 else
804 auth_type = HCI_AT_GENERAL_BONDING;
805 }
806
807 hcon = hci_connect(hdev, ACL_LINK, dst, auth_type);
808 if (!hcon)
809 goto done;
810
811 conn = l2cap_conn_add(hcon, 0);
812 if (!conn) {
813 hci_conn_put(hcon);
814 goto done;
815 }
816
817 err = 0;
818
819 /* Update source addr of the socket */
820 bacpy(src, conn->src);
821
822 l2cap_chan_add(conn, sk, NULL);
823
824 sk->sk_state = BT_CONNECT;
825 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
826
827 if (hcon->state == BT_CONNECTED) {
828 if (sk->sk_type != SOCK_SEQPACKET) {
829 l2cap_sock_clear_timer(sk);
830 sk->sk_state = BT_CONNECTED;
831 } else
832 l2cap_do_start(sk);
833 }
834
835 done:
836 hci_dev_unlock_bh(hdev);
837 hci_dev_put(hdev);
838 return err;
839 }
840
841 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
842 {
843 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
844 struct sock *sk = sock->sk;
845 int err = 0;
846
847 lock_sock(sk);
848
849 BT_DBG("sk %p", sk);
850
851 if (addr->sa_family != AF_BLUETOOTH || alen < sizeof(struct sockaddr_l2)) {
852 err = -EINVAL;
853 goto done;
854 }
855
856 if (sk->sk_type == SOCK_SEQPACKET && !la->l2_psm) {
857 err = -EINVAL;
858 goto done;
859 }
860
 861         switch (sk->sk_state) {
862 case BT_CONNECT:
863 case BT_CONNECT2:
864 case BT_CONFIG:
865 /* Already connecting */
866 goto wait;
867
868 case BT_CONNECTED:
869 /* Already connected */
870 goto done;
871
872 case BT_OPEN:
873 case BT_BOUND:
874 /* Can connect */
875 break;
876
877 default:
878 err = -EBADFD;
879 goto done;
880 }
881
882 /* Set destination address and psm */
883 bacpy(&bt_sk(sk)->dst, &la->l2_bdaddr);
884 l2cap_pi(sk)->psm = la->l2_psm;
885
886 if ((err = l2cap_do_connect(sk)))
887 goto done;
888
889 wait:
890 err = bt_sock_wait_state(sk, BT_CONNECTED,
891 sock_sndtimeo(sk, flags & O_NONBLOCK));
892 done:
893 release_sock(sk);
894 return err;
895 }
896
897 static int l2cap_sock_listen(struct socket *sock, int backlog)
898 {
899 struct sock *sk = sock->sk;
900 int err = 0;
901
902 BT_DBG("sk %p backlog %d", sk, backlog);
903
904 lock_sock(sk);
905
906 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
907 err = -EBADFD;
908 goto done;
909 }
910
911 if (!l2cap_pi(sk)->psm) {
912 bdaddr_t *src = &bt_sk(sk)->src;
913 u16 psm;
914
915 err = -EINVAL;
916
917 write_lock_bh(&l2cap_sk_list.lock);
918
919 for (psm = 0x1001; psm < 0x1100; psm += 2)
920 if (!__l2cap_get_sock_by_addr(htobs(psm), src)) {
921 l2cap_pi(sk)->psm = htobs(psm);
922 l2cap_pi(sk)->sport = htobs(psm);
923 err = 0;
924 break;
925 }
926
927 write_unlock_bh(&l2cap_sk_list.lock);
928
929 if (err < 0)
930 goto done;
931 }
932
933 sk->sk_max_ack_backlog = backlog;
934 sk->sk_ack_backlog = 0;
935 sk->sk_state = BT_LISTEN;
936
937 done:
938 release_sock(sk);
939 return err;
940 }
941
942 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
943 {
944 DECLARE_WAITQUEUE(wait, current);
945 struct sock *sk = sock->sk, *nsk;
946 long timeo;
947 int err = 0;
948
949 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
950
951 if (sk->sk_state != BT_LISTEN) {
952 err = -EBADFD;
953 goto done;
954 }
955
956 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
957
958 BT_DBG("sk %p timeo %ld", sk, timeo);
959
960 /* Wait for an incoming connection. (wake-one). */
961 add_wait_queue_exclusive(sk->sk_sleep, &wait);
962 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
963 set_current_state(TASK_INTERRUPTIBLE);
964 if (!timeo) {
965 err = -EAGAIN;
966 break;
967 }
968
969 release_sock(sk);
970 timeo = schedule_timeout(timeo);
971 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
972
973 if (sk->sk_state != BT_LISTEN) {
974 err = -EBADFD;
975 break;
976 }
977
978 if (signal_pending(current)) {
979 err = sock_intr_errno(timeo);
980 break;
981 }
982 }
983 set_current_state(TASK_RUNNING);
984 remove_wait_queue(sk->sk_sleep, &wait);
985
986 if (err)
987 goto done;
988
989 newsock->state = SS_CONNECTED;
990
991 BT_DBG("new socket %p", nsk);
992
993 done:
994 release_sock(sk);
995 return err;
996 }
997
998 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
999 {
1000 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1001 struct sock *sk = sock->sk;
1002
1003 BT_DBG("sock %p, sk %p", sock, sk);
1004
1005 addr->sa_family = AF_BLUETOOTH;
1006 *len = sizeof(struct sockaddr_l2);
1007
1008 if (peer)
1009 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1010 else
1011 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1012
1013 la->l2_psm = l2cap_pi(sk)->psm;
1014 return 0;
1015 }
1016
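/* Build and send an outgoing frame on a connected channel. The first skb
 * carries the L2CAP header (plus the PSM for connectionless sockets); the
 * rest of the payload is split into continuation fragments no larger than
 * the ACL MTU, chained via frag_list and handed to hci_send_acl(). */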
1017 static inline int l2cap_do_send(struct sock *sk, struct msghdr *msg, int len)
1018 {
1019 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1020 struct sk_buff *skb, **frag;
 1021         int err, hlen, count, sent = 0;
1022 struct l2cap_hdr *lh;
1023
1024 BT_DBG("sk %p len %d", sk, len);
1025
1026 /* First fragment (with L2CAP header) */
1027 if (sk->sk_type == SOCK_DGRAM)
1028 hlen = L2CAP_HDR_SIZE + 2;
1029 else
1030 hlen = L2CAP_HDR_SIZE;
1031
1032 count = min_t(unsigned int, (conn->mtu - hlen), len);
1033
1034 skb = bt_skb_send_alloc(sk, hlen + count,
1035 msg->msg_flags & MSG_DONTWAIT, &err);
1036 if (!skb)
1037 return err;
1038
1039 /* Create L2CAP header */
1040 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1041 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1042 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1043
1044 if (sk->sk_type == SOCK_DGRAM)
1045 put_unaligned(l2cap_pi(sk)->psm, (__le16 *) skb_put(skb, 2));
1046
1047 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1048 err = -EFAULT;
1049 goto fail;
1050 }
1051
1052 sent += count;
1053 len -= count;
1054
1055 /* Continuation fragments (no L2CAP header) */
1056 frag = &skb_shinfo(skb)->frag_list;
1057 while (len) {
1058 count = min_t(unsigned int, conn->mtu, len);
1059
1060 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1061 if (!*frag)
1062 goto fail;
1063
1064 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count)) {
1065 err = -EFAULT;
1066 goto fail;
1067 }
1068
1069 sent += count;
1070 len -= count;
1071
1072 frag = &(*frag)->next;
1073 }
1074
1075 if ((err = hci_send_acl(conn->hcon, skb, 0)) < 0)
1076 goto fail;
1077
1078 return sent;
1079
1080 fail:
1081 kfree_skb(skb);
1082 return err;
1083 }
1084
1085 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1086 {
1087 struct sock *sk = sock->sk;
1088 int err = 0;
1089
1090 BT_DBG("sock %p, sk %p", sock, sk);
1091
1092 err = sock_error(sk);
1093 if (err)
1094 return err;
1095
1096 if (msg->msg_flags & MSG_OOB)
1097 return -EOPNOTSUPP;
1098
1099 /* Check outgoing MTU */
1100 if (sk->sk_type != SOCK_RAW && len > l2cap_pi(sk)->omtu)
1101 return -EINVAL;
1102
1103 lock_sock(sk);
1104
1105 if (sk->sk_state == BT_CONNECTED)
1106 err = l2cap_do_send(sk, msg, len);
1107 else
1108 err = -ENOTCONN;
1109
1110 release_sock(sk);
1111 return err;
1112 }
1113
1114 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, int optlen)
1115 {
1116 struct sock *sk = sock->sk;
1117 struct l2cap_options opts;
1118 int err = 0, len;
1119 u32 opt;
1120
1121 BT_DBG("sk %p", sk);
1122
1123 lock_sock(sk);
1124
1125 switch (optname) {
1126 case L2CAP_OPTIONS:
1127 opts.imtu = l2cap_pi(sk)->imtu;
1128 opts.omtu = l2cap_pi(sk)->omtu;
1129 opts.flush_to = l2cap_pi(sk)->flush_to;
1130 opts.mode = L2CAP_MODE_BASIC;
1131
1132 len = min_t(unsigned int, sizeof(opts), optlen);
1133 if (copy_from_user((char *) &opts, optval, len)) {
1134 err = -EFAULT;
1135 break;
1136 }
1137
1138 l2cap_pi(sk)->imtu = opts.imtu;
1139 l2cap_pi(sk)->omtu = opts.omtu;
1140 break;
1141
1142 case L2CAP_LM:
1143 if (get_user(opt, (u32 __user *) optval)) {
1144 err = -EFAULT;
1145 break;
1146 }
1147
1148 l2cap_pi(sk)->link_mode = opt;
1149 break;
1150
1151 default:
1152 err = -ENOPROTOOPT;
1153 break;
1154 }
1155
1156 release_sock(sk);
1157 return err;
1158 }
1159
1160 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1161 {
1162 struct sock *sk = sock->sk;
1163 struct l2cap_options opts;
1164 struct l2cap_conninfo cinfo;
1165 int len, err = 0;
1166
1167 BT_DBG("sk %p", sk);
1168
1169 if (get_user(len, optlen))
1170 return -EFAULT;
1171
1172 lock_sock(sk);
1173
1174 switch (optname) {
1175 case L2CAP_OPTIONS:
1176 opts.imtu = l2cap_pi(sk)->imtu;
1177 opts.omtu = l2cap_pi(sk)->omtu;
1178 opts.flush_to = l2cap_pi(sk)->flush_to;
1179 opts.mode = L2CAP_MODE_BASIC;
1180
1181 len = min_t(unsigned int, len, sizeof(opts));
1182 if (copy_to_user(optval, (char *) &opts, len))
1183 err = -EFAULT;
1184
1185 break;
1186
1187 case L2CAP_LM:
1188 if (put_user(l2cap_pi(sk)->link_mode, (u32 __user *) optval))
1189 err = -EFAULT;
1190 break;
1191
1192 case L2CAP_CONNINFO:
1193 if (sk->sk_state != BT_CONNECTED) {
1194 err = -ENOTCONN;
1195 break;
1196 }
1197
1198 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1199 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1200
1201 len = min_t(unsigned int, len, sizeof(cinfo));
1202 if (copy_to_user(optval, (char *) &cinfo, len))
1203 err = -EFAULT;
1204
1205 break;
1206
1207 default:
1208 err = -ENOPROTOOPT;
1209 break;
1210 }
1211
1212 release_sock(sk);
1213 return err;
1214 }
1215
1216 static int l2cap_sock_shutdown(struct socket *sock, int how)
1217 {
1218 struct sock *sk = sock->sk;
1219 int err = 0;
1220
1221 BT_DBG("sock %p, sk %p", sock, sk);
1222
1223 if (!sk)
1224 return 0;
1225
1226 lock_sock(sk);
1227 if (!sk->sk_shutdown) {
1228 sk->sk_shutdown = SHUTDOWN_MASK;
1229 l2cap_sock_clear_timer(sk);
1230 __l2cap_sock_close(sk, 0);
1231
1232 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
1233 err = bt_sock_wait_state(sk, BT_CLOSED,
1234 sk->sk_lingertime);
1235 }
1236 release_sock(sk);
1237 return err;
1238 }
1239
1240 static int l2cap_sock_release(struct socket *sock)
1241 {
1242 struct sock *sk = sock->sk;
1243 int err;
1244
1245 BT_DBG("sock %p, sk %p", sock, sk);
1246
1247 if (!sk)
1248 return 0;
1249
1250 err = l2cap_sock_shutdown(sock, 2);
1251
1252 sock_orphan(sk);
1253 l2cap_sock_kill(sk);
1254 return err;
1255 }
1256
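/* Called when configuration has completed in both directions: clear the
 * connect timer and wake up whoever is waiting, either the connecting
 * socket itself or the parent listener for an incoming channel. */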
1257 static void l2cap_chan_ready(struct sock *sk)
1258 {
1259 struct sock *parent = bt_sk(sk)->parent;
1260
1261 BT_DBG("sk %p, parent %p", sk, parent);
1262
1263 l2cap_pi(sk)->conf_state = 0;
1264 l2cap_sock_clear_timer(sk);
1265
1266 if (!parent) {
1267 /* Outgoing channel.
1268 * Wake up socket sleeping on connect.
1269 */
1270 sk->sk_state = BT_CONNECTED;
1271 sk->sk_state_change(sk);
1272 } else {
1273 /* Incoming channel.
1274 * Wake up socket sleeping on accept.
1275 */
1276 parent->sk_data_ready(parent, 0);
1277 }
1278
1279 if (l2cap_pi(sk)->link_mode & L2CAP_LM_SECURE) {
1280 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1281 hci_conn_change_link_key(conn->hcon);
1282 }
1283 }
1284
1285 /* Copy frame to all raw sockets on that connection */
1286 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1287 {
1288 struct l2cap_chan_list *l = &conn->chan_list;
1289 struct sk_buff *nskb;
 1290         struct sock *sk;
1291
1292 BT_DBG("conn %p", conn);
1293
1294 read_lock(&l->lock);
1295 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1296 if (sk->sk_type != SOCK_RAW)
1297 continue;
1298
1299 /* Don't send frame to the socket it came from */
1300 if (skb->sk == sk)
1301 continue;
1302
1303 if (!(nskb = skb_clone(skb, GFP_ATOMIC)))
1304 continue;
1305
1306 if (sock_queue_rcv_skb(sk, nskb))
1307 kfree_skb(nskb);
1308 }
1309 read_unlock(&l->lock);
1310 }
1311
1312 /* ---- L2CAP signalling commands ---- */
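/* Allocate an skb for a signalling command: an L2CAP header on CID 0x0001
 * followed by the command header and payload. Payloads that do not fit in
 * one ACL MTU are continued in fragments chained via frag_list. */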
1313 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1314 u8 code, u8 ident, u16 dlen, void *data)
1315 {
1316 struct sk_buff *skb, **frag;
1317 struct l2cap_cmd_hdr *cmd;
1318 struct l2cap_hdr *lh;
1319 int len, count;
1320
1321 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen);
1322
1323 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1324 count = min_t(unsigned int, conn->mtu, len);
1325
1326 skb = bt_skb_alloc(count, GFP_ATOMIC);
1327 if (!skb)
1328 return NULL;
1329
1330 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1331 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1332 lh->cid = cpu_to_le16(0x0001);
1333
1334 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1335 cmd->code = code;
1336 cmd->ident = ident;
1337 cmd->len = cpu_to_le16(dlen);
1338
1339 if (dlen) {
1340 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1341 memcpy(skb_put(skb, count), data, count);
1342 data += count;
1343 }
1344
1345 len -= skb->len;
1346
1347 /* Continuation fragments (no L2CAP header) */
1348 frag = &skb_shinfo(skb)->frag_list;
1349 while (len) {
1350 count = min_t(unsigned int, conn->mtu, len);
1351
1352 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1353 if (!*frag)
1354 goto fail;
1355
1356 memcpy(skb_put(*frag, count), data, count);
1357
1358 len -= count;
1359 data += count;
1360
1361 frag = &(*frag)->next;
1362 }
1363
1364 return skb;
1365
1366 fail:
1367 kfree_skb(skb);
1368 return NULL;
1369 }
1370
1371 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1372 {
1373 struct l2cap_conf_opt *opt = *ptr;
1374 int len;
1375
1376 len = L2CAP_CONF_OPT_SIZE + opt->len;
1377 *ptr += len;
1378
1379 *type = opt->type;
1380 *olen = opt->len;
1381
1382 switch (opt->len) {
1383 case 1:
1384 *val = *((u8 *) opt->val);
1385 break;
1386
1387 case 2:
1388 *val = __le16_to_cpu(*((__le16 *) opt->val));
1389 break;
1390
1391 case 4:
1392 *val = __le32_to_cpu(*((__le32 *) opt->val));
1393 break;
1394
1395 default:
1396 *val = (unsigned long) opt->val;
1397 break;
1398 }
1399
1400 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1401 return len;
1402 }
1403
1404 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1405 {
1406 struct l2cap_conf_opt *opt = *ptr;
1407
1408 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1409
1410 opt->type = type;
1411 opt->len = len;
1412
1413 switch (len) {
1414 case 1:
1415 *((u8 *) opt->val) = val;
1416 break;
1417
1418 case 2:
1419 *((__le16 *) opt->val) = cpu_to_le16(val);
1420 break;
1421
1422 case 4:
1423 *((__le32 *) opt->val) = cpu_to_le32(val);
1424 break;
1425
1426 default:
1427 memcpy(opt->val, (void *) val, len);
1428 break;
1429 }
1430
1431 *ptr += L2CAP_CONF_OPT_SIZE + len;
1432 }
1433
1434 static int l2cap_build_conf_req(struct sock *sk, void *data)
1435 {
1436 struct l2cap_pinfo *pi = l2cap_pi(sk);
1437 struct l2cap_conf_req *req = data;
1438 void *ptr = req->data;
1439
1440 BT_DBG("sk %p", sk);
1441
1442 if (pi->imtu != L2CAP_DEFAULT_MTU)
1443 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1444
1445 /* FIXME: Need actual value of the flush timeout */
1446 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1447 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1448
1449 req->dcid = cpu_to_le16(pi->dcid);
1450 req->flags = cpu_to_le16(0);
1451
1452 return ptr - data;
1453 }
1454
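/* Walk the options accumulated in conf_req and build a Configure Response
 * in 'data'. Unknown non-hint options are echoed back with
 * L2CAP_CONF_UNKNOWN, only basic mode is accepted, and the MTU proposed by
 * the peer becomes our outgoing MTU if it is not smaller than the current
 * one. */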
1455 static int l2cap_parse_conf_req(struct sock *sk, void *data)
1456 {
1457 struct l2cap_pinfo *pi = l2cap_pi(sk);
1458 struct l2cap_conf_rsp *rsp = data;
1459 void *ptr = rsp->data;
1460 void *req = pi->conf_req;
1461 int len = pi->conf_len;
1462 int type, hint, olen;
1463 unsigned long val;
1464 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1465 u16 mtu = L2CAP_DEFAULT_MTU;
1466 u16 result = L2CAP_CONF_SUCCESS;
1467
1468 BT_DBG("sk %p", sk);
1469
1470 while (len >= L2CAP_CONF_OPT_SIZE) {
1471 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1472
1473 hint = type & 0x80;
1474 type &= 0x7f;
1475
1476 switch (type) {
1477 case L2CAP_CONF_MTU:
1478 mtu = val;
1479 break;
1480
1481 case L2CAP_CONF_FLUSH_TO:
1482 pi->flush_to = val;
1483 break;
1484
1485 case L2CAP_CONF_QOS:
1486 break;
1487
1488 case L2CAP_CONF_RFC:
1489 if (olen == sizeof(rfc))
1490 memcpy(&rfc, (void *) val, olen);
1491 break;
1492
1493 default:
1494 if (hint)
1495 break;
1496
1497 result = L2CAP_CONF_UNKNOWN;
1498 *((u8 *) ptr++) = type;
1499 break;
1500 }
1501 }
1502
1503 if (result == L2CAP_CONF_SUCCESS) {
1504 /* Configure output options and let the other side know
1505 * which ones we don't like. */
1506
1507 if (rfc.mode == L2CAP_MODE_BASIC) {
1508 if (mtu < pi->omtu)
1509 result = L2CAP_CONF_UNACCEPT;
1510 else {
1511 pi->omtu = mtu;
1512 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1513 }
1514
1515 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1516 } else {
1517 result = L2CAP_CONF_UNACCEPT;
1518
1519 memset(&rfc, 0, sizeof(rfc));
1520 rfc.mode = L2CAP_MODE_BASIC;
1521
1522 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1523 sizeof(rfc), (unsigned long) &rfc);
1524 }
1525 }
1526
1527 rsp->scid = cpu_to_le16(pi->dcid);
1528 rsp->result = cpu_to_le16(result);
1529 rsp->flags = cpu_to_le16(0x0000);
1530
1531 return ptr - data;
1532 }
1533
1534 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1535 {
1536 struct l2cap_conf_rsp *rsp = data;
1537 void *ptr = rsp->data;
1538
1539 BT_DBG("sk %p", sk);
1540
1541 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1542 rsp->result = cpu_to_le16(result);
1543 rsp->flags = cpu_to_le16(flags);
1544
1545 return ptr - data;
1546 }
1547
1548 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1549 {
1550 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
1551
1552 if (rej->reason != 0x0000)
1553 return 0;
1554
1555 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
1556 cmd->ident == conn->info_ident) {
1557 conn->info_ident = 0;
1558 del_timer(&conn->info_timer);
1559 l2cap_conn_start(conn);
1560 }
1561
1562 return 0;
1563 }
1564
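/* Handle an incoming Connection Request: look up a listening socket for
 * the PSM, allocate and initialise a child socket, add it to the channel
 * list and reply with a Connection Response carrying a success, pending
 * or error result. */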
1565 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1566 {
1567 struct l2cap_chan_list *list = &conn->chan_list;
1568 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
1569 struct l2cap_conn_rsp rsp;
1570 struct sock *sk, *parent;
1571 int result, status = L2CAP_CS_NO_INFO;
1572
1573 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
1574 __le16 psm = req->psm;
1575
1576 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
1577
 1578         /* Check if we have a socket listening on this PSM */
1579 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
1580 if (!parent) {
1581 result = L2CAP_CR_BAD_PSM;
1582 goto sendresp;
1583 }
1584
1585 /* Check if the ACL is secure enough (if not SDP) */
1586 if (psm != cpu_to_le16(0x0001) &&
1587 !hci_conn_check_link_mode(conn->hcon)) {
1588 result = L2CAP_CR_SEC_BLOCK;
1589 goto response;
1590 }
1591
1592 result = L2CAP_CR_NO_MEM;
1593
1594 /* Check for backlog size */
1595 if (sk_acceptq_is_full(parent)) {
1596 BT_DBG("backlog full %d", parent->sk_ack_backlog);
1597 goto response;
1598 }
1599
1600 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
1601 if (!sk)
1602 goto response;
1603
1604 write_lock_bh(&list->lock);
1605
 1606         /* Check if we already have a channel with that dcid */
1607 if (__l2cap_get_chan_by_dcid(list, scid)) {
1608 write_unlock_bh(&list->lock);
1609 sock_set_flag(sk, SOCK_ZAPPED);
1610 l2cap_sock_kill(sk);
1611 goto response;
1612 }
1613
1614 hci_conn_hold(conn->hcon);
1615
1616 l2cap_sock_init(sk, parent);
1617 bacpy(&bt_sk(sk)->src, conn->src);
1618 bacpy(&bt_sk(sk)->dst, conn->dst);
1619 l2cap_pi(sk)->psm = psm;
1620 l2cap_pi(sk)->dcid = scid;
1621
1622 __l2cap_chan_add(conn, sk, parent);
1623 dcid = l2cap_pi(sk)->scid;
1624
1625 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1626
1627 l2cap_pi(sk)->ident = cmd->ident;
1628
1629 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
1630 if (l2cap_check_link_mode(sk)) {
1631 sk->sk_state = BT_CONFIG;
1632 result = L2CAP_CR_SUCCESS;
1633 status = L2CAP_CS_NO_INFO;
1634 } else {
1635 sk->sk_state = BT_CONNECT2;
1636 result = L2CAP_CR_PEND;
1637 status = L2CAP_CS_AUTHEN_PEND;
1638 }
1639 } else {
1640 sk->sk_state = BT_CONNECT2;
1641 result = L2CAP_CR_PEND;
1642 status = L2CAP_CS_NO_INFO;
1643 }
1644
1645 write_unlock_bh(&list->lock);
1646
1647 response:
1648 bh_unlock_sock(parent);
1649
1650 sendresp:
1651 rsp.scid = cpu_to_le16(scid);
1652 rsp.dcid = cpu_to_le16(dcid);
1653 rsp.result = cpu_to_le16(result);
1654 rsp.status = cpu_to_le16(status);
1655 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1656
1657 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
1658 struct l2cap_info_req info;
1659 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1660
1661 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1662 conn->info_ident = l2cap_get_ident(conn);
1663
1664 mod_timer(&conn->info_timer, jiffies +
1665 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
1666
1667 l2cap_send_cmd(conn, conn->info_ident,
1668 L2CAP_INFO_REQ, sizeof(info), &info);
1669 }
1670
1671 return 0;
1672 }
1673
1674 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1675 {
1676 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
1677 u16 scid, dcid, result, status;
1678 struct sock *sk;
1679 u8 req[128];
1680
1681 scid = __le16_to_cpu(rsp->scid);
1682 dcid = __le16_to_cpu(rsp->dcid);
1683 result = __le16_to_cpu(rsp->result);
1684 status = __le16_to_cpu(rsp->status);
1685
1686 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
1687
1688 if (scid) {
1689 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1690 return 0;
1691 } else {
1692 if (!(sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident)))
1693 return 0;
1694 }
1695
1696 switch (result) {
1697 case L2CAP_CR_SUCCESS:
1698 sk->sk_state = BT_CONFIG;
1699 l2cap_pi(sk)->ident = 0;
1700 l2cap_pi(sk)->dcid = dcid;
1701 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1702
1703 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1704 l2cap_build_conf_req(sk, req), req);
1705 break;
1706
1707 case L2CAP_CR_PEND:
1708 break;
1709
1710 default:
1711 l2cap_chan_del(sk, ECONNREFUSED);
1712 break;
1713 }
1714
1715 bh_unlock_sock(sk);
1716 return 0;
1717 }
1718
1719 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
1720 {
1721 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
1722 u16 dcid, flags;
1723 u8 rsp[64];
1724 struct sock *sk;
1725 int len;
1726
1727 dcid = __le16_to_cpu(req->dcid);
1728 flags = __le16_to_cpu(req->flags);
1729
1730 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
1731
1732 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1733 return -ENOENT;
1734
1735 if (sk->sk_state == BT_DISCONN)
1736 goto unlock;
1737
1738 /* Reject if config buffer is too small. */
1739 len = cmd_len - sizeof(*req);
1740 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
1741 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1742 l2cap_build_conf_rsp(sk, rsp,
1743 L2CAP_CONF_REJECT, flags), rsp);
1744 goto unlock;
1745 }
1746
1747 /* Store config. */
1748 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
1749 l2cap_pi(sk)->conf_len += len;
1750
1751 if (flags & 0x0001) {
1752 /* Incomplete config. Send empty response. */
1753 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
1754 l2cap_build_conf_rsp(sk, rsp,
1755 L2CAP_CONF_SUCCESS, 0x0001), rsp);
1756 goto unlock;
1757 }
1758
1759 /* Complete config. */
1760 len = l2cap_parse_conf_req(sk, rsp);
1761 if (len < 0)
1762 goto unlock;
1763
1764 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
1765
1766 /* Reset config buffer. */
1767 l2cap_pi(sk)->conf_len = 0;
1768
1769 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
1770 goto unlock;
1771
1772 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
1773 sk->sk_state = BT_CONNECTED;
1774 l2cap_chan_ready(sk);
1775 goto unlock;
1776 }
1777
1778 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
1779 u8 buf[64];
1780 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1781 l2cap_build_conf_req(sk, buf), buf);
1782 }
1783
1784 unlock:
1785 bh_unlock_sock(sk);
1786 return 0;
1787 }
1788
1789 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1790 {
1791 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
1792 u16 scid, flags, result;
1793 struct sock *sk;
1794
1795 scid = __le16_to_cpu(rsp->scid);
1796 flags = __le16_to_cpu(rsp->flags);
1797 result = __le16_to_cpu(rsp->result);
1798
1799 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x", scid, flags, result);
1800
1801 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1802 return 0;
1803
1804 switch (result) {
1805 case L2CAP_CONF_SUCCESS:
1806 break;
1807
1808 case L2CAP_CONF_UNACCEPT:
1809 if (++l2cap_pi(sk)->conf_retry < L2CAP_CONF_MAX_RETRIES) {
1810 char req[128];
1811 /* It does not make sense to adjust L2CAP parameters
1812 * that are currently defined in the spec. We simply
 1813                          * resend the config request that we sent earlier. It is
1814 * stupid, but it helps qualification testing which
1815 * expects at least some response from us. */
1816 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1817 l2cap_build_conf_req(sk, req), req);
1818 goto done;
1819 }
1820
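                /* fall through and disconnect when retries are exhausted */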
1821 default:
1822 sk->sk_state = BT_DISCONN;
1823 sk->sk_err = ECONNRESET;
1824 l2cap_sock_set_timer(sk, HZ * 5);
1825 {
1826 struct l2cap_disconn_req req;
1827 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
1828 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
1829 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1830 L2CAP_DISCONN_REQ, sizeof(req), &req);
1831 }
1832 goto done;
1833 }
1834
1835 if (flags & 0x01)
1836 goto done;
1837
1838 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
1839
1840 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
1841 sk->sk_state = BT_CONNECTED;
1842 l2cap_chan_ready(sk);
1843 }
1844
1845 done:
1846 bh_unlock_sock(sk);
1847 return 0;
1848 }
1849
1850 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1851 {
1852 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
1853 struct l2cap_disconn_rsp rsp;
1854 u16 dcid, scid;
1855 struct sock *sk;
1856
1857 scid = __le16_to_cpu(req->scid);
1858 dcid = __le16_to_cpu(req->dcid);
1859
1860 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
1861
1862 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid)))
1863 return 0;
1864
1865 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1866 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1867 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
1868
1869 sk->sk_shutdown = SHUTDOWN_MASK;
1870
1871 l2cap_chan_del(sk, ECONNRESET);
1872 bh_unlock_sock(sk);
1873
1874 l2cap_sock_kill(sk);
1875 return 0;
1876 }
1877
1878 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1879 {
1880 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
1881 u16 dcid, scid;
1882 struct sock *sk;
1883
1884 scid = __le16_to_cpu(rsp->scid);
1885 dcid = __le16_to_cpu(rsp->dcid);
1886
1887 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
1888
1889 if (!(sk = l2cap_get_chan_by_scid(&conn->chan_list, scid)))
1890 return 0;
1891
1892 l2cap_chan_del(sk, 0);
1893 bh_unlock_sock(sk);
1894
1895 l2cap_sock_kill(sk);
1896 return 0;
1897 }
1898
1899 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1900 {
1901 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
1902 u16 type;
1903
1904 type = __le16_to_cpu(req->type);
1905
1906 BT_DBG("type 0x%4.4x", type);
1907
1908 if (type == L2CAP_IT_FEAT_MASK) {
1909 u8 buf[8];
1910 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
1911 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1912 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
1913 put_unaligned(cpu_to_le32(l2cap_feat_mask), (__le32 *) rsp->data);
1914 l2cap_send_cmd(conn, cmd->ident,
1915 L2CAP_INFO_RSP, sizeof(buf), buf);
1916 } else {
1917 struct l2cap_info_rsp rsp;
1918 rsp.type = cpu_to_le16(type);
1919 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
1920 l2cap_send_cmd(conn, cmd->ident,
1921 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
1922 }
1923
1924 return 0;
1925 }
1926
1927 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
1928 {
1929 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
1930 u16 type, result;
1931
1932 type = __le16_to_cpu(rsp->type);
1933 result = __le16_to_cpu(rsp->result);
1934
1935 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
1936
1937 conn->info_ident = 0;
1938
1939 del_timer(&conn->info_timer);
1940
1941 if (type == L2CAP_IT_FEAT_MASK)
1942 conn->feat_mask = get_unaligned_le32(rsp->data);
1943
1944 l2cap_conn_start(conn);
1945
1946 return 0;
1947 }
1948
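/* Process an incoming frame on the signalling channel. The frame is first
 * mirrored to raw sockets, then parsed as a sequence of commands; each one
 * is dispatched by its code and any failure is answered with a Command
 * Reject. */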
1949 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
1950 {
1951 u8 *data = skb->data;
1952 int len = skb->len;
1953 struct l2cap_cmd_hdr cmd;
1954 int err = 0;
1955
1956 l2cap_raw_recv(conn, skb);
1957
1958 while (len >= L2CAP_CMD_HDR_SIZE) {
1959 u16 cmd_len;
1960 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
1961 data += L2CAP_CMD_HDR_SIZE;
1962 len -= L2CAP_CMD_HDR_SIZE;
1963
1964 cmd_len = le16_to_cpu(cmd.len);
1965
1966 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
1967
1968 if (cmd_len > len || !cmd.ident) {
1969 BT_DBG("corrupted command");
1970 break;
1971 }
1972
1973 switch (cmd.code) {
1974 case L2CAP_COMMAND_REJ:
1975 l2cap_command_rej(conn, &cmd, data);
1976 break;
1977
1978 case L2CAP_CONN_REQ:
1979 err = l2cap_connect_req(conn, &cmd, data);
1980 break;
1981
1982 case L2CAP_CONN_RSP:
1983 err = l2cap_connect_rsp(conn, &cmd, data);
1984 break;
1985
1986 case L2CAP_CONF_REQ:
1987 err = l2cap_config_req(conn, &cmd, cmd_len, data);
1988 break;
1989
1990 case L2CAP_CONF_RSP:
1991 err = l2cap_config_rsp(conn, &cmd, data);
1992 break;
1993
1994 case L2CAP_DISCONN_REQ:
1995 err = l2cap_disconnect_req(conn, &cmd, data);
1996 break;
1997
1998 case L2CAP_DISCONN_RSP:
1999 err = l2cap_disconnect_rsp(conn, &cmd, data);
2000 break;
2001
2002 case L2CAP_ECHO_REQ:
2003 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
2004 break;
2005
2006 case L2CAP_ECHO_RSP:
2007 break;
2008
2009 case L2CAP_INFO_REQ:
2010 err = l2cap_information_req(conn, &cmd, data);
2011 break;
2012
2013 case L2CAP_INFO_RSP:
2014 err = l2cap_information_rsp(conn, &cmd, data);
2015 break;
2016
2017 default:
2018 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
2019 err = -EINVAL;
2020 break;
2021 }
2022
2023 if (err) {
2024 struct l2cap_cmd_rej rej;
2025 BT_DBG("error %d", err);
2026
2027 /* FIXME: Map err to a valid reason */
2028 rej.reason = cpu_to_le16(0);
2029 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
2030 }
2031
2032 data += cmd_len;
2033 len -= cmd_len;
2034 }
2035
2036 kfree_skb(skb);
2037 }
2038
2039 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
2040 {
2041 struct sock *sk;
2042
2043 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
2044 if (!sk) {
2045 BT_DBG("unknown cid 0x%4.4x", cid);
2046 goto drop;
2047 }
2048
2049 BT_DBG("sk %p, len %d", sk, skb->len);
2050
2051 if (sk->sk_state != BT_CONNECTED)
2052 goto drop;
2053
2054 if (l2cap_pi(sk)->imtu < skb->len)
2055 goto drop;
2056
 2057         /* If the socket receive buffer overflows we drop data here,
 2058          * which is *bad* because L2CAP has to be reliable.
 2059          * But we don't have any other choice: L2CAP doesn't
 2060          * provide a flow control mechanism. */
2061
2062 if (!sock_queue_rcv_skb(sk, skb))
2063 goto done;
2064
2065 drop:
2066 kfree_skb(skb);
2067
2068 done:
2069 if (sk)
2070 bh_unlock_sock(sk);
2071
2072 return 0;
2073 }
2074
2075 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
2076 {
2077 struct sock *sk;
2078
2079 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
2080 if (!sk)
2081 goto drop;
2082
2083 BT_DBG("sk %p, len %d", sk, skb->len);
2084
2085 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
2086 goto drop;
2087
2088 if (l2cap_pi(sk)->imtu < skb->len)
2089 goto drop;
2090
2091 if (!sock_queue_rcv_skb(sk, skb))
2092 goto done;
2093
2094 drop:
2095 kfree_skb(skb);
2096
2097 done:
2098 if (sk) bh_unlock_sock(sk);
2099 return 0;
2100 }
2101
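/* Dispatch a reassembled L2CAP frame by destination CID: 0x0001 is the
 * signalling channel, 0x0002 the connectionless channel (payload prefixed
 * with a PSM), anything else is connection-oriented data. */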
2102 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
2103 {
2104 struct l2cap_hdr *lh = (void *) skb->data;
2105 u16 cid, len;
2106 __le16 psm;
2107
2108 skb_pull(skb, L2CAP_HDR_SIZE);
2109 cid = __le16_to_cpu(lh->cid);
2110 len = __le16_to_cpu(lh->len);
2111
2112 BT_DBG("len %d, cid 0x%4.4x", len, cid);
2113
2114 switch (cid) {
2115 case 0x0001:
2116 l2cap_sig_channel(conn, skb);
2117 break;
2118
2119 case 0x0002:
2120 psm = get_unaligned((__le16 *) skb->data);
2121 skb_pull(skb, 2);
2122 l2cap_conless_channel(conn, psm, skb);
2123 break;
2124
2125 default:
2126 l2cap_data_channel(conn, cid, skb);
2127 break;
2128 }
2129 }
2130
2131 /* ---- L2CAP interface with lower layer (HCI) ---- */
2132
2133 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
2134 {
2135 int exact = 0, lm1 = 0, lm2 = 0;
2136 register struct sock *sk;
2137 struct hlist_node *node;
2138
2139 if (type != ACL_LINK)
2140 return 0;
2141
2142 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
2143
2144 /* Find listening sockets and check their link_mode */
2145 read_lock(&l2cap_sk_list.lock);
2146 sk_for_each(sk, node, &l2cap_sk_list.head) {
2147 if (sk->sk_state != BT_LISTEN)
2148 continue;
2149
2150 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
2151 lm1 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2152 exact++;
2153 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
2154 lm2 |= (HCI_LM_ACCEPT | l2cap_pi(sk)->link_mode);
2155 }
2156 read_unlock(&l2cap_sk_list.lock);
2157
2158 return exact ? lm1 : lm2;
2159 }
2160
2161 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
2162 {
2163 struct l2cap_conn *conn;
2164
2165 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
2166
2167 if (hcon->type != ACL_LINK)
2168 return 0;
2169
2170 if (!status) {
2171 conn = l2cap_conn_add(hcon, status);
2172 if (conn)
2173 l2cap_conn_ready(conn);
2174 } else
2175 l2cap_conn_del(hcon, bt_err(status));
2176
2177 return 0;
2178 }
2179
2180 static int l2cap_disconn_ind(struct hci_conn *hcon, u8 reason)
2181 {
2182 BT_DBG("hcon %p reason %d", hcon, reason);
2183
2184 if (hcon->type != ACL_LINK)
2185 return 0;
2186
2187 l2cap_conn_del(hcon, bt_err(reason));
2188
2189 return 0;
2190 }
2191
2192 static int l2cap_auth_cfm(struct hci_conn *hcon, u8 status)
2193 {
2194 struct l2cap_chan_list *l;
2195 struct l2cap_conn *conn = hcon->l2cap_data;
2196 struct sock *sk;
2197
2198 if (!conn)
2199 return 0;
2200
2201 l = &conn->chan_list;
2202
2203 BT_DBG("conn %p", conn);
2204
2205 read_lock(&l->lock);
2206
2207 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2208 struct l2cap_pinfo *pi = l2cap_pi(sk);
2209
2210 bh_lock_sock(sk);
2211
2212 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2213 !(hcon->link_mode & HCI_LM_ENCRYPT) &&
2214 !status) {
2215 bh_unlock_sock(sk);
2216 continue;
2217 }
2218
2219 if (sk->sk_state == BT_CONNECT) {
2220 if (!status) {
2221 struct l2cap_conn_req req;
2222 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2223 req.psm = l2cap_pi(sk)->psm;
2224
2225 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2226
2227 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2228 L2CAP_CONN_REQ, sizeof(req), &req);
2229 } else {
2230 l2cap_sock_clear_timer(sk);
2231 l2cap_sock_set_timer(sk, HZ / 10);
2232 }
2233 } else if (sk->sk_state == BT_CONNECT2) {
2234 struct l2cap_conn_rsp rsp;
2235 __u16 result;
2236
2237 if (!status) {
2238 sk->sk_state = BT_CONFIG;
2239 result = L2CAP_CR_SUCCESS;
2240 } else {
2241 sk->sk_state = BT_DISCONN;
2242 l2cap_sock_set_timer(sk, HZ / 10);
2243 result = L2CAP_CR_SEC_BLOCK;
2244 }
2245
2246 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2247 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2248 rsp.result = cpu_to_le16(result);
2249 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2250 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2251 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2252 }
2253
2254 bh_unlock_sock(sk);
2255 }
2256
2257 read_unlock(&l->lock);
2258
2259 return 0;
2260 }
2261
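/*
 * Encryption state of the ACL link changed. Established channels that
 * require encryption are closed if it was switched off; channels still
 * waiting for security complete their connection setup in the same way
 * as in l2cap_auth_cfm().
 */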
2262 static int l2cap_encrypt_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
2263 {
2264 struct l2cap_chan_list *l;
2265 struct l2cap_conn *conn = hcon->l2cap_data;
2266 struct sock *sk;
2267
2268 if (!conn)
2269 return 0;
2270
2271 l = &conn->chan_list;
2272
2273 BT_DBG("conn %p", conn);
2274
2275 read_lock(&l->lock);
2276
2277 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2278 struct l2cap_pinfo *pi = l2cap_pi(sk);
2279
2280 bh_lock_sock(sk);
2281
2282 if ((pi->link_mode & (L2CAP_LM_ENCRYPT | L2CAP_LM_SECURE)) &&
2283 (sk->sk_state == BT_CONNECTED ||
2284 sk->sk_state == BT_CONFIG) &&
2285 !status && encrypt == 0x00) {
2286 __l2cap_sock_close(sk, ECONNREFUSED);
2287 bh_unlock_sock(sk);
2288 continue;
2289 }
2290
2291 if (sk->sk_state == BT_CONNECT) {
2292 if (!status) {
2293 struct l2cap_conn_req req;
2294 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
2295 req.psm = l2cap_pi(sk)->psm;
2296
2297 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
2298
2299 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2300 L2CAP_CONN_REQ, sizeof(req), &req);
2301 } else {
2302 l2cap_sock_clear_timer(sk);
2303 l2cap_sock_set_timer(sk, HZ / 10);
2304 }
2305 } else if (sk->sk_state == BT_CONNECT2) {
2306 struct l2cap_conn_rsp rsp;
2307 __u16 result;
2308
2309 if (!status) {
2310 sk->sk_state = BT_CONFIG;
2311 result = L2CAP_CR_SUCCESS;
2312 } else {
2313 sk->sk_state = BT_DISCONN;
2314 l2cap_sock_set_timer(sk, HZ / 10);
2315 result = L2CAP_CR_SEC_BLOCK;
2316 }
2317
2318 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2319 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
2320 rsp.result = cpu_to_le16(result);
2321 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2322 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
2323 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2324 }
2325
2326 bh_unlock_sock(sk);
2327 }
2328
2329 read_unlock(&l->lock);
2330
2331 return 0;
2332 }
2333
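/*
 * Inbound ACL data from the HCI core. An L2CAP PDU may be split across
 * several ACL fragments: a start fragment is buffered in conn->rx_skb
 * and continuation fragments are appended until the length announced in
 * the L2CAP header has been received, at which point the reassembled
 * frame is handed to l2cap_recv_frame().
 */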
2334 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
2335 {
2336 struct l2cap_conn *conn = hcon->l2cap_data;
2337
2338 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
2339 goto drop;
2340
2341 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
2342
2343 if (flags & ACL_START) {
2344 struct l2cap_hdr *hdr;
2345 int len;
2346
2347 if (conn->rx_len) {
2348 BT_ERR("Unexpected start frame (len %d)", skb->len);
2349 kfree_skb(conn->rx_skb);
2350 conn->rx_skb = NULL;
2351 conn->rx_len = 0;
2352 l2cap_conn_unreliable(conn, ECOMM);
2353 }
2354
2355 if (skb->len < 2) {
2356 BT_ERR("Frame is too short (len %d)", skb->len);
2357 l2cap_conn_unreliable(conn, ECOMM);
2358 goto drop;
2359 }
2360
2361 hdr = (struct l2cap_hdr *) skb->data;
2362 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
2363
2364 if (len == skb->len) {
2365 /* Complete frame received */
2366 l2cap_recv_frame(conn, skb);
2367 return 0;
2368 }
2369
2370 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
2371
2372 if (skb->len > len) {
2373 BT_ERR("Frame is too long (len %d, expected len %d)",
2374 skb->len, len);
2375 l2cap_conn_unreliable(conn, ECOMM);
2376 goto drop;
2377 }
2378
2379 /* Allocate skb for the complete frame (with header) */
2380 if (!(conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC)))
2381 goto drop;
2382
2383 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2384 skb->len);
2385 conn->rx_len = len - skb->len;
2386 } else {
2387 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
2388
2389 if (!conn->rx_len) {
2390 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
2391 l2cap_conn_unreliable(conn, ECOMM);
2392 goto drop;
2393 }
2394
2395 if (skb->len > conn->rx_len) {
2396 BT_ERR("Fragment is too long (len %d, expected %d)",
2397 skb->len, conn->rx_len);
2398 kfree_skb(conn->rx_skb);
2399 conn->rx_skb = NULL;
2400 conn->rx_len = 0;
2401 l2cap_conn_unreliable(conn, ECOMM);
2402 goto drop;
2403 }
2404
2405 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
2406 skb->len);
2407 conn->rx_len -= skb->len;
2408
2409 if (!conn->rx_len) {
2410 /* Complete frame received */
2411 l2cap_recv_frame(conn, conn->rx_skb);
2412 conn->rx_skb = NULL;
2413 }
2414 }
2415
2416 drop:
2417 kfree_skb(skb);
2418 return 0;
2419 }
2420
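/*
 * Show callback for the "l2cap" class attribute in sysfs: print one
 * line per L2CAP socket with its addresses, state, PSM, channel IDs,
 * MTUs and link mode.
 */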
2421 static ssize_t l2cap_sysfs_show(struct class *dev, char *buf)
2422 {
2423 struct sock *sk;
2424 struct hlist_node *node;
2425 char *str = buf;
2426
2427 read_lock_bh(&l2cap_sk_list.lock);
2428
2429 sk_for_each(sk, node, &l2cap_sk_list.head) {
2430 struct l2cap_pinfo *pi = l2cap_pi(sk);
2431
2432 str += sprintf(str, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
2433 batostr(&bt_sk(sk)->src), batostr(&bt_sk(sk)->dst),
2434 sk->sk_state, btohs(pi->psm), pi->scid, pi->dcid,
2435 pi->imtu, pi->omtu, pi->link_mode);
2436 }
2437
2438 read_unlock_bh(&l2cap_sk_list.lock);
2439
2440 return (str - buf);
2441 }
2442
2443 static CLASS_ATTR(l2cap, S_IRUGO, l2cap_sysfs_show, NULL);
2444
2445 static const struct proto_ops l2cap_sock_ops = {
2446 .family = PF_BLUETOOTH,
2447 .owner = THIS_MODULE,
2448 .release = l2cap_sock_release,
2449 .bind = l2cap_sock_bind,
2450 .connect = l2cap_sock_connect,
2451 .listen = l2cap_sock_listen,
2452 .accept = l2cap_sock_accept,
2453 .getname = l2cap_sock_getname,
2454 .sendmsg = l2cap_sock_sendmsg,
2455 .recvmsg = bt_sock_recvmsg,
2456 .poll = bt_sock_poll,
2457 .ioctl = bt_sock_ioctl,
2458 .mmap = sock_no_mmap,
2459 .socketpair = sock_no_socketpair,
2460 .shutdown = l2cap_sock_shutdown,
2461 .setsockopt = l2cap_sock_setsockopt,
2462 .getsockopt = l2cap_sock_getsockopt
2463 };
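/*
 * Illustrative sketch only (user-space view of the ops above, not part
 * of this file's interface): an application reaches these handlers
 * through the ordinary socket calls, roughly
 *
 *	int fd = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP);
 *	struct sockaddr_l2 addr = { 0 };
 *
 *	addr.l2_family = AF_BLUETOOTH;
 *	addr.l2_psm    = htobs(0x1001);
 *	str2ba("00:11:22:33:44:55", &addr.l2_bdaddr);
 *	connect(fd, (struct sockaddr *) &addr, sizeof(addr));
 *
 * The PSM, the address and the helpers htobs()/str2ba() (from the BlueZ
 * user-space library) are placeholders chosen for illustration.
 */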
2464
2465 static struct net_proto_family l2cap_sock_family_ops = {
2466 .family = PF_BLUETOOTH,
2467 .owner = THIS_MODULE,
2468 .create = l2cap_sock_create,
2469 };
2470
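/* Callbacks registered with the HCI core; they connect the L2CAP layer
 * to ACL link events and to incoming ACL data. */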
2471 static struct hci_proto l2cap_hci_proto = {
2472 .name = "L2CAP",
2473 .id = HCI_PROTO_L2CAP,
2474 .connect_ind = l2cap_connect_ind,
2475 .connect_cfm = l2cap_connect_cfm,
2476 .disconn_ind = l2cap_disconn_ind,
2477 .auth_cfm = l2cap_auth_cfm,
2478 .encrypt_cfm = l2cap_encrypt_cfm,
2479 .recv_acldata = l2cap_recv_acldata
2480 };
2481
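/*
 * Module initialization: register the L2CAP protocol with the socket
 * core, register the PF_BLUETOOTH/BTPROTO_L2CAP socket family, hook
 * into the HCI core and create the sysfs info file.
 */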
2482 static int __init l2cap_init(void)
2483 {
2484 int err;
2485
2486 err = proto_register(&l2cap_proto, 0);
2487 if (err < 0)
2488 return err;
2489
2490 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
2491 if (err < 0) {
2492 BT_ERR("L2CAP socket registration failed");
2493 goto error;
2494 }
2495
2496 err = hci_register_proto(&l2cap_hci_proto);
2497 if (err < 0) {
2498 BT_ERR("L2CAP protocol registration failed");
2499 bt_sock_unregister(BTPROTO_L2CAP);
2500 goto error;
2501 }
2502
2503 if (class_create_file(bt_class, &class_attr_l2cap) < 0)
2504 BT_ERR("Failed to create L2CAP info file");
2505
2506 BT_INFO("L2CAP ver %s", VERSION);
2507 BT_INFO("L2CAP socket layer initialized");
2508
2509 return 0;
2510
2511 error:
2512 proto_unregister(&l2cap_proto);
2513 return err;
2514 }
2515
2516 static void __exit l2cap_exit(void)
2517 {
2518 class_remove_file(bt_class, &class_attr_l2cap);
2519
2520 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
2521 BT_ERR("L2CAP socket unregistration failed");
2522
2523 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
2524 BT_ERR("L2CAP protocol unregistration failed");
2525
2526 proto_unregister(&l2cap_proto);
2527 }
2528
2529 void l2cap_load(void)
2530 {
2531 /* Dummy function to trigger automatic L2CAP module loading by
2532 * other modules that use L2CAP sockets but don't use any other
2533 * symbols from it. */
2534 return;
2535 }
2536 EXPORT_SYMBOL(l2cap_load);
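/*
 * Hedged usage sketch (the function name below is made up for
 * illustration): an upper-layer module that opens L2CAP sockets from
 * kernel space can simply call l2cap_load() in its init path; the
 * resulting symbol dependency makes the module loader bring in
 * l2cap.ko automatically.
 *
 *	static int __init example_upper_init(void)
 *	{
 *		l2cap_load();
 *		return 0;
 *	}
 */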
2537
2538 module_init(l2cap_init);
2539 module_exit(l2cap_exit);
2540
2541 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
2542 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
2543 MODULE_VERSION(VERSION);
2544 MODULE_LICENSE("GPL");
2545 MODULE_ALIAS("bt-proto-0");