ARM: imx: move mx1 support to mach-imx
[deliverable/linux.git] / net / bluetooth / l2cap.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
10
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth L2CAP core and sockets. */
26
27 #include <linux/module.h>
28
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
47 #include <net/sock.h>
48
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
51
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
55
56 #define VERSION "2.14"
57
58 #ifdef CONFIG_BT_L2CAP_EXT_FEATURES
59 static int enable_ertm = 1;
60 #else
61 static int enable_ertm = 0;
62 #endif
63 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
64 static int tx_window = L2CAP_DEFAULT_TX_WINDOW;
65
66 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
67 static u8 l2cap_fixed_chan[8] = { 0x02, };
68
69 static const struct proto_ops l2cap_sock_ops;
70
71 static struct workqueue_struct *_busy_wq;
72
73 static struct bt_sock_list l2cap_sk_list = {
74 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
75 };
76
77 static void l2cap_busy_work(struct work_struct *work);
78
79 static void __l2cap_sock_close(struct sock *sk, int reason);
80 static void l2cap_sock_close(struct sock *sk);
81 static void l2cap_sock_kill(struct sock *sk);
82
83 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
84 u8 code, u8 ident, u16 dlen, void *data);
85
86 /* ---- L2CAP timers ---- */
/* Socket timer callback (runs in timer/softirq context).
 * Picks an error code based on the state the socket timed out in, tears
 * the channel down, then drops the reference taken when the timer was set. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	/* Timeouts during or after connection setup are reported as refused;
	 * anything else (e.g. disconnect timeout) as a plain timeout. */
	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	/* Kill only after unlocking: l2cap_sock_kill() requires an unlocked
	 * socket, and sock_put() balances sk_reset_timer()'s sock_hold(). */
	l2cap_sock_kill(sk);
	sock_put(sk);
}
111
/* (Re)arm the socket timer to fire @timeout jiffies from now.
 * sk_reset_timer() takes a socket reference released in the handler. */
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
117
/* Cancel a pending socket timer, dropping its reference if it was armed. */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
123
124 /* ---- L2CAP channels ---- */
125 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
126 {
127 struct sock *s;
128 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
129 if (l2cap_pi(s)->dcid == cid)
130 break;
131 }
132 return s;
133 }
134
135 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
136 {
137 struct sock *s;
138 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
139 if (l2cap_pi(s)->scid == cid)
140 break;
141 }
142 return s;
143 }
144
/* Find channel with given SCID.
 * Returns the socket with bh_lock_sock() held (caller must unlock), or NULL.
 * The channel list read lock is taken only around the search so the lock
 * ordering is always list lock -> socket lock. */
static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_scid(l, cid);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
157
158 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
159 {
160 struct sock *s;
161 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
162 if (l2cap_pi(s)->ident == ident)
163 break;
164 }
165 return s;
166 }
167
/* Find the channel waiting on signalling identifier @ident.
 * Returns the socket with bh_lock_sock() held (caller must unlock), or NULL. */
static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
{
	struct sock *s;
	read_lock(&l->lock);
	s = __l2cap_get_chan_by_ident(l, ident);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l->lock);
	return s;
}
178
179 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
180 {
181 u16 cid = L2CAP_CID_DYN_START;
182
183 for (; cid < L2CAP_CID_DYN_END; cid++) {
184 if (!__l2cap_get_chan_by_scid(l, cid))
185 return cid;
186 }
187
188 return 0;
189 }
190
/* Push @sk onto the head of the connection's channel list, holding a
 * socket reference for the list. Caller must hold the list write lock. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
202
/* Remove @sk from the connection's channel list and drop the reference
 * taken by __l2cap_chan_link(). Takes the list write lock itself. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	/* __sock_put: caller is known to still hold its own reference */
	__sock_put(sk);
}
219
/* Attach socket @sk to connection @conn: assign CIDs according to socket
 * type, link it into the channel list and, for incoming channels, queue it
 * on the listening @parent's accept queue.
 * Caller must hold the channel list write lock. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13: HCI "Remote User Terminated Connection" default reason */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		/* Alloc CID for connection-oriented socket */
		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	__l2cap_chan_link(l, sk);

	if (parent)
		bt_accept_enqueue(parent, sk);
}
251
/* Delete channel.
 * Must be called on the locked socket.
 * Unlinks the channel from its connection (dropping the hci_conn
 * reference), marks the socket closed/zapped, records @err in sk_err,
 * and notifies either the listening parent or the socket itself. */
static void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Not yet accepted: remove from parent's queue and wake it */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);
}
282
/* Service level security */
/* Map the socket's security level (and PSM) to an HCI authentication
 * requirement and ask the HCI layer to enforce it.
 * Returns nonzero when the link already satisfies the requirement. */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	/* PSM 0x0001 is SDP: never bonds, and LOW is downgraded to SDP level */
	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
314
/* Allocate the next signalling identifier for @conn (wraps within 1-128). */
static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
336
/* Build an L2CAP signalling command and send it on the ACL link.
 * Allocation failure is silently dropped (best effort in atomic context). */
static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	hci_send_acl(conn->hcon, skb, 0);
}
348
/* Transmit an ERTM supervisory frame (S-frame) carrying @control.
 * Consumes any pending F-bit/P-bit state, appends a CRC16 FCS when the
 * channel negotiated it, and hands the skb to the HCI layer. */
static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = pi->conn;
	/* basic header + 16-bit control field */
	int count, hlen = L2CAP_HDR_SIZE + 2;

	if (pi->fcs == L2CAP_FCS_CRC16)
		hlen += 2;

	BT_DBG("pi %p, control 0x%2.2x", pi, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* Piggy-back a pending Final bit, then clear the request */
	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Likewise for a pending Poll bit */
	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything written so far (count - 2 bytes) */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	hci_send_acl(pi->conn->hcon, skb, 0);
}
390
/* Send a Receiver Ready, or Receiver Not Ready when we are locally busy,
 * acknowledging up to buffer_seq via the ReqSeq field. */
static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
{
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		/* remember we told the peer we're busy, so we un-busy later */
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	} else
		control |= L2CAP_SUPER_RCV_READY;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(pi, control);
}
403
/* Kick off channel establishment on an up ACL link: send a Connection
 * Request once the remote feature mask is known (and security passes),
 * otherwise first issue an Information Request for the feature mask. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange in progress; wait for it to finish */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			l2cap_pi(sk)->ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Bound the wait for the info response */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
436
/* Send a Disconnection Request for @sk's channel on @conn. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
{
	struct l2cap_disconn_req req;

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);
}
446
/* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its connection state machine:
 * outgoing channels (BT_CONNECT) get a Connection Request once security
 * allows; incoming channels (BT_CONNECT2) get a Connection Response whose
 * result depends on security and the defer_setup option. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Only connection-oriented sockets have a setup handshake */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (l2cap_check_security(sk)) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			if (l2cap_check_security(sk)) {
				if (bt_sk(sk)->defer_setup) {
					/* Userspace must accept first: answer
					 * "pending" and wake the listener */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					parent->sk_data_ready(parent, 0);

				} else {
					sk->sk_state = BT_CONFIG;
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
508
/* ACL link came up: mark signalling-only sockets connected immediately
 * and start the L2CAP handshake for connection-oriented ones. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Raw/dgram sockets need no channel handshake */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
534
/* Notify sockets that we cannot guaranty reliability anymore */
/* Flag @err on every channel that asked for reliable delivery
 * (force_reliable) when the baseband reports data loss. */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (l2cap_pi(sk)->force_reliable)
			sk->sk_err = err;
	}

	read_unlock(&l->lock);
}
552
/* Information Request timed out: give up on the feature exchange,
 * mark it done and let pending channels proceed without it. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
562
/* Create (or return the existing) L2CAP connection object for an ACL link.
 * Returns NULL on allocation failure, or the existing conn unchanged when
 * one is already attached or @status reports an HCI error. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	conn->mtu = hcon->hdev->acl_mtu;
	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13: HCI "Remote User Terminated Connection" default reason */
	conn->disc_reason = 0x13;

	return conn;
}
595
/* Tear down the L2CAP connection attached to @hcon: free any partially
 * reassembled frame, delete every channel with error @err, stop the info
 * timer and release the connection object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	/* Kill channels */
	while ((sk = conn->chan_list.head)) {
		bh_lock_sock(sk);
		l2cap_chan_del(sk, err);
		bh_unlock_sock(sk);
		/* kill requires the socket to be unlocked */
		l2cap_sock_kill(sk);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
622
/* Locked wrapper around __l2cap_chan_add(): takes the channel list
 * write lock for the duration of the add. */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}
630
631 /* ---- Socket interface ---- */
632 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
633 {
634 struct sock *sk;
635 struct hlist_node *node;
636 sk_for_each(sk, node, &l2cap_sk_list.head)
637 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
638 goto found;
639 sk = NULL;
640 found:
641 return sk;
642 }
643
/* Find socket with psm and source bdaddr.
 * Returns closest match.
 * An exact source-address match wins; otherwise a socket bound to
 * BDADDR_ANY with the same PSM is returned as fallback (sk1).
 * Caller holds l2cap_sk_list.lock.
 */
static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}
	/* node is non-NULL only when the loop broke on an exact match */
	return node ? sk : sk1;
}
668
/* Find socket with given address (psm, src).
 * Returns locked socket (bh_lock_sock held); caller must unlock. */
static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *s;
	read_lock(&l2cap_sk_list.lock);
	s = __l2cap_get_sock_by_psm(state, psm, src);
	if (s)
		bh_lock_sock(s);
	read_unlock(&l2cap_sk_list.lock);
	return s;
}
681
/* sk_destruct callback: free any skbs still queued on the socket. */
static void l2cap_sock_destruct(struct sock *sk)
{
	BT_DBG("sk %p", sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);
}
689
/* Close a listening socket: first close every not-yet-accepted child on
 * its accept queue, then mark the parent closed and zapped. */
static void l2cap_sock_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL)))
		l2cap_sock_close(sk);

	parent->sk_state = BT_CLOSED;
	sock_set_flag(parent, SOCK_ZAPPED);
}
703
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 * Drops the protocol's reference; no-op while the socket is still
 * attached to a struct socket or not yet zapped.
 */
static void l2cap_sock_kill(struct sock *sk)
{
	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
		return;

	BT_DBG("sk %p state %d", sk, sk->sk_state);

	/* Kill poor orphan */
	bt_sock_unlink(&l2cap_sk_list, sk);
	sock_set_flag(sk, SOCK_DEAD);
	sock_put(sk);
}
719
/* State-dependent close: listening sockets drain their accept queue;
 * established connection-oriented channels start a graceful disconnect
 * (with timeout); half-open incoming channels are refused; everything
 * else is deleted immediately. Caller holds the socket lock. */
static void __l2cap_sock_close(struct sock *sk, int reason)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);

	switch (sk->sk_state) {
	case BT_LISTEN:
		l2cap_sock_cleanup_listen(sk);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;

			/* Graceful shutdown: wait for the disconn response,
			 * bounded by the socket's send timeout */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, sk);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT2:
		if (sk->sk_type == SOCK_SEQPACKET ||
				sk->sk_type == SOCK_STREAM) {
			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Refuse the pending incoming connection; the reason
			 * depends on whether userspace deferred setup */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		} else
			l2cap_chan_del(sk, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(sk, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
774
/* Must be called on unlocked socket. */
/* Full close sequence: stop the timer, close under lock, then reap the
 * socket if it is now a zapped orphan. */
static void l2cap_sock_close(struct sock *sk)
{
	l2cap_sock_clear_timer(sk);
	lock_sock(sk);
	__l2cap_sock_close(sk, ECONNRESET);
	release_sock(sk);
	l2cap_sock_kill(sk);
}
784
/* Initialize a fresh L2CAP socket: children inherit configuration from
 * their listening @parent, otherwise module-parameter defaults apply. */
static void l2cap_sock_init(struct sock *sk, struct sock *parent)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (parent) {
		sk->sk_type = parent->sk_type;
		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;

		pi->imtu = l2cap_pi(parent)->imtu;
		pi->omtu = l2cap_pi(parent)->omtu;
		pi->mode = l2cap_pi(parent)->mode;
		pi->fcs = l2cap_pi(parent)->fcs;
		pi->max_tx = l2cap_pi(parent)->max_tx;
		pi->tx_win = l2cap_pi(parent)->tx_win;
		pi->sec_level = l2cap_pi(parent)->sec_level;
		pi->role_switch = l2cap_pi(parent)->role_switch;
		pi->force_reliable = l2cap_pi(parent)->force_reliable;
	} else {
		pi->imtu = L2CAP_DEFAULT_MTU;
		pi->omtu = 0;
		/* SOCK_STREAM implies reliable delivery -> ERTM when enabled */
		if (enable_ertm && sk->sk_type == SOCK_STREAM)
			pi->mode = L2CAP_MODE_ERTM;
		else
			pi->mode = L2CAP_MODE_BASIC;
		pi->max_tx = max_transmit;
		pi->fcs = L2CAP_FCS_CRC16;
		pi->tx_win = tx_window;
		pi->sec_level = BT_SECURITY_LOW;
		pi->role_switch = 0;
		pi->force_reliable = 0;
	}

	/* Default config options */
	pi->conf_len = 0;
	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	skb_queue_head_init(TX_QUEUE(sk));
	skb_queue_head_init(SREJ_QUEUE(sk));
	skb_queue_head_init(BUSY_QUEUE(sk));
	INIT_LIST_HEAD(SREJ_LIST(sk));
}
827
/* Protocol descriptor: sk_alloc() sizes each socket to l2cap_pinfo. */
static struct proto l2cap_proto = {
	.name		= "L2CAP",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct l2cap_pinfo)
};
833
/* Allocate and minimally initialize an L2CAP socket, linking it into the
 * global socket list. Returns NULL on allocation failure. */
static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
{
	struct sock *sk;

	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
	if (!sk)
		return NULL;

	sock_init_data(sock, sk);
	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);

	sk->sk_destruct = l2cap_sock_destruct;
	/* default connect timeout, also reused as the disconnect timeout */
	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = proto;
	sk->sk_state = BT_OPEN;

	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);

	bt_sock_link(&l2cap_sk_list, sk);
	return sk;
}
858
/* socket(2) backend for PF_BLUETOOTH/BTPROTO_L2CAP: validate the socket
 * type (raw sockets need CAP_NET_RAW unless kernel-internal), allocate
 * and initialize the socket. */
static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
			     int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	sock->state = SS_UNCONNECTED;

	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
		return -EPERM;

	sock->ops = &l2cap_sock_ops;

	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
	if (!sk)
		return -ENOMEM;

	l2cap_sock_init(sk, NULL);
	return 0;
}
884
/* bind(2) backend: record the source bdaddr and PSM. PSMs below 0x1001
 * are reserved and need CAP_NET_BIND_SERVICE; binding an explicit CID is
 * not supported. SDP/RFCOMM PSMs default to SDP security level. */
static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	/* Tolerate short sockaddrs from old userspace: copy what was given */
	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state != BT_OPEN) {
		err = -EBADFD;
		goto done;
	}

	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
				!capable(CAP_NET_BIND_SERVICE)) {
		err = -EACCES;
		goto done;
	}

	write_lock_bh(&l2cap_sk_list.lock);

	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
		err = -EADDRINUSE;
	} else {
		/* Save source address */
		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
		l2cap_pi(sk)->psm = la.l2_psm;
		l2cap_pi(sk)->sport = la.l2_psm;
		sk->sk_state = BT_BOUND;

		/* 0x0001 = SDP, 0x0003 = RFCOMM: exempt from security mode */
		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
					__le16_to_cpu(la.l2_psm) == 0x0003)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	}

	write_unlock_bh(&l2cap_sk_list.lock);

done:
	release_sock(sk);
	return err;
}
938
/* Establish the outgoing connection: resolve a local adapter for the
 * route, derive the HCI authentication requirement from socket type,
 * PSM and security level, create/reuse the ACL link and attach the
 * channel to it. Starts the L2CAP handshake when the link is already up.
 * Returns 0 on success or a negative errno. */
static int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	err = -ENOMEM;

	if (sk->sk_type == SOCK_RAW) {
		/* Raw sockets are used for dedicated bonding */
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_DEDICATED_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
		/* PSM 0x0001 is SDP: never bonds, LOW downgraded to SDP */
		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
			auth_type = HCI_AT_NO_BONDING_MITM;
		else
			auth_type = HCI_AT_NO_BONDING;

		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
	} else {
		switch (l2cap_pi(sk)->sec_level) {
		case BT_SECURITY_HIGH:
			auth_type = HCI_AT_GENERAL_BONDING_MITM;
			break;
		case BT_SECURITY_MEDIUM:
			auth_type = HCI_AT_GENERAL_BONDING;
			break;
		default:
			auth_type = HCI_AT_NO_BONDING;
			break;
		}
	}

	hcon = hci_connect(hdev, ACL_LINK, dst,
					l2cap_pi(sk)->sec_level, auth_type);
	if (!hcon)
		goto done;

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		goto done;
	}

	err = 0;

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk, NULL);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* ACL link already up: skip straight to the L2CAP phase */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(sk);
	}

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1029
/* connect(2) backend: validate the address, the channel mode against the
 * enable_ertm module option and the socket state, then start the connect
 * and (for blocking sockets) wait until BT_CONNECTED or error. */
static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
{
	struct sock *sk = sock->sk;
	struct sockaddr_l2 la;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (!addr || alen < sizeof(addr->sa_family) ||
			addr->sa_family != AF_BLUETOOTH)
		return -EINVAL;

	memset(&la, 0, sizeof(la));
	len = min_t(unsigned int, sizeof(la), alen);
	memcpy(&la, addr, len);

	if (la.l2_cid)
		return -EINVAL;

	lock_sock(sk);

	/* Connection-oriented sockets must name a PSM */
	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
			&& !la.l2_psm) {
		err = -EINVAL;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		goto wait;

	case BT_CONNECTED:
		/* Already connected */
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
	l2cap_pi(sk)->psm = la.l2_psm;

	err = l2cap_do_connect(sk);
	if (err)
		goto done;

wait:
	err = bt_sock_wait_state(sk, BT_CONNECTED,
			sock_sndtimeo(sk, flags & O_NONBLOCK));
done:
	release_sock(sk);
	return err;
}
1106
/* listen(2) backend: only bound connection-oriented sockets may listen.
 * A socket bound without a PSM gets the first free odd PSM from the
 * dynamic range (0x1001-0x10ff). */
static int l2cap_sock_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sk %p backlog %d", sk, backlog);

	lock_sock(sk);

	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
			|| sk->sk_state != BT_BOUND) {
		err = -EBADFD;
		goto done;
	}

	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (enable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	if (!l2cap_pi(sk)->psm) {
		bdaddr_t *src = &bt_sk(sk)->src;
		u16 psm;

		err = -EINVAL;

		write_lock_bh(&l2cap_sk_list.lock);

		/* Valid dynamic PSMs are odd (LSB of each octet per spec) */
		for (psm = 0x1001; psm < 0x1100; psm += 2)
			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
				l2cap_pi(sk)->psm   = cpu_to_le16(psm);
				l2cap_pi(sk)->sport = cpu_to_le16(psm);
				err = 0;
				break;
			}

		write_unlock_bh(&l2cap_sk_list.lock);

		if (err < 0)
			goto done;
	}

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog = 0;
	sk->sk_state = BT_LISTEN;

done:
	release_sock(sk);
	return err;
}
1165
/* accept(2) backend: sleep (interruptibly, wake-one) until a connection
 * is available on the accept queue or the timeout/signal/state change
 * ends the wait. */
static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sock *sk = sock->sk, *nsk;
	long timeo;
	int err = 0;

	/* nested: child sockets are locked while the listener is held */
	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

	if (sk->sk_state != BT_LISTEN) {
		err = -EBADFD;
		goto done;
	}

	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

	BT_DBG("sk %p timeo %ld", sk, timeo);

	/* Wait for an incoming connection. (wake-one). */
	add_wait_queue_exclusive(sk_sleep(sk), &wait);
	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!timeo) {
			err = -EAGAIN;
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);

		/* Socket may have been shut down while we slept */
		if (sk->sk_state != BT_LISTEN) {
			err = -EBADFD;
			break;
		}

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	if (err)
		goto done;

	newsock->state = SS_CONNECTED;

	BT_DBG("new socket %p", nsk);

done:
	release_sock(sk);
	return err;
}
1221
1222 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1223 {
1224 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1225 struct sock *sk = sock->sk;
1226
1227 BT_DBG("sock %p, sk %p", sock, sk);
1228
1229 addr->sa_family = AF_BLUETOOTH;
1230 *len = sizeof(struct sockaddr_l2);
1231
1232 if (peer) {
1233 la->l2_psm = l2cap_pi(sk)->psm;
1234 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1235 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1236 } else {
1237 la->l2_psm = l2cap_pi(sk)->sport;
1238 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1239 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1240 }
1241
1242 return 0;
1243 }
1244
1245 static int __l2cap_wait_ack(struct sock *sk)
1246 {
1247 DECLARE_WAITQUEUE(wait, current);
1248 int err = 0;
1249 int timeo = HZ/5;
1250
1251 add_wait_queue(sk_sleep(sk), &wait);
1252 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1253 set_current_state(TASK_INTERRUPTIBLE);
1254
1255 if (!timeo)
1256 timeo = HZ/5;
1257
1258 if (signal_pending(current)) {
1259 err = sock_intr_errno(timeo);
1260 break;
1261 }
1262
1263 release_sock(sk);
1264 timeo = schedule_timeout(timeo);
1265 lock_sock(sk);
1266
1267 err = sock_error(sk);
1268 if (err)
1269 break;
1270 }
1271 set_current_state(TASK_RUNNING);
1272 remove_wait_queue(sk_sleep(sk), &wait);
1273 return err;
1274 }
1275
1276 static void l2cap_monitor_timeout(unsigned long arg)
1277 {
1278 struct sock *sk = (void *) arg;
1279
1280 bh_lock_sock(sk);
1281 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1282 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1283 bh_unlock_sock(sk);
1284 return;
1285 }
1286
1287 l2cap_pi(sk)->retry_count++;
1288 __mod_monitor_timer();
1289
1290 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1291 bh_unlock_sock(sk);
1292 }
1293
1294 static void l2cap_retrans_timeout(unsigned long arg)
1295 {
1296 struct sock *sk = (void *) arg;
1297
1298 bh_lock_sock(sk);
1299 l2cap_pi(sk)->retry_count = 1;
1300 __mod_monitor_timer();
1301
1302 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1303
1304 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1305 bh_unlock_sock(sk);
1306 }
1307
1308 static void l2cap_drop_acked_frames(struct sock *sk)
1309 {
1310 struct sk_buff *skb;
1311
1312 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1313 l2cap_pi(sk)->unacked_frames) {
1314 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1315 break;
1316
1317 skb = skb_dequeue(TX_QUEUE(sk));
1318 kfree_skb(skb);
1319
1320 l2cap_pi(sk)->unacked_frames--;
1321 }
1322
1323 if (!l2cap_pi(sk)->unacked_frames)
1324 del_timer(&l2cap_pi(sk)->retrans_timer);
1325 }
1326
1327 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1328 {
1329 struct l2cap_pinfo *pi = l2cap_pi(sk);
1330
1331 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1332
1333 hci_send_acl(pi->conn->hcon, skb, 0);
1334 }
1335
/* Transmit everything queued on the socket in Streaming mode: stamp
 * each frame with the next TX sequence number, append the CRC16 FCS if
 * configured, send a clone and free the original.  Streaming mode has
 * no retransmissions, so frames are dropped from the queue as they go.
 * Always returns 0.
 */
static int l2cap_streaming_send(struct sock *sk)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control, fcs;

	while ((skb = sk->sk_send_head)) {
		/* The clone shares its data buffer with skb; the control
		 * and FCS writes below land in the shared buffer. */
		tx_skb = skb_clone(skb, GFP_ATOMIC);

		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

		if (pi->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers the whole frame except its own 2 bytes. */
			fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
			put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
		}

		l2cap_do_send(sk, tx_skb);

		/* TxSeq is modulo 64 per the ERTM/Streaming control field. */
		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;

		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			sk->sk_send_head = NULL;
		else
			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);

		/* No retransmission in streaming mode: drop the original now. */
		skb = skb_dequeue(TX_QUEUE(sk));
		kfree_skb(skb);
	}
	return 0;
}
1368
/* Retransmit the single queued I-frame whose TxSeq matches tx_seq
 * (in response to an SREJ).  The frame keeps its original TxSeq but
 * gets the current ReqSeq piggybacked; the FCS is recomputed.  If the
 * frame has already been sent remote_max_tx times the channel is torn
 * down instead.
 */
static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb, *tx_skb;
	u16 control, fcs;

	skb = skb_peek(TX_QUEUE(sk));
	if (!skb)
		return;

	/* Walk the TX queue looking for the requested sequence number. */
	do {
		if (bt_cb(skb)->tx_seq == tx_seq)
			break;

		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			return;

	} while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));

	if (pi->remote_max_tx &&
			bt_cb(skb)->retries == pi->remote_max_tx) {
		l2cap_send_disconn_req(pi->conn, sk);
		return;
	}

	/* Send a clone; the original stays queued for further retries. */
	tx_skb = skb_clone(skb, GFP_ATOMIC);
	bt_cb(skb)->retries++;
	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
	control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);

	if (pi->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything but its own trailing 2 bytes. */
		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
	}

	l2cap_do_send(sk, tx_skb);
}
1408
1409 static int l2cap_ertm_send(struct sock *sk)
1410 {
1411 struct sk_buff *skb, *tx_skb;
1412 struct l2cap_pinfo *pi = l2cap_pi(sk);
1413 u16 control, fcs;
1414 int nsent = 0;
1415
1416 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1417 return 0;
1418
1419 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1420 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1421
1422 if (pi->remote_max_tx &&
1423 bt_cb(skb)->retries == pi->remote_max_tx) {
1424 l2cap_send_disconn_req(pi->conn, sk);
1425 break;
1426 }
1427
1428 tx_skb = skb_clone(skb, GFP_ATOMIC);
1429
1430 bt_cb(skb)->retries++;
1431
1432 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1433 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1434 control |= L2CAP_CTRL_FINAL;
1435 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1436 }
1437 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1438 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1439 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1440
1441
1442 if (pi->fcs == L2CAP_FCS_CRC16) {
1443 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1444 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1445 }
1446
1447 l2cap_do_send(sk, tx_skb);
1448
1449 __mod_retrans_timer();
1450
1451 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1452 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1453
1454 pi->unacked_frames++;
1455 pi->frames_sent++;
1456
1457 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1458 sk->sk_send_head = NULL;
1459 else
1460 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1461
1462 nsent++;
1463 }
1464
1465 return nsent;
1466 }
1467
1468 static int l2cap_retransmit_frames(struct sock *sk)
1469 {
1470 struct l2cap_pinfo *pi = l2cap_pi(sk);
1471 int ret;
1472
1473 spin_lock_bh(&pi->send_lock);
1474
1475 if (!skb_queue_empty(TX_QUEUE(sk)))
1476 sk->sk_send_head = TX_QUEUE(sk)->next;
1477
1478 pi->next_tx_seq = pi->expected_ack_seq;
1479 ret = l2cap_ertm_send(sk);
1480
1481 spin_unlock_bh(&pi->send_lock);
1482
1483 return ret;
1484 }
1485
/* Acknowledge received I-frames.  If we are locally busy an RNR
 * S-frame is sent instead.  Otherwise try to piggyback the ack on
 * pending I-frames; only when nothing could be sent is an explicit
 * RR S-frame emitted.
 */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
{
	struct sock *sk = (struct sock *)pi;
	u16 control = 0;
	int nframes;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Tell the peer to stop sending until we recover. */
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);
		return;
	}

	spin_lock_bh(&pi->send_lock);
	nframes = l2cap_ertm_send(sk);
	spin_unlock_bh(&pi->send_lock);

	/* I-frames went out: they already carry the ReqSeq ack. */
	if (nframes > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
}
1511
1512 static void l2cap_send_srejtail(struct sock *sk)
1513 {
1514 struct srej_list *tail;
1515 u16 control;
1516
1517 control = L2CAP_SUPER_SELECT_REJECT;
1518 control |= L2CAP_CTRL_FINAL;
1519
1520 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1521 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1522
1523 l2cap_send_sframe(l2cap_pi(sk), control);
1524 }
1525
1526 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1527 {
1528 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1529 struct sk_buff **frag;
1530 int err, sent = 0;
1531
1532 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1533 return -EFAULT;
1534
1535 sent += count;
1536 len -= count;
1537
1538 /* Continuation fragments (no L2CAP header) */
1539 frag = &skb_shinfo(skb)->frag_list;
1540 while (len) {
1541 count = min_t(unsigned int, conn->mtu, len);
1542
1543 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1544 if (!*frag)
1545 return -EFAULT;
1546 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1547 return -EFAULT;
1548
1549 sent += count;
1550 len -= count;
1551
1552 frag = &(*frag)->next;
1553 }
1554
1555 return sent;
1556 }
1557
1558 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1559 {
1560 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1561 struct sk_buff *skb;
1562 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1563 struct l2cap_hdr *lh;
1564
1565 BT_DBG("sk %p len %d", sk, (int)len);
1566
1567 count = min_t(unsigned int, (conn->mtu - hlen), len);
1568 skb = bt_skb_send_alloc(sk, count + hlen,
1569 msg->msg_flags & MSG_DONTWAIT, &err);
1570 if (!skb)
1571 return ERR_PTR(-ENOMEM);
1572
1573 /* Create L2CAP header */
1574 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1575 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1576 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1577 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1578
1579 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1580 if (unlikely(err < 0)) {
1581 kfree_skb(skb);
1582 return ERR_PTR(err);
1583 }
1584 return skb;
1585 }
1586
1587 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1588 {
1589 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1590 struct sk_buff *skb;
1591 int err, count, hlen = L2CAP_HDR_SIZE;
1592 struct l2cap_hdr *lh;
1593
1594 BT_DBG("sk %p len %d", sk, (int)len);
1595
1596 count = min_t(unsigned int, (conn->mtu - hlen), len);
1597 skb = bt_skb_send_alloc(sk, count + hlen,
1598 msg->msg_flags & MSG_DONTWAIT, &err);
1599 if (!skb)
1600 return ERR_PTR(-ENOMEM);
1601
1602 /* Create L2CAP header */
1603 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1604 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1605 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1606
1607 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1608 if (unlikely(err < 0)) {
1609 kfree_skb(skb);
1610 return ERR_PTR(err);
1611 }
1612 return skb;
1613 }
1614
1615 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1616 {
1617 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1618 struct sk_buff *skb;
1619 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1620 struct l2cap_hdr *lh;
1621
1622 BT_DBG("sk %p len %d", sk, (int)len);
1623
1624 if (!conn)
1625 return ERR_PTR(-ENOTCONN);
1626
1627 if (sdulen)
1628 hlen += 2;
1629
1630 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1631 hlen += 2;
1632
1633 count = min_t(unsigned int, (conn->mtu - hlen), len);
1634 skb = bt_skb_send_alloc(sk, count + hlen,
1635 msg->msg_flags & MSG_DONTWAIT, &err);
1636 if (!skb)
1637 return ERR_PTR(-ENOMEM);
1638
1639 /* Create L2CAP header */
1640 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1641 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1642 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1643 put_unaligned_le16(control, skb_put(skb, 2));
1644 if (sdulen)
1645 put_unaligned_le16(sdulen, skb_put(skb, 2));
1646
1647 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1648 if (unlikely(err < 0)) {
1649 kfree_skb(skb);
1650 return ERR_PTR(err);
1651 }
1652
1653 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1654 put_unaligned_le16(0, skb_put(skb, 2));
1655
1656 bt_cb(skb)->retries = 0;
1657 return skb;
1658 }
1659
/* Segment an SDU larger than the remote MPS into a START frame plus
 * CONTINUE frames and a final END frame, build them all on a private
 * queue, then splice the queue onto the socket's TX queue under
 * send_lock.  Returns the number of payload bytes queued or a
 * negative errno (nothing is queued on failure).
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	/* START frame carries the total SDU length. */
	control = L2CAP_SDU_START;
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			/* Drop everything built so far; caller sees the error. */
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	/* send_head is shared with the (locked) send paths. */
	spin_lock_bh(&pi->send_lock);
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;
	spin_unlock_bh(&pi->send_lock);

	return size;
}
1707
/* sendmsg() entry point.  Dispatches on socket type and channel mode:
 * SOCK_DGRAM builds a connectionless PDU; Basic mode sends a single
 * PDU bounded by the outgoing MTU; ERTM/Streaming either queue one
 * unsegmented I-frame or SAR-segment the SDU, then kick the
 * mode-specific transmit routine.  Returns bytes sent or a negative
 * errno.
 */
static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	u16 control;
	int err;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = sock_error(sk);
	if (err)
		return err;

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	lock_sock(sk);

	if (sk->sk_state != BT_CONNECTED) {
		err = -ENOTCONN;
		goto done;
	}

	/* Connectionless channel */
	if (sk->sk_type == SOCK_DGRAM) {
		skb = l2cap_create_connless_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
		} else {
			l2cap_do_send(sk, skb);
			err = len;
		}
		goto done;
	}

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > pi->omtu) {
			err = -EINVAL;
			goto done;
		}

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(sk, msg, len);
		if (IS_ERR(skb)) {
			err = PTR_ERR(skb);
			goto done;
		}

		l2cap_do_send(sk, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= pi->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
			if (IS_ERR(skb)) {
				err = PTR_ERR(skb);
				goto done;
			}
			__skb_queue_tail(TX_QUEUE(sk), skb);

			/* send_head is only contended in ERTM mode, where
			 * the ack/retransmit paths also walk the queue. */
			if (pi->mode == L2CAP_MODE_ERTM)
				spin_lock_bh(&pi->send_lock);

			if (sk->sk_send_head == NULL)
				sk->sk_send_head = skb;

			if (pi->mode == L2CAP_MODE_ERTM)
				spin_unlock_bh(&pi->send_lock);
		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(sk, msg, len);
			if (err < 0)
				goto done;
		}

		if (pi->mode == L2CAP_MODE_STREAMING) {
			err = l2cap_streaming_send(sk);
		} else {
			spin_lock_bh(&pi->send_lock);
			err = l2cap_ertm_send(sk);
			spin_unlock_bh(&pi->send_lock);
		}

		/* Queued (or partially sent) counts as full success. */
		if (err >= 0)
			err = len;
		break;

	default:
		BT_DBG("bad state %1.1x", pi->mode);
		err = -EINVAL;
	}

done:
	release_sock(sk);
	return err;
}
1811
1812 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1813 {
1814 struct sock *sk = sock->sk;
1815
1816 lock_sock(sk);
1817
1818 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1819 struct l2cap_conn_rsp rsp;
1820
1821 sk->sk_state = BT_CONFIG;
1822
1823 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1824 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1825 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1826 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1827 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1828 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1829
1830 release_sock(sk);
1831 return 0;
1832 }
1833
1834 release_sock(sk);
1835
1836 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1837 }
1838
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM).
 * Options are read into the current values first so a short
 * copy_from_user leaves unspecified fields unchanged.
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	switch (optname) {
	case L2CAP_OPTIONS:
		/* Seed with current values; a short user buffer only
		 * overwrites a prefix of the struct. */
		opts.imtu     = l2cap_pi(sk)->imtu;
		opts.omtu     = l2cap_pi(sk)->omtu;
		opts.flush_to = l2cap_pi(sk)->flush_to;
		opts.mode     = l2cap_pi(sk)->mode;
		opts.fcs      = l2cap_pi(sk)->fcs;
		opts.max_tx   = l2cap_pi(sk)->max_tx;
		opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

		len = min_t(unsigned int, sizeof(opts), optlen);
		if (copy_from_user((char *) &opts, optval, len)) {
			err = -EFAULT;
			break;
		}

		l2cap_pi(sk)->mode = opts.mode;
		switch (l2cap_pi(sk)->mode) {
		case L2CAP_MODE_BASIC:
			break;
		case L2CAP_MODE_ERTM:
		case L2CAP_MODE_STREAMING:
			/* Only valid when ERTM support is enabled. */
			if (enable_ertm)
				break;
			/* fall through */
		default:
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->imtu = opts.imtu;
		l2cap_pi(sk)->omtu = opts.omtu;
		l2cap_pi(sk)->fcs  = opts.fcs;
		l2cap_pi(sk)->max_tx = opts.max_tx;
		l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
		break;

	case L2CAP_LM:
		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		/* Map the legacy link-mode flags onto security levels;
		 * the strongest requested level wins. */
		if (opt & L2CAP_LM_AUTH)
			l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
		if (opt & L2CAP_LM_ENCRYPT)
			l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
		if (opt & L2CAP_LM_SECURE)
			l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

		l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
		l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1912
/* setsockopt() entry point.  SOL_L2CAP is routed to the legacy
 * handler; SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP.
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;
	u32 opt;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		/* Default if the user buffer is shorter than the struct. */
		sec.level = BT_SECURITY_LOW;

		len = min_t(unsigned int, sizeof(sec), optlen);
		if (copy_from_user((char *) &sec, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (sec.level < BT_SECURITY_LOW ||
					sec.level > BT_SECURITY_HIGH) {
			err = -EINVAL;
			break;
		}

		l2cap_pi(sk)->sec_level = sec.level;
		break;

	case BT_DEFER_SETUP:
		/* Only meaningful before the socket starts connecting. */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (get_user(opt, (u32 __user *) optval)) {
			err = -EFAULT;
			break;
		}

		bt_sk(sk)->defer_setup = opt;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
1977
1978 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1979 {
1980 struct sock *sk = sock->sk;
1981 struct l2cap_options opts;
1982 struct l2cap_conninfo cinfo;
1983 int len, err = 0;
1984 u32 opt;
1985
1986 BT_DBG("sk %p", sk);
1987
1988 if (get_user(len, optlen))
1989 return -EFAULT;
1990
1991 lock_sock(sk);
1992
1993 switch (optname) {
1994 case L2CAP_OPTIONS:
1995 opts.imtu = l2cap_pi(sk)->imtu;
1996 opts.omtu = l2cap_pi(sk)->omtu;
1997 opts.flush_to = l2cap_pi(sk)->flush_to;
1998 opts.mode = l2cap_pi(sk)->mode;
1999 opts.fcs = l2cap_pi(sk)->fcs;
2000 opts.max_tx = l2cap_pi(sk)->max_tx;
2001 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2002
2003 len = min_t(unsigned int, len, sizeof(opts));
2004 if (copy_to_user(optval, (char *) &opts, len))
2005 err = -EFAULT;
2006
2007 break;
2008
2009 case L2CAP_LM:
2010 switch (l2cap_pi(sk)->sec_level) {
2011 case BT_SECURITY_LOW:
2012 opt = L2CAP_LM_AUTH;
2013 break;
2014 case BT_SECURITY_MEDIUM:
2015 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2016 break;
2017 case BT_SECURITY_HIGH:
2018 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2019 L2CAP_LM_SECURE;
2020 break;
2021 default:
2022 opt = 0;
2023 break;
2024 }
2025
2026 if (l2cap_pi(sk)->role_switch)
2027 opt |= L2CAP_LM_MASTER;
2028
2029 if (l2cap_pi(sk)->force_reliable)
2030 opt |= L2CAP_LM_RELIABLE;
2031
2032 if (put_user(opt, (u32 __user *) optval))
2033 err = -EFAULT;
2034 break;
2035
2036 case L2CAP_CONNINFO:
2037 if (sk->sk_state != BT_CONNECTED &&
2038 !(sk->sk_state == BT_CONNECT2 &&
2039 bt_sk(sk)->defer_setup)) {
2040 err = -ENOTCONN;
2041 break;
2042 }
2043
2044 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2045 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2046
2047 len = min_t(unsigned int, len, sizeof(cinfo));
2048 if (copy_to_user(optval, (char *) &cinfo, len))
2049 err = -EFAULT;
2050
2051 break;
2052
2053 default:
2054 err = -ENOPROTOOPT;
2055 break;
2056 }
2057
2058 release_sock(sk);
2059 return err;
2060 }
2061
/* getsockopt() entry point.  SOL_L2CAP is routed to the legacy
 * handler; SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP.
 */
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct bt_security sec;
	int len, err = 0;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case BT_SECURITY:
		if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
				&& sk->sk_type != SOCK_RAW) {
			err = -EINVAL;
			break;
		}

		sec.level = l2cap_pi(sk)->sec_level;

		len = min_t(unsigned int, len, sizeof(sec));
		if (copy_to_user(optval, (char *) &sec, len))
			err = -EFAULT;

		break;

	case BT_DEFER_SETUP:
		/* Only meaningful before the socket starts connecting. */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
			err = -EINVAL;
			break;
		}

		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
			err = -EFAULT;

		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

	release_sock(sk);
	return err;
}
2116
/* shutdown() entry point.  In ERTM mode first waits for all
 * outstanding frames to be acked, then closes the channel; if
 * SO_LINGER is set, waits up to sk_lingertime for BT_CLOSED.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	int err = 0;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk)
		return 0;

	lock_sock(sk);
	if (!sk->sk_shutdown) {
		/* Drain unacked ERTM frames before tearing down. */
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,
							sk->sk_lingertime);
	}
	release_sock(sk);
	return err;
}
2143
2144 static int l2cap_sock_release(struct socket *sock)
2145 {
2146 struct sock *sk = sock->sk;
2147 int err;
2148
2149 BT_DBG("sock %p, sk %p", sock, sk);
2150
2151 if (!sk)
2152 return 0;
2153
2154 err = l2cap_sock_shutdown(sock, 2);
2155
2156 sock_orphan(sk);
2157 l2cap_sock_kill(sk);
2158 return err;
2159 }
2160
2161 static void l2cap_chan_ready(struct sock *sk)
2162 {
2163 struct sock *parent = bt_sk(sk)->parent;
2164
2165 BT_DBG("sk %p, parent %p", sk, parent);
2166
2167 l2cap_pi(sk)->conf_state = 0;
2168 l2cap_sock_clear_timer(sk);
2169
2170 if (!parent) {
2171 /* Outgoing channel.
2172 * Wake up socket sleeping on connect.
2173 */
2174 sk->sk_state = BT_CONNECTED;
2175 sk->sk_state_change(sk);
2176 } else {
2177 /* Incoming channel.
2178 * Wake up socket sleeping on accept.
2179 */
2180 parent->sk_data_ready(parent, 0);
2181 }
2182 }
2183
2184 /* Copy frame to all raw sockets on that connection */
2185 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2186 {
2187 struct l2cap_chan_list *l = &conn->chan_list;
2188 struct sk_buff *nskb;
2189 struct sock *sk;
2190
2191 BT_DBG("conn %p", conn);
2192
2193 read_lock(&l->lock);
2194 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2195 if (sk->sk_type != SOCK_RAW)
2196 continue;
2197
2198 /* Don't send frame to the socket it came from */
2199 if (skb->sk == sk)
2200 continue;
2201 nskb = skb_clone(skb, GFP_ATOMIC);
2202 if (!nskb)
2203 continue;
2204
2205 if (sock_queue_rcv_skb(sk, nskb))
2206 kfree_skb(nskb);
2207 }
2208 read_unlock(&l->lock);
2209 }
2210
2211 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command PDU: L2CAP header + command
 * header + dlen bytes of payload.  Payload beyond the first conn->mtu
 * bytes is chained on frag_list as header-less continuation skbs.
 * Returns the skb or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first skb with payload. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Freeing the head frees the whole fragment chain. */
	kfree_skb(skb);
	return NULL;
}
2270
2271 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2272 {
2273 struct l2cap_conf_opt *opt = *ptr;
2274 int len;
2275
2276 len = L2CAP_CONF_OPT_SIZE + opt->len;
2277 *ptr += len;
2278
2279 *type = opt->type;
2280 *olen = opt->len;
2281
2282 switch (opt->len) {
2283 case 1:
2284 *val = *((u8 *) opt->val);
2285 break;
2286
2287 case 2:
2288 *val = __le16_to_cpu(*((__le16 *) opt->val));
2289 break;
2290
2291 case 4:
2292 *val = __le32_to_cpu(*((__le32 *) opt->val));
2293 break;
2294
2295 default:
2296 *val = (unsigned long) opt->val;
2297 break;
2298 }
2299
2300 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2301 return len;
2302 }
2303
2304 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2305 {
2306 struct l2cap_conf_opt *opt = *ptr;
2307
2308 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2309
2310 opt->type = type;
2311 opt->len = len;
2312
2313 switch (len) {
2314 case 1:
2315 *((u8 *) opt->val) = val;
2316 break;
2317
2318 case 2:
2319 *((__le16 *) opt->val) = cpu_to_le16(val);
2320 break;
2321
2322 case 4:
2323 *((__le32 *) opt->val) = cpu_to_le32(val);
2324 break;
2325
2326 default:
2327 memcpy(opt->val, (void *) val, len);
2328 break;
2329 }
2330
2331 *ptr += L2CAP_CONF_OPT_SIZE + len;
2332 }
2333
/* Ack timer expired: force out an acknowledgment for received
 * I-frames.  Runs in timer (BH) context.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2342
2343 static inline void l2cap_ertm_init(struct sock *sk)
2344 {
2345 l2cap_pi(sk)->expected_ack_seq = 0;
2346 l2cap_pi(sk)->unacked_frames = 0;
2347 l2cap_pi(sk)->buffer_seq = 0;
2348 l2cap_pi(sk)->num_acked = 0;
2349 l2cap_pi(sk)->frames_sent = 0;
2350
2351 setup_timer(&l2cap_pi(sk)->retrans_timer,
2352 l2cap_retrans_timeout, (unsigned long) sk);
2353 setup_timer(&l2cap_pi(sk)->monitor_timer,
2354 l2cap_monitor_timeout, (unsigned long) sk);
2355 setup_timer(&l2cap_pi(sk)->ack_timer,
2356 l2cap_ack_timeout, (unsigned long) sk);
2357
2358 __skb_queue_head_init(SREJ_QUEUE(sk));
2359 __skb_queue_head_init(BUSY_QUEUE(sk));
2360 spin_lock_init(&l2cap_pi(sk)->send_lock);
2361
2362 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2363 }
2364
2365 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2366 {
2367 u32 local_feat_mask = l2cap_feat_mask;
2368 if (enable_ertm)
2369 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2370
2371 switch (mode) {
2372 case L2CAP_MODE_ERTM:
2373 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2374 case L2CAP_MODE_STREAMING:
2375 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2376 default:
2377 return 0x00;
2378 }
2379 }
2380
2381 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2382 {
2383 switch (mode) {
2384 case L2CAP_MODE_STREAMING:
2385 case L2CAP_MODE_ERTM:
2386 if (l2cap_mode_supported(mode, remote_feat_mask))
2387 return mode;
2388 /* fall through */
2389 default:
2390 return L2CAP_MODE_BASIC;
2391 }
2392 }
2393
/* Build an outgoing Configure Request into data.  On the first request
 * the channel mode is pinned (or downgraded via l2cap_select_mode);
 * then mode-specific options (MTU, RFC, FCS) are appended.  Returns
 * the total length of the request written.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode is only (re)negotiated on the very first exchange. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Application insisted on this mode; refuse to run
		 * without it rather than fall back to Basic. */
		pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
		if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
			l2cap_send_disconn_req(pi->conn, sk);
		break;
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = pi->tx_win;
		rfc.max_transmit    = pi->max_tx;
		/* Timeouts are dictated by the receiving side. */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Cap the PDU size so header + FCS still fits the link MTU. */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		/* No window, retransmission or polling in streaming mode. */
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
2481
2482 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2483 {
2484 struct l2cap_pinfo *pi = l2cap_pi(sk);
2485 struct l2cap_conf_rsp *rsp = data;
2486 void *ptr = rsp->data;
2487 void *req = pi->conf_req;
2488 int len = pi->conf_len;
2489 int type, hint, olen;
2490 unsigned long val;
2491 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2492 u16 mtu = L2CAP_DEFAULT_MTU;
2493 u16 result = L2CAP_CONF_SUCCESS;
2494
2495 BT_DBG("sk %p", sk);
2496
2497 while (len >= L2CAP_CONF_OPT_SIZE) {
2498 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2499
2500 hint = type & L2CAP_CONF_HINT;
2501 type &= L2CAP_CONF_MASK;
2502
2503 switch (type) {
2504 case L2CAP_CONF_MTU:
2505 mtu = val;
2506 break;
2507
2508 case L2CAP_CONF_FLUSH_TO:
2509 pi->flush_to = val;
2510 break;
2511
2512 case L2CAP_CONF_QOS:
2513 break;
2514
2515 case L2CAP_CONF_RFC:
2516 if (olen == sizeof(rfc))
2517 memcpy(&rfc, (void *) val, olen);
2518 break;
2519
2520 case L2CAP_CONF_FCS:
2521 if (val == L2CAP_FCS_NONE)
2522 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2523
2524 break;
2525
2526 default:
2527 if (hint)
2528 break;
2529
2530 result = L2CAP_CONF_UNKNOWN;
2531 *((u8 *) ptr++) = type;
2532 break;
2533 }
2534 }
2535
2536 if (pi->num_conf_rsp || pi->num_conf_req)
2537 goto done;
2538
2539 switch (pi->mode) {
2540 case L2CAP_MODE_STREAMING:
2541 case L2CAP_MODE_ERTM:
2542 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2543 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2544 return -ECONNREFUSED;
2545 break;
2546 default:
2547 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2548 break;
2549 }
2550
2551 done:
2552 if (pi->mode != rfc.mode) {
2553 result = L2CAP_CONF_UNACCEPT;
2554 rfc.mode = pi->mode;
2555
2556 if (pi->num_conf_rsp == 1)
2557 return -ECONNREFUSED;
2558
2559 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2560 sizeof(rfc), (unsigned long) &rfc);
2561 }
2562
2563
2564 if (result == L2CAP_CONF_SUCCESS) {
2565 /* Configure output options and let the other side know
2566 * which ones we don't like. */
2567
2568 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2569 result = L2CAP_CONF_UNACCEPT;
2570 else {
2571 pi->omtu = mtu;
2572 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2573 }
2574 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2575
2576 switch (rfc.mode) {
2577 case L2CAP_MODE_BASIC:
2578 pi->fcs = L2CAP_FCS_NONE;
2579 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2580 break;
2581
2582 case L2CAP_MODE_ERTM:
2583 pi->remote_tx_win = rfc.txwin_size;
2584 pi->remote_max_tx = rfc.max_transmit;
2585 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2586 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2587
2588 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2589
2590 rfc.retrans_timeout =
2591 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2592 rfc.monitor_timeout =
2593 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2594
2595 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2596
2597 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2598 sizeof(rfc), (unsigned long) &rfc);
2599
2600 break;
2601
2602 case L2CAP_MODE_STREAMING:
2603 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2604 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2605
2606 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2607
2608 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2609
2610 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2611 sizeof(rfc), (unsigned long) &rfc);
2612
2613 break;
2614
2615 default:
2616 result = L2CAP_CONF_UNACCEPT;
2617
2618 memset(&rfc, 0, sizeof(rfc));
2619 rfc.mode = pi->mode;
2620 }
2621
2622 if (result == L2CAP_CONF_SUCCESS)
2623 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2624 }
2625 rsp->scid = cpu_to_le16(pi->dcid);
2626 rsp->result = cpu_to_le16(result);
2627 rsp->flags = cpu_to_le16(0x0000);
2628
2629 return ptr - data;
2630 }
2631
2632 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2633 {
2634 struct l2cap_pinfo *pi = l2cap_pi(sk);
2635 struct l2cap_conf_req *req = data;
2636 void *ptr = req->data;
2637 int type, olen;
2638 unsigned long val;
2639 struct l2cap_conf_rfc rfc;
2640
2641 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2642
2643 while (len >= L2CAP_CONF_OPT_SIZE) {
2644 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2645
2646 switch (type) {
2647 case L2CAP_CONF_MTU:
2648 if (val < L2CAP_DEFAULT_MIN_MTU) {
2649 *result = L2CAP_CONF_UNACCEPT;
2650 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2651 } else
2652 pi->omtu = val;
2653 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2654 break;
2655
2656 case L2CAP_CONF_FLUSH_TO:
2657 pi->flush_to = val;
2658 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2659 2, pi->flush_to);
2660 break;
2661
2662 case L2CAP_CONF_RFC:
2663 if (olen == sizeof(rfc))
2664 memcpy(&rfc, (void *)val, olen);
2665
2666 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2667 rfc.mode != pi->mode)
2668 return -ECONNREFUSED;
2669
2670 pi->mode = rfc.mode;
2671 pi->fcs = 0;
2672
2673 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2674 sizeof(rfc), (unsigned long) &rfc);
2675 break;
2676 }
2677 }
2678
2679 if (*result == L2CAP_CONF_SUCCESS) {
2680 switch (rfc.mode) {
2681 case L2CAP_MODE_ERTM:
2682 pi->remote_tx_win = rfc.txwin_size;
2683 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2684 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2685 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2686 break;
2687 case L2CAP_MODE_STREAMING:
2688 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2689 }
2690 }
2691
2692 req->dcid = cpu_to_le16(pi->dcid);
2693 req->flags = cpu_to_le16(0x0000);
2694
2695 return ptr - data;
2696 }
2697
2698 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2699 {
2700 struct l2cap_conf_rsp *rsp = data;
2701 void *ptr = rsp->data;
2702
2703 BT_DBG("sk %p", sk);
2704
2705 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2706 rsp->result = cpu_to_le16(result);
2707 rsp->flags = cpu_to_le16(flags);
2708
2709 return ptr - data;
2710 }
2711
2712 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2713 {
2714 struct l2cap_pinfo *pi = l2cap_pi(sk);
2715 int type, olen;
2716 unsigned long val;
2717 struct l2cap_conf_rfc rfc;
2718
2719 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2720
2721 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2722 return;
2723
2724 while (len >= L2CAP_CONF_OPT_SIZE) {
2725 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2726
2727 switch (type) {
2728 case L2CAP_CONF_RFC:
2729 if (olen == sizeof(rfc))
2730 memcpy(&rfc, (void *)val, olen);
2731 goto done;
2732 }
2733 }
2734
2735 done:
2736 switch (rfc.mode) {
2737 case L2CAP_MODE_ERTM:
2738 pi->remote_tx_win = rfc.txwin_size;
2739 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2740 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2741 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2742 break;
2743 case L2CAP_MODE_STREAMING:
2744 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2745 }
2746 }
2747
2748 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2749 {
2750 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2751
2752 if (rej->reason != 0x0000)
2753 return 0;
2754
2755 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2756 cmd->ident == conn->info_ident) {
2757 del_timer(&conn->info_timer);
2758
2759 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2760 conn->info_ident = 0;
2761
2762 l2cap_conn_start(conn);
2763 }
2764
2765 return 0;
2766 }
2767
/* Handle an incoming L2CAP Connect Request.
 *
 * Looks up a listening socket for the requested PSM, performs security
 * and backlog checks, creates and initializes a child socket for the
 * new channel, and replies with a Connect Response carrying the chosen
 * result/status.  When the response is "pending, no info" and the
 * feature-mask exchange has not run yet, an Information Request is also
 * sent.  Always returns 0.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* NOTE(review): the "response" label below calls
	 * bh_unlock_sock(parent) but no matching bh_lock_sock(parent) is
	 * visible in this function — presumably l2cap_get_sock_by_psm()
	 * returns the socket locked; verify against that helper. */

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
			!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Child socket inherits its settings from the listener */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Let userspace accept/reject via the
				 * deferred-setup mechanism */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask still unknown: answer "pending" and run
		 * the info exchange first (see sendresp below) */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	return 0;
}
2884
/* Handle an incoming Connect Response.
 *
 * On success the channel moves to BT_CONFIG and the first Configure
 * Request is sent; a pending result just flags the channel as
 * connect-pending; anything else tears the channel down.  Always
 * returns 0.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* A zero scid means the peer could not allocate a channel; fall
	 * back to matching by the ident of our original request.
	 * NOTE(review): sk is bh_unlock'ed at the end — presumably these
	 * lookup helpers return the socket locked; verify. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return 0;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return 0;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Kick off configuration of our receive direction */
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2935
/* Handle an incoming Configure Request.
 *
 * Option data may be split across several requests (continuation flag
 * 0x0001); fragments are accumulated in pi->conf_req until the final
 * one arrives, then the whole set is parsed and answered.  When both
 * configuration directions are done the channel goes BT_CONNECTED.
 * Returns 0, or -ENOENT for an unknown channel (which makes the caller
 * in l2cap_sig_channel() send a Command Reject).
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	if (sk->sk_state == BT_DISCONN)
		goto unlock;

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
	if (len < 0) {
		/* Unnegotiable request: drop the connection */
		l2cap_send_disconn_req(conn, sk);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* FCS is used unless both sides asked for none */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
		    l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		/* We have not configured our direction yet: do it now */
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
3021
3022 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3023 {
3024 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3025 u16 scid, flags, result;
3026 struct sock *sk;
3027 int len = cmd->len - sizeof(*rsp);
3028
3029 scid = __le16_to_cpu(rsp->scid);
3030 flags = __le16_to_cpu(rsp->flags);
3031 result = __le16_to_cpu(rsp->result);
3032
3033 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3034 scid, flags, result);
3035
3036 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3037 if (!sk)
3038 return 0;
3039
3040 switch (result) {
3041 case L2CAP_CONF_SUCCESS:
3042 l2cap_conf_rfc_get(sk, rsp->data, len);
3043 break;
3044
3045 case L2CAP_CONF_UNACCEPT:
3046 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3047 char req[64];
3048
3049 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3050 l2cap_send_disconn_req(conn, sk);
3051 goto done;
3052 }
3053
3054 /* throw out any old stored conf requests */
3055 result = L2CAP_CONF_SUCCESS;
3056 len = l2cap_parse_conf_rsp(sk, rsp->data,
3057 len, req, &result);
3058 if (len < 0) {
3059 l2cap_send_disconn_req(conn, sk);
3060 goto done;
3061 }
3062
3063 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3064 L2CAP_CONF_REQ, len, req);
3065 l2cap_pi(sk)->num_conf_req++;
3066 if (result != L2CAP_CONF_SUCCESS)
3067 goto done;
3068 break;
3069 }
3070
3071 default:
3072 sk->sk_state = BT_DISCONN;
3073 sk->sk_err = ECONNRESET;
3074 l2cap_sock_set_timer(sk, HZ * 5);
3075 l2cap_send_disconn_req(conn, sk);
3076 goto done;
3077 }
3078
3079 if (flags & 0x01)
3080 goto done;
3081
3082 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3083
3084 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3085 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3086 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3087 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3088
3089 sk->sk_state = BT_CONNECTED;
3090 l2cap_pi(sk)->next_tx_seq = 0;
3091 l2cap_pi(sk)->expected_tx_seq = 0;
3092 __skb_queue_head_init(TX_QUEUE(sk));
3093 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3094 l2cap_ertm_init(sk);
3095
3096 l2cap_chan_ready(sk);
3097 }
3098
3099 done:
3100 bh_unlock_sock(sk);
3101 return 0;
3102 }
3103
/* Handle an incoming Disconnect Request: acknowledge with a Disconnect
 * Response, flush all pending transmit state (plus the ERTM queues and
 * timers when applicable), then tear down and kill the channel socket.
 * Always returns 0.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid.
	 * NOTE(review): sk is bh_unlock'ed below — presumably
	 * l2cap_get_chan_by_scid() returns it locked; verify. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Drop ERTM retransmission state as well */
		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	l2cap_chan_del(sk, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3142
/* Handle an incoming Disconnect Response to our Disconnect Request:
 * flush pending transmit state (plus ERTM queues and timers when
 * applicable), then tear down and kill the channel socket.  Always
 * returns 0.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Drop ERTM retransmission state as well */
		skb_queue_purge(SREJ_QUEUE(sk));
		skb_queue_purge(BUSY_QUEUE(sk));
		del_timer(&l2cap_pi(sk)->retrans_timer);
		del_timer(&l2cap_pi(sk)->monitor_timer);
		del_timer(&l2cap_pi(sk)->ack_timer);
	}

	/* err == 0: this is a clean, locally initiated shutdown */
	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
3174
/* Handle an incoming Information Request: reply with our extended
 * feature mask, our fixed channel map, or a "not supported" result for
 * any other type.  Always returns 0.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* 4-byte rsp header + 4-byte feature mask */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (enable_ertm)
			/* Advertise ERTM/Streaming/FCS only when enabled
			 * via the module parameter */
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* 4-byte rsp header + 8-byte fixed channel map */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
3214
/* Handle an incoming Information Response.
 *
 * Stores the peer's feature mask; if the peer supports fixed channels
 * a second request is sent for its fixed channel map, otherwise the
 * info exchange is finished and queued connections are started.
 * Always returns 0.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* A response arrived, so the info request no longer times out */
	del_timer(&conn->info_timer);

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Continue the exchange: ask for the fixed
			 * channel map next */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
3253
/* Demultiplex an skb received on the L2CAP signalling channel.
 *
 * The skb may carry several commands back to back; each header is
 * copied out, length-validated, and dispatched to its handler.  A
 * handler returning non-zero triggers a Command Reject for that ident.
 * The skb is echoed to raw sockets first and always freed on exit.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err = 0;

	/* Give raw sockets a copy of the signalling traffic */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated payload or reserved ident 0: stop parsing */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		switch (cmd.code) {
		case L2CAP_COMMAND_REJ:
			l2cap_command_rej(conn, &cmd, data);
			break;

		case L2CAP_CONN_REQ:
			err = l2cap_connect_req(conn, &cmd, data);
			break;

		case L2CAP_CONN_RSP:
			err = l2cap_connect_rsp(conn, &cmd, data);
			break;

		case L2CAP_CONF_REQ:
			err = l2cap_config_req(conn, &cmd, cmd_len, data);
			break;

		case L2CAP_CONF_RSP:
			err = l2cap_config_rsp(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_REQ:
			err = l2cap_disconnect_req(conn, &cmd, data);
			break;

		case L2CAP_DISCONN_RSP:
			err = l2cap_disconnect_rsp(conn, &cmd, data);
			break;

		case L2CAP_ECHO_REQ:
			/* Echo the request payload straight back */
			l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
			break;

		case L2CAP_ECHO_RSP:
			break;

		case L2CAP_INFO_REQ:
			err = l2cap_information_req(conn, &cmd, data);
			break;

		case L2CAP_INFO_RSP:
			err = l2cap_information_rsp(conn, &cmd, data);
			break;

		default:
			BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
			err = -EINVAL;
			break;
		}

		if (err) {
			struct l2cap_cmd_rej rej;
			BT_DBG("error %d", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3343
3344 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3345 {
3346 u16 our_fcs, rcv_fcs;
3347 int hdr_size = L2CAP_HDR_SIZE + 2;
3348
3349 if (pi->fcs == L2CAP_FCS_CRC16) {
3350 skb_trim(skb, skb->len - 2);
3351 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3352 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3353
3354 if (our_fcs != rcv_fcs)
3355 return -EINVAL;
3356 }
3357 return 0;
3358 }
3359
/* Answer a poll (or similar event) with the F-bit set: either resume
 * sending pending I-frames, or emit an RNR (when locally busy) / RR
 * (when nothing was sent) S-frame so the remote learns our receiver
 * state.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control = 0;

	pi->frames_sent = 0;
	pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Locally busy: tell the peer not to send, F-bit set */
		control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
		l2cap_send_sframe(pi, control);
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
		__mod_retrans_timer();

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	spin_lock_bh(&pi->send_lock);
	l2cap_ertm_send(sk);
	spin_unlock_bh(&pi->send_lock);

	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			pi->frames_sent == 0) {
		/* No I-frame went out, so acknowledge with an RR */
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(pi, control);
	}
}
3392
3393 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3394 {
3395 struct sk_buff *next_skb;
3396
3397 bt_cb(skb)->tx_seq = tx_seq;
3398 bt_cb(skb)->sar = sar;
3399
3400 next_skb = skb_peek(SREJ_QUEUE(sk));
3401 if (!next_skb) {
3402 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3403 return 0;
3404 }
3405
3406 do {
3407 if (bt_cb(next_skb)->tx_seq == tx_seq)
3408 return -EINVAL;
3409
3410 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3411 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3412 return 0;
3413 }
3414
3415 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3416 break;
3417
3418 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3419
3420 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3421
3422 return 0;
3423 }
3424
/* Reassemble an ERTM I-frame into an SDU and deliver it to the socket.
 *
 * Drives the SAR (segmentation and reassembly) state machine keyed on
 * the SAR bits of @control: an unsegmented frame is delivered directly;
 * START allocates pi->sdu, CONTINUE appends, and END clones the
 * finished SDU into the receive queue.  SAR protocol violations tear
 * the channel down.  Consumes @skb (except when it was queued to the
 * socket).  Returns 0, or a negative errno on queueing/allocation
 * failure (the caller then enters the local-busy path).
 */
static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* Unsegmented frame in the middle of an SDU: protocol
		 * violation */
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(sk, skb);
		if (!err)
			return err;

		break;

	case L2CAP_SDU_START:
		if (pi->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		pi->sdu_len = get_unaligned_le16(skb->data);

		if (pi->sdu_len > pi->imtu)
			goto disconnect;

		pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
		if (!pi->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		pi->conn_state |= L2CAP_CONN_SAR_SDU;
		pi->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		pi->partial_sdu_len += skb->len;
		if (pi->partial_sdu_len > pi->sdu_len)
			goto drop;

		memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!pi->sdu)
			goto disconnect;

		/* On a retry the payload was already appended; skip the
		 * length checks and copy */
		if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
			pi->partial_sdu_len += skb->len;

			if (pi->partial_sdu_len > pi->imtu)
				goto drop;

			if (pi->partial_sdu_len != pi->sdu_len)
				goto drop;

			memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(pi->sdu, GFP_ATOMIC);
		if (!_skb) {
			/* Keep the SDU; the busy-work path retries END */
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			pi->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		pi->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(pi->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(pi->sdu);
	pi->sdu = NULL;

	/* fall through: a malformed SDU also closes the channel */
disconnect:
	l2cap_send_disconn_req(pi->conn, sk);
	kfree_skb(skb);
	return 0;
}
3532
/* Workqueue handler run while the receiver is in local-busy state.
 *
 * Periodically retries delivering the frames parked on BUSY_QUEUE to
 * the socket, sleeping between attempts; gives up and disconnects
 * after L2CAP_LOCAL_BUSY_TRIES rounds.  Once the queue drains, clears
 * the busy condition and (if an RNR was sent) emits an RR+Poll S-frame
 * so the remote transmitter resumes.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, busy_work);
	struct sock *sk = (struct sock *)pi;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;
	u16 control;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			err = -EBUSY;
			l2cap_send_disconn_req(pi->conn, sk);
			goto done;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			goto done;
		}

		/* Sleep without the socket lock, then re-take it */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			goto done;

		/* Try to flush the parked frames in order; requeue the
		 * head and retry later if delivery fails again */
		while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
			control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
			err = l2cap_ertm_reassembly_sdu(sk, skb, control);
			if (err < 0) {
				skb_queue_head(BUSY_QUEUE(sk), skb);
				break;
			}

			pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		}

		/* skb == NULL means the queue fully drained */
		if (!skb)
			break;
	}

	if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer we were busy: poll it to resume */
	control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(pi, control);
	l2cap_pi(sk)->retry_count = 1;

	del_timer(&pi->retrans_timer);
	__mod_monitor_timer();

	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;

done:
	pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	pi->conn_state &= ~L2CAP_CONN_RNR_SENT;

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3608
/* Push a received in-sequence I-frame towards the socket.
 *
 * While locally busy, frames are parked on BUSY_QUEUE.  If delivery
 * fails, the local-busy condition is entered: the frame is parked, an
 * RNR S-frame is sent to the peer, and the busy workqueue job is
 * scheduled to retry delivery.  Returns the reassembly result (>= 0 on
 * success) or -EBUSY when the frame was parked.
 */
static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	int sctrl, err;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(BUSY_QUEUE(sk), skb);
		return -EBUSY;
	}

	err = l2cap_ertm_reassembly_sdu(sk, skb, control);
	if (err >= 0) {
		pi->buffer_seq = (pi->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(BUSY_QUEUE(sk), skb);

	/* Tell the peer to stop sending */
	sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(pi, sctrl);

	pi->conn_state |= L2CAP_CONN_RNR_SENT;

	/* Retry delivery from process context */
	queue_work(_busy_wq, &pi->busy_work);

	return err;
}
3641
3642 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3643 {
3644 struct l2cap_pinfo *pi = l2cap_pi(sk);
3645 struct sk_buff *_skb;
3646 int err = -EINVAL;
3647
3648 /*
3649 * TODO: We have to notify the userland if some data is lost with the
3650 * Streaming Mode.
3651 */
3652
3653 switch (control & L2CAP_CTRL_SAR) {
3654 case L2CAP_SDU_UNSEGMENTED:
3655 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3656 kfree_skb(pi->sdu);
3657 break;
3658 }
3659
3660 err = sock_queue_rcv_skb(sk, skb);
3661 if (!err)
3662 return 0;
3663
3664 break;
3665
3666 case L2CAP_SDU_START:
3667 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3668 kfree_skb(pi->sdu);
3669 break;
3670 }
3671
3672 pi->sdu_len = get_unaligned_le16(skb->data);
3673 skb_pull(skb, 2);
3674
3675 if (pi->sdu_len > pi->imtu) {
3676 err = -EMSGSIZE;
3677 break;
3678 }
3679
3680 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3681 if (!pi->sdu) {
3682 err = -ENOMEM;
3683 break;
3684 }
3685
3686 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3687
3688 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3689 pi->partial_sdu_len = skb->len;
3690 err = 0;
3691 break;
3692
3693 case L2CAP_SDU_CONTINUE:
3694 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3695 break;
3696
3697 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3698
3699 pi->partial_sdu_len += skb->len;
3700 if (pi->partial_sdu_len > pi->sdu_len)
3701 kfree_skb(pi->sdu);
3702 else
3703 err = 0;
3704
3705 break;
3706
3707 case L2CAP_SDU_END:
3708 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3709 break;
3710
3711 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3712
3713 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3714 pi->partial_sdu_len += skb->len;
3715
3716 if (pi->partial_sdu_len > pi->imtu)
3717 goto drop;
3718
3719 if (pi->partial_sdu_len == pi->sdu_len) {
3720 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3721 err = sock_queue_rcv_skb(sk, _skb);
3722 if (err < 0)
3723 kfree_skb(_skb);
3724 }
3725 err = 0;
3726
3727 drop:
3728 kfree_skb(pi->sdu);
3729 break;
3730 }
3731
3732 kfree_skb(skb);
3733 return err;
3734 }
3735
3736 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3737 {
3738 struct sk_buff *skb;
3739 u16 control;
3740
3741 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3742 if (bt_cb(skb)->tx_seq != tx_seq)
3743 break;
3744
3745 skb = skb_dequeue(SREJ_QUEUE(sk));
3746 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3747 l2cap_ertm_reassembly_sdu(sk, skb, control);
3748 l2cap_pi(sk)->buffer_seq_srej =
3749 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3750 tx_seq++;
3751 }
3752 }
3753
3754 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3755 {
3756 struct l2cap_pinfo *pi = l2cap_pi(sk);
3757 struct srej_list *l, *tmp;
3758 u16 control;
3759
3760 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3761 if (l->tx_seq == tx_seq) {
3762 list_del(&l->list);
3763 kfree(l);
3764 return;
3765 }
3766 control = L2CAP_SUPER_SELECT_REJECT;
3767 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3768 l2cap_send_sframe(pi, control);
3769 list_del(&l->list);
3770 list_add_tail(&l->list, SREJ_LIST(sk));
3771 }
3772 }
3773
3774 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3775 {
3776 struct l2cap_pinfo *pi = l2cap_pi(sk);
3777 struct srej_list *new;
3778 u16 control;
3779
3780 while (tx_seq != pi->expected_tx_seq) {
3781 control = L2CAP_SUPER_SELECT_REJECT;
3782 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3783 l2cap_send_sframe(pi, control);
3784
3785 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3786 new->tx_seq = pi->expected_tx_seq++;
3787 list_add_tail(&new->list, SREJ_LIST(sk));
3788 }
3789 pi->expected_tx_seq++;
3790 }
3791
3792 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3793 {
3794 struct l2cap_pinfo *pi = l2cap_pi(sk);
3795 u8 tx_seq = __get_txseq(rx_control);
3796 u8 req_seq = __get_reqseq(rx_control);
3797 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3798 u8 tx_seq_offset, expected_tx_seq_offset;
3799 int num_to_ack = (pi->tx_win/6) + 1;
3800 int err = 0;
3801
3802 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3803
3804 if (L2CAP_CTRL_FINAL & rx_control &&
3805 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3806 del_timer(&pi->monitor_timer);
3807 if (pi->unacked_frames > 0)
3808 __mod_retrans_timer();
3809 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3810 }
3811
3812 pi->expected_ack_seq = req_seq;
3813 l2cap_drop_acked_frames(sk);
3814
3815 if (tx_seq == pi->expected_tx_seq)
3816 goto expected;
3817
3818 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3819 if (tx_seq_offset < 0)
3820 tx_seq_offset += 64;
3821
3822 /* invalid tx_seq */
3823 if (tx_seq_offset >= pi->tx_win) {
3824 l2cap_send_disconn_req(pi->conn, sk);
3825 goto drop;
3826 }
3827
3828 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3829 goto drop;
3830
3831 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3832 struct srej_list *first;
3833
3834 first = list_first_entry(SREJ_LIST(sk),
3835 struct srej_list, list);
3836 if (tx_seq == first->tx_seq) {
3837 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3838 l2cap_check_srej_gap(sk, tx_seq);
3839
3840 list_del(&first->list);
3841 kfree(first);
3842
3843 if (list_empty(SREJ_LIST(sk))) {
3844 pi->buffer_seq = pi->buffer_seq_srej;
3845 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3846 l2cap_send_ack(pi);
3847 }
3848 } else {
3849 struct srej_list *l;
3850
3851 /* duplicated tx_seq */
3852 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3853 goto drop;
3854
3855 list_for_each_entry(l, SREJ_LIST(sk), list) {
3856 if (l->tx_seq == tx_seq) {
3857 l2cap_resend_srejframe(sk, tx_seq);
3858 return 0;
3859 }
3860 }
3861 l2cap_send_srejframe(sk, tx_seq);
3862 }
3863 } else {
3864 expected_tx_seq_offset =
3865 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3866 if (expected_tx_seq_offset < 0)
3867 expected_tx_seq_offset += 64;
3868
3869 /* duplicated tx_seq */
3870 if (tx_seq_offset < expected_tx_seq_offset)
3871 goto drop;
3872
3873 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3874
3875 INIT_LIST_HEAD(SREJ_LIST(sk));
3876 pi->buffer_seq_srej = pi->buffer_seq;
3877
3878 __skb_queue_head_init(SREJ_QUEUE(sk));
3879 __skb_queue_head_init(BUSY_QUEUE(sk));
3880 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3881
3882 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3883
3884 l2cap_send_srejframe(sk, tx_seq);
3885 }
3886 return 0;
3887
3888 expected:
3889 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3890
3891 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3892 bt_cb(skb)->tx_seq = tx_seq;
3893 bt_cb(skb)->sar = sar;
3894 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3895 return 0;
3896 }
3897
3898 if (rx_control & L2CAP_CTRL_FINAL) {
3899 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3900 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3901 else
3902 l2cap_retransmit_frames(sk);
3903 }
3904
3905 err = l2cap_push_rx_skb(sk, skb, rx_control);
3906 if (err < 0)
3907 return 0;
3908
3909 __mod_ack_timer();
3910
3911 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3912 if (pi->num_acked == num_to_ack - 1)
3913 l2cap_send_ack(pi);
3914
3915 return 0;
3916
3917 drop:
3918 kfree_skb(skb);
3919 return 0;
3920 }
3921
3922 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3923 {
3924 struct l2cap_pinfo *pi = l2cap_pi(sk);
3925
3926 pi->expected_ack_seq = __get_reqseq(rx_control);
3927 l2cap_drop_acked_frames(sk);
3928
3929 if (rx_control & L2CAP_CTRL_POLL) {
3930 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3931 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3932 (pi->unacked_frames > 0))
3933 __mod_retrans_timer();
3934
3935 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3936 l2cap_send_srejtail(sk);
3937 } else {
3938 l2cap_send_i_or_rr_or_rnr(sk);
3939 }
3940
3941 } else if (rx_control & L2CAP_CTRL_FINAL) {
3942 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3943
3944 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3945 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3946 else
3947 l2cap_retransmit_frames(sk);
3948
3949 } else {
3950 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3951 (pi->unacked_frames > 0))
3952 __mod_retrans_timer();
3953
3954 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3955 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3956 l2cap_send_ack(pi);
3957 } else {
3958 spin_lock_bh(&pi->send_lock);
3959 l2cap_ertm_send(sk);
3960 spin_unlock_bh(&pi->send_lock);
3961 }
3962 }
3963 }
3964
3965 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3966 {
3967 struct l2cap_pinfo *pi = l2cap_pi(sk);
3968 u8 tx_seq = __get_reqseq(rx_control);
3969
3970 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3971
3972 pi->expected_ack_seq = tx_seq;
3973 l2cap_drop_acked_frames(sk);
3974
3975 if (rx_control & L2CAP_CTRL_FINAL) {
3976 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3977 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3978 else
3979 l2cap_retransmit_frames(sk);
3980 } else {
3981 l2cap_retransmit_frames(sk);
3982
3983 if (pi->conn_state & L2CAP_CONN_WAIT_F)
3984 pi->conn_state |= L2CAP_CONN_REJ_ACT;
3985 }
3986 }
3987 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3988 {
3989 struct l2cap_pinfo *pi = l2cap_pi(sk);
3990 u8 tx_seq = __get_reqseq(rx_control);
3991
3992 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3993
3994 if (rx_control & L2CAP_CTRL_POLL) {
3995 pi->expected_ack_seq = tx_seq;
3996 l2cap_drop_acked_frames(sk);
3997 l2cap_retransmit_one_frame(sk, tx_seq);
3998
3999 spin_lock_bh(&pi->send_lock);
4000 l2cap_ertm_send(sk);
4001 spin_unlock_bh(&pi->send_lock);
4002
4003 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4004 pi->srej_save_reqseq = tx_seq;
4005 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4006 }
4007 } else if (rx_control & L2CAP_CTRL_FINAL) {
4008 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4009 pi->srej_save_reqseq == tx_seq)
4010 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4011 else
4012 l2cap_retransmit_one_frame(sk, tx_seq);
4013 } else {
4014 l2cap_retransmit_one_frame(sk, tx_seq);
4015 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4016 pi->srej_save_reqseq = tx_seq;
4017 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4018 }
4019 }
4020 }
4021
4022 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4023 {
4024 struct l2cap_pinfo *pi = l2cap_pi(sk);
4025 u8 tx_seq = __get_reqseq(rx_control);
4026
4027 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4028 pi->expected_ack_seq = tx_seq;
4029 l2cap_drop_acked_frames(sk);
4030
4031 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4032 del_timer(&pi->retrans_timer);
4033 if (rx_control & L2CAP_CTRL_POLL)
4034 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4035 return;
4036 }
4037
4038 if (rx_control & L2CAP_CTRL_POLL)
4039 l2cap_send_srejtail(sk);
4040 else
4041 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
4042 }
4043
4044 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4045 {
4046 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4047
4048 if (L2CAP_CTRL_FINAL & rx_control &&
4049 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4050 del_timer(&l2cap_pi(sk)->monitor_timer);
4051 if (l2cap_pi(sk)->unacked_frames > 0)
4052 __mod_retrans_timer();
4053 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4054 }
4055
4056 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4057 case L2CAP_SUPER_RCV_READY:
4058 l2cap_data_channel_rrframe(sk, rx_control);
4059 break;
4060
4061 case L2CAP_SUPER_REJECT:
4062 l2cap_data_channel_rejframe(sk, rx_control);
4063 break;
4064
4065 case L2CAP_SUPER_SELECT_REJECT:
4066 l2cap_data_channel_srejframe(sk, rx_control);
4067 break;
4068
4069 case L2CAP_SUPER_RCV_NOT_READY:
4070 l2cap_data_channel_rnrframe(sk, rx_control);
4071 break;
4072 }
4073
4074 kfree_skb(skb);
4075 return 0;
4076 }
4077
4078 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4079 {
4080 struct sock *sk;
4081 struct l2cap_pinfo *pi;
4082 u16 control, len;
4083 u8 tx_seq, req_seq, next_tx_seq_offset, req_seq_offset;
4084
4085 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4086 if (!sk) {
4087 BT_DBG("unknown cid 0x%4.4x", cid);
4088 goto drop;
4089 }
4090
4091 pi = l2cap_pi(sk);
4092
4093 BT_DBG("sk %p, len %d", sk, skb->len);
4094
4095 if (sk->sk_state != BT_CONNECTED)
4096 goto drop;
4097
4098 switch (pi->mode) {
4099 case L2CAP_MODE_BASIC:
4100 /* If socket recv buffers overflows we drop data here
4101 * which is *bad* because L2CAP has to be reliable.
4102 * But we don't have any other choice. L2CAP doesn't
4103 * provide flow control mechanism. */
4104
4105 if (pi->imtu < skb->len)
4106 goto drop;
4107
4108 if (!sock_queue_rcv_skb(sk, skb))
4109 goto done;
4110 break;
4111
4112 case L2CAP_MODE_ERTM:
4113 control = get_unaligned_le16(skb->data);
4114 skb_pull(skb, 2);
4115 len = skb->len;
4116
4117 if (__is_sar_start(control))
4118 len -= 2;
4119
4120 if (pi->fcs == L2CAP_FCS_CRC16)
4121 len -= 2;
4122
4123 /*
4124 * We can just drop the corrupted I-frame here.
4125 * Receiver will miss it and start proper recovery
4126 * procedures and ask retransmission.
4127 */
4128 if (len > pi->mps) {
4129 l2cap_send_disconn_req(pi->conn, sk);
4130 goto drop;
4131 }
4132
4133 if (l2cap_check_fcs(pi, skb))
4134 goto drop;
4135
4136 req_seq = __get_reqseq(control);
4137 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4138 if (req_seq_offset < 0)
4139 req_seq_offset += 64;
4140
4141 next_tx_seq_offset =
4142 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4143 if (next_tx_seq_offset < 0)
4144 next_tx_seq_offset += 64;
4145
4146 /* check for invalid req-seq */
4147 if (req_seq_offset > next_tx_seq_offset) {
4148 l2cap_send_disconn_req(pi->conn, sk);
4149 goto drop;
4150 }
4151
4152 if (__is_iframe(control)) {
4153 if (len < 4) {
4154 l2cap_send_disconn_req(pi->conn, sk);
4155 goto drop;
4156 }
4157
4158 l2cap_data_channel_iframe(sk, control, skb);
4159 } else {
4160 if (len != 0) {
4161 l2cap_send_disconn_req(pi->conn, sk);
4162 goto drop;
4163 }
4164
4165 l2cap_data_channel_sframe(sk, control, skb);
4166 }
4167
4168 goto done;
4169
4170 case L2CAP_MODE_STREAMING:
4171 control = get_unaligned_le16(skb->data);
4172 skb_pull(skb, 2);
4173 len = skb->len;
4174
4175 if (__is_sar_start(control))
4176 len -= 2;
4177
4178 if (pi->fcs == L2CAP_FCS_CRC16)
4179 len -= 2;
4180
4181 if (len > pi->mps || len < 4 || __is_sframe(control))
4182 goto drop;
4183
4184 if (l2cap_check_fcs(pi, skb))
4185 goto drop;
4186
4187 tx_seq = __get_txseq(control);
4188
4189 if (pi->expected_tx_seq == tx_seq)
4190 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4191 else
4192 pi->expected_tx_seq = (tx_seq + 1) % 64;
4193
4194 l2cap_streaming_reassembly_sdu(sk, skb, control);
4195
4196 goto done;
4197
4198 default:
4199 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
4200 break;
4201 }
4202
4203 drop:
4204 kfree_skb(skb);
4205
4206 done:
4207 if (sk)
4208 bh_unlock_sock(sk);
4209
4210 return 0;
4211 }
4212
4213 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4214 {
4215 struct sock *sk;
4216
4217 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4218 if (!sk)
4219 goto drop;
4220
4221 BT_DBG("sk %p, len %d", sk, skb->len);
4222
4223 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4224 goto drop;
4225
4226 if (l2cap_pi(sk)->imtu < skb->len)
4227 goto drop;
4228
4229 if (!sock_queue_rcv_skb(sk, skb))
4230 goto done;
4231
4232 drop:
4233 kfree_skb(skb);
4234
4235 done:
4236 if (sk)
4237 bh_unlock_sock(sk);
4238 return 0;
4239 }
4240
4241 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4242 {
4243 struct l2cap_hdr *lh = (void *) skb->data;
4244 u16 cid, len;
4245 __le16 psm;
4246
4247 skb_pull(skb, L2CAP_HDR_SIZE);
4248 cid = __le16_to_cpu(lh->cid);
4249 len = __le16_to_cpu(lh->len);
4250
4251 if (len != skb->len) {
4252 kfree_skb(skb);
4253 return;
4254 }
4255
4256 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4257
4258 switch (cid) {
4259 case L2CAP_CID_SIGNALING:
4260 l2cap_sig_channel(conn, skb);
4261 break;
4262
4263 case L2CAP_CID_CONN_LESS:
4264 psm = get_unaligned_le16(skb->data);
4265 skb_pull(skb, 2);
4266 l2cap_conless_channel(conn, psm, skb);
4267 break;
4268
4269 default:
4270 l2cap_data_channel(conn, cid, skb);
4271 break;
4272 }
4273 }
4274
4275 /* ---- L2CAP interface with lower layer (HCI) ---- */
4276
4277 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4278 {
4279 int exact = 0, lm1 = 0, lm2 = 0;
4280 register struct sock *sk;
4281 struct hlist_node *node;
4282
4283 if (type != ACL_LINK)
4284 return 0;
4285
4286 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4287
4288 /* Find listening sockets and check their link_mode */
4289 read_lock(&l2cap_sk_list.lock);
4290 sk_for_each(sk, node, &l2cap_sk_list.head) {
4291 if (sk->sk_state != BT_LISTEN)
4292 continue;
4293
4294 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4295 lm1 |= HCI_LM_ACCEPT;
4296 if (l2cap_pi(sk)->role_switch)
4297 lm1 |= HCI_LM_MASTER;
4298 exact++;
4299 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4300 lm2 |= HCI_LM_ACCEPT;
4301 if (l2cap_pi(sk)->role_switch)
4302 lm2 |= HCI_LM_MASTER;
4303 }
4304 }
4305 read_unlock(&l2cap_sk_list.lock);
4306
4307 return exact ? lm1 : lm2;
4308 }
4309
4310 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4311 {
4312 struct l2cap_conn *conn;
4313
4314 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4315
4316 if (hcon->type != ACL_LINK)
4317 return 0;
4318
4319 if (!status) {
4320 conn = l2cap_conn_add(hcon, status);
4321 if (conn)
4322 l2cap_conn_ready(conn);
4323 } else
4324 l2cap_conn_del(hcon, bt_err(status));
4325
4326 return 0;
4327 }
4328
/* HCI disconnect indication: return the reason code to report for the
 * link.  Falls back to 0x13 ("Remote User Terminated Connection") for
 * non-ACL links or links without L2CAP state. */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}
4340
/* HCI disconnect confirmation: tear down all L2CAP state for the
 * (ACL) link, propagating @reason as an errno to the channels. */
static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);

	/* L2CAP only runs over ACL links. */
	if (hcon->type != ACL_LINK)
		return 0;

	l2cap_conn_del(hcon, bt_err(reason));

	return 0;
}
4352
4353 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4354 {
4355 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4356 return;
4357
4358 if (encrypt == 0x00) {
4359 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4360 l2cap_sock_clear_timer(sk);
4361 l2cap_sock_set_timer(sk, HZ * 5);
4362 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4363 __l2cap_sock_close(sk, ECONNREFUSED);
4364 } else {
4365 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4366 l2cap_sock_clear_timer(sk);
4367 }
4368 }
4369
/* HCI security (authentication/encryption) completion callback.
 *
 * Walks every channel on the connection and advances its state machine
 * according to @status (0 = success) and @encrypt.  Runs in bottom-half
 * context; each socket is bh-locked in turn under the channel-list
 * read lock. */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		/* Channels still waiting on connect completion are
		 * handled by the connect path, not here. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only react to encryption changes. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
					sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security done: send the deferred L2CAP
				 * Connection Request now. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				l2cap_pi(sk)->ident = l2cap_get_ident(conn);

				l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: close the channel after
				 * a short grace period. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming connection that was waiting on security:
			 * answer with success or "blocked by security". */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
4442
/* HCI ACL data input path.
 *
 * ACL packets may fragment a single L2CAP frame: an ACL_START packet
 * carries the L2CAP header (and possibly the entire frame), while
 * continuation packets are appended to conn->rx_skb until conn->rx_len
 * reaches zero.  Malformed sequences reset reassembly and mark the
 * connection unreliable.  Consumes @skb.  Always returns 0. */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		if (conn->rx_len) {
			/* A previous frame never completed: discard it. */
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		if (skb->len < 2) {
			/* Not even the length field is present. */
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			/* Continuation without a pending start frame. */
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			/* Fragment overruns the announced frame length. */
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4530
/* debugfs dump: one line per L2CAP socket with addresses, state, PSM,
 * channel ids, MTUs and security level. */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	struct hlist_node *node;

	read_lock_bh(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		struct l2cap_pinfo *pi = l2cap_pi(sk);

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					sk->sk_state, __le16_to_cpu(pi->psm),
					pi->scid, pi->dcid,
					pi->imtu, pi->omtu, pi->sec_level);
	}

	read_unlock_bh(&l2cap_sk_list.lock);

	return 0;
}
4553
/* seq_file open hook for the "l2cap" debugfs entry. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}

/* Read-only file operations (standard single_open seq_file pattern). */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs file, kept so l2cap_exit() can remove it. */
static struct dentry *l2cap_debugfs;
4567
/* Socket-layer operations for BTPROTO_L2CAP sockets.  Generic
 * Bluetooth helpers are used where no L2CAP-specific behaviour is
 * needed (poll, ioctl); mmap and socketpair are unsupported. */
static const struct proto_ops l2cap_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= l2cap_sock_release,
	.bind		= l2cap_sock_bind,
	.connect	= l2cap_sock_connect,
	.listen		= l2cap_sock_listen,
	.accept		= l2cap_sock_accept,
	.getname	= l2cap_sock_getname,
	.sendmsg	= l2cap_sock_sendmsg,
	.recvmsg	= l2cap_sock_recvmsg,
	.poll		= bt_sock_poll,
	.ioctl		= bt_sock_ioctl,
	.mmap		= sock_no_mmap,
	.socketpair	= sock_no_socketpair,
	.shutdown	= l2cap_sock_shutdown,
	.setsockopt	= l2cap_sock_setsockopt,
	.getsockopt	= l2cap_sock_getsockopt
};

/* Address-family hook that creates L2CAP sockets. */
static const struct net_proto_family l2cap_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= l2cap_sock_create,
};
4593
/* Registration with the HCI core: these callbacks deliver link-level
 * events and ACL data into L2CAP. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
};
4604
4605 static int __init l2cap_init(void)
4606 {
4607 int err;
4608
4609 err = proto_register(&l2cap_proto, 0);
4610 if (err < 0)
4611 return err;
4612
4613 _busy_wq = create_singlethread_workqueue("l2cap");
4614 if (!_busy_wq)
4615 goto error;
4616
4617 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4618 if (err < 0) {
4619 BT_ERR("L2CAP socket registration failed");
4620 goto error;
4621 }
4622
4623 err = hci_register_proto(&l2cap_hci_proto);
4624 if (err < 0) {
4625 BT_ERR("L2CAP protocol registration failed");
4626 bt_sock_unregister(BTPROTO_L2CAP);
4627 goto error;
4628 }
4629
4630 if (bt_debugfs) {
4631 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4632 bt_debugfs, NULL, &l2cap_debugfs_fops);
4633 if (!l2cap_debugfs)
4634 BT_ERR("Failed to create L2CAP debug file");
4635 }
4636
4637 BT_INFO("L2CAP ver %s", VERSION);
4638 BT_INFO("L2CAP socket layer initialized");
4639
4640 return 0;
4641
4642 error:
4643 proto_unregister(&l2cap_proto);
4644 return err;
4645 }
4646
/* Module teardown: mirrors l2cap_init() in reverse order. */
static void __exit l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Drain pending busy-state work before destroying the queue. */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
		BT_ERR("L2CAP socket unregistration failed");

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	proto_unregister(&l2cap_proto);
}
4662
/* Intentionally empty exported symbol. */
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4670
module_init(l2cap_init);
module_exit(l2cap_exit);

/* ERTM support is opt-in via this module parameter. */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");

/* Maximum number of transmissions of a frame before giving up. */
module_param(max_transmit, uint, 0644);
MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");

/* ERTM transmission window size. */
module_param(tx_window, uint, 0644);
MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");

MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
MODULE_ALIAS("bt-proto-0");
This page took 0.129154 seconds and 5 git commands to generate.