2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
58 #define VERSION "2.15"
62 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
63 static u8 l2cap_fixed_chan
[8] = { 0x02, };
65 static struct workqueue_struct
*_busy_wq
;
67 struct bt_sock_list l2cap_sk_list
= {
68 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
71 static void l2cap_busy_work(struct work_struct
*work
);
73 static void l2cap_sock_close(struct sock
*sk
);
75 static int l2cap_build_conf_req(struct sock
*sk
, void *data
);
76 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
77 u8 code
, u8 ident
, u16 dlen
, void *data
);
79 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
);
81 /* ---- L2CAP timers ---- */
82 void l2cap_sock_set_timer(struct sock
*sk
, long timeout
)
84 BT_DBG("sk %p state %d timeout %ld", sk
, sk
->sk_state
, timeout
);
85 sk_reset_timer(sk
, &sk
->sk_timer
, jiffies
+ timeout
);
88 static void l2cap_sock_clear_timer(struct sock
*sk
)
90 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
91 sk_stop_timer(sk
, &sk
->sk_timer
);
94 /* ---- L2CAP channels ---- */
95 static struct sock
*__l2cap_get_chan_by_dcid(struct l2cap_chan_list
*l
, u16 cid
)
98 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
99 if (l2cap_pi(s
)->dcid
== cid
)
105 static struct sock
*__l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
108 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
109 if (l2cap_pi(s
)->scid
== cid
)
115 /* Find channel with given SCID.
116 * Returns locked socket */
117 static inline struct sock
*l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
121 s
= __l2cap_get_chan_by_scid(l
, cid
);
124 read_unlock(&l
->lock
);
128 static struct sock
*__l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
131 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
132 if (l2cap_pi(s
)->ident
== ident
)
138 static inline struct sock
*l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
142 s
= __l2cap_get_chan_by_ident(l
, ident
);
145 read_unlock(&l
->lock
);
149 static u16
l2cap_alloc_cid(struct l2cap_chan_list
*l
)
151 u16 cid
= L2CAP_CID_DYN_START
;
153 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
154 if (!__l2cap_get_chan_by_scid(l
, cid
))
161 static inline void __l2cap_chan_link(struct l2cap_chan_list
*l
, struct sock
*sk
)
166 l2cap_pi(l
->head
)->prev_c
= sk
;
168 l2cap_pi(sk
)->next_c
= l
->head
;
169 l2cap_pi(sk
)->prev_c
= NULL
;
173 static inline void l2cap_chan_unlink(struct l2cap_chan_list
*l
, struct sock
*sk
)
175 struct sock
*next
= l2cap_pi(sk
)->next_c
, *prev
= l2cap_pi(sk
)->prev_c
;
177 write_lock_bh(&l
->lock
);
182 l2cap_pi(next
)->prev_c
= prev
;
184 l2cap_pi(prev
)->next_c
= next
;
185 write_unlock_bh(&l
->lock
);
190 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
192 struct l2cap_chan_list
*l
= &conn
->chan_list
;
194 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
195 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
197 conn
->disc_reason
= 0x13;
199 l2cap_pi(sk
)->conn
= conn
;
201 if (sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
) {
202 /* Alloc CID for connection-oriented socket */
203 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(l
);
204 } else if (sk
->sk_type
== SOCK_DGRAM
) {
205 /* Connectionless socket */
206 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
207 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
208 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
210 /* Raw socket can send/recv signalling messages only */
211 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
212 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
213 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
216 __l2cap_chan_link(l
, sk
);
219 bt_accept_enqueue(parent
, sk
);
223 * Must be called on the locked socket. */
224 static void l2cap_chan_del(struct sock
*sk
, int err
)
226 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
227 struct sock
*parent
= bt_sk(sk
)->parent
;
229 l2cap_sock_clear_timer(sk
);
231 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
234 /* Unlink from channel list */
235 l2cap_chan_unlink(&conn
->chan_list
, sk
);
236 l2cap_pi(sk
)->conn
= NULL
;
237 hci_conn_put(conn
->hcon
);
240 sk
->sk_state
= BT_CLOSED
;
241 sock_set_flag(sk
, SOCK_ZAPPED
);
247 bt_accept_unlink(sk
);
248 parent
->sk_data_ready(parent
, 0);
250 sk
->sk_state_change(sk
);
252 skb_queue_purge(TX_QUEUE(sk
));
254 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
255 struct srej_list
*l
, *tmp
;
257 del_timer(&l2cap_pi(sk
)->retrans_timer
);
258 del_timer(&l2cap_pi(sk
)->monitor_timer
);
259 del_timer(&l2cap_pi(sk
)->ack_timer
);
261 skb_queue_purge(SREJ_QUEUE(sk
));
262 skb_queue_purge(BUSY_QUEUE(sk
));
264 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
271 static inline u8
l2cap_get_auth_type(struct sock
*sk
)
273 if (sk
->sk_type
== SOCK_RAW
) {
274 switch (l2cap_pi(sk
)->sec_level
) {
275 case BT_SECURITY_HIGH
:
276 return HCI_AT_DEDICATED_BONDING_MITM
;
277 case BT_SECURITY_MEDIUM
:
278 return HCI_AT_DEDICATED_BONDING
;
280 return HCI_AT_NO_BONDING
;
282 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
283 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
284 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
286 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
287 return HCI_AT_NO_BONDING_MITM
;
289 return HCI_AT_NO_BONDING
;
291 switch (l2cap_pi(sk
)->sec_level
) {
292 case BT_SECURITY_HIGH
:
293 return HCI_AT_GENERAL_BONDING_MITM
;
294 case BT_SECURITY_MEDIUM
:
295 return HCI_AT_GENERAL_BONDING
;
297 return HCI_AT_NO_BONDING
;
302 /* Service level security */
303 static inline int l2cap_check_security(struct sock
*sk
)
305 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
308 auth_type
= l2cap_get_auth_type(sk
);
310 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
314 static inline u8
l2cap_get_ident(struct l2cap_conn
*conn
)
318 /* Get next available identificator.
319 * 1 - 128 are used by kernel.
320 * 129 - 199 are reserved.
321 * 200 - 254 are used by utilities like l2ping, etc.
324 spin_lock_bh(&conn
->lock
);
326 if (++conn
->tx_ident
> 128)
331 spin_unlock_bh(&conn
->lock
);
336 static inline void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
338 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
341 BT_DBG("code 0x%2.2x", code
);
346 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
347 flags
= ACL_START_NO_FLUSH
;
351 hci_send_acl(conn
->hcon
, skb
, flags
);
354 static inline void l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
357 struct l2cap_hdr
*lh
;
358 struct l2cap_conn
*conn
= pi
->conn
;
359 struct sock
*sk
= (struct sock
*)pi
;
360 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
363 if (sk
->sk_state
!= BT_CONNECTED
)
366 if (pi
->fcs
== L2CAP_FCS_CRC16
)
369 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
371 count
= min_t(unsigned int, conn
->mtu
, hlen
);
372 control
|= L2CAP_CTRL_FRAME_TYPE
;
374 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
375 control
|= L2CAP_CTRL_FINAL
;
376 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
379 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
380 control
|= L2CAP_CTRL_POLL
;
381 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
384 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
388 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
389 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
390 lh
->cid
= cpu_to_le16(pi
->dcid
);
391 put_unaligned_le16(control
, skb_put(skb
, 2));
393 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
394 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
395 put_unaligned_le16(fcs
, skb_put(skb
, 2));
398 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
399 flags
= ACL_START_NO_FLUSH
;
403 hci_send_acl(pi
->conn
->hcon
, skb
, flags
);
406 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo
*pi
, u16 control
)
408 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
409 control
|= L2CAP_SUPER_RCV_NOT_READY
;
410 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
412 control
|= L2CAP_SUPER_RCV_READY
;
414 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
416 l2cap_send_sframe(pi
, control
);
419 static inline int __l2cap_no_conn_pending(struct sock
*sk
)
421 return !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
);
424 static void l2cap_do_start(struct sock
*sk
)
426 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
428 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
429 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
432 if (l2cap_check_security(sk
) && __l2cap_no_conn_pending(sk
)) {
433 struct l2cap_conn_req req
;
434 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
435 req
.psm
= l2cap_pi(sk
)->psm
;
437 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
438 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
440 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
441 L2CAP_CONN_REQ
, sizeof(req
), &req
);
444 struct l2cap_info_req req
;
445 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
447 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
448 conn
->info_ident
= l2cap_get_ident(conn
);
450 mod_timer(&conn
->info_timer
, jiffies
+
451 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
453 l2cap_send_cmd(conn
, conn
->info_ident
,
454 L2CAP_INFO_REQ
, sizeof(req
), &req
);
458 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
460 u32 local_feat_mask
= l2cap_feat_mask
;
462 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
465 case L2CAP_MODE_ERTM
:
466 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
467 case L2CAP_MODE_STREAMING
:
468 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
474 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
, int err
)
476 struct l2cap_disconn_req req
;
481 skb_queue_purge(TX_QUEUE(sk
));
483 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
484 del_timer(&l2cap_pi(sk
)->retrans_timer
);
485 del_timer(&l2cap_pi(sk
)->monitor_timer
);
486 del_timer(&l2cap_pi(sk
)->ack_timer
);
489 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
490 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
491 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
492 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
494 sk
->sk_state
= BT_DISCONN
;
498 /* ---- L2CAP connections ---- */
499 static void l2cap_conn_start(struct l2cap_conn
*conn
)
501 struct l2cap_chan_list
*l
= &conn
->chan_list
;
502 struct sock_del_list del
, *tmp1
, *tmp2
;
505 BT_DBG("conn %p", conn
);
507 INIT_LIST_HEAD(&del
.list
);
511 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
514 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
515 sk
->sk_type
!= SOCK_STREAM
) {
520 if (sk
->sk_state
== BT_CONNECT
) {
521 struct l2cap_conn_req req
;
523 if (!l2cap_check_security(sk
) ||
524 !__l2cap_no_conn_pending(sk
)) {
529 if (!l2cap_mode_supported(l2cap_pi(sk
)->mode
,
531 && l2cap_pi(sk
)->conf_state
&
532 L2CAP_CONF_STATE2_DEVICE
) {
533 tmp1
= kzalloc(sizeof(struct sock_del_list
),
536 list_add_tail(&tmp1
->list
, &del
.list
);
541 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
542 req
.psm
= l2cap_pi(sk
)->psm
;
544 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
545 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
547 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
548 L2CAP_CONN_REQ
, sizeof(req
), &req
);
550 } else if (sk
->sk_state
== BT_CONNECT2
) {
551 struct l2cap_conn_rsp rsp
;
553 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
554 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
556 if (l2cap_check_security(sk
)) {
557 if (bt_sk(sk
)->defer_setup
) {
558 struct sock
*parent
= bt_sk(sk
)->parent
;
559 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
560 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
561 parent
->sk_data_ready(parent
, 0);
564 sk
->sk_state
= BT_CONFIG
;
565 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
566 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
569 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
570 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
573 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
574 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
576 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
||
577 rsp
.result
!= L2CAP_CR_SUCCESS
) {
582 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
583 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
584 l2cap_build_conf_req(sk
, buf
), buf
);
585 l2cap_pi(sk
)->num_conf_req
++;
591 read_unlock(&l
->lock
);
593 list_for_each_entry_safe(tmp1
, tmp2
, &del
.list
, list
) {
594 bh_lock_sock(tmp1
->sk
);
595 __l2cap_sock_close(tmp1
->sk
, ECONNRESET
);
596 bh_unlock_sock(tmp1
->sk
);
597 list_del(&tmp1
->list
);
602 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
604 struct l2cap_chan_list
*l
= &conn
->chan_list
;
607 BT_DBG("conn %p", conn
);
611 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
614 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
615 sk
->sk_type
!= SOCK_STREAM
) {
616 l2cap_sock_clear_timer(sk
);
617 sk
->sk_state
= BT_CONNECTED
;
618 sk
->sk_state_change(sk
);
619 } else if (sk
->sk_state
== BT_CONNECT
)
625 read_unlock(&l
->lock
);
628 /* Notify sockets that we cannot guarantee reliability anymore */
629 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
631 struct l2cap_chan_list
*l
= &conn
->chan_list
;
634 BT_DBG("conn %p", conn
);
638 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
639 if (l2cap_pi(sk
)->force_reliable
)
643 read_unlock(&l
->lock
);
646 static void l2cap_info_timeout(unsigned long arg
)
648 struct l2cap_conn
*conn
= (void *) arg
;
650 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
651 conn
->info_ident
= 0;
653 l2cap_conn_start(conn
);
656 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
658 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
663 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
667 hcon
->l2cap_data
= conn
;
670 BT_DBG("hcon %p conn %p", hcon
, conn
);
672 conn
->mtu
= hcon
->hdev
->acl_mtu
;
673 conn
->src
= &hcon
->hdev
->bdaddr
;
674 conn
->dst
= &hcon
->dst
;
678 spin_lock_init(&conn
->lock
);
679 rwlock_init(&conn
->chan_list
.lock
);
681 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
682 (unsigned long) conn
);
684 conn
->disc_reason
= 0x13;
689 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
691 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
697 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
699 kfree_skb(conn
->rx_skb
);
702 while ((sk
= conn
->chan_list
.head
)) {
704 l2cap_chan_del(sk
, err
);
709 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
710 del_timer_sync(&conn
->info_timer
);
712 hcon
->l2cap_data
= NULL
;
716 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
718 struct l2cap_chan_list
*l
= &conn
->chan_list
;
719 write_lock_bh(&l
->lock
);
720 __l2cap_chan_add(conn
, sk
, parent
);
721 write_unlock_bh(&l
->lock
);
724 /* ---- Socket interface ---- */
726 /* Find socket with psm and source bdaddr.
727 * Returns closest match.
729 static struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
731 struct sock
*sk
= NULL
, *sk1
= NULL
;
732 struct hlist_node
*node
;
734 read_lock(&l2cap_sk_list
.lock
);
736 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
737 if (state
&& sk
->sk_state
!= state
)
740 if (l2cap_pi(sk
)->psm
== psm
) {
742 if (!bacmp(&bt_sk(sk
)->src
, src
))
746 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
751 read_unlock(&l2cap_sk_list
.lock
);
753 return node
? sk
: sk1
;
756 static void l2cap_sock_cleanup_listen(struct sock
*parent
)
760 BT_DBG("parent %p", parent
);
762 /* Close not yet accepted channels */
763 while ((sk
= bt_accept_dequeue(parent
, NULL
)))
764 l2cap_sock_close(sk
);
766 parent
->sk_state
= BT_CLOSED
;
767 sock_set_flag(parent
, SOCK_ZAPPED
);
770 /* Kill socket (only if zapped and orphan)
771 * Must be called on unlocked socket.
773 void l2cap_sock_kill(struct sock
*sk
)
775 if (!sock_flag(sk
, SOCK_ZAPPED
) || sk
->sk_socket
)
778 BT_DBG("sk %p state %d", sk
, sk
->sk_state
);
780 /* Kill poor orphan */
781 bt_sock_unlink(&l2cap_sk_list
, sk
);
782 sock_set_flag(sk
, SOCK_DEAD
);
786 void __l2cap_sock_close(struct sock
*sk
, int reason
)
788 BT_DBG("sk %p state %d socket %p", sk
, sk
->sk_state
, sk
->sk_socket
);
790 switch (sk
->sk_state
) {
792 l2cap_sock_cleanup_listen(sk
);
797 if (sk
->sk_type
== SOCK_SEQPACKET
||
798 sk
->sk_type
== SOCK_STREAM
) {
799 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
801 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
802 l2cap_send_disconn_req(conn
, sk
, reason
);
804 l2cap_chan_del(sk
, reason
);
808 if (sk
->sk_type
== SOCK_SEQPACKET
||
809 sk
->sk_type
== SOCK_STREAM
) {
810 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
811 struct l2cap_conn_rsp rsp
;
814 if (bt_sk(sk
)->defer_setup
)
815 result
= L2CAP_CR_SEC_BLOCK
;
817 result
= L2CAP_CR_BAD_PSM
;
818 sk
->sk_state
= BT_DISCONN
;
820 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
821 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
822 rsp
.result
= cpu_to_le16(result
);
823 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
824 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
825 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
827 l2cap_chan_del(sk
, reason
);
832 l2cap_chan_del(sk
, reason
);
836 sock_set_flag(sk
, SOCK_ZAPPED
);
841 /* Must be called on unlocked socket. */
842 static void l2cap_sock_close(struct sock
*sk
)
844 l2cap_sock_clear_timer(sk
);
846 __l2cap_sock_close(sk
, ECONNRESET
);
851 int l2cap_do_connect(struct sock
*sk
)
853 bdaddr_t
*src
= &bt_sk(sk
)->src
;
854 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
855 struct l2cap_conn
*conn
;
856 struct hci_conn
*hcon
;
857 struct hci_dev
*hdev
;
861 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
864 hdev
= hci_get_route(dst
, src
);
866 return -EHOSTUNREACH
;
868 hci_dev_lock_bh(hdev
);
872 auth_type
= l2cap_get_auth_type(sk
);
874 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
875 l2cap_pi(sk
)->sec_level
, auth_type
);
879 conn
= l2cap_conn_add(hcon
, 0);
887 /* Update source addr of the socket */
888 bacpy(src
, conn
->src
);
890 l2cap_chan_add(conn
, sk
, NULL
);
892 sk
->sk_state
= BT_CONNECT
;
893 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
895 if (hcon
->state
== BT_CONNECTED
) {
896 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
897 sk
->sk_type
!= SOCK_STREAM
) {
898 l2cap_sock_clear_timer(sk
);
899 if (l2cap_check_security(sk
))
900 sk
->sk_state
= BT_CONNECTED
;
906 hci_dev_unlock_bh(hdev
);
911 static int __l2cap_wait_ack(struct sock
*sk
)
913 DECLARE_WAITQUEUE(wait
, current
);
917 add_wait_queue(sk_sleep(sk
), &wait
);
918 while ((l2cap_pi(sk
)->unacked_frames
> 0 && l2cap_pi(sk
)->conn
)) {
919 set_current_state(TASK_INTERRUPTIBLE
);
924 if (signal_pending(current
)) {
925 err
= sock_intr_errno(timeo
);
930 timeo
= schedule_timeout(timeo
);
933 err
= sock_error(sk
);
937 set_current_state(TASK_RUNNING
);
938 remove_wait_queue(sk_sleep(sk
), &wait
);
942 static void l2cap_monitor_timeout(unsigned long arg
)
944 struct sock
*sk
= (void *) arg
;
949 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
950 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
, ECONNABORTED
);
955 l2cap_pi(sk
)->retry_count
++;
956 __mod_monitor_timer();
958 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
962 static void l2cap_retrans_timeout(unsigned long arg
)
964 struct sock
*sk
= (void *) arg
;
969 l2cap_pi(sk
)->retry_count
= 1;
970 __mod_monitor_timer();
972 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
974 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
978 static void l2cap_drop_acked_frames(struct sock
*sk
)
982 while ((skb
= skb_peek(TX_QUEUE(sk
))) &&
983 l2cap_pi(sk
)->unacked_frames
) {
984 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
987 skb
= skb_dequeue(TX_QUEUE(sk
));
990 l2cap_pi(sk
)->unacked_frames
--;
993 if (!l2cap_pi(sk
)->unacked_frames
)
994 del_timer(&l2cap_pi(sk
)->retrans_timer
);
997 static inline void l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
999 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1000 struct hci_conn
*hcon
= pi
->conn
->hcon
;
1003 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
1005 if (!pi
->flushable
&& lmp_no_flush_capable(hcon
->hdev
))
1006 flags
= ACL_START_NO_FLUSH
;
1010 hci_send_acl(hcon
, skb
, flags
);
1013 static void l2cap_streaming_send(struct sock
*sk
)
1015 struct sk_buff
*skb
;
1016 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1019 while ((skb
= skb_dequeue(TX_QUEUE(sk
)))) {
1020 control
= get_unaligned_le16(skb
->data
+ L2CAP_HDR_SIZE
);
1021 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1022 put_unaligned_le16(control
, skb
->data
+ L2CAP_HDR_SIZE
);
1024 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1025 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
- 2);
1026 put_unaligned_le16(fcs
, skb
->data
+ skb
->len
- 2);
1029 l2cap_do_send(sk
, skb
);
1031 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1035 static void l2cap_retransmit_one_frame(struct sock
*sk
, u8 tx_seq
)
1037 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1038 struct sk_buff
*skb
, *tx_skb
;
1041 skb
= skb_peek(TX_QUEUE(sk
));
1046 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1049 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1052 } while ((skb
= skb_queue_next(TX_QUEUE(sk
), skb
)));
1054 if (pi
->remote_max_tx
&&
1055 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1056 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1060 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1061 bt_cb(skb
)->retries
++;
1062 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1064 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1065 control
|= L2CAP_CTRL_FINAL
;
1066 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1069 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1070 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1072 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1074 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1075 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1076 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1079 l2cap_do_send(sk
, tx_skb
);
1082 static int l2cap_ertm_send(struct sock
*sk
)
1084 struct sk_buff
*skb
, *tx_skb
;
1085 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1089 if (sk
->sk_state
!= BT_CONNECTED
)
1092 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
))) {
1094 if (pi
->remote_max_tx
&&
1095 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1096 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1100 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1102 bt_cb(skb
)->retries
++;
1104 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1105 control
&= L2CAP_CTRL_SAR
;
1107 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1108 control
|= L2CAP_CTRL_FINAL
;
1109 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1111 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1112 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1113 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1116 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1117 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1118 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1121 l2cap_do_send(sk
, tx_skb
);
1123 __mod_retrans_timer();
1125 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1126 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1128 pi
->unacked_frames
++;
1131 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1132 sk
->sk_send_head
= NULL
;
1134 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1142 static int l2cap_retransmit_frames(struct sock
*sk
)
1144 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1147 if (!skb_queue_empty(TX_QUEUE(sk
)))
1148 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
1150 pi
->next_tx_seq
= pi
->expected_ack_seq
;
1151 ret
= l2cap_ertm_send(sk
);
1155 static void l2cap_send_ack(struct l2cap_pinfo
*pi
)
1157 struct sock
*sk
= (struct sock
*)pi
;
1160 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1162 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
1163 control
|= L2CAP_SUPER_RCV_NOT_READY
;
1164 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
1165 l2cap_send_sframe(pi
, control
);
1169 if (l2cap_ertm_send(sk
) > 0)
1172 control
|= L2CAP_SUPER_RCV_READY
;
1173 l2cap_send_sframe(pi
, control
);
1176 static void l2cap_send_srejtail(struct sock
*sk
)
1178 struct srej_list
*tail
;
1181 control
= L2CAP_SUPER_SELECT_REJECT
;
1182 control
|= L2CAP_CTRL_FINAL
;
1184 tail
= list_entry(SREJ_LIST(sk
)->prev
, struct srej_list
, list
);
1185 control
|= tail
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1187 l2cap_send_sframe(l2cap_pi(sk
), control
);
1190 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1192 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1193 struct sk_buff
**frag
;
1196 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1202 /* Continuation fragments (no L2CAP header) */
1203 frag
= &skb_shinfo(skb
)->frag_list
;
1205 count
= min_t(unsigned int, conn
->mtu
, len
);
1207 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1210 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1216 frag
= &(*frag
)->next
;
1222 static struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1224 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1225 struct sk_buff
*skb
;
1226 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1227 struct l2cap_hdr
*lh
;
1229 BT_DBG("sk %p len %d", sk
, (int)len
);
1231 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1232 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1233 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1235 return ERR_PTR(err
);
1237 /* Create L2CAP header */
1238 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1239 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1240 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1241 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1243 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1244 if (unlikely(err
< 0)) {
1246 return ERR_PTR(err
);
1251 static struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1253 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1254 struct sk_buff
*skb
;
1255 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1256 struct l2cap_hdr
*lh
;
1258 BT_DBG("sk %p len %d", sk
, (int)len
);
1260 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1261 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1262 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1264 return ERR_PTR(err
);
1266 /* Create L2CAP header */
1267 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1268 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1269 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1271 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1272 if (unlikely(err
< 0)) {
1274 return ERR_PTR(err
);
1279 static struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1281 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1282 struct sk_buff
*skb
;
1283 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1284 struct l2cap_hdr
*lh
;
1286 BT_DBG("sk %p len %d", sk
, (int)len
);
1289 return ERR_PTR(-ENOTCONN
);
1294 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1297 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1298 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1299 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1301 return ERR_PTR(err
);
1303 /* Create L2CAP header */
1304 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1305 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1306 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1307 put_unaligned_le16(control
, skb_put(skb
, 2));
1309 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1311 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1312 if (unlikely(err
< 0)) {
1314 return ERR_PTR(err
);
1317 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1318 put_unaligned_le16(0, skb_put(skb
, 2));
1320 bt_cb(skb
)->retries
= 0;
1324 static inline int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1326 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1327 struct sk_buff
*skb
;
1328 struct sk_buff_head sar_queue
;
1332 skb_queue_head_init(&sar_queue
);
1333 control
= L2CAP_SDU_START
;
1334 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->remote_mps
, control
, len
);
1336 return PTR_ERR(skb
);
1338 __skb_queue_tail(&sar_queue
, skb
);
1339 len
-= pi
->remote_mps
;
1340 size
+= pi
->remote_mps
;
1345 if (len
> pi
->remote_mps
) {
1346 control
= L2CAP_SDU_CONTINUE
;
1347 buflen
= pi
->remote_mps
;
1349 control
= L2CAP_SDU_END
;
1353 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1355 skb_queue_purge(&sar_queue
);
1356 return PTR_ERR(skb
);
1359 __skb_queue_tail(&sar_queue
, skb
);
1363 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1364 if (sk
->sk_send_head
== NULL
)
1365 sk
->sk_send_head
= sar_queue
.next
;
1370 int l2cap_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
)
1372 struct sock
*sk
= sock
->sk
;
1373 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1374 struct sk_buff
*skb
;
1378 BT_DBG("sock %p, sk %p", sock
, sk
);
1380 err
= sock_error(sk
);
1384 if (msg
->msg_flags
& MSG_OOB
)
1389 if (sk
->sk_state
!= BT_CONNECTED
) {
1394 /* Connectionless channel */
1395 if (sk
->sk_type
== SOCK_DGRAM
) {
1396 skb
= l2cap_create_connless_pdu(sk
, msg
, len
);
1400 l2cap_do_send(sk
, skb
);
1407 case L2CAP_MODE_BASIC
:
1408 /* Check outgoing MTU */
1409 if (len
> pi
->omtu
) {
1414 /* Create a basic PDU */
1415 skb
= l2cap_create_basic_pdu(sk
, msg
, len
);
1421 l2cap_do_send(sk
, skb
);
1425 case L2CAP_MODE_ERTM
:
1426 case L2CAP_MODE_STREAMING
:
1427 /* Entire SDU fits into one PDU */
1428 if (len
<= pi
->remote_mps
) {
1429 control
= L2CAP_SDU_UNSEGMENTED
;
1430 skb
= l2cap_create_iframe_pdu(sk
, msg
, len
, control
, 0);
1435 __skb_queue_tail(TX_QUEUE(sk
), skb
);
1437 if (sk
->sk_send_head
== NULL
)
1438 sk
->sk_send_head
= skb
;
1441 /* Segment SDU into multiples PDUs */
1442 err
= l2cap_sar_segment_sdu(sk
, msg
, len
);
1447 if (pi
->mode
== L2CAP_MODE_STREAMING
) {
1448 l2cap_streaming_send(sk
);
1450 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
1451 (pi
->conn_state
& L2CAP_CONN_WAIT_F
)) {
1455 err
= l2cap_ertm_send(sk
);
1463 BT_DBG("bad state %1.1x", pi
->mode
);
1472 int l2cap_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
, int flags
)
1474 struct sock
*sk
= sock
->sk
;
1478 if (sk
->sk_state
== BT_CONNECT2
&& bt_sk(sk
)->defer_setup
) {
1479 struct l2cap_conn_rsp rsp
;
1480 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1483 sk
->sk_state
= BT_CONFIG
;
1485 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1486 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1487 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1488 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1489 l2cap_send_cmd(l2cap_pi(sk
)->conn
, l2cap_pi(sk
)->ident
,
1490 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1492 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
) {
1497 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
1498 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1499 l2cap_build_conf_req(sk
, buf
), buf
);
1500 l2cap_pi(sk
)->num_conf_req
++;
1508 if (sock
->type
== SOCK_STREAM
)
1509 return bt_sock_stream_recvmsg(iocb
, sock
, msg
, len
, flags
);
1511 return bt_sock_recvmsg(iocb
, sock
, msg
, len
, flags
);
1514 int l2cap_sock_shutdown(struct socket
*sock
, int how
)
1516 struct sock
*sk
= sock
->sk
;
1519 BT_DBG("sock %p, sk %p", sock
, sk
);
1525 if (!sk
->sk_shutdown
) {
1526 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
1527 err
= __l2cap_wait_ack(sk
);
1529 sk
->sk_shutdown
= SHUTDOWN_MASK
;
1530 l2cap_sock_clear_timer(sk
);
1531 __l2cap_sock_close(sk
, 0);
1533 if (sock_flag(sk
, SOCK_LINGER
) && sk
->sk_lingertime
)
1534 err
= bt_sock_wait_state(sk
, BT_CLOSED
,
1538 if (!err
&& sk
->sk_err
)
1545 static void l2cap_chan_ready(struct sock
*sk
)
1547 struct sock
*parent
= bt_sk(sk
)->parent
;
1549 BT_DBG("sk %p, parent %p", sk
, parent
);
1551 l2cap_pi(sk
)->conf_state
= 0;
1552 l2cap_sock_clear_timer(sk
);
1555 /* Outgoing channel.
1556 * Wake up socket sleeping on connect.
1558 sk
->sk_state
= BT_CONNECTED
;
1559 sk
->sk_state_change(sk
);
1561 /* Incoming channel.
1562 * Wake up socket sleeping on accept.
1564 parent
->sk_data_ready(parent
, 0);
1568 /* Copy frame to all raw sockets on that connection */
1569 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
1571 struct l2cap_chan_list
*l
= &conn
->chan_list
;
1572 struct sk_buff
*nskb
;
1575 BT_DBG("conn %p", conn
);
1577 read_lock(&l
->lock
);
1578 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
1579 if (sk
->sk_type
!= SOCK_RAW
)
1582 /* Don't send frame to the socket it came from */
1585 nskb
= skb_clone(skb
, GFP_ATOMIC
);
1589 if (sock_queue_rcv_skb(sk
, nskb
))
1592 read_unlock(&l
->lock
);
1595 /* ---- L2CAP signalling commands ---- */
1596 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
1597 u8 code
, u8 ident
, u16 dlen
, void *data
)
1599 struct sk_buff
*skb
, **frag
;
1600 struct l2cap_cmd_hdr
*cmd
;
1601 struct l2cap_hdr
*lh
;
1604 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1605 conn
, code
, ident
, dlen
);
1607 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
1608 count
= min_t(unsigned int, conn
->mtu
, len
);
1610 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
1614 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1615 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
1616 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
1618 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
1621 cmd
->len
= cpu_to_le16(dlen
);
1624 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
1625 memcpy(skb_put(skb
, count
), data
, count
);
1631 /* Continuation fragments (no L2CAP header) */
1632 frag
= &skb_shinfo(skb
)->frag_list
;
1634 count
= min_t(unsigned int, conn
->mtu
, len
);
1636 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
1640 memcpy(skb_put(*frag
, count
), data
, count
);
1645 frag
= &(*frag
)->next
;
1655 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
1657 struct l2cap_conf_opt
*opt
= *ptr
;
1660 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
1668 *val
= *((u8
*) opt
->val
);
1672 *val
= get_unaligned_le16(opt
->val
);
1676 *val
= get_unaligned_le32(opt
->val
);
1680 *val
= (unsigned long) opt
->val
;
1684 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
1688 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
1690 struct l2cap_conf_opt
*opt
= *ptr
;
1692 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
1699 *((u8
*) opt
->val
) = val
;
1703 put_unaligned_le16(val
, opt
->val
);
1707 put_unaligned_le32(val
, opt
->val
);
1711 memcpy(opt
->val
, (void *) val
, len
);
1715 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
/* Ack timer callback: flush a pending acknowledgement.
 * Runs in timer (softirq) context, so the socket must be locked with
 * the bh variant before touching channel state -- every other softirq
 * path in this file does the same (see the bh_lock_sock() users around
 * the signalling handlers).
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
1727 static inline void l2cap_ertm_init(struct sock
*sk
)
1729 l2cap_pi(sk
)->expected_ack_seq
= 0;
1730 l2cap_pi(sk
)->unacked_frames
= 0;
1731 l2cap_pi(sk
)->buffer_seq
= 0;
1732 l2cap_pi(sk
)->num_acked
= 0;
1733 l2cap_pi(sk
)->frames_sent
= 0;
1735 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
1736 l2cap_retrans_timeout
, (unsigned long) sk
);
1737 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
1738 l2cap_monitor_timeout
, (unsigned long) sk
);
1739 setup_timer(&l2cap_pi(sk
)->ack_timer
,
1740 l2cap_ack_timeout
, (unsigned long) sk
);
1742 __skb_queue_head_init(SREJ_QUEUE(sk
));
1743 __skb_queue_head_init(BUSY_QUEUE(sk
));
1745 INIT_WORK(&l2cap_pi(sk
)->busy_work
, l2cap_busy_work
);
1747 sk
->sk_backlog_rcv
= l2cap_ertm_data_rcv
;
1750 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
1753 case L2CAP_MODE_STREAMING
:
1754 case L2CAP_MODE_ERTM
:
1755 if (l2cap_mode_supported(mode
, remote_feat_mask
))
1759 return L2CAP_MODE_BASIC
;
1763 static int l2cap_build_conf_req(struct sock
*sk
, void *data
)
1765 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1766 struct l2cap_conf_req
*req
= data
;
1767 struct l2cap_conf_rfc rfc
= { .mode
= pi
->mode
};
1768 void *ptr
= req
->data
;
1770 BT_DBG("sk %p", sk
);
1772 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
1776 case L2CAP_MODE_STREAMING
:
1777 case L2CAP_MODE_ERTM
:
1778 if (pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)
1783 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
1788 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
1789 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
1792 case L2CAP_MODE_BASIC
:
1793 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
1794 !(pi
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
1797 rfc
.mode
= L2CAP_MODE_BASIC
;
1799 rfc
.max_transmit
= 0;
1800 rfc
.retrans_timeout
= 0;
1801 rfc
.monitor_timeout
= 0;
1802 rfc
.max_pdu_size
= 0;
1804 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1805 (unsigned long) &rfc
);
1808 case L2CAP_MODE_ERTM
:
1809 rfc
.mode
= L2CAP_MODE_ERTM
;
1810 rfc
.txwin_size
= pi
->tx_win
;
1811 rfc
.max_transmit
= pi
->max_tx
;
1812 rfc
.retrans_timeout
= 0;
1813 rfc
.monitor_timeout
= 0;
1814 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
1815 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
1816 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
1818 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1819 (unsigned long) &rfc
);
1821 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
1824 if (pi
->fcs
== L2CAP_FCS_NONE
||
1825 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
1826 pi
->fcs
= L2CAP_FCS_NONE
;
1827 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
1831 case L2CAP_MODE_STREAMING
:
1832 rfc
.mode
= L2CAP_MODE_STREAMING
;
1834 rfc
.max_transmit
= 0;
1835 rfc
.retrans_timeout
= 0;
1836 rfc
.monitor_timeout
= 0;
1837 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
1838 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
1839 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
1841 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1842 (unsigned long) &rfc
);
1844 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
1847 if (pi
->fcs
== L2CAP_FCS_NONE
||
1848 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
1849 pi
->fcs
= L2CAP_FCS_NONE
;
1850 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
1855 /* FIXME: Need actual value of the flush timeout */
1856 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
1857 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
1859 req
->dcid
= cpu_to_le16(pi
->dcid
);
1860 req
->flags
= cpu_to_le16(0);
1865 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
1867 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1868 struct l2cap_conf_rsp
*rsp
= data
;
1869 void *ptr
= rsp
->data
;
1870 void *req
= pi
->conf_req
;
1871 int len
= pi
->conf_len
;
1872 int type
, hint
, olen
;
1874 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
1875 u16 mtu
= L2CAP_DEFAULT_MTU
;
1876 u16 result
= L2CAP_CONF_SUCCESS
;
1878 BT_DBG("sk %p", sk
);
1880 while (len
>= L2CAP_CONF_OPT_SIZE
) {
1881 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
1883 hint
= type
& L2CAP_CONF_HINT
;
1884 type
&= L2CAP_CONF_MASK
;
1887 case L2CAP_CONF_MTU
:
1891 case L2CAP_CONF_FLUSH_TO
:
1895 case L2CAP_CONF_QOS
:
1898 case L2CAP_CONF_RFC
:
1899 if (olen
== sizeof(rfc
))
1900 memcpy(&rfc
, (void *) val
, olen
);
1903 case L2CAP_CONF_FCS
:
1904 if (val
== L2CAP_FCS_NONE
)
1905 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
1913 result
= L2CAP_CONF_UNKNOWN
;
1914 *((u8
*) ptr
++) = type
;
1919 if (pi
->num_conf_rsp
|| pi
->num_conf_req
> 1)
1923 case L2CAP_MODE_STREAMING
:
1924 case L2CAP_MODE_ERTM
:
1925 if (!(pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)) {
1926 pi
->mode
= l2cap_select_mode(rfc
.mode
,
1927 pi
->conn
->feat_mask
);
1931 if (pi
->mode
!= rfc
.mode
)
1932 return -ECONNREFUSED
;
1938 if (pi
->mode
!= rfc
.mode
) {
1939 result
= L2CAP_CONF_UNACCEPT
;
1940 rfc
.mode
= pi
->mode
;
1942 if (pi
->num_conf_rsp
== 1)
1943 return -ECONNREFUSED
;
1945 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1946 sizeof(rfc
), (unsigned long) &rfc
);
1950 if (result
== L2CAP_CONF_SUCCESS
) {
1951 /* Configure output options and let the other side know
1952 * which ones we don't like. */
1954 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
1955 result
= L2CAP_CONF_UNACCEPT
;
1958 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
1960 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
1963 case L2CAP_MODE_BASIC
:
1964 pi
->fcs
= L2CAP_FCS_NONE
;
1965 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
1968 case L2CAP_MODE_ERTM
:
1969 pi
->remote_tx_win
= rfc
.txwin_size
;
1970 pi
->remote_max_tx
= rfc
.max_transmit
;
1972 if (le16_to_cpu(rfc
.max_pdu_size
) > pi
->conn
->mtu
- 10)
1973 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
1975 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
1977 rfc
.retrans_timeout
=
1978 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
1979 rfc
.monitor_timeout
=
1980 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
1982 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
1984 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1985 sizeof(rfc
), (unsigned long) &rfc
);
1989 case L2CAP_MODE_STREAMING
:
1990 if (le16_to_cpu(rfc
.max_pdu_size
) > pi
->conn
->mtu
- 10)
1991 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
1993 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
1995 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
1997 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1998 sizeof(rfc
), (unsigned long) &rfc
);
2003 result
= L2CAP_CONF_UNACCEPT
;
2005 memset(&rfc
, 0, sizeof(rfc
));
2006 rfc
.mode
= pi
->mode
;
2009 if (result
== L2CAP_CONF_SUCCESS
)
2010 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2012 rsp
->scid
= cpu_to_le16(pi
->dcid
);
2013 rsp
->result
= cpu_to_le16(result
);
2014 rsp
->flags
= cpu_to_le16(0x0000);
2019 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
2021 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2022 struct l2cap_conf_req
*req
= data
;
2023 void *ptr
= req
->data
;
2026 struct l2cap_conf_rfc rfc
;
2028 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
2030 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2031 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2034 case L2CAP_CONF_MTU
:
2035 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2036 *result
= L2CAP_CONF_UNACCEPT
;
2037 pi
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
2040 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2043 case L2CAP_CONF_FLUSH_TO
:
2045 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2049 case L2CAP_CONF_RFC
:
2050 if (olen
== sizeof(rfc
))
2051 memcpy(&rfc
, (void *)val
, olen
);
2053 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2054 rfc
.mode
!= pi
->mode
)
2055 return -ECONNREFUSED
;
2059 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2060 sizeof(rfc
), (unsigned long) &rfc
);
2065 if (pi
->mode
== L2CAP_MODE_BASIC
&& pi
->mode
!= rfc
.mode
)
2066 return -ECONNREFUSED
;
2068 pi
->mode
= rfc
.mode
;
2070 if (*result
== L2CAP_CONF_SUCCESS
) {
2072 case L2CAP_MODE_ERTM
:
2073 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2074 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2075 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2077 case L2CAP_MODE_STREAMING
:
2078 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2082 req
->dcid
= cpu_to_le16(pi
->dcid
);
2083 req
->flags
= cpu_to_le16(0x0000);
2088 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
2090 struct l2cap_conf_rsp
*rsp
= data
;
2091 void *ptr
= rsp
->data
;
2093 BT_DBG("sk %p", sk
);
2095 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2096 rsp
->result
= cpu_to_le16(result
);
2097 rsp
->flags
= cpu_to_le16(flags
);
2102 static void l2cap_conf_rfc_get(struct sock
*sk
, void *rsp
, int len
)
2104 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2107 struct l2cap_conf_rfc rfc
;
2109 BT_DBG("sk %p, rsp %p, len %d", sk
, rsp
, len
);
2111 if ((pi
->mode
!= L2CAP_MODE_ERTM
) && (pi
->mode
!= L2CAP_MODE_STREAMING
))
2114 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2115 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2118 case L2CAP_CONF_RFC
:
2119 if (olen
== sizeof(rfc
))
2120 memcpy(&rfc
, (void *)val
, olen
);
2127 case L2CAP_MODE_ERTM
:
2128 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2129 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2130 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2132 case L2CAP_MODE_STREAMING
:
2133 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2137 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2139 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2141 if (rej
->reason
!= 0x0000)
2144 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2145 cmd
->ident
== conn
->info_ident
) {
2146 del_timer(&conn
->info_timer
);
2148 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2149 conn
->info_ident
= 0;
2151 l2cap_conn_start(conn
);
2157 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2159 struct l2cap_chan_list
*list
= &conn
->chan_list
;
2160 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2161 struct l2cap_conn_rsp rsp
;
2162 struct sock
*parent
, *sk
= NULL
;
2163 int result
, status
= L2CAP_CS_NO_INFO
;
2165 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2166 __le16 psm
= req
->psm
;
2168 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2170 /* Check if we have socket listening on psm */
2171 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2173 result
= L2CAP_CR_BAD_PSM
;
2177 bh_lock_sock(parent
);
2179 /* Check if the ACL is secure enough (if not SDP) */
2180 if (psm
!= cpu_to_le16(0x0001) &&
2181 !hci_conn_check_link_mode(conn
->hcon
)) {
2182 conn
->disc_reason
= 0x05;
2183 result
= L2CAP_CR_SEC_BLOCK
;
2187 result
= L2CAP_CR_NO_MEM
;
2189 /* Check for backlog size */
2190 if (sk_acceptq_is_full(parent
)) {
2191 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2195 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2199 write_lock_bh(&list
->lock
);
2201 /* Check if we already have channel with that dcid */
2202 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2203 write_unlock_bh(&list
->lock
);
2204 sock_set_flag(sk
, SOCK_ZAPPED
);
2205 l2cap_sock_kill(sk
);
2209 hci_conn_hold(conn
->hcon
);
2211 l2cap_sock_init(sk
, parent
);
2212 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2213 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2214 l2cap_pi(sk
)->psm
= psm
;
2215 l2cap_pi(sk
)->dcid
= scid
;
2217 __l2cap_chan_add(conn
, sk
, parent
);
2218 dcid
= l2cap_pi(sk
)->scid
;
2220 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2222 l2cap_pi(sk
)->ident
= cmd
->ident
;
2224 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2225 if (l2cap_check_security(sk
)) {
2226 if (bt_sk(sk
)->defer_setup
) {
2227 sk
->sk_state
= BT_CONNECT2
;
2228 result
= L2CAP_CR_PEND
;
2229 status
= L2CAP_CS_AUTHOR_PEND
;
2230 parent
->sk_data_ready(parent
, 0);
2232 sk
->sk_state
= BT_CONFIG
;
2233 result
= L2CAP_CR_SUCCESS
;
2234 status
= L2CAP_CS_NO_INFO
;
2237 sk
->sk_state
= BT_CONNECT2
;
2238 result
= L2CAP_CR_PEND
;
2239 status
= L2CAP_CS_AUTHEN_PEND
;
2242 sk
->sk_state
= BT_CONNECT2
;
2243 result
= L2CAP_CR_PEND
;
2244 status
= L2CAP_CS_NO_INFO
;
2247 write_unlock_bh(&list
->lock
);
2250 bh_unlock_sock(parent
);
2253 rsp
.scid
= cpu_to_le16(scid
);
2254 rsp
.dcid
= cpu_to_le16(dcid
);
2255 rsp
.result
= cpu_to_le16(result
);
2256 rsp
.status
= cpu_to_le16(status
);
2257 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2259 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2260 struct l2cap_info_req info
;
2261 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2263 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2264 conn
->info_ident
= l2cap_get_ident(conn
);
2266 mod_timer(&conn
->info_timer
, jiffies
+
2267 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2269 l2cap_send_cmd(conn
, conn
->info_ident
,
2270 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2273 if (sk
&& !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
) &&
2274 result
== L2CAP_CR_SUCCESS
) {
2276 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2277 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2278 l2cap_build_conf_req(sk
, buf
), buf
);
2279 l2cap_pi(sk
)->num_conf_req
++;
2285 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2287 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2288 u16 scid
, dcid
, result
, status
;
2292 scid
= __le16_to_cpu(rsp
->scid
);
2293 dcid
= __le16_to_cpu(rsp
->dcid
);
2294 result
= __le16_to_cpu(rsp
->result
);
2295 status
= __le16_to_cpu(rsp
->status
);
2297 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2300 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2304 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
2310 case L2CAP_CR_SUCCESS
:
2311 sk
->sk_state
= BT_CONFIG
;
2312 l2cap_pi(sk
)->ident
= 0;
2313 l2cap_pi(sk
)->dcid
= dcid
;
2314 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
2316 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)
2319 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2321 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2322 l2cap_build_conf_req(sk
, req
), req
);
2323 l2cap_pi(sk
)->num_conf_req
++;
2327 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
2331 /* don't delete l2cap channel if sk is owned by user */
2332 if (sock_owned_by_user(sk
)) {
2333 sk
->sk_state
= BT_DISCONN
;
2334 l2cap_sock_clear_timer(sk
);
2335 l2cap_sock_set_timer(sk
, HZ
/ 5);
2339 l2cap_chan_del(sk
, ECONNREFUSED
);
2347 static inline void set_default_fcs(struct l2cap_pinfo
*pi
)
2349 /* FCS is enabled only in ERTM or streaming mode, if one or both
2352 if (pi
->mode
!= L2CAP_MODE_ERTM
&& pi
->mode
!= L2CAP_MODE_STREAMING
)
2353 pi
->fcs
= L2CAP_FCS_NONE
;
2354 else if (!(pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
))
2355 pi
->fcs
= L2CAP_FCS_CRC16
;
2358 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2360 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2366 dcid
= __le16_to_cpu(req
->dcid
);
2367 flags
= __le16_to_cpu(req
->flags
);
2369 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2371 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
2375 if (sk
->sk_state
!= BT_CONFIG
) {
2376 struct l2cap_cmd_rej rej
;
2378 rej
.reason
= cpu_to_le16(0x0002);
2379 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
2384 /* Reject if config buffer is too small. */
2385 len
= cmd_len
- sizeof(*req
);
2386 if (l2cap_pi(sk
)->conf_len
+ len
> sizeof(l2cap_pi(sk
)->conf_req
)) {
2387 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2388 l2cap_build_conf_rsp(sk
, rsp
,
2389 L2CAP_CONF_REJECT
, flags
), rsp
);
2394 memcpy(l2cap_pi(sk
)->conf_req
+ l2cap_pi(sk
)->conf_len
, req
->data
, len
);
2395 l2cap_pi(sk
)->conf_len
+= len
;
2397 if (flags
& 0x0001) {
2398 /* Incomplete config. Send empty response. */
2399 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2400 l2cap_build_conf_rsp(sk
, rsp
,
2401 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2405 /* Complete config. */
2406 len
= l2cap_parse_conf_req(sk
, rsp
);
2408 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
2412 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2413 l2cap_pi(sk
)->num_conf_rsp
++;
2415 /* Reset config buffer. */
2416 l2cap_pi(sk
)->conf_len
= 0;
2418 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
2421 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
2422 set_default_fcs(l2cap_pi(sk
));
2424 sk
->sk_state
= BT_CONNECTED
;
2426 l2cap_pi(sk
)->next_tx_seq
= 0;
2427 l2cap_pi(sk
)->expected_tx_seq
= 0;
2428 __skb_queue_head_init(TX_QUEUE(sk
));
2429 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2430 l2cap_ertm_init(sk
);
2432 l2cap_chan_ready(sk
);
2436 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
2438 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2439 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2440 l2cap_build_conf_req(sk
, buf
), buf
);
2441 l2cap_pi(sk
)->num_conf_req
++;
2449 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2451 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2452 u16 scid
, flags
, result
;
2454 int len
= cmd
->len
- sizeof(*rsp
);
2456 scid
= __le16_to_cpu(rsp
->scid
);
2457 flags
= __le16_to_cpu(rsp
->flags
);
2458 result
= __le16_to_cpu(rsp
->result
);
2460 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2461 scid
, flags
, result
);
2463 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2468 case L2CAP_CONF_SUCCESS
:
2469 l2cap_conf_rfc_get(sk
, rsp
->data
, len
);
2472 case L2CAP_CONF_UNACCEPT
:
2473 if (l2cap_pi(sk
)->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2476 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
2477 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
2481 /* throw out any old stored conf requests */
2482 result
= L2CAP_CONF_SUCCESS
;
2483 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
2486 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
2490 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2491 L2CAP_CONF_REQ
, len
, req
);
2492 l2cap_pi(sk
)->num_conf_req
++;
2493 if (result
!= L2CAP_CONF_SUCCESS
)
2499 sk
->sk_err
= ECONNRESET
;
2500 l2cap_sock_set_timer(sk
, HZ
* 5);
2501 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
2508 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
2510 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
2511 set_default_fcs(l2cap_pi(sk
));
2513 sk
->sk_state
= BT_CONNECTED
;
2514 l2cap_pi(sk
)->next_tx_seq
= 0;
2515 l2cap_pi(sk
)->expected_tx_seq
= 0;
2516 __skb_queue_head_init(TX_QUEUE(sk
));
2517 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2518 l2cap_ertm_init(sk
);
2520 l2cap_chan_ready(sk
);
2528 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2530 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
2531 struct l2cap_disconn_rsp rsp
;
2535 scid
= __le16_to_cpu(req
->scid
);
2536 dcid
= __le16_to_cpu(req
->dcid
);
2538 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
2540 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
2544 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
2545 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2546 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
2548 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2550 /* don't delete l2cap channel if sk is owned by user */
2551 if (sock_owned_by_user(sk
)) {
2552 sk
->sk_state
= BT_DISCONN
;
2553 l2cap_sock_clear_timer(sk
);
2554 l2cap_sock_set_timer(sk
, HZ
/ 5);
2559 l2cap_chan_del(sk
, ECONNRESET
);
2562 l2cap_sock_kill(sk
);
2566 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2568 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
2572 scid
= __le16_to_cpu(rsp
->scid
);
2573 dcid
= __le16_to_cpu(rsp
->dcid
);
2575 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
2577 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2581 /* don't delete l2cap channel if sk is owned by user */
2582 if (sock_owned_by_user(sk
)) {
2583 sk
->sk_state
= BT_DISCONN
;
2584 l2cap_sock_clear_timer(sk
);
2585 l2cap_sock_set_timer(sk
, HZ
/ 5);
2590 l2cap_chan_del(sk
, 0);
2593 l2cap_sock_kill(sk
);
2597 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2599 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
2602 type
= __le16_to_cpu(req
->type
);
2604 BT_DBG("type 0x%4.4x", type
);
2606 if (type
== L2CAP_IT_FEAT_MASK
) {
2608 u32 feat_mask
= l2cap_feat_mask
;
2609 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2610 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2611 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2613 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
2615 put_unaligned_le32(feat_mask
, rsp
->data
);
2616 l2cap_send_cmd(conn
, cmd
->ident
,
2617 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2618 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2620 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2621 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2622 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2623 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
2624 l2cap_send_cmd(conn
, cmd
->ident
,
2625 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2627 struct l2cap_info_rsp rsp
;
2628 rsp
.type
= cpu_to_le16(type
);
2629 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
2630 l2cap_send_cmd(conn
, cmd
->ident
,
2631 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
2637 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2639 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
2642 type
= __le16_to_cpu(rsp
->type
);
2643 result
= __le16_to_cpu(rsp
->result
);
2645 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
2647 del_timer(&conn
->info_timer
);
2649 if (result
!= L2CAP_IR_SUCCESS
) {
2650 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2651 conn
->info_ident
= 0;
2653 l2cap_conn_start(conn
);
2658 if (type
== L2CAP_IT_FEAT_MASK
) {
2659 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
2661 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
2662 struct l2cap_info_req req
;
2663 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2665 conn
->info_ident
= l2cap_get_ident(conn
);
2667 l2cap_send_cmd(conn
, conn
->info_ident
,
2668 L2CAP_INFO_REQ
, sizeof(req
), &req
);
2670 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2671 conn
->info_ident
= 0;
2673 l2cap_conn_start(conn
);
2675 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2676 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2677 conn
->info_ident
= 0;
2679 l2cap_conn_start(conn
);
2685 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2687 u8
*data
= skb
->data
;
2689 struct l2cap_cmd_hdr cmd
;
2692 l2cap_raw_recv(conn
, skb
);
2694 while (len
>= L2CAP_CMD_HDR_SIZE
) {
2696 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
2697 data
+= L2CAP_CMD_HDR_SIZE
;
2698 len
-= L2CAP_CMD_HDR_SIZE
;
2700 cmd_len
= le16_to_cpu(cmd
.len
);
2702 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
2704 if (cmd_len
> len
|| !cmd
.ident
) {
2705 BT_DBG("corrupted command");
2710 case L2CAP_COMMAND_REJ
:
2711 l2cap_command_rej(conn
, &cmd
, data
);
2714 case L2CAP_CONN_REQ
:
2715 err
= l2cap_connect_req(conn
, &cmd
, data
);
2718 case L2CAP_CONN_RSP
:
2719 err
= l2cap_connect_rsp(conn
, &cmd
, data
);
2722 case L2CAP_CONF_REQ
:
2723 err
= l2cap_config_req(conn
, &cmd
, cmd_len
, data
);
2726 case L2CAP_CONF_RSP
:
2727 err
= l2cap_config_rsp(conn
, &cmd
, data
);
2730 case L2CAP_DISCONN_REQ
:
2731 err
= l2cap_disconnect_req(conn
, &cmd
, data
);
2734 case L2CAP_DISCONN_RSP
:
2735 err
= l2cap_disconnect_rsp(conn
, &cmd
, data
);
2738 case L2CAP_ECHO_REQ
:
2739 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
2742 case L2CAP_ECHO_RSP
:
2745 case L2CAP_INFO_REQ
:
2746 err
= l2cap_information_req(conn
, &cmd
, data
);
2749 case L2CAP_INFO_RSP
:
2750 err
= l2cap_information_rsp(conn
, &cmd
, data
);
2754 BT_ERR("Unknown signaling command 0x%2.2x", cmd
.code
);
2760 struct l2cap_cmd_rej rej
;
2761 BT_DBG("error %d", err
);
2763 /* FIXME: Map err to a valid reason */
2764 rej
.reason
= cpu_to_le16(0);
2765 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
2775 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
2777 u16 our_fcs
, rcv_fcs
;
2778 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
2780 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
2781 skb_trim(skb
, skb
->len
- 2);
2782 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
2783 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
2785 if (our_fcs
!= rcv_fcs
)
2791 static inline void l2cap_send_i_or_rr_or_rnr(struct sock
*sk
)
2793 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2796 pi
->frames_sent
= 0;
2798 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
2800 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
2801 control
|= L2CAP_SUPER_RCV_NOT_READY
;
2802 l2cap_send_sframe(pi
, control
);
2803 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
2806 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)
2807 l2cap_retransmit_frames(sk
);
2809 l2cap_ertm_send(sk
);
2811 if (!(pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) &&
2812 pi
->frames_sent
== 0) {
2813 control
|= L2CAP_SUPER_RCV_READY
;
2814 l2cap_send_sframe(pi
, control
);
2818 static int l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
2820 struct sk_buff
*next_skb
;
2821 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2822 int tx_seq_offset
, next_tx_seq_offset
;
2824 bt_cb(skb
)->tx_seq
= tx_seq
;
2825 bt_cb(skb
)->sar
= sar
;
2827 next_skb
= skb_peek(SREJ_QUEUE(sk
));
2829 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
2833 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
2834 if (tx_seq_offset
< 0)
2835 tx_seq_offset
+= 64;
2838 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
2841 next_tx_seq_offset
= (bt_cb(next_skb
)->tx_seq
-
2842 pi
->buffer_seq
) % 64;
2843 if (next_tx_seq_offset
< 0)
2844 next_tx_seq_offset
+= 64;
2846 if (next_tx_seq_offset
> tx_seq_offset
) {
2847 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
2851 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
2854 } while ((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
2856 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
2861 static int l2cap_ertm_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
2863 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2864 struct sk_buff
*_skb
;
2867 switch (control
& L2CAP_CTRL_SAR
) {
2868 case L2CAP_SDU_UNSEGMENTED
:
2869 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
2872 err
= sock_queue_rcv_skb(sk
, skb
);
2878 case L2CAP_SDU_START
:
2879 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
2882 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
2884 if (pi
->sdu_len
> pi
->imtu
)
2887 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
2891 /* pull sdu_len bytes only after alloc, because of Local Busy
2892 * condition we have to be sure that this will be executed
2893 * only once, i.e., when alloc does not fail */
2896 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
2898 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
2899 pi
->partial_sdu_len
= skb
->len
;
2902 case L2CAP_SDU_CONTINUE
:
2903 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
2909 pi
->partial_sdu_len
+= skb
->len
;
2910 if (pi
->partial_sdu_len
> pi
->sdu_len
)
2913 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
2918 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
2924 if (!(pi
->conn_state
& L2CAP_CONN_SAR_RETRY
)) {
2925 pi
->partial_sdu_len
+= skb
->len
;
2927 if (pi
->partial_sdu_len
> pi
->imtu
)
2930 if (pi
->partial_sdu_len
!= pi
->sdu_len
)
2933 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
2936 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
2938 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
2942 err
= sock_queue_rcv_skb(sk
, _skb
);
2945 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
2949 pi
->conn_state
&= ~L2CAP_CONN_SAR_RETRY
;
2950 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
2964 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
2969 static int l2cap_try_push_rx_skb(struct sock
*sk
)
2971 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2972 struct sk_buff
*skb
;
2976 while ((skb
= skb_dequeue(BUSY_QUEUE(sk
)))) {
2977 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
2978 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
2980 skb_queue_head(BUSY_QUEUE(sk
), skb
);
2984 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
2987 if (!(pi
->conn_state
& L2CAP_CONN_RNR_SENT
))
2990 control
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
2991 control
|= L2CAP_SUPER_RCV_READY
| L2CAP_CTRL_POLL
;
2992 l2cap_send_sframe(pi
, control
);
2993 l2cap_pi(sk
)->retry_count
= 1;
2995 del_timer(&pi
->retrans_timer
);
2996 __mod_monitor_timer();
2998 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
3001 pi
->conn_state
&= ~L2CAP_CONN_LOCAL_BUSY
;
3002 pi
->conn_state
&= ~L2CAP_CONN_RNR_SENT
;
3004 BT_DBG("sk %p, Exit local busy", sk
);
3009 static void l2cap_busy_work(struct work_struct
*work
)
3011 DECLARE_WAITQUEUE(wait
, current
);
3012 struct l2cap_pinfo
*pi
=
3013 container_of(work
, struct l2cap_pinfo
, busy_work
);
3014 struct sock
*sk
= (struct sock
*)pi
;
3015 int n_tries
= 0, timeo
= HZ
/5, err
;
3016 struct sk_buff
*skb
;
3020 add_wait_queue(sk_sleep(sk
), &wait
);
3021 while ((skb
= skb_peek(BUSY_QUEUE(sk
)))) {
3022 set_current_state(TASK_INTERRUPTIBLE
);
3024 if (n_tries
++ > L2CAP_LOCAL_BUSY_TRIES
) {
3026 l2cap_send_disconn_req(pi
->conn
, sk
, EBUSY
);
3033 if (signal_pending(current
)) {
3034 err
= sock_intr_errno(timeo
);
3039 timeo
= schedule_timeout(timeo
);
3042 err
= sock_error(sk
);
3046 if (l2cap_try_push_rx_skb(sk
) == 0)
3050 set_current_state(TASK_RUNNING
);
3051 remove_wait_queue(sk_sleep(sk
), &wait
);
3056 static int l2cap_push_rx_skb(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3058 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3061 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3062 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3063 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3064 return l2cap_try_push_rx_skb(sk
);
3069 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3071 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3075 /* Busy Condition */
3076 BT_DBG("sk %p, Enter local busy", sk
);
3078 pi
->conn_state
|= L2CAP_CONN_LOCAL_BUSY
;
3079 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3080 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3082 sctrl
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3083 sctrl
|= L2CAP_SUPER_RCV_NOT_READY
;
3084 l2cap_send_sframe(pi
, sctrl
);
3086 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3088 del_timer(&pi
->ack_timer
);
3090 queue_work(_busy_wq
, &pi
->busy_work
);
3095 static int l2cap_streaming_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3097 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3098 struct sk_buff
*_skb
;
3102 * TODO: We have to notify the userland if some data is lost with the
3106 switch (control
& L2CAP_CTRL_SAR
) {
3107 case L2CAP_SDU_UNSEGMENTED
:
3108 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3113 err
= sock_queue_rcv_skb(sk
, skb
);
3119 case L2CAP_SDU_START
:
3120 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3125 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3128 if (pi
->sdu_len
> pi
->imtu
) {
3133 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3139 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3141 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3142 pi
->partial_sdu_len
= skb
->len
;
3146 case L2CAP_SDU_CONTINUE
:
3147 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3150 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3152 pi
->partial_sdu_len
+= skb
->len
;
3153 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3161 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3164 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3166 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3167 pi
->partial_sdu_len
+= skb
->len
;
3169 if (pi
->partial_sdu_len
> pi
->imtu
)
3172 if (pi
->partial_sdu_len
== pi
->sdu_len
) {
3173 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3174 err
= sock_queue_rcv_skb(sk
, _skb
);
3189 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3191 struct sk_buff
*skb
;
3194 while ((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
3195 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3198 skb
= skb_dequeue(SREJ_QUEUE(sk
));
3199 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3200 l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3201 l2cap_pi(sk
)->buffer_seq_srej
=
3202 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
3203 tx_seq
= (tx_seq
+ 1) % 64;
3207 static void l2cap_resend_srejframe(struct sock
*sk
, u8 tx_seq
)
3209 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3210 struct srej_list
*l
, *tmp
;
3213 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
3214 if (l
->tx_seq
== tx_seq
) {
3219 control
= L2CAP_SUPER_SELECT_REJECT
;
3220 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3221 l2cap_send_sframe(pi
, control
);
3223 list_add_tail(&l
->list
, SREJ_LIST(sk
));
3227 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3229 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3230 struct srej_list
*new;
3233 while (tx_seq
!= pi
->expected_tx_seq
) {
3234 control
= L2CAP_SUPER_SELECT_REJECT
;
3235 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3236 l2cap_send_sframe(pi
, control
);
3238 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3239 new->tx_seq
= pi
->expected_tx_seq
;
3240 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3241 list_add_tail(&new->list
, SREJ_LIST(sk
));
3243 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3246 static inline int l2cap_data_channel_iframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3248 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3249 u8 tx_seq
= __get_txseq(rx_control
);
3250 u8 req_seq
= __get_reqseq(rx_control
);
3251 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3252 int tx_seq_offset
, expected_tx_seq_offset
;
3253 int num_to_ack
= (pi
->tx_win
/6) + 1;
3256 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk
, skb
->len
, tx_seq
,
3259 if (L2CAP_CTRL_FINAL
& rx_control
&&
3260 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
3261 del_timer(&pi
->monitor_timer
);
3262 if (pi
->unacked_frames
> 0)
3263 __mod_retrans_timer();
3264 pi
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3267 pi
->expected_ack_seq
= req_seq
;
3268 l2cap_drop_acked_frames(sk
);
3270 if (tx_seq
== pi
->expected_tx_seq
)
3273 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3274 if (tx_seq_offset
< 0)
3275 tx_seq_offset
+= 64;
3277 /* invalid tx_seq */
3278 if (tx_seq_offset
>= pi
->tx_win
) {
3279 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3283 if (pi
->conn_state
== L2CAP_CONN_LOCAL_BUSY
)
3286 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3287 struct srej_list
*first
;
3289 first
= list_first_entry(SREJ_LIST(sk
),
3290 struct srej_list
, list
);
3291 if (tx_seq
== first
->tx_seq
) {
3292 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3293 l2cap_check_srej_gap(sk
, tx_seq
);
3295 list_del(&first
->list
);
3298 if (list_empty(SREJ_LIST(sk
))) {
3299 pi
->buffer_seq
= pi
->buffer_seq_srej
;
3300 pi
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3302 BT_DBG("sk %p, Exit SREJ_SENT", sk
);
3305 struct srej_list
*l
;
3307 /* duplicated tx_seq */
3308 if (l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
) < 0)
3311 list_for_each_entry(l
, SREJ_LIST(sk
), list
) {
3312 if (l
->tx_seq
== tx_seq
) {
3313 l2cap_resend_srejframe(sk
, tx_seq
);
3317 l2cap_send_srejframe(sk
, tx_seq
);
3320 expected_tx_seq_offset
=
3321 (pi
->expected_tx_seq
- pi
->buffer_seq
) % 64;
3322 if (expected_tx_seq_offset
< 0)
3323 expected_tx_seq_offset
+= 64;
3325 /* duplicated tx_seq */
3326 if (tx_seq_offset
< expected_tx_seq_offset
)
3329 pi
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
3331 BT_DBG("sk %p, Enter SREJ", sk
);
3333 INIT_LIST_HEAD(SREJ_LIST(sk
));
3334 pi
->buffer_seq_srej
= pi
->buffer_seq
;
3336 __skb_queue_head_init(SREJ_QUEUE(sk
));
3337 __skb_queue_head_init(BUSY_QUEUE(sk
));
3338 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3340 pi
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
3342 l2cap_send_srejframe(sk
, tx_seq
);
3344 del_timer(&pi
->ack_timer
);
3349 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3351 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3352 bt_cb(skb
)->tx_seq
= tx_seq
;
3353 bt_cb(skb
)->sar
= sar
;
3354 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3358 err
= l2cap_push_rx_skb(sk
, skb
, rx_control
);
3362 if (rx_control
& L2CAP_CTRL_FINAL
) {
3363 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3364 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3366 l2cap_retransmit_frames(sk
);
3371 pi
->num_acked
= (pi
->num_acked
+ 1) % num_to_ack
;
3372 if (pi
->num_acked
== num_to_ack
- 1)
3382 static inline void l2cap_data_channel_rrframe(struct sock
*sk
, u16 rx_control
)
3384 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3386 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, __get_reqseq(rx_control
),
3389 pi
->expected_ack_seq
= __get_reqseq(rx_control
);
3390 l2cap_drop_acked_frames(sk
);
3392 if (rx_control
& L2CAP_CTRL_POLL
) {
3393 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3394 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3395 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3396 (pi
->unacked_frames
> 0))
3397 __mod_retrans_timer();
3399 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3400 l2cap_send_srejtail(sk
);
3402 l2cap_send_i_or_rr_or_rnr(sk
);
3405 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3406 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3408 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3409 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3411 l2cap_retransmit_frames(sk
);
3414 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3415 (pi
->unacked_frames
> 0))
3416 __mod_retrans_timer();
3418 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3419 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
)
3422 l2cap_ertm_send(sk
);
3426 static inline void l2cap_data_channel_rejframe(struct sock
*sk
, u16 rx_control
)
3428 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3429 u8 tx_seq
= __get_reqseq(rx_control
);
3431 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
3433 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3435 pi
->expected_ack_seq
= tx_seq
;
3436 l2cap_drop_acked_frames(sk
);
3438 if (rx_control
& L2CAP_CTRL_FINAL
) {
3439 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3440 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3442 l2cap_retransmit_frames(sk
);
3444 l2cap_retransmit_frames(sk
);
3446 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
3447 pi
->conn_state
|= L2CAP_CONN_REJ_ACT
;
3450 static inline void l2cap_data_channel_srejframe(struct sock
*sk
, u16 rx_control
)
3452 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3453 u8 tx_seq
= __get_reqseq(rx_control
);
3455 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
3457 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3459 if (rx_control
& L2CAP_CTRL_POLL
) {
3460 pi
->expected_ack_seq
= tx_seq
;
3461 l2cap_drop_acked_frames(sk
);
3463 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3464 l2cap_retransmit_one_frame(sk
, tx_seq
);
3466 l2cap_ertm_send(sk
);
3468 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3469 pi
->srej_save_reqseq
= tx_seq
;
3470 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3472 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3473 if ((pi
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
3474 pi
->srej_save_reqseq
== tx_seq
)
3475 pi
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
3477 l2cap_retransmit_one_frame(sk
, tx_seq
);
3479 l2cap_retransmit_one_frame(sk
, tx_seq
);
3480 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3481 pi
->srej_save_reqseq
= tx_seq
;
3482 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3487 static inline void l2cap_data_channel_rnrframe(struct sock
*sk
, u16 rx_control
)
3489 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3490 u8 tx_seq
= __get_reqseq(rx_control
);
3492 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
3494 pi
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
3495 pi
->expected_ack_seq
= tx_seq
;
3496 l2cap_drop_acked_frames(sk
);
3498 if (rx_control
& L2CAP_CTRL_POLL
)
3499 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3501 if (!(pi
->conn_state
& L2CAP_CONN_SREJ_SENT
)) {
3502 del_timer(&pi
->retrans_timer
);
3503 if (rx_control
& L2CAP_CTRL_POLL
)
3504 l2cap_send_rr_or_rnr(pi
, L2CAP_CTRL_FINAL
);
3508 if (rx_control
& L2CAP_CTRL_POLL
)
3509 l2cap_send_srejtail(sk
);
3511 l2cap_send_sframe(pi
, L2CAP_SUPER_RCV_READY
);
3514 static inline int l2cap_data_channel_sframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3516 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
3518 if (L2CAP_CTRL_FINAL
& rx_control
&&
3519 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
3520 del_timer(&l2cap_pi(sk
)->monitor_timer
);
3521 if (l2cap_pi(sk
)->unacked_frames
> 0)
3522 __mod_retrans_timer();
3523 l2cap_pi(sk
)->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3526 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
3527 case L2CAP_SUPER_RCV_READY
:
3528 l2cap_data_channel_rrframe(sk
, rx_control
);
3531 case L2CAP_SUPER_REJECT
:
3532 l2cap_data_channel_rejframe(sk
, rx_control
);
3535 case L2CAP_SUPER_SELECT_REJECT
:
3536 l2cap_data_channel_srejframe(sk
, rx_control
);
3539 case L2CAP_SUPER_RCV_NOT_READY
:
3540 l2cap_data_channel_rnrframe(sk
, rx_control
);
3548 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
)
3550 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3553 int len
, next_tx_seq_offset
, req_seq_offset
;
3555 control
= get_unaligned_le16(skb
->data
);
3560 * We can just drop the corrupted I-frame here.
3561 * Receiver will miss it and start proper recovery
3562 * procedures and ask retransmission.
3564 if (l2cap_check_fcs(pi
, skb
))
3567 if (__is_sar_start(control
) && __is_iframe(control
))
3570 if (pi
->fcs
== L2CAP_FCS_CRC16
)
3573 if (len
> pi
->mps
) {
3574 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3578 req_seq
= __get_reqseq(control
);
3579 req_seq_offset
= (req_seq
- pi
->expected_ack_seq
) % 64;
3580 if (req_seq_offset
< 0)
3581 req_seq_offset
+= 64;
3583 next_tx_seq_offset
=
3584 (pi
->next_tx_seq
- pi
->expected_ack_seq
) % 64;
3585 if (next_tx_seq_offset
< 0)
3586 next_tx_seq_offset
+= 64;
3588 /* check for invalid req-seq */
3589 if (req_seq_offset
> next_tx_seq_offset
) {
3590 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3594 if (__is_iframe(control
)) {
3596 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3600 l2cap_data_channel_iframe(sk
, control
, skb
);
3604 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3608 l2cap_data_channel_sframe(sk
, control
, skb
);
3618 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
3621 struct l2cap_pinfo
*pi
;
3626 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
3628 BT_DBG("unknown cid 0x%4.4x", cid
);
3634 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3636 if (sk
->sk_state
!= BT_CONNECTED
)
3640 case L2CAP_MODE_BASIC
:
3641 /* If socket recv buffers overflows we drop data here
3642 * which is *bad* because L2CAP has to be reliable.
3643 * But we don't have any other choice. L2CAP doesn't
3644 * provide flow control mechanism. */
3646 if (pi
->imtu
< skb
->len
)
3649 if (!sock_queue_rcv_skb(sk
, skb
))
3653 case L2CAP_MODE_ERTM
:
3654 if (!sock_owned_by_user(sk
)) {
3655 l2cap_ertm_data_rcv(sk
, skb
);
3657 if (sk_add_backlog(sk
, skb
))
3663 case L2CAP_MODE_STREAMING
:
3664 control
= get_unaligned_le16(skb
->data
);
3668 if (l2cap_check_fcs(pi
, skb
))
3671 if (__is_sar_start(control
))
3674 if (pi
->fcs
== L2CAP_FCS_CRC16
)
3677 if (len
> pi
->mps
|| len
< 0 || __is_sframe(control
))
3680 tx_seq
= __get_txseq(control
);
3682 if (pi
->expected_tx_seq
== tx_seq
)
3683 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3685 pi
->expected_tx_seq
= (tx_seq
+ 1) % 64;
3687 l2cap_streaming_reassembly_sdu(sk
, skb
, control
);
3692 BT_DBG("sk %p: bad mode 0x%2.2x", sk
, pi
->mode
);
3706 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
3710 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
3716 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3718 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
3721 if (l2cap_pi(sk
)->imtu
< skb
->len
)
3724 if (!sock_queue_rcv_skb(sk
, skb
))
3736 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3738 struct l2cap_hdr
*lh
= (void *) skb
->data
;
3742 skb_pull(skb
, L2CAP_HDR_SIZE
);
3743 cid
= __le16_to_cpu(lh
->cid
);
3744 len
= __le16_to_cpu(lh
->len
);
3746 if (len
!= skb
->len
) {
3751 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
3754 case L2CAP_CID_SIGNALING
:
3755 l2cap_sig_channel(conn
, skb
);
3758 case L2CAP_CID_CONN_LESS
:
3759 psm
= get_unaligned_le16(skb
->data
);
3761 l2cap_conless_channel(conn
, psm
, skb
);
3765 l2cap_data_channel(conn
, cid
, skb
);
3770 /* ---- L2CAP interface with lower layer (HCI) ---- */
3772 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
3774 int exact
= 0, lm1
= 0, lm2
= 0;
3775 register struct sock
*sk
;
3776 struct hlist_node
*node
;
3778 if (type
!= ACL_LINK
)
3781 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
3783 /* Find listening sockets and check their link_mode */
3784 read_lock(&l2cap_sk_list
.lock
);
3785 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
3786 if (sk
->sk_state
!= BT_LISTEN
)
3789 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
3790 lm1
|= HCI_LM_ACCEPT
;
3791 if (l2cap_pi(sk
)->role_switch
)
3792 lm1
|= HCI_LM_MASTER
;
3794 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
3795 lm2
|= HCI_LM_ACCEPT
;
3796 if (l2cap_pi(sk
)->role_switch
)
3797 lm2
|= HCI_LM_MASTER
;
3800 read_unlock(&l2cap_sk_list
.lock
);
3802 return exact
? lm1
: lm2
;
3805 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
3807 struct l2cap_conn
*conn
;
3809 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
3811 if (hcon
->type
!= ACL_LINK
)
3815 conn
= l2cap_conn_add(hcon
, status
);
3817 l2cap_conn_ready(conn
);
3819 l2cap_conn_del(hcon
, bt_err(status
));
3824 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
3826 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3828 BT_DBG("hcon %p", hcon
);
3830 if (hcon
->type
!= ACL_LINK
|| !conn
)
3833 return conn
->disc_reason
;
3836 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
3838 BT_DBG("hcon %p reason %d", hcon
, reason
);
3840 if (hcon
->type
!= ACL_LINK
)
3843 l2cap_conn_del(hcon
, bt_err(reason
));
3848 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
3850 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
)
3853 if (encrypt
== 0x00) {
3854 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
3855 l2cap_sock_clear_timer(sk
);
3856 l2cap_sock_set_timer(sk
, HZ
* 5);
3857 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
3858 __l2cap_sock_close(sk
, ECONNREFUSED
);
3860 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
3861 l2cap_sock_clear_timer(sk
);
3865 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
3867 struct l2cap_chan_list
*l
;
3868 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3874 l
= &conn
->chan_list
;
3876 BT_DBG("conn %p", conn
);
3878 read_lock(&l
->lock
);
3880 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
3883 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
3888 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
3889 sk
->sk_state
== BT_CONFIG
)) {
3890 l2cap_check_encryption(sk
, encrypt
);
3895 if (sk
->sk_state
== BT_CONNECT
) {
3897 struct l2cap_conn_req req
;
3898 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3899 req
.psm
= l2cap_pi(sk
)->psm
;
3901 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
3902 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
3904 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
3905 L2CAP_CONN_REQ
, sizeof(req
), &req
);
3907 l2cap_sock_clear_timer(sk
);
3908 l2cap_sock_set_timer(sk
, HZ
/ 10);
3910 } else if (sk
->sk_state
== BT_CONNECT2
) {
3911 struct l2cap_conn_rsp rsp
;
3915 sk
->sk_state
= BT_CONFIG
;
3916 result
= L2CAP_CR_SUCCESS
;
3918 sk
->sk_state
= BT_DISCONN
;
3919 l2cap_sock_set_timer(sk
, HZ
/ 10);
3920 result
= L2CAP_CR_SEC_BLOCK
;
3923 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3924 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3925 rsp
.result
= cpu_to_le16(result
);
3926 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3927 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
3928 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3934 read_unlock(&l
->lock
);
3939 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
3941 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3944 conn
= l2cap_conn_add(hcon
, 0);
3949 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
3951 if (!(flags
& ACL_CONT
)) {
3952 struct l2cap_hdr
*hdr
;
3958 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
3959 kfree_skb(conn
->rx_skb
);
3960 conn
->rx_skb
= NULL
;
3962 l2cap_conn_unreliable(conn
, ECOMM
);
3965 /* Start fragment always begin with Basic L2CAP header */
3966 if (skb
->len
< L2CAP_HDR_SIZE
) {
3967 BT_ERR("Frame is too short (len %d)", skb
->len
);
3968 l2cap_conn_unreliable(conn
, ECOMM
);
3972 hdr
= (struct l2cap_hdr
*) skb
->data
;
3973 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
3974 cid
= __le16_to_cpu(hdr
->cid
);
3976 if (len
== skb
->len
) {
3977 /* Complete frame received */
3978 l2cap_recv_frame(conn
, skb
);
3982 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
3984 if (skb
->len
> len
) {
3985 BT_ERR("Frame is too long (len %d, expected len %d)",
3987 l2cap_conn_unreliable(conn
, ECOMM
);
3991 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
3993 if (sk
&& l2cap_pi(sk
)->imtu
< len
- L2CAP_HDR_SIZE
) {
3994 BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
3995 len
, l2cap_pi(sk
)->imtu
);
3997 l2cap_conn_unreliable(conn
, ECOMM
);
4004 /* Allocate skb for the complete frame (with header) */
4005 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4009 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4011 conn
->rx_len
= len
- skb
->len
;
4013 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4015 if (!conn
->rx_len
) {
4016 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4017 l2cap_conn_unreliable(conn
, ECOMM
);
4021 if (skb
->len
> conn
->rx_len
) {
4022 BT_ERR("Fragment is too long (len %d, expected %d)",
4023 skb
->len
, conn
->rx_len
);
4024 kfree_skb(conn
->rx_skb
);
4025 conn
->rx_skb
= NULL
;
4027 l2cap_conn_unreliable(conn
, ECOMM
);
4031 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4033 conn
->rx_len
-= skb
->len
;
4035 if (!conn
->rx_len
) {
4036 /* Complete frame received */
4037 l2cap_recv_frame(conn
, conn
->rx_skb
);
4038 conn
->rx_skb
= NULL
;
4047 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4050 struct hlist_node
*node
;
4052 read_lock_bh(&l2cap_sk_list
.lock
);
4054 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4055 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4057 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4058 batostr(&bt_sk(sk
)->src
),
4059 batostr(&bt_sk(sk
)->dst
),
4060 sk
->sk_state
, __le16_to_cpu(pi
->psm
),
4062 pi
->imtu
, pi
->omtu
, pi
->sec_level
);
4065 read_unlock_bh(&l2cap_sk_list
.lock
);
4070 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4072 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4075 static const struct file_operations l2cap_debugfs_fops
= {
4076 .open
= l2cap_debugfs_open
,
4078 .llseek
= seq_lseek
,
4079 .release
= single_release
,
/* Dentry of the debugfs entry, kept for removal at module exit. */
static struct dentry *l2cap_debugfs;
4084 static struct hci_proto l2cap_hci_proto
= {
4086 .id
= HCI_PROTO_L2CAP
,
4087 .connect_ind
= l2cap_connect_ind
,
4088 .connect_cfm
= l2cap_connect_cfm
,
4089 .disconn_ind
= l2cap_disconn_ind
,
4090 .disconn_cfm
= l2cap_disconn_cfm
,
4091 .security_cfm
= l2cap_security_cfm
,
4092 .recv_acldata
= l2cap_recv_acldata
4095 static int __init
l2cap_init(void)
4099 err
= l2cap_init_sockets();
4103 _busy_wq
= create_singlethread_workqueue("l2cap");
4109 err
= hci_register_proto(&l2cap_hci_proto
);
4111 BT_ERR("L2CAP protocol registration failed");
4112 bt_sock_unregister(BTPROTO_L2CAP
);
4117 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4118 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4120 BT_ERR("Failed to create L2CAP debug file");
4123 BT_INFO("L2CAP ver %s", VERSION
);
4124 BT_INFO("L2CAP socket layer initialized");
4129 destroy_workqueue(_busy_wq
);
4130 l2cap_cleanup_sockets();
4134 static void __exit
l2cap_exit(void)
4136 debugfs_remove(l2cap_debugfs
);
4138 flush_workqueue(_busy_wq
);
4139 destroy_workqueue(_busy_wq
);
4141 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4142 BT_ERR("L2CAP protocol unregistration failed");
4144 l2cap_cleanup_sockets();
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4155 module_init(l2cap_init
);
4156 module_exit(l2cap_exit
);
4158 module_param(disable_ertm
, bool, 0644);
4159 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");
4161 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4162 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION
);
4163 MODULE_VERSION(VERSION
);
4164 MODULE_LICENSE("GPL");
4165 MODULE_ALIAS("bt-proto-0");