2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
60 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
61 static u8 l2cap_fixed_chan
[8] = { 0x02, };
63 static struct workqueue_struct
*_busy_wq
;
65 struct bt_sock_list l2cap_sk_list
= {
66 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
69 static void l2cap_busy_work(struct work_struct
*work
);
71 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
72 u8 code
, u8 ident
, u16 dlen
, void *data
);
73 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
75 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
);
77 /* ---- L2CAP channels ---- */
78 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
82 list_for_each_entry(c
, &conn
->chan_l
, list
) {
83 struct sock
*s
= c
->sk
;
84 if (l2cap_pi(s
)->dcid
== cid
)
91 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
95 list_for_each_entry(c
, &conn
->chan_l
, list
) {
96 struct sock
*s
= c
->sk
;
97 if (l2cap_pi(s
)->scid
== cid
)
103 /* Find channel with given SCID.
104 * Returns locked socket */
105 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
107 struct l2cap_chan
*c
;
109 read_lock(&conn
->chan_lock
);
110 c
= __l2cap_get_chan_by_scid(conn
, cid
);
113 read_unlock(&conn
->chan_lock
);
117 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
119 struct l2cap_chan
*c
;
121 list_for_each_entry(c
, &conn
->chan_l
, list
) {
122 if (c
->ident
== ident
)
128 static inline struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
130 struct l2cap_chan
*c
;
132 read_lock(&conn
->chan_lock
);
133 c
= __l2cap_get_chan_by_ident(conn
, ident
);
136 read_unlock(&conn
->chan_lock
);
140 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
142 u16 cid
= L2CAP_CID_DYN_START
;
144 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
145 if (!__l2cap_get_chan_by_scid(conn
, cid
))
152 static struct l2cap_chan
*l2cap_chan_alloc(struct sock
*sk
)
154 struct l2cap_chan
*chan
;
156 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
165 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
167 struct sock
*sk
= chan
->sk
;
169 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
170 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
172 conn
->disc_reason
= 0x13;
174 l2cap_pi(sk
)->conn
= conn
;
176 if (sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
) {
177 if (conn
->hcon
->type
== LE_LINK
) {
179 l2cap_pi(sk
)->omtu
= L2CAP_LE_DEFAULT_MTU
;
180 l2cap_pi(sk
)->scid
= L2CAP_CID_LE_DATA
;
181 l2cap_pi(sk
)->dcid
= L2CAP_CID_LE_DATA
;
183 /* Alloc CID for connection-oriented socket */
184 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(conn
);
185 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
187 } else if (sk
->sk_type
== SOCK_DGRAM
) {
188 /* Connectionless socket */
189 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
190 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
191 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
193 /* Raw socket can send/recv signalling messages only */
194 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
195 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
196 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
201 list_add(&chan
->list
, &conn
->chan_l
);
205 * Must be called on the locked socket. */
206 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
208 struct sock
*sk
= chan
->sk
;
209 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
210 struct sock
*parent
= bt_sk(sk
)->parent
;
212 l2cap_sock_clear_timer(sk
);
214 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
217 /* Delete from channel list */
218 write_lock_bh(&conn
->chan_lock
);
219 list_del(&chan
->list
);
220 write_unlock_bh(&conn
->chan_lock
);
223 l2cap_pi(sk
)->conn
= NULL
;
224 hci_conn_put(conn
->hcon
);
227 sk
->sk_state
= BT_CLOSED
;
228 sock_set_flag(sk
, SOCK_ZAPPED
);
234 bt_accept_unlink(sk
);
235 parent
->sk_data_ready(parent
, 0);
237 sk
->sk_state_change(sk
);
239 skb_queue_purge(TX_QUEUE(sk
));
241 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
242 struct srej_list
*l
, *tmp
;
244 del_timer(&l2cap_pi(sk
)->retrans_timer
);
245 del_timer(&l2cap_pi(sk
)->monitor_timer
);
246 del_timer(&l2cap_pi(sk
)->ack_timer
);
248 skb_queue_purge(SREJ_QUEUE(sk
));
249 skb_queue_purge(BUSY_QUEUE(sk
));
251 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
260 static inline u8
l2cap_get_auth_type(struct sock
*sk
)
262 if (sk
->sk_type
== SOCK_RAW
) {
263 switch (l2cap_pi(sk
)->sec_level
) {
264 case BT_SECURITY_HIGH
:
265 return HCI_AT_DEDICATED_BONDING_MITM
;
266 case BT_SECURITY_MEDIUM
:
267 return HCI_AT_DEDICATED_BONDING
;
269 return HCI_AT_NO_BONDING
;
271 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
272 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
273 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
275 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
276 return HCI_AT_NO_BONDING_MITM
;
278 return HCI_AT_NO_BONDING
;
280 switch (l2cap_pi(sk
)->sec_level
) {
281 case BT_SECURITY_HIGH
:
282 return HCI_AT_GENERAL_BONDING_MITM
;
283 case BT_SECURITY_MEDIUM
:
284 return HCI_AT_GENERAL_BONDING
;
286 return HCI_AT_NO_BONDING
;
291 /* Service level security */
292 static inline int l2cap_check_security(struct sock
*sk
)
294 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
297 auth_type
= l2cap_get_auth_type(sk
);
299 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
303 u8
l2cap_get_ident(struct l2cap_conn
*conn
)
307 /* Get next available identificator.
308 * 1 - 128 are used by kernel.
309 * 129 - 199 are reserved.
310 * 200 - 254 are used by utilities like l2ping, etc.
313 spin_lock_bh(&conn
->lock
);
315 if (++conn
->tx_ident
> 128)
320 spin_unlock_bh(&conn
->lock
);
325 void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
327 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
330 BT_DBG("code 0x%2.2x", code
);
335 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
336 flags
= ACL_START_NO_FLUSH
;
340 hci_send_acl(conn
->hcon
, skb
, flags
);
343 static inline void l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
346 struct l2cap_hdr
*lh
;
347 struct l2cap_conn
*conn
= pi
->conn
;
348 struct sock
*sk
= (struct sock
*)pi
;
349 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
352 if (sk
->sk_state
!= BT_CONNECTED
)
355 if (pi
->fcs
== L2CAP_FCS_CRC16
)
358 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
360 count
= min_t(unsigned int, conn
->mtu
, hlen
);
361 control
|= L2CAP_CTRL_FRAME_TYPE
;
363 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
364 control
|= L2CAP_CTRL_FINAL
;
365 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
368 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
369 control
|= L2CAP_CTRL_POLL
;
370 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
373 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
377 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
378 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
379 lh
->cid
= cpu_to_le16(pi
->dcid
);
380 put_unaligned_le16(control
, skb_put(skb
, 2));
382 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
383 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
384 put_unaligned_le16(fcs
, skb_put(skb
, 2));
387 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
388 flags
= ACL_START_NO_FLUSH
;
392 hci_send_acl(pi
->conn
->hcon
, skb
, flags
);
395 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo
*pi
, u16 control
)
397 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
398 control
|= L2CAP_SUPER_RCV_NOT_READY
;
399 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
401 control
|= L2CAP_SUPER_RCV_READY
;
403 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
405 l2cap_send_sframe(pi
, control
);
408 static inline int __l2cap_no_conn_pending(struct sock
*sk
)
410 return !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
);
413 static void l2cap_do_start(struct l2cap_chan
*chan
)
415 struct sock
*sk
= chan
->sk
;
416 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
418 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
419 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
422 if (l2cap_check_security(sk
) && __l2cap_no_conn_pending(sk
)) {
423 struct l2cap_conn_req req
;
424 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
425 req
.psm
= l2cap_pi(sk
)->psm
;
427 chan
->ident
= l2cap_get_ident(conn
);
428 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
430 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
434 struct l2cap_info_req req
;
435 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
437 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
438 conn
->info_ident
= l2cap_get_ident(conn
);
440 mod_timer(&conn
->info_timer
, jiffies
+
441 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
443 l2cap_send_cmd(conn
, conn
->info_ident
,
444 L2CAP_INFO_REQ
, sizeof(req
), &req
);
448 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
450 u32 local_feat_mask
= l2cap_feat_mask
;
452 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
455 case L2CAP_MODE_ERTM
:
456 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
457 case L2CAP_MODE_STREAMING
:
458 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
464 void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
, int err
)
466 struct l2cap_disconn_req req
;
471 skb_queue_purge(TX_QUEUE(sk
));
473 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
474 del_timer(&l2cap_pi(sk
)->retrans_timer
);
475 del_timer(&l2cap_pi(sk
)->monitor_timer
);
476 del_timer(&l2cap_pi(sk
)->ack_timer
);
479 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
480 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
481 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
482 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
484 sk
->sk_state
= BT_DISCONN
;
488 /* ---- L2CAP connections ---- */
489 static void l2cap_conn_start(struct l2cap_conn
*conn
)
491 struct l2cap_chan
*chan
, *tmp
;
493 BT_DBG("conn %p", conn
);
495 read_lock(&conn
->chan_lock
);
497 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
498 struct sock
*sk
= chan
->sk
;
502 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
503 sk
->sk_type
!= SOCK_STREAM
) {
508 if (sk
->sk_state
== BT_CONNECT
) {
509 struct l2cap_conn_req req
;
511 if (!l2cap_check_security(sk
) ||
512 !__l2cap_no_conn_pending(sk
)) {
517 if (!l2cap_mode_supported(l2cap_pi(sk
)->mode
,
519 && l2cap_pi(sk
)->conf_state
&
520 L2CAP_CONF_STATE2_DEVICE
) {
521 /* __l2cap_sock_close() calls list_del(chan)
522 * so release the lock */
523 read_unlock_bh(&conn
->chan_lock
);
524 __l2cap_sock_close(sk
, ECONNRESET
);
525 read_lock_bh(&conn
->chan_lock
);
530 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
531 req
.psm
= l2cap_pi(sk
)->psm
;
533 chan
->ident
= l2cap_get_ident(conn
);
534 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
536 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
539 } else if (sk
->sk_state
== BT_CONNECT2
) {
540 struct l2cap_conn_rsp rsp
;
542 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
543 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
545 if (l2cap_check_security(sk
)) {
546 if (bt_sk(sk
)->defer_setup
) {
547 struct sock
*parent
= bt_sk(sk
)->parent
;
548 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
549 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
550 parent
->sk_data_ready(parent
, 0);
553 sk
->sk_state
= BT_CONFIG
;
554 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
555 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
558 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
559 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
562 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
565 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
||
566 rsp
.result
!= L2CAP_CR_SUCCESS
) {
571 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
572 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
573 l2cap_build_conf_req(chan
, buf
), buf
);
574 chan
->num_conf_req
++;
580 read_unlock(&conn
->chan_lock
);
583 /* Find socket with cid and source bdaddr.
584 * Returns closest match, locked.
586 static struct sock
*l2cap_get_sock_by_scid(int state
, __le16 cid
, bdaddr_t
*src
)
588 struct sock
*s
, *sk
= NULL
, *sk1
= NULL
;
589 struct hlist_node
*node
;
591 read_lock(&l2cap_sk_list
.lock
);
593 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
594 if (state
&& sk
->sk_state
!= state
)
597 if (l2cap_pi(sk
)->scid
== cid
) {
599 if (!bacmp(&bt_sk(sk
)->src
, src
))
603 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
610 read_unlock(&l2cap_sk_list
.lock
);
615 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
617 struct sock
*parent
, *uninitialized_var(sk
);
618 struct l2cap_chan
*chan
;
622 /* Check if we have socket listening on cid */
623 parent
= l2cap_get_sock_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
628 /* Check for backlog size */
629 if (sk_acceptq_is_full(parent
)) {
630 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
634 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
638 chan
= l2cap_chan_alloc(sk
);
644 write_lock_bh(&conn
->chan_lock
);
646 hci_conn_hold(conn
->hcon
);
648 l2cap_sock_init(sk
, parent
);
650 bacpy(&bt_sk(sk
)->src
, conn
->src
);
651 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
653 bt_accept_enqueue(parent
, sk
);
655 __l2cap_chan_add(conn
, chan
);
657 l2cap_pi(sk
)->chan
= chan
;
659 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
661 sk
->sk_state
= BT_CONNECTED
;
662 parent
->sk_data_ready(parent
, 0);
664 write_unlock_bh(&conn
->chan_lock
);
667 bh_unlock_sock(parent
);
670 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
672 struct l2cap_chan
*chan
;
674 BT_DBG("conn %p", conn
);
676 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
677 l2cap_le_conn_ready(conn
);
679 read_lock(&conn
->chan_lock
);
681 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
682 struct sock
*sk
= chan
->sk
;
686 if (conn
->hcon
->type
== LE_LINK
) {
687 l2cap_sock_clear_timer(sk
);
688 sk
->sk_state
= BT_CONNECTED
;
689 sk
->sk_state_change(sk
);
692 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
693 sk
->sk_type
!= SOCK_STREAM
) {
694 l2cap_sock_clear_timer(sk
);
695 sk
->sk_state
= BT_CONNECTED
;
696 sk
->sk_state_change(sk
);
697 } else if (sk
->sk_state
== BT_CONNECT
)
698 l2cap_do_start(chan
);
703 read_unlock(&conn
->chan_lock
);
706 /* Notify sockets that we cannot guaranty reliability anymore */
707 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
709 struct l2cap_chan
*chan
;
711 BT_DBG("conn %p", conn
);
713 read_lock(&conn
->chan_lock
);
715 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
716 struct sock
*sk
= chan
->sk
;
718 if (l2cap_pi(sk
)->force_reliable
)
722 read_unlock(&conn
->chan_lock
);
725 static void l2cap_info_timeout(unsigned long arg
)
727 struct l2cap_conn
*conn
= (void *) arg
;
729 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
730 conn
->info_ident
= 0;
732 l2cap_conn_start(conn
);
735 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
737 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
742 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
746 hcon
->l2cap_data
= conn
;
749 BT_DBG("hcon %p conn %p", hcon
, conn
);
751 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
752 conn
->mtu
= hcon
->hdev
->le_mtu
;
754 conn
->mtu
= hcon
->hdev
->acl_mtu
;
756 conn
->src
= &hcon
->hdev
->bdaddr
;
757 conn
->dst
= &hcon
->dst
;
761 spin_lock_init(&conn
->lock
);
762 rwlock_init(&conn
->chan_lock
);
764 INIT_LIST_HEAD(&conn
->chan_l
);
766 if (hcon
->type
!= LE_LINK
)
767 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
768 (unsigned long) conn
);
770 conn
->disc_reason
= 0x13;
775 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
777 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
778 struct l2cap_chan
*chan
, *l
;
784 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
786 kfree_skb(conn
->rx_skb
);
789 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
792 l2cap_chan_del(chan
, err
);
797 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
798 del_timer_sync(&conn
->info_timer
);
800 hcon
->l2cap_data
= NULL
;
804 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
806 write_lock_bh(&conn
->chan_lock
);
807 __l2cap_chan_add(conn
, chan
);
808 write_unlock_bh(&conn
->chan_lock
);
811 /* ---- Socket interface ---- */
813 /* Find socket with psm and source bdaddr.
814 * Returns closest match.
816 static struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
818 struct sock
*sk
= NULL
, *sk1
= NULL
;
819 struct hlist_node
*node
;
821 read_lock(&l2cap_sk_list
.lock
);
823 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
824 if (state
&& sk
->sk_state
!= state
)
827 if (l2cap_pi(sk
)->psm
== psm
) {
829 if (!bacmp(&bt_sk(sk
)->src
, src
))
833 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
838 read_unlock(&l2cap_sk_list
.lock
);
840 return node
? sk
: sk1
;
843 int l2cap_do_connect(struct sock
*sk
)
845 bdaddr_t
*src
= &bt_sk(sk
)->src
;
846 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
847 struct l2cap_conn
*conn
;
848 struct l2cap_chan
*chan
;
849 struct hci_conn
*hcon
;
850 struct hci_dev
*hdev
;
854 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
857 hdev
= hci_get_route(dst
, src
);
859 return -EHOSTUNREACH
;
861 hci_dev_lock_bh(hdev
);
863 auth_type
= l2cap_get_auth_type(sk
);
865 if (l2cap_pi(sk
)->dcid
== L2CAP_CID_LE_DATA
)
866 hcon
= hci_connect(hdev
, LE_LINK
, dst
,
867 l2cap_pi(sk
)->sec_level
, auth_type
);
869 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
870 l2cap_pi(sk
)->sec_level
, auth_type
);
877 conn
= l2cap_conn_add(hcon
, 0);
884 chan
= l2cap_chan_alloc(sk
);
891 /* Update source addr of the socket */
892 bacpy(src
, conn
->src
);
894 l2cap_chan_add(conn
, chan
);
896 l2cap_pi(sk
)->chan
= chan
;
898 sk
->sk_state
= BT_CONNECT
;
899 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
901 if (hcon
->state
== BT_CONNECTED
) {
902 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
903 sk
->sk_type
!= SOCK_STREAM
) {
904 l2cap_sock_clear_timer(sk
);
905 if (l2cap_check_security(sk
))
906 sk
->sk_state
= BT_CONNECTED
;
908 l2cap_do_start(chan
);
914 hci_dev_unlock_bh(hdev
);
919 int __l2cap_wait_ack(struct sock
*sk
)
921 DECLARE_WAITQUEUE(wait
, current
);
925 add_wait_queue(sk_sleep(sk
), &wait
);
926 while ((l2cap_pi(sk
)->unacked_frames
> 0 && l2cap_pi(sk
)->conn
)) {
927 set_current_state(TASK_INTERRUPTIBLE
);
932 if (signal_pending(current
)) {
933 err
= sock_intr_errno(timeo
);
938 timeo
= schedule_timeout(timeo
);
941 err
= sock_error(sk
);
945 set_current_state(TASK_RUNNING
);
946 remove_wait_queue(sk_sleep(sk
), &wait
);
950 static void l2cap_monitor_timeout(unsigned long arg
)
952 struct sock
*sk
= (void *) arg
;
957 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
958 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
, ECONNABORTED
);
963 l2cap_pi(sk
)->retry_count
++;
964 __mod_monitor_timer();
966 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
970 static void l2cap_retrans_timeout(unsigned long arg
)
972 struct sock
*sk
= (void *) arg
;
977 l2cap_pi(sk
)->retry_count
= 1;
978 __mod_monitor_timer();
980 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
982 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
986 static void l2cap_drop_acked_frames(struct sock
*sk
)
990 while ((skb
= skb_peek(TX_QUEUE(sk
))) &&
991 l2cap_pi(sk
)->unacked_frames
) {
992 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
995 skb
= skb_dequeue(TX_QUEUE(sk
));
998 l2cap_pi(sk
)->unacked_frames
--;
1001 if (!l2cap_pi(sk
)->unacked_frames
)
1002 del_timer(&l2cap_pi(sk
)->retrans_timer
);
1005 void l2cap_do_send(struct sock
*sk
, struct sk_buff
*skb
)
1007 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1008 struct hci_conn
*hcon
= pi
->conn
->hcon
;
1011 BT_DBG("sk %p, skb %p len %d", sk
, skb
, skb
->len
);
1013 if (!pi
->flushable
&& lmp_no_flush_capable(hcon
->hdev
))
1014 flags
= ACL_START_NO_FLUSH
;
1018 hci_send_acl(hcon
, skb
, flags
);
1021 void l2cap_streaming_send(struct sock
*sk
)
1023 struct sk_buff
*skb
;
1024 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1027 while ((skb
= skb_dequeue(TX_QUEUE(sk
)))) {
1028 control
= get_unaligned_le16(skb
->data
+ L2CAP_HDR_SIZE
);
1029 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1030 put_unaligned_le16(control
, skb
->data
+ L2CAP_HDR_SIZE
);
1032 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1033 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
- 2);
1034 put_unaligned_le16(fcs
, skb
->data
+ skb
->len
- 2);
1037 l2cap_do_send(sk
, skb
);
1039 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1043 static void l2cap_retransmit_one_frame(struct sock
*sk
, u8 tx_seq
)
1045 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1046 struct sk_buff
*skb
, *tx_skb
;
1049 skb
= skb_peek(TX_QUEUE(sk
));
1054 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1057 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1060 } while ((skb
= skb_queue_next(TX_QUEUE(sk
), skb
)));
1062 if (pi
->remote_max_tx
&&
1063 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1064 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1068 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1069 bt_cb(skb
)->retries
++;
1070 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1072 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1073 control
|= L2CAP_CTRL_FINAL
;
1074 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1077 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1078 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1080 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1082 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1083 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1084 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1087 l2cap_do_send(sk
, tx_skb
);
1090 int l2cap_ertm_send(struct sock
*sk
)
1092 struct sk_buff
*skb
, *tx_skb
;
1093 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1097 if (sk
->sk_state
!= BT_CONNECTED
)
1100 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
))) {
1102 if (pi
->remote_max_tx
&&
1103 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1104 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1108 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1110 bt_cb(skb
)->retries
++;
1112 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1113 control
&= L2CAP_CTRL_SAR
;
1115 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1116 control
|= L2CAP_CTRL_FINAL
;
1117 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1119 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1120 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1121 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1124 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1125 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1126 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1129 l2cap_do_send(sk
, tx_skb
);
1131 __mod_retrans_timer();
1133 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1134 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1136 if (bt_cb(skb
)->retries
== 1)
1137 pi
->unacked_frames
++;
1141 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1142 sk
->sk_send_head
= NULL
;
1144 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1152 static int l2cap_retransmit_frames(struct sock
*sk
)
1154 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1157 if (!skb_queue_empty(TX_QUEUE(sk
)))
1158 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
1160 pi
->next_tx_seq
= pi
->expected_ack_seq
;
1161 ret
= l2cap_ertm_send(sk
);
1165 static void l2cap_send_ack(struct l2cap_pinfo
*pi
)
1167 struct sock
*sk
= (struct sock
*)pi
;
1170 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1172 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
1173 control
|= L2CAP_SUPER_RCV_NOT_READY
;
1174 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
1175 l2cap_send_sframe(pi
, control
);
1179 if (l2cap_ertm_send(sk
) > 0)
1182 control
|= L2CAP_SUPER_RCV_READY
;
1183 l2cap_send_sframe(pi
, control
);
1186 static void l2cap_send_srejtail(struct sock
*sk
)
1188 struct srej_list
*tail
;
1191 control
= L2CAP_SUPER_SELECT_REJECT
;
1192 control
|= L2CAP_CTRL_FINAL
;
1194 tail
= list_entry(SREJ_LIST(sk
)->prev
, struct srej_list
, list
);
1195 control
|= tail
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1197 l2cap_send_sframe(l2cap_pi(sk
), control
);
1200 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1202 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1203 struct sk_buff
**frag
;
1206 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1212 /* Continuation fragments (no L2CAP header) */
1213 frag
= &skb_shinfo(skb
)->frag_list
;
1215 count
= min_t(unsigned int, conn
->mtu
, len
);
1217 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1220 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1226 frag
= &(*frag
)->next
;
1232 struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1234 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1235 struct sk_buff
*skb
;
1236 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1237 struct l2cap_hdr
*lh
;
1239 BT_DBG("sk %p len %d", sk
, (int)len
);
1241 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1242 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1243 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1245 return ERR_PTR(err
);
1247 /* Create L2CAP header */
1248 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1249 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1250 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1251 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1253 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1254 if (unlikely(err
< 0)) {
1256 return ERR_PTR(err
);
1261 struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1263 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1264 struct sk_buff
*skb
;
1265 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1266 struct l2cap_hdr
*lh
;
1268 BT_DBG("sk %p len %d", sk
, (int)len
);
1270 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1271 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1272 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1274 return ERR_PTR(err
);
1276 /* Create L2CAP header */
1277 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1278 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1279 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1281 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1282 if (unlikely(err
< 0)) {
1284 return ERR_PTR(err
);
1289 struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1291 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1292 struct sk_buff
*skb
;
1293 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1294 struct l2cap_hdr
*lh
;
1296 BT_DBG("sk %p len %d", sk
, (int)len
);
1299 return ERR_PTR(-ENOTCONN
);
1304 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1307 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1308 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1309 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1311 return ERR_PTR(err
);
1313 /* Create L2CAP header */
1314 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1315 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1316 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1317 put_unaligned_le16(control
, skb_put(skb
, 2));
1319 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1321 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1322 if (unlikely(err
< 0)) {
1324 return ERR_PTR(err
);
1327 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1328 put_unaligned_le16(0, skb_put(skb
, 2));
1330 bt_cb(skb
)->retries
= 0;
1334 int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1336 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1337 struct sk_buff
*skb
;
1338 struct sk_buff_head sar_queue
;
1342 skb_queue_head_init(&sar_queue
);
1343 control
= L2CAP_SDU_START
;
1344 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->remote_mps
, control
, len
);
1346 return PTR_ERR(skb
);
1348 __skb_queue_tail(&sar_queue
, skb
);
1349 len
-= pi
->remote_mps
;
1350 size
+= pi
->remote_mps
;
1355 if (len
> pi
->remote_mps
) {
1356 control
= L2CAP_SDU_CONTINUE
;
1357 buflen
= pi
->remote_mps
;
1359 control
= L2CAP_SDU_END
;
1363 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1365 skb_queue_purge(&sar_queue
);
1366 return PTR_ERR(skb
);
1369 __skb_queue_tail(&sar_queue
, skb
);
1373 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1374 if (sk
->sk_send_head
== NULL
)
1375 sk
->sk_send_head
= sar_queue
.next
;
1380 static void l2cap_chan_ready(struct sock
*sk
)
1382 struct sock
*parent
= bt_sk(sk
)->parent
;
1384 BT_DBG("sk %p, parent %p", sk
, parent
);
1386 l2cap_pi(sk
)->conf_state
= 0;
1387 l2cap_sock_clear_timer(sk
);
1390 /* Outgoing channel.
1391 * Wake up socket sleeping on connect.
1393 sk
->sk_state
= BT_CONNECTED
;
1394 sk
->sk_state_change(sk
);
1396 /* Incoming channel.
1397 * Wake up socket sleeping on accept.
1399 parent
->sk_data_ready(parent
, 0);
1403 /* Copy frame to all raw sockets on that connection */
1404 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
1406 struct sk_buff
*nskb
;
1407 struct l2cap_chan
*chan
;
1409 BT_DBG("conn %p", conn
);
1411 read_lock(&conn
->chan_lock
);
1412 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1413 struct sock
*sk
= chan
->sk
;
1414 if (sk
->sk_type
!= SOCK_RAW
)
1417 /* Don't send frame to the socket it came from */
1420 nskb
= skb_clone(skb
, GFP_ATOMIC
);
1424 if (sock_queue_rcv_skb(sk
, nskb
))
1427 read_unlock(&conn
->chan_lock
);
1430 /* ---- L2CAP signalling commands ---- */
1431 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
1432 u8 code
, u8 ident
, u16 dlen
, void *data
)
1434 struct sk_buff
*skb
, **frag
;
1435 struct l2cap_cmd_hdr
*cmd
;
1436 struct l2cap_hdr
*lh
;
1439 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1440 conn
, code
, ident
, dlen
);
1442 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
1443 count
= min_t(unsigned int, conn
->mtu
, len
);
1445 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
1449 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1450 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
1452 if (conn
->hcon
->type
== LE_LINK
)
1453 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
1455 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
1457 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
1460 cmd
->len
= cpu_to_le16(dlen
);
1463 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
1464 memcpy(skb_put(skb
, count
), data
, count
);
1470 /* Continuation fragments (no L2CAP header) */
1471 frag
= &skb_shinfo(skb
)->frag_list
;
1473 count
= min_t(unsigned int, conn
->mtu
, len
);
1475 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
1479 memcpy(skb_put(*frag
, count
), data
, count
);
1484 frag
= &(*frag
)->next
;
1494 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
1496 struct l2cap_conf_opt
*opt
= *ptr
;
1499 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
1507 *val
= *((u8
*) opt
->val
);
1511 *val
= get_unaligned_le16(opt
->val
);
1515 *val
= get_unaligned_le32(opt
->val
);
1519 *val
= (unsigned long) opt
->val
;
1523 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
1527 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
1529 struct l2cap_conf_opt
*opt
= *ptr
;
1531 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
1538 *((u8
*) opt
->val
) = val
;
1542 put_unaligned_le16(val
, opt
->val
);
1546 put_unaligned_le32(val
, opt
->val
);
1550 memcpy(opt
->val
, (void *) val
, len
);
1554 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
/* ERTM ack timer: send a pending acknowledgement for received I-frames.
 * NOTE(review): the socket lock/unlock around the send is inferred — the
 * extraction dropped those lines; confirm against the original file. */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
1566 static inline void l2cap_ertm_init(struct sock
*sk
)
1568 l2cap_pi(sk
)->expected_ack_seq
= 0;
1569 l2cap_pi(sk
)->unacked_frames
= 0;
1570 l2cap_pi(sk
)->buffer_seq
= 0;
1571 l2cap_pi(sk
)->num_acked
= 0;
1572 l2cap_pi(sk
)->frames_sent
= 0;
1574 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
1575 l2cap_retrans_timeout
, (unsigned long) sk
);
1576 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
1577 l2cap_monitor_timeout
, (unsigned long) sk
);
1578 setup_timer(&l2cap_pi(sk
)->ack_timer
,
1579 l2cap_ack_timeout
, (unsigned long) sk
);
1581 __skb_queue_head_init(SREJ_QUEUE(sk
));
1582 __skb_queue_head_init(BUSY_QUEUE(sk
));
1584 INIT_WORK(&l2cap_pi(sk
)->busy_work
, l2cap_busy_work
);
1586 sk
->sk_backlog_rcv
= l2cap_ertm_data_rcv
;
1589 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
1592 case L2CAP_MODE_STREAMING
:
1593 case L2CAP_MODE_ERTM
:
1594 if (l2cap_mode_supported(mode
, remote_feat_mask
))
1598 return L2CAP_MODE_BASIC
;
1602 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
1604 struct sock
*sk
= chan
->sk
;
1605 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1606 struct l2cap_conf_req
*req
= data
;
1607 struct l2cap_conf_rfc rfc
= { .mode
= pi
->mode
};
1608 void *ptr
= req
->data
;
1610 BT_DBG("sk %p", sk
);
1612 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
1616 case L2CAP_MODE_STREAMING
:
1617 case L2CAP_MODE_ERTM
:
1618 if (pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)
1623 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
1628 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
1629 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
1632 case L2CAP_MODE_BASIC
:
1633 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
1634 !(pi
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
1637 rfc
.mode
= L2CAP_MODE_BASIC
;
1639 rfc
.max_transmit
= 0;
1640 rfc
.retrans_timeout
= 0;
1641 rfc
.monitor_timeout
= 0;
1642 rfc
.max_pdu_size
= 0;
1644 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1645 (unsigned long) &rfc
);
1648 case L2CAP_MODE_ERTM
:
1649 rfc
.mode
= L2CAP_MODE_ERTM
;
1650 rfc
.txwin_size
= pi
->tx_win
;
1651 rfc
.max_transmit
= pi
->max_tx
;
1652 rfc
.retrans_timeout
= 0;
1653 rfc
.monitor_timeout
= 0;
1654 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
1655 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
1656 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
1658 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1659 (unsigned long) &rfc
);
1661 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
1664 if (pi
->fcs
== L2CAP_FCS_NONE
||
1665 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
1666 pi
->fcs
= L2CAP_FCS_NONE
;
1667 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
1671 case L2CAP_MODE_STREAMING
:
1672 rfc
.mode
= L2CAP_MODE_STREAMING
;
1674 rfc
.max_transmit
= 0;
1675 rfc
.retrans_timeout
= 0;
1676 rfc
.monitor_timeout
= 0;
1677 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
1678 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
1679 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
1681 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1682 (unsigned long) &rfc
);
1684 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
1687 if (pi
->fcs
== L2CAP_FCS_NONE
||
1688 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
1689 pi
->fcs
= L2CAP_FCS_NONE
;
1690 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
1695 req
->dcid
= cpu_to_le16(pi
->dcid
);
1696 req
->flags
= cpu_to_le16(0);
1701 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
1703 struct l2cap_pinfo
*pi
= l2cap_pi(chan
->sk
);
1704 struct l2cap_conf_rsp
*rsp
= data
;
1705 void *ptr
= rsp
->data
;
1706 void *req
= chan
->conf_req
;
1707 int len
= chan
->conf_len
;
1708 int type
, hint
, olen
;
1710 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
1711 u16 mtu
= L2CAP_DEFAULT_MTU
;
1712 u16 result
= L2CAP_CONF_SUCCESS
;
1714 BT_DBG("chan %p", chan
);
1716 while (len
>= L2CAP_CONF_OPT_SIZE
) {
1717 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
1719 hint
= type
& L2CAP_CONF_HINT
;
1720 type
&= L2CAP_CONF_MASK
;
1723 case L2CAP_CONF_MTU
:
1727 case L2CAP_CONF_FLUSH_TO
:
1731 case L2CAP_CONF_QOS
:
1734 case L2CAP_CONF_RFC
:
1735 if (olen
== sizeof(rfc
))
1736 memcpy(&rfc
, (void *) val
, olen
);
1739 case L2CAP_CONF_FCS
:
1740 if (val
== L2CAP_FCS_NONE
)
1741 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
1749 result
= L2CAP_CONF_UNKNOWN
;
1750 *((u8
*) ptr
++) = type
;
1755 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
1759 case L2CAP_MODE_STREAMING
:
1760 case L2CAP_MODE_ERTM
:
1761 if (!(pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)) {
1762 pi
->mode
= l2cap_select_mode(rfc
.mode
,
1763 pi
->conn
->feat_mask
);
1767 if (pi
->mode
!= rfc
.mode
)
1768 return -ECONNREFUSED
;
1774 if (pi
->mode
!= rfc
.mode
) {
1775 result
= L2CAP_CONF_UNACCEPT
;
1776 rfc
.mode
= pi
->mode
;
1778 if (chan
->num_conf_rsp
== 1)
1779 return -ECONNREFUSED
;
1781 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1782 sizeof(rfc
), (unsigned long) &rfc
);
1786 if (result
== L2CAP_CONF_SUCCESS
) {
1787 /* Configure output options and let the other side know
1788 * which ones we don't like. */
1790 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
1791 result
= L2CAP_CONF_UNACCEPT
;
1794 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
1796 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
1799 case L2CAP_MODE_BASIC
:
1800 pi
->fcs
= L2CAP_FCS_NONE
;
1801 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
1804 case L2CAP_MODE_ERTM
:
1805 pi
->remote_tx_win
= rfc
.txwin_size
;
1806 pi
->remote_max_tx
= rfc
.max_transmit
;
1808 if (le16_to_cpu(rfc
.max_pdu_size
) > pi
->conn
->mtu
- 10)
1809 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
1811 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
1813 rfc
.retrans_timeout
=
1814 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
1815 rfc
.monitor_timeout
=
1816 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
1818 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
1820 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1821 sizeof(rfc
), (unsigned long) &rfc
);
1825 case L2CAP_MODE_STREAMING
:
1826 if (le16_to_cpu(rfc
.max_pdu_size
) > pi
->conn
->mtu
- 10)
1827 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
1829 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
1831 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
1833 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1834 sizeof(rfc
), (unsigned long) &rfc
);
1839 result
= L2CAP_CONF_UNACCEPT
;
1841 memset(&rfc
, 0, sizeof(rfc
));
1842 rfc
.mode
= pi
->mode
;
1845 if (result
== L2CAP_CONF_SUCCESS
)
1846 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
1848 rsp
->scid
= cpu_to_le16(pi
->dcid
);
1849 rsp
->result
= cpu_to_le16(result
);
1850 rsp
->flags
= cpu_to_le16(0x0000);
1855 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
1857 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1858 struct l2cap_conf_req
*req
= data
;
1859 void *ptr
= req
->data
;
1862 struct l2cap_conf_rfc rfc
;
1864 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
1866 while (len
>= L2CAP_CONF_OPT_SIZE
) {
1867 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
1870 case L2CAP_CONF_MTU
:
1871 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
1872 *result
= L2CAP_CONF_UNACCEPT
;
1873 pi
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
1876 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
1879 case L2CAP_CONF_FLUSH_TO
:
1881 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
1885 case L2CAP_CONF_RFC
:
1886 if (olen
== sizeof(rfc
))
1887 memcpy(&rfc
, (void *)val
, olen
);
1889 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
1890 rfc
.mode
!= pi
->mode
)
1891 return -ECONNREFUSED
;
1895 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
1896 sizeof(rfc
), (unsigned long) &rfc
);
1901 if (pi
->mode
== L2CAP_MODE_BASIC
&& pi
->mode
!= rfc
.mode
)
1902 return -ECONNREFUSED
;
1904 pi
->mode
= rfc
.mode
;
1906 if (*result
== L2CAP_CONF_SUCCESS
) {
1908 case L2CAP_MODE_ERTM
:
1909 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
1910 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
1911 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
1913 case L2CAP_MODE_STREAMING
:
1914 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
1918 req
->dcid
= cpu_to_le16(pi
->dcid
);
1919 req
->flags
= cpu_to_le16(0x0000);
1924 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
1926 struct l2cap_conf_rsp
*rsp
= data
;
1927 void *ptr
= rsp
->data
;
1929 BT_DBG("sk %p", sk
);
1931 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1932 rsp
->result
= cpu_to_le16(result
);
1933 rsp
->flags
= cpu_to_le16(flags
);
1938 void __l2cap_connect_rsp_defer(struct sock
*sk
)
1940 struct l2cap_conn_rsp rsp
;
1941 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1942 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1945 sk
->sk_state
= BT_CONFIG
;
1947 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1948 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1949 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1950 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1951 l2cap_send_cmd(conn
, chan
->ident
,
1952 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1954 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)
1957 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
1958 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1959 l2cap_build_conf_req(chan
, buf
), buf
);
1960 chan
->num_conf_req
++;
1963 static void l2cap_conf_rfc_get(struct sock
*sk
, void *rsp
, int len
)
1965 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1968 struct l2cap_conf_rfc rfc
;
1970 BT_DBG("sk %p, rsp %p, len %d", sk
, rsp
, len
);
1972 if ((pi
->mode
!= L2CAP_MODE_ERTM
) && (pi
->mode
!= L2CAP_MODE_STREAMING
))
1975 while (len
>= L2CAP_CONF_OPT_SIZE
) {
1976 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
1979 case L2CAP_CONF_RFC
:
1980 if (olen
== sizeof(rfc
))
1981 memcpy(&rfc
, (void *)val
, olen
);
1988 case L2CAP_MODE_ERTM
:
1989 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
1990 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
1991 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
1993 case L2CAP_MODE_STREAMING
:
1994 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
1998 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2000 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2002 if (rej
->reason
!= 0x0000)
2005 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2006 cmd
->ident
== conn
->info_ident
) {
2007 del_timer(&conn
->info_timer
);
2009 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2010 conn
->info_ident
= 0;
2012 l2cap_conn_start(conn
);
2018 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2020 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2021 struct l2cap_conn_rsp rsp
;
2022 struct l2cap_chan
*chan
= NULL
;
2023 struct sock
*parent
, *sk
= NULL
;
2024 int result
, status
= L2CAP_CS_NO_INFO
;
2026 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2027 __le16 psm
= req
->psm
;
2029 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2031 /* Check if we have socket listening on psm */
2032 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2034 result
= L2CAP_CR_BAD_PSM
;
2038 bh_lock_sock(parent
);
2040 /* Check if the ACL is secure enough (if not SDP) */
2041 if (psm
!= cpu_to_le16(0x0001) &&
2042 !hci_conn_check_link_mode(conn
->hcon
)) {
2043 conn
->disc_reason
= 0x05;
2044 result
= L2CAP_CR_SEC_BLOCK
;
2048 result
= L2CAP_CR_NO_MEM
;
2050 /* Check for backlog size */
2051 if (sk_acceptq_is_full(parent
)) {
2052 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2056 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2060 chan
= l2cap_chan_alloc(sk
);
2062 l2cap_sock_kill(sk
);
2066 write_lock_bh(&conn
->chan_lock
);
2068 /* Check if we already have channel with that dcid */
2069 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
2070 write_unlock_bh(&conn
->chan_lock
);
2071 sock_set_flag(sk
, SOCK_ZAPPED
);
2072 l2cap_sock_kill(sk
);
2076 hci_conn_hold(conn
->hcon
);
2078 l2cap_sock_init(sk
, parent
);
2079 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2080 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2081 l2cap_pi(sk
)->psm
= psm
;
2082 l2cap_pi(sk
)->dcid
= scid
;
2084 bt_accept_enqueue(parent
, sk
);
2086 __l2cap_chan_add(conn
, chan
);
2088 l2cap_pi(sk
)->chan
= chan
;
2090 dcid
= l2cap_pi(sk
)->scid
;
2092 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2094 chan
->ident
= cmd
->ident
;
2096 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2097 if (l2cap_check_security(sk
)) {
2098 if (bt_sk(sk
)->defer_setup
) {
2099 sk
->sk_state
= BT_CONNECT2
;
2100 result
= L2CAP_CR_PEND
;
2101 status
= L2CAP_CS_AUTHOR_PEND
;
2102 parent
->sk_data_ready(parent
, 0);
2104 sk
->sk_state
= BT_CONFIG
;
2105 result
= L2CAP_CR_SUCCESS
;
2106 status
= L2CAP_CS_NO_INFO
;
2109 sk
->sk_state
= BT_CONNECT2
;
2110 result
= L2CAP_CR_PEND
;
2111 status
= L2CAP_CS_AUTHEN_PEND
;
2114 sk
->sk_state
= BT_CONNECT2
;
2115 result
= L2CAP_CR_PEND
;
2116 status
= L2CAP_CS_NO_INFO
;
2119 write_unlock_bh(&conn
->chan_lock
);
2122 bh_unlock_sock(parent
);
2125 rsp
.scid
= cpu_to_le16(scid
);
2126 rsp
.dcid
= cpu_to_le16(dcid
);
2127 rsp
.result
= cpu_to_le16(result
);
2128 rsp
.status
= cpu_to_le16(status
);
2129 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2131 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2132 struct l2cap_info_req info
;
2133 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2135 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2136 conn
->info_ident
= l2cap_get_ident(conn
);
2138 mod_timer(&conn
->info_timer
, jiffies
+
2139 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2141 l2cap_send_cmd(conn
, conn
->info_ident
,
2142 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2145 if (chan
&& !(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
) &&
2146 result
== L2CAP_CR_SUCCESS
) {
2148 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2149 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2150 l2cap_build_conf_req(chan
, buf
), buf
);
2151 chan
->num_conf_req
++;
2157 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2159 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2160 u16 scid
, dcid
, result
, status
;
2161 struct l2cap_chan
*chan
;
2165 scid
= __le16_to_cpu(rsp
->scid
);
2166 dcid
= __le16_to_cpu(rsp
->dcid
);
2167 result
= __le16_to_cpu(rsp
->result
);
2168 status
= __le16_to_cpu(rsp
->status
);
2170 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2173 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2177 chan
= l2cap_get_chan_by_ident(conn
, cmd
->ident
);
2185 case L2CAP_CR_SUCCESS
:
2186 sk
->sk_state
= BT_CONFIG
;
2188 l2cap_pi(sk
)->dcid
= dcid
;
2189 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
2191 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)
2194 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2196 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2197 l2cap_build_conf_req(chan
, req
), req
);
2198 chan
->num_conf_req
++;
2202 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
2206 /* don't delete l2cap channel if sk is owned by user */
2207 if (sock_owned_by_user(sk
)) {
2208 sk
->sk_state
= BT_DISCONN
;
2209 l2cap_sock_clear_timer(sk
);
2210 l2cap_sock_set_timer(sk
, HZ
/ 5);
2214 l2cap_chan_del(chan
, ECONNREFUSED
);
2222 static inline void set_default_fcs(struct l2cap_pinfo
*pi
)
2224 /* FCS is enabled only in ERTM or streaming mode, if one or both
2227 if (pi
->mode
!= L2CAP_MODE_ERTM
&& pi
->mode
!= L2CAP_MODE_STREAMING
)
2228 pi
->fcs
= L2CAP_FCS_NONE
;
2229 else if (!(pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
))
2230 pi
->fcs
= L2CAP_FCS_CRC16
;
2233 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2235 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2238 struct l2cap_chan
*chan
;
2242 dcid
= __le16_to_cpu(req
->dcid
);
2243 flags
= __le16_to_cpu(req
->flags
);
2245 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2247 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2253 if (sk
->sk_state
!= BT_CONFIG
) {
2254 struct l2cap_cmd_rej rej
;
2256 rej
.reason
= cpu_to_le16(0x0002);
2257 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
2262 /* Reject if config buffer is too small. */
2263 len
= cmd_len
- sizeof(*req
);
2264 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
2265 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2266 l2cap_build_conf_rsp(sk
, rsp
,
2267 L2CAP_CONF_REJECT
, flags
), rsp
);
2272 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
2273 chan
->conf_len
+= len
;
2275 if (flags
& 0x0001) {
2276 /* Incomplete config. Send empty response. */
2277 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2278 l2cap_build_conf_rsp(sk
, rsp
,
2279 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2283 /* Complete config. */
2284 len
= l2cap_parse_conf_req(chan
, rsp
);
2286 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
2290 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2291 chan
->num_conf_rsp
++;
2293 /* Reset config buffer. */
2296 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
2299 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
2300 set_default_fcs(l2cap_pi(sk
));
2302 sk
->sk_state
= BT_CONNECTED
;
2304 l2cap_pi(sk
)->next_tx_seq
= 0;
2305 l2cap_pi(sk
)->expected_tx_seq
= 0;
2306 __skb_queue_head_init(TX_QUEUE(sk
));
2307 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2308 l2cap_ertm_init(sk
);
2310 l2cap_chan_ready(sk
);
2314 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
2316 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2317 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2318 l2cap_build_conf_req(chan
, buf
), buf
);
2319 chan
->num_conf_req
++;
2327 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2329 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2330 u16 scid
, flags
, result
;
2331 struct l2cap_chan
*chan
;
2333 int len
= cmd
->len
- sizeof(*rsp
);
2335 scid
= __le16_to_cpu(rsp
->scid
);
2336 flags
= __le16_to_cpu(rsp
->flags
);
2337 result
= __le16_to_cpu(rsp
->result
);
2339 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2340 scid
, flags
, result
);
2342 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2349 case L2CAP_CONF_SUCCESS
:
2350 l2cap_conf_rfc_get(sk
, rsp
->data
, len
);
2353 case L2CAP_CONF_UNACCEPT
:
2354 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2357 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
2358 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
2362 /* throw out any old stored conf requests */
2363 result
= L2CAP_CONF_SUCCESS
;
2364 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
2367 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
2371 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2372 L2CAP_CONF_REQ
, len
, req
);
2373 chan
->num_conf_req
++;
2374 if (result
!= L2CAP_CONF_SUCCESS
)
2380 sk
->sk_err
= ECONNRESET
;
2381 l2cap_sock_set_timer(sk
, HZ
* 5);
2382 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
2389 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
2391 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
2392 set_default_fcs(l2cap_pi(sk
));
2394 sk
->sk_state
= BT_CONNECTED
;
2395 l2cap_pi(sk
)->next_tx_seq
= 0;
2396 l2cap_pi(sk
)->expected_tx_seq
= 0;
2397 __skb_queue_head_init(TX_QUEUE(sk
));
2398 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2399 l2cap_ertm_init(sk
);
2401 l2cap_chan_ready(sk
);
2409 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2411 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
2412 struct l2cap_disconn_rsp rsp
;
2414 struct l2cap_chan
*chan
;
2417 scid
= __le16_to_cpu(req
->scid
);
2418 dcid
= __le16_to_cpu(req
->dcid
);
2420 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
2422 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2428 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
2429 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2430 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
2432 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2434 /* don't delete l2cap channel if sk is owned by user */
2435 if (sock_owned_by_user(sk
)) {
2436 sk
->sk_state
= BT_DISCONN
;
2437 l2cap_sock_clear_timer(sk
);
2438 l2cap_sock_set_timer(sk
, HZ
/ 5);
2443 l2cap_chan_del(chan
, ECONNRESET
);
2446 l2cap_sock_kill(sk
);
2450 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2452 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
2454 struct l2cap_chan
*chan
;
2457 scid
= __le16_to_cpu(rsp
->scid
);
2458 dcid
= __le16_to_cpu(rsp
->dcid
);
2460 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
2462 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2468 /* don't delete l2cap channel if sk is owned by user */
2469 if (sock_owned_by_user(sk
)) {
2470 sk
->sk_state
= BT_DISCONN
;
2471 l2cap_sock_clear_timer(sk
);
2472 l2cap_sock_set_timer(sk
, HZ
/ 5);
2477 l2cap_chan_del(chan
, 0);
2480 l2cap_sock_kill(sk
);
2484 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2486 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
2489 type
= __le16_to_cpu(req
->type
);
2491 BT_DBG("type 0x%4.4x", type
);
2493 if (type
== L2CAP_IT_FEAT_MASK
) {
2495 u32 feat_mask
= l2cap_feat_mask
;
2496 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2497 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2498 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2500 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
2502 put_unaligned_le32(feat_mask
, rsp
->data
);
2503 l2cap_send_cmd(conn
, cmd
->ident
,
2504 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2505 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2507 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2508 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2509 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2510 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
2511 l2cap_send_cmd(conn
, cmd
->ident
,
2512 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2514 struct l2cap_info_rsp rsp
;
2515 rsp
.type
= cpu_to_le16(type
);
2516 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
2517 l2cap_send_cmd(conn
, cmd
->ident
,
2518 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
2524 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2526 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
2529 type
= __le16_to_cpu(rsp
->type
);
2530 result
= __le16_to_cpu(rsp
->result
);
2532 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
2534 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2535 if (cmd
->ident
!= conn
->info_ident
||
2536 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
2539 del_timer(&conn
->info_timer
);
2541 if (result
!= L2CAP_IR_SUCCESS
) {
2542 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2543 conn
->info_ident
= 0;
2545 l2cap_conn_start(conn
);
2550 if (type
== L2CAP_IT_FEAT_MASK
) {
2551 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
2553 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
2554 struct l2cap_info_req req
;
2555 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2557 conn
->info_ident
= l2cap_get_ident(conn
);
2559 l2cap_send_cmd(conn
, conn
->info_ident
,
2560 L2CAP_INFO_REQ
, sizeof(req
), &req
);
2562 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2563 conn
->info_ident
= 0;
2565 l2cap_conn_start(conn
);
2567 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2568 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2569 conn
->info_ident
= 0;
2571 l2cap_conn_start(conn
);
2577 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
2582 if (min
> max
|| min
< 6 || max
> 3200)
2585 if (to_multiplier
< 10 || to_multiplier
> 3200)
2588 if (max
>= to_multiplier
* 8)
2591 max_latency
= (to_multiplier
* 8 / max
) - 1;
2592 if (latency
> 499 || latency
> max_latency
)
2598 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
2599 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2601 struct hci_conn
*hcon
= conn
->hcon
;
2602 struct l2cap_conn_param_update_req
*req
;
2603 struct l2cap_conn_param_update_rsp rsp
;
2604 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
2607 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
2610 cmd_len
= __le16_to_cpu(cmd
->len
);
2611 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
2614 req
= (struct l2cap_conn_param_update_req
*) data
;
2615 min
= __le16_to_cpu(req
->min
);
2616 max
= __le16_to_cpu(req
->max
);
2617 latency
= __le16_to_cpu(req
->latency
);
2618 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
2620 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2621 min
, max
, latency
, to_multiplier
);
2623 memset(&rsp
, 0, sizeof(rsp
));
2625 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
2627 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
2629 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
2631 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
2635 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
2640 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
2641 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2645 switch (cmd
->code
) {
2646 case L2CAP_COMMAND_REJ
:
2647 l2cap_command_rej(conn
, cmd
, data
);
2650 case L2CAP_CONN_REQ
:
2651 err
= l2cap_connect_req(conn
, cmd
, data
);
2654 case L2CAP_CONN_RSP
:
2655 err
= l2cap_connect_rsp(conn
, cmd
, data
);
2658 case L2CAP_CONF_REQ
:
2659 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
2662 case L2CAP_CONF_RSP
:
2663 err
= l2cap_config_rsp(conn
, cmd
, data
);
2666 case L2CAP_DISCONN_REQ
:
2667 err
= l2cap_disconnect_req(conn
, cmd
, data
);
2670 case L2CAP_DISCONN_RSP
:
2671 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
2674 case L2CAP_ECHO_REQ
:
2675 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
2678 case L2CAP_ECHO_RSP
:
2681 case L2CAP_INFO_REQ
:
2682 err
= l2cap_information_req(conn
, cmd
, data
);
2685 case L2CAP_INFO_RSP
:
2686 err
= l2cap_information_rsp(conn
, cmd
, data
);
2690 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
2698 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
2699 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2701 switch (cmd
->code
) {
2702 case L2CAP_COMMAND_REJ
:
2705 case L2CAP_CONN_PARAM_UPDATE_REQ
:
2706 return l2cap_conn_param_update_req(conn
, cmd
, data
);
2708 case L2CAP_CONN_PARAM_UPDATE_RSP
:
2712 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
2717 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
2718 struct sk_buff
*skb
)
2720 u8
*data
= skb
->data
;
2722 struct l2cap_cmd_hdr cmd
;
2725 l2cap_raw_recv(conn
, skb
);
2727 while (len
>= L2CAP_CMD_HDR_SIZE
) {
2729 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
2730 data
+= L2CAP_CMD_HDR_SIZE
;
2731 len
-= L2CAP_CMD_HDR_SIZE
;
2733 cmd_len
= le16_to_cpu(cmd
.len
);
2735 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
2737 if (cmd_len
> len
|| !cmd
.ident
) {
2738 BT_DBG("corrupted command");
2742 if (conn
->hcon
->type
== LE_LINK
)
2743 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
2745 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
2748 struct l2cap_cmd_rej rej
;
2750 BT_ERR("Wrong link type (%d)", err
);
2752 /* FIXME: Map err to a valid reason */
2753 rej
.reason
= cpu_to_le16(0);
2754 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
2764 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
2766 u16 our_fcs
, rcv_fcs
;
2767 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
2769 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
2770 skb_trim(skb
, skb
->len
- 2);
2771 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
2772 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
2774 if (our_fcs
!= rcv_fcs
)
2780 static inline void l2cap_send_i_or_rr_or_rnr(struct sock
*sk
)
2782 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2785 pi
->frames_sent
= 0;
2787 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
2789 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
2790 control
|= L2CAP_SUPER_RCV_NOT_READY
;
2791 l2cap_send_sframe(pi
, control
);
2792 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
2795 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)
2796 l2cap_retransmit_frames(sk
);
2798 l2cap_ertm_send(sk
);
2800 if (!(pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) &&
2801 pi
->frames_sent
== 0) {
2802 control
|= L2CAP_SUPER_RCV_READY
;
2803 l2cap_send_sframe(pi
, control
);
2807 static int l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
2809 struct sk_buff
*next_skb
;
2810 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2811 int tx_seq_offset
, next_tx_seq_offset
;
2813 bt_cb(skb
)->tx_seq
= tx_seq
;
2814 bt_cb(skb
)->sar
= sar
;
2816 next_skb
= skb_peek(SREJ_QUEUE(sk
));
2818 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
2822 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
2823 if (tx_seq_offset
< 0)
2824 tx_seq_offset
+= 64;
2827 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
2830 next_tx_seq_offset
= (bt_cb(next_skb
)->tx_seq
-
2831 pi
->buffer_seq
) % 64;
2832 if (next_tx_seq_offset
< 0)
2833 next_tx_seq_offset
+= 64;
2835 if (next_tx_seq_offset
> tx_seq_offset
) {
2836 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
2840 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
2843 } while ((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
2845 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
2850 static int l2cap_ertm_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
2852 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2853 struct sk_buff
*_skb
;
2856 switch (control
& L2CAP_CTRL_SAR
) {
2857 case L2CAP_SDU_UNSEGMENTED
:
2858 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
2861 err
= sock_queue_rcv_skb(sk
, skb
);
2867 case L2CAP_SDU_START
:
2868 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
2871 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
2873 if (pi
->sdu_len
> pi
->imtu
)
2876 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
2880 /* pull sdu_len bytes only after alloc, because of Local Busy
2881 * condition we have to be sure that this will be executed
2882 * only once, i.e., when alloc does not fail */
2885 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
2887 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
2888 pi
->partial_sdu_len
= skb
->len
;
2891 case L2CAP_SDU_CONTINUE
:
2892 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
2898 pi
->partial_sdu_len
+= skb
->len
;
2899 if (pi
->partial_sdu_len
> pi
->sdu_len
)
2902 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
2907 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
2913 if (!(pi
->conn_state
& L2CAP_CONN_SAR_RETRY
)) {
2914 pi
->partial_sdu_len
+= skb
->len
;
2916 if (pi
->partial_sdu_len
> pi
->imtu
)
2919 if (pi
->partial_sdu_len
!= pi
->sdu_len
)
2922 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
2925 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
2927 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
2931 err
= sock_queue_rcv_skb(sk
, _skb
);
2934 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
2938 pi
->conn_state
&= ~L2CAP_CONN_SAR_RETRY
;
2939 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
2953 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
2958 static int l2cap_try_push_rx_skb(struct sock
*sk
)
2960 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2961 struct sk_buff
*skb
;
2965 while ((skb
= skb_dequeue(BUSY_QUEUE(sk
)))) {
2966 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
2967 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
2969 skb_queue_head(BUSY_QUEUE(sk
), skb
);
2973 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
2976 if (!(pi
->conn_state
& L2CAP_CONN_RNR_SENT
))
2979 control
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
2980 control
|= L2CAP_SUPER_RCV_READY
| L2CAP_CTRL_POLL
;
2981 l2cap_send_sframe(pi
, control
);
2982 l2cap_pi(sk
)->retry_count
= 1;
2984 del_timer(&pi
->retrans_timer
);
2985 __mod_monitor_timer();
2987 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
2990 pi
->conn_state
&= ~L2CAP_CONN_LOCAL_BUSY
;
2991 pi
->conn_state
&= ~L2CAP_CONN_RNR_SENT
;
2993 BT_DBG("sk %p, Exit local busy", sk
);
2998 static void l2cap_busy_work(struct work_struct
*work
)
3000 DECLARE_WAITQUEUE(wait
, current
);
3001 struct l2cap_pinfo
*pi
=
3002 container_of(work
, struct l2cap_pinfo
, busy_work
);
3003 struct sock
*sk
= (struct sock
*)pi
;
3004 int n_tries
= 0, timeo
= HZ
/5, err
;
3005 struct sk_buff
*skb
;
3009 add_wait_queue(sk_sleep(sk
), &wait
);
3010 while ((skb
= skb_peek(BUSY_QUEUE(sk
)))) {
3011 set_current_state(TASK_INTERRUPTIBLE
);
3013 if (n_tries
++ > L2CAP_LOCAL_BUSY_TRIES
) {
3015 l2cap_send_disconn_req(pi
->conn
, sk
, EBUSY
);
3022 if (signal_pending(current
)) {
3023 err
= sock_intr_errno(timeo
);
3028 timeo
= schedule_timeout(timeo
);
3031 err
= sock_error(sk
);
3035 if (l2cap_try_push_rx_skb(sk
) == 0)
3039 set_current_state(TASK_RUNNING
);
3040 remove_wait_queue(sk_sleep(sk
), &wait
);
3045 static int l2cap_push_rx_skb(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3047 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3050 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3051 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3052 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3053 return l2cap_try_push_rx_skb(sk
);
3058 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3060 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3064 /* Busy Condition */
3065 BT_DBG("sk %p, Enter local busy", sk
);
3067 pi
->conn_state
|= L2CAP_CONN_LOCAL_BUSY
;
3068 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3069 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3071 sctrl
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3072 sctrl
|= L2CAP_SUPER_RCV_NOT_READY
;
3073 l2cap_send_sframe(pi
, sctrl
);
3075 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3077 del_timer(&pi
->ack_timer
);
3079 queue_work(_busy_wq
, &pi
->busy_work
);
3084 static int l2cap_streaming_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3086 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3087 struct sk_buff
*_skb
;
3091 * TODO: We have to notify the userland if some data is lost with the
3095 switch (control
& L2CAP_CTRL_SAR
) {
3096 case L2CAP_SDU_UNSEGMENTED
:
3097 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3102 err
= sock_queue_rcv_skb(sk
, skb
);
3108 case L2CAP_SDU_START
:
3109 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3114 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3117 if (pi
->sdu_len
> pi
->imtu
) {
3122 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3128 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3130 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3131 pi
->partial_sdu_len
= skb
->len
;
3135 case L2CAP_SDU_CONTINUE
:
3136 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3139 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3141 pi
->partial_sdu_len
+= skb
->len
;
3142 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3150 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3153 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3155 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3156 pi
->partial_sdu_len
+= skb
->len
;
3158 if (pi
->partial_sdu_len
> pi
->imtu
)
3161 if (pi
->partial_sdu_len
== pi
->sdu_len
) {
3162 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3163 err
= sock_queue_rcv_skb(sk
, _skb
);
3178 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3180 struct sk_buff
*skb
;
3183 while ((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
3184 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3187 skb
= skb_dequeue(SREJ_QUEUE(sk
));
3188 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3189 l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3190 l2cap_pi(sk
)->buffer_seq_srej
=
3191 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
3192 tx_seq
= (tx_seq
+ 1) % 64;
3196 static void l2cap_resend_srejframe(struct sock
*sk
, u8 tx_seq
)
3198 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3199 struct srej_list
*l
, *tmp
;
3202 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
3203 if (l
->tx_seq
== tx_seq
) {
3208 control
= L2CAP_SUPER_SELECT_REJECT
;
3209 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3210 l2cap_send_sframe(pi
, control
);
3212 list_add_tail(&l
->list
, SREJ_LIST(sk
));
3216 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3218 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3219 struct srej_list
*new;
3222 while (tx_seq
!= pi
->expected_tx_seq
) {
3223 control
= L2CAP_SUPER_SELECT_REJECT
;
3224 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3225 l2cap_send_sframe(pi
, control
);
3227 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3228 new->tx_seq
= pi
->expected_tx_seq
;
3229 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3230 list_add_tail(&new->list
, SREJ_LIST(sk
));
3232 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3235 static inline int l2cap_data_channel_iframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3237 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3238 u8 tx_seq
= __get_txseq(rx_control
);
3239 u8 req_seq
= __get_reqseq(rx_control
);
3240 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3241 int tx_seq_offset
, expected_tx_seq_offset
;
3242 int num_to_ack
= (pi
->tx_win
/6) + 1;
3245 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk
, skb
->len
, tx_seq
,
3248 if (L2CAP_CTRL_FINAL
& rx_control
&&
3249 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
3250 del_timer(&pi
->monitor_timer
);
3251 if (pi
->unacked_frames
> 0)
3252 __mod_retrans_timer();
3253 pi
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3256 pi
->expected_ack_seq
= req_seq
;
3257 l2cap_drop_acked_frames(sk
);
3259 if (tx_seq
== pi
->expected_tx_seq
)
3262 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3263 if (tx_seq_offset
< 0)
3264 tx_seq_offset
+= 64;
3266 /* invalid tx_seq */
3267 if (tx_seq_offset
>= pi
->tx_win
) {
3268 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3272 if (pi
->conn_state
== L2CAP_CONN_LOCAL_BUSY
)
3275 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3276 struct srej_list
*first
;
3278 first
= list_first_entry(SREJ_LIST(sk
),
3279 struct srej_list
, list
);
3280 if (tx_seq
== first
->tx_seq
) {
3281 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3282 l2cap_check_srej_gap(sk
, tx_seq
);
3284 list_del(&first
->list
);
3287 if (list_empty(SREJ_LIST(sk
))) {
3288 pi
->buffer_seq
= pi
->buffer_seq_srej
;
3289 pi
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3291 BT_DBG("sk %p, Exit SREJ_SENT", sk
);
3294 struct srej_list
*l
;
3296 /* duplicated tx_seq */
3297 if (l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
) < 0)
3300 list_for_each_entry(l
, SREJ_LIST(sk
), list
) {
3301 if (l
->tx_seq
== tx_seq
) {
3302 l2cap_resend_srejframe(sk
, tx_seq
);
3306 l2cap_send_srejframe(sk
, tx_seq
);
3309 expected_tx_seq_offset
=
3310 (pi
->expected_tx_seq
- pi
->buffer_seq
) % 64;
3311 if (expected_tx_seq_offset
< 0)
3312 expected_tx_seq_offset
+= 64;
3314 /* duplicated tx_seq */
3315 if (tx_seq_offset
< expected_tx_seq_offset
)
3318 pi
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
3320 BT_DBG("sk %p, Enter SREJ", sk
);
3322 INIT_LIST_HEAD(SREJ_LIST(sk
));
3323 pi
->buffer_seq_srej
= pi
->buffer_seq
;
3325 __skb_queue_head_init(SREJ_QUEUE(sk
));
3326 __skb_queue_head_init(BUSY_QUEUE(sk
));
3327 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3329 pi
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
3331 l2cap_send_srejframe(sk
, tx_seq
);
3333 del_timer(&pi
->ack_timer
);
3338 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3340 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3341 bt_cb(skb
)->tx_seq
= tx_seq
;
3342 bt_cb(skb
)->sar
= sar
;
3343 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3347 err
= l2cap_push_rx_skb(sk
, skb
, rx_control
);
3351 if (rx_control
& L2CAP_CTRL_FINAL
) {
3352 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3353 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3355 l2cap_retransmit_frames(sk
);
3360 pi
->num_acked
= (pi
->num_acked
+ 1) % num_to_ack
;
3361 if (pi
->num_acked
== num_to_ack
- 1)
3371 static inline void l2cap_data_channel_rrframe(struct sock
*sk
, u16 rx_control
)
3373 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3375 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, __get_reqseq(rx_control
),
3378 pi
->expected_ack_seq
= __get_reqseq(rx_control
);
3379 l2cap_drop_acked_frames(sk
);
3381 if (rx_control
& L2CAP_CTRL_POLL
) {
3382 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3383 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3384 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3385 (pi
->unacked_frames
> 0))
3386 __mod_retrans_timer();
3388 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3389 l2cap_send_srejtail(sk
);
3391 l2cap_send_i_or_rr_or_rnr(sk
);
3394 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3395 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3397 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3398 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3400 l2cap_retransmit_frames(sk
);
3403 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3404 (pi
->unacked_frames
> 0))
3405 __mod_retrans_timer();
3407 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3408 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
)
3411 l2cap_ertm_send(sk
);
3415 static inline void l2cap_data_channel_rejframe(struct sock
*sk
, u16 rx_control
)
3417 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3418 u8 tx_seq
= __get_reqseq(rx_control
);
3420 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
3422 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3424 pi
->expected_ack_seq
= tx_seq
;
3425 l2cap_drop_acked_frames(sk
);
3427 if (rx_control
& L2CAP_CTRL_FINAL
) {
3428 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3429 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3431 l2cap_retransmit_frames(sk
);
3433 l2cap_retransmit_frames(sk
);
3435 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
3436 pi
->conn_state
|= L2CAP_CONN_REJ_ACT
;
3439 static inline void l2cap_data_channel_srejframe(struct sock
*sk
, u16 rx_control
)
3441 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3442 u8 tx_seq
= __get_reqseq(rx_control
);
3444 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
3446 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3448 if (rx_control
& L2CAP_CTRL_POLL
) {
3449 pi
->expected_ack_seq
= tx_seq
;
3450 l2cap_drop_acked_frames(sk
);
3452 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3453 l2cap_retransmit_one_frame(sk
, tx_seq
);
3455 l2cap_ertm_send(sk
);
3457 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3458 pi
->srej_save_reqseq
= tx_seq
;
3459 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3461 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3462 if ((pi
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
3463 pi
->srej_save_reqseq
== tx_seq
)
3464 pi
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
3466 l2cap_retransmit_one_frame(sk
, tx_seq
);
3468 l2cap_retransmit_one_frame(sk
, tx_seq
);
3469 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
3470 pi
->srej_save_reqseq
= tx_seq
;
3471 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3476 static inline void l2cap_data_channel_rnrframe(struct sock
*sk
, u16 rx_control
)
3478 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3479 u8 tx_seq
= __get_reqseq(rx_control
);
3481 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
3483 pi
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
3484 pi
->expected_ack_seq
= tx_seq
;
3485 l2cap_drop_acked_frames(sk
);
3487 if (rx_control
& L2CAP_CTRL_POLL
)
3488 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3490 if (!(pi
->conn_state
& L2CAP_CONN_SREJ_SENT
)) {
3491 del_timer(&pi
->retrans_timer
);
3492 if (rx_control
& L2CAP_CTRL_POLL
)
3493 l2cap_send_rr_or_rnr(pi
, L2CAP_CTRL_FINAL
);
3497 if (rx_control
& L2CAP_CTRL_POLL
)
3498 l2cap_send_srejtail(sk
);
3500 l2cap_send_sframe(pi
, L2CAP_SUPER_RCV_READY
);
3503 static inline int l2cap_data_channel_sframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3505 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
3507 if (L2CAP_CTRL_FINAL
& rx_control
&&
3508 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
3509 del_timer(&l2cap_pi(sk
)->monitor_timer
);
3510 if (l2cap_pi(sk
)->unacked_frames
> 0)
3511 __mod_retrans_timer();
3512 l2cap_pi(sk
)->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3515 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
3516 case L2CAP_SUPER_RCV_READY
:
3517 l2cap_data_channel_rrframe(sk
, rx_control
);
3520 case L2CAP_SUPER_REJECT
:
3521 l2cap_data_channel_rejframe(sk
, rx_control
);
3524 case L2CAP_SUPER_SELECT_REJECT
:
3525 l2cap_data_channel_srejframe(sk
, rx_control
);
3528 case L2CAP_SUPER_RCV_NOT_READY
:
3529 l2cap_data_channel_rnrframe(sk
, rx_control
);
3537 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
)
3539 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3542 int len
, next_tx_seq_offset
, req_seq_offset
;
3544 control
= get_unaligned_le16(skb
->data
);
3549 * We can just drop the corrupted I-frame here.
3550 * Receiver will miss it and start proper recovery
3551 * procedures and ask retransmission.
3553 if (l2cap_check_fcs(pi
, skb
))
3556 if (__is_sar_start(control
) && __is_iframe(control
))
3559 if (pi
->fcs
== L2CAP_FCS_CRC16
)
3562 if (len
> pi
->mps
) {
3563 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3567 req_seq
= __get_reqseq(control
);
3568 req_seq_offset
= (req_seq
- pi
->expected_ack_seq
) % 64;
3569 if (req_seq_offset
< 0)
3570 req_seq_offset
+= 64;
3572 next_tx_seq_offset
=
3573 (pi
->next_tx_seq
- pi
->expected_ack_seq
) % 64;
3574 if (next_tx_seq_offset
< 0)
3575 next_tx_seq_offset
+= 64;
3577 /* check for invalid req-seq */
3578 if (req_seq_offset
> next_tx_seq_offset
) {
3579 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3583 if (__is_iframe(control
)) {
3585 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3589 l2cap_data_channel_iframe(sk
, control
, skb
);
3593 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3597 l2cap_data_channel_sframe(sk
, control
, skb
);
3607 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
3609 struct l2cap_chan
*chan
;
3611 struct l2cap_pinfo
*pi
;
3616 chan
= l2cap_get_chan_by_scid(conn
, cid
);
3618 BT_DBG("unknown cid 0x%4.4x", cid
);
3625 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3627 if (sk
->sk_state
!= BT_CONNECTED
)
3631 case L2CAP_MODE_BASIC
:
3632 /* If socket recv buffers overflows we drop data here
3633 * which is *bad* because L2CAP has to be reliable.
3634 * But we don't have any other choice. L2CAP doesn't
3635 * provide flow control mechanism. */
3637 if (pi
->imtu
< skb
->len
)
3640 if (!sock_queue_rcv_skb(sk
, skb
))
3644 case L2CAP_MODE_ERTM
:
3645 if (!sock_owned_by_user(sk
)) {
3646 l2cap_ertm_data_rcv(sk
, skb
);
3648 if (sk_add_backlog(sk
, skb
))
3654 case L2CAP_MODE_STREAMING
:
3655 control
= get_unaligned_le16(skb
->data
);
3659 if (l2cap_check_fcs(pi
, skb
))
3662 if (__is_sar_start(control
))
3665 if (pi
->fcs
== L2CAP_FCS_CRC16
)
3668 if (len
> pi
->mps
|| len
< 0 || __is_sframe(control
))
3671 tx_seq
= __get_txseq(control
);
3673 if (pi
->expected_tx_seq
== tx_seq
)
3674 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3676 pi
->expected_tx_seq
= (tx_seq
+ 1) % 64;
3678 l2cap_streaming_reassembly_sdu(sk
, skb
, control
);
3683 BT_DBG("sk %p: bad mode 0x%2.2x", sk
, pi
->mode
);
3697 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
3701 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
3707 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3709 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
3712 if (l2cap_pi(sk
)->imtu
< skb
->len
)
3715 if (!sock_queue_rcv_skb(sk
, skb
))
3727 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3729 struct l2cap_hdr
*lh
= (void *) skb
->data
;
3733 skb_pull(skb
, L2CAP_HDR_SIZE
);
3734 cid
= __le16_to_cpu(lh
->cid
);
3735 len
= __le16_to_cpu(lh
->len
);
3737 if (len
!= skb
->len
) {
3742 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
3745 case L2CAP_CID_LE_SIGNALING
:
3746 case L2CAP_CID_SIGNALING
:
3747 l2cap_sig_channel(conn
, skb
);
3750 case L2CAP_CID_CONN_LESS
:
3751 psm
= get_unaligned_le16(skb
->data
);
3753 l2cap_conless_channel(conn
, psm
, skb
);
3757 l2cap_data_channel(conn
, cid
, skb
);
3762 /* ---- L2CAP interface with lower layer (HCI) ---- */
3764 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
3766 int exact
= 0, lm1
= 0, lm2
= 0;
3767 register struct sock
*sk
;
3768 struct hlist_node
*node
;
3770 if (type
!= ACL_LINK
)
3773 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
3775 /* Find listening sockets and check their link_mode */
3776 read_lock(&l2cap_sk_list
.lock
);
3777 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
3778 if (sk
->sk_state
!= BT_LISTEN
)
3781 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
3782 lm1
|= HCI_LM_ACCEPT
;
3783 if (l2cap_pi(sk
)->role_switch
)
3784 lm1
|= HCI_LM_MASTER
;
3786 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
3787 lm2
|= HCI_LM_ACCEPT
;
3788 if (l2cap_pi(sk
)->role_switch
)
3789 lm2
|= HCI_LM_MASTER
;
3792 read_unlock(&l2cap_sk_list
.lock
);
3794 return exact
? lm1
: lm2
;
3797 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
3799 struct l2cap_conn
*conn
;
3801 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
3803 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
3807 conn
= l2cap_conn_add(hcon
, status
);
3809 l2cap_conn_ready(conn
);
3811 l2cap_conn_del(hcon
, bt_err(status
));
3816 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
3818 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3820 BT_DBG("hcon %p", hcon
);
3822 if (hcon
->type
!= ACL_LINK
|| !conn
)
3825 return conn
->disc_reason
;
3828 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
3830 BT_DBG("hcon %p reason %d", hcon
, reason
);
3832 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
3835 l2cap_conn_del(hcon
, bt_err(reason
));
3840 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
3842 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
)
3845 if (encrypt
== 0x00) {
3846 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
3847 l2cap_sock_clear_timer(sk
);
3848 l2cap_sock_set_timer(sk
, HZ
* 5);
3849 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
3850 __l2cap_sock_close(sk
, ECONNREFUSED
);
3852 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
3853 l2cap_sock_clear_timer(sk
);
3857 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
3859 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3860 struct l2cap_chan
*chan
;
3865 BT_DBG("conn %p", conn
);
3867 read_lock(&conn
->chan_lock
);
3869 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
3870 struct sock
*sk
= chan
->sk
;
3874 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
3879 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
3880 sk
->sk_state
== BT_CONFIG
)) {
3881 l2cap_check_encryption(sk
, encrypt
);
3886 if (sk
->sk_state
== BT_CONNECT
) {
3888 struct l2cap_conn_req req
;
3889 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3890 req
.psm
= l2cap_pi(sk
)->psm
;
3892 chan
->ident
= l2cap_get_ident(conn
);
3893 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
3895 l2cap_send_cmd(conn
, chan
->ident
,
3896 L2CAP_CONN_REQ
, sizeof(req
), &req
);
3898 l2cap_sock_clear_timer(sk
);
3899 l2cap_sock_set_timer(sk
, HZ
/ 10);
3901 } else if (sk
->sk_state
== BT_CONNECT2
) {
3902 struct l2cap_conn_rsp rsp
;
3906 sk
->sk_state
= BT_CONFIG
;
3907 result
= L2CAP_CR_SUCCESS
;
3909 sk
->sk_state
= BT_DISCONN
;
3910 l2cap_sock_set_timer(sk
, HZ
/ 10);
3911 result
= L2CAP_CR_SEC_BLOCK
;
3914 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3915 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3916 rsp
.result
= cpu_to_le16(result
);
3917 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3918 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
3925 read_unlock(&conn
->chan_lock
);
3930 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
3932 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
3935 conn
= l2cap_conn_add(hcon
, 0);
3940 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
3942 if (!(flags
& ACL_CONT
)) {
3943 struct l2cap_hdr
*hdr
;
3944 struct l2cap_chan
*chan
;
3949 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
3950 kfree_skb(conn
->rx_skb
);
3951 conn
->rx_skb
= NULL
;
3953 l2cap_conn_unreliable(conn
, ECOMM
);
3956 /* Start fragment always begin with Basic L2CAP header */
3957 if (skb
->len
< L2CAP_HDR_SIZE
) {
3958 BT_ERR("Frame is too short (len %d)", skb
->len
);
3959 l2cap_conn_unreliable(conn
, ECOMM
);
3963 hdr
= (struct l2cap_hdr
*) skb
->data
;
3964 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
3965 cid
= __le16_to_cpu(hdr
->cid
);
3967 if (len
== skb
->len
) {
3968 /* Complete frame received */
3969 l2cap_recv_frame(conn
, skb
);
3973 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
3975 if (skb
->len
> len
) {
3976 BT_ERR("Frame is too long (len %d, expected len %d)",
3978 l2cap_conn_unreliable(conn
, ECOMM
);
3982 chan
= l2cap_get_chan_by_scid(conn
, cid
);
3984 if (chan
&& chan
->sk
) {
3985 struct sock
*sk
= chan
->sk
;
3987 if (l2cap_pi(sk
)->imtu
< len
- L2CAP_HDR_SIZE
) {
3988 BT_ERR("Frame exceeding recv MTU (len %d, "
3990 l2cap_pi(sk
)->imtu
);
3992 l2cap_conn_unreliable(conn
, ECOMM
);
3998 /* Allocate skb for the complete frame (with header) */
3999 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4003 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4005 conn
->rx_len
= len
- skb
->len
;
4007 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4009 if (!conn
->rx_len
) {
4010 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4011 l2cap_conn_unreliable(conn
, ECOMM
);
4015 if (skb
->len
> conn
->rx_len
) {
4016 BT_ERR("Fragment is too long (len %d, expected %d)",
4017 skb
->len
, conn
->rx_len
);
4018 kfree_skb(conn
->rx_skb
);
4019 conn
->rx_skb
= NULL
;
4021 l2cap_conn_unreliable(conn
, ECOMM
);
4025 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4027 conn
->rx_len
-= skb
->len
;
4029 if (!conn
->rx_len
) {
4030 /* Complete frame received */
4031 l2cap_recv_frame(conn
, conn
->rx_skb
);
4032 conn
->rx_skb
= NULL
;
4041 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4044 struct hlist_node
*node
;
4046 read_lock_bh(&l2cap_sk_list
.lock
);
4048 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4049 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4051 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4052 batostr(&bt_sk(sk
)->src
),
4053 batostr(&bt_sk(sk
)->dst
),
4054 sk
->sk_state
, __le16_to_cpu(pi
->psm
),
4056 pi
->imtu
, pi
->omtu
, pi
->sec_level
,
4060 read_unlock_bh(&l2cap_sk_list
.lock
);
4065 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4067 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4070 static const struct file_operations l2cap_debugfs_fops
= {
4071 .open
= l2cap_debugfs_open
,
4073 .llseek
= seq_lseek
,
4074 .release
= single_release
,
4077 static struct dentry
*l2cap_debugfs
;
4079 static struct hci_proto l2cap_hci_proto
= {
4081 .id
= HCI_PROTO_L2CAP
,
4082 .connect_ind
= l2cap_connect_ind
,
4083 .connect_cfm
= l2cap_connect_cfm
,
4084 .disconn_ind
= l2cap_disconn_ind
,
4085 .disconn_cfm
= l2cap_disconn_cfm
,
4086 .security_cfm
= l2cap_security_cfm
,
4087 .recv_acldata
= l2cap_recv_acldata
4090 int __init
l2cap_init(void)
4094 err
= l2cap_init_sockets();
4098 _busy_wq
= create_singlethread_workqueue("l2cap");
4104 err
= hci_register_proto(&l2cap_hci_proto
);
4106 BT_ERR("L2CAP protocol registration failed");
4107 bt_sock_unregister(BTPROTO_L2CAP
);
4112 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4113 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4115 BT_ERR("Failed to create L2CAP debug file");
4121 destroy_workqueue(_busy_wq
);
4122 l2cap_cleanup_sockets();
4126 void l2cap_exit(void)
4128 debugfs_remove(l2cap_debugfs
);
4130 flush_workqueue(_busy_wq
);
4131 destroy_workqueue(_busy_wq
);
4133 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4134 BT_ERR("L2CAP protocol unregistration failed");
4136 l2cap_cleanup_sockets();
4139 module_param(disable_ertm
, bool, 0644);
4140 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");