2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
63 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
65 static LIST_HEAD(chan_list
);
66 static DEFINE_RWLOCK(chan_list_lock
);
68 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
69 u8 code
, u8 ident
, u16 dlen
, void *data
);
70 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
72 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
73 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
74 struct l2cap_chan
*chan
, int err
);
76 /* ---- L2CAP channels ---- */
78 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
80 struct l2cap_chan
*c
, *r
= NULL
;
84 list_for_each_entry_rcu(c
, &conn
->chan_l
, list
) {
95 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
97 struct l2cap_chan
*c
, *r
= NULL
;
101 list_for_each_entry_rcu(c
, &conn
->chan_l
, list
) {
102 if (c
->scid
== cid
) {
112 /* Find channel with given SCID.
113 * Returns locked socket */
114 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
116 struct l2cap_chan
*c
;
118 c
= __l2cap_get_chan_by_scid(conn
, cid
);
124 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
126 struct l2cap_chan
*c
, *r
= NULL
;
130 list_for_each_entry_rcu(c
, &conn
->chan_l
, list
) {
131 if (c
->ident
== ident
) {
141 static inline struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
143 struct l2cap_chan
*c
;
145 c
= __l2cap_get_chan_by_ident(conn
, ident
);
151 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
153 struct l2cap_chan
*c
;
155 list_for_each_entry(c
, &chan_list
, global_l
) {
156 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
162 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
166 write_lock(&chan_list_lock
);
168 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
181 for (p
= 0x1001; p
< 0x1100; p
+= 2)
182 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
183 chan
->psm
= cpu_to_le16(p
);
184 chan
->sport
= cpu_to_le16(p
);
191 write_unlock(&chan_list_lock
);
195 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
197 write_lock(&chan_list_lock
);
201 write_unlock(&chan_list_lock
);
206 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
208 u16 cid
= L2CAP_CID_DYN_START
;
210 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
211 if (!__l2cap_get_chan_by_scid(conn
, cid
))
218 static char *state_to_string(int state
)
222 return "BT_CONNECTED";
232 return "BT_CONNECT2";
241 return "invalid state";
244 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
246 BT_DBG("%p %s -> %s", chan
, state_to_string(chan
->state
),
247 state_to_string(state
));
250 chan
->ops
->state_change(chan
->data
, state
);
253 static void l2cap_chan_timeout(struct work_struct
*work
)
255 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
257 struct sock
*sk
= chan
->sk
;
260 BT_DBG("chan %p state %d", chan
, chan
->state
);
264 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
265 reason
= ECONNREFUSED
;
266 else if (chan
->state
== BT_CONNECT
&&
267 chan
->sec_level
!= BT_SECURITY_SDP
)
268 reason
= ECONNREFUSED
;
272 l2cap_chan_close(chan
, reason
);
276 chan
->ops
->close(chan
->data
);
277 l2cap_chan_put(chan
);
280 struct l2cap_chan
*l2cap_chan_create(struct sock
*sk
)
282 struct l2cap_chan
*chan
;
284 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
290 write_lock(&chan_list_lock
);
291 list_add(&chan
->global_l
, &chan_list
);
292 write_unlock(&chan_list_lock
);
294 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
296 chan
->state
= BT_OPEN
;
298 atomic_set(&chan
->refcnt
, 1);
300 BT_DBG("sk %p chan %p", sk
, chan
);
305 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
307 write_lock(&chan_list_lock
);
308 list_del(&chan
->global_l
);
309 write_unlock(&chan_list_lock
);
311 l2cap_chan_put(chan
);
314 static void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
316 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
317 chan
->psm
, chan
->dcid
);
319 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
323 switch (chan
->chan_type
) {
324 case L2CAP_CHAN_CONN_ORIENTED
:
325 if (conn
->hcon
->type
== LE_LINK
) {
327 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
328 chan
->scid
= L2CAP_CID_LE_DATA
;
329 chan
->dcid
= L2CAP_CID_LE_DATA
;
331 /* Alloc CID for connection-oriented socket */
332 chan
->scid
= l2cap_alloc_cid(conn
);
333 chan
->omtu
= L2CAP_DEFAULT_MTU
;
337 case L2CAP_CHAN_CONN_LESS
:
338 /* Connectionless socket */
339 chan
->scid
= L2CAP_CID_CONN_LESS
;
340 chan
->dcid
= L2CAP_CID_CONN_LESS
;
341 chan
->omtu
= L2CAP_DEFAULT_MTU
;
345 /* Raw socket can send/recv signalling messages only */
346 chan
->scid
= L2CAP_CID_SIGNALING
;
347 chan
->dcid
= L2CAP_CID_SIGNALING
;
348 chan
->omtu
= L2CAP_DEFAULT_MTU
;
351 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
352 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
353 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
354 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
355 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
356 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
358 l2cap_chan_hold(chan
);
360 list_add_rcu(&chan
->list
, &conn
->chan_l
);
364 * Must be called on the locked socket. */
365 static void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
367 struct sock
*sk
= chan
->sk
;
368 struct l2cap_conn
*conn
= chan
->conn
;
369 struct sock
*parent
= bt_sk(sk
)->parent
;
371 __clear_chan_timer(chan
);
373 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
376 /* Delete from channel list */
377 list_del_rcu(&chan
->list
);
380 l2cap_chan_put(chan
);
383 hci_conn_put(conn
->hcon
);
386 l2cap_state_change(chan
, BT_CLOSED
);
387 sock_set_flag(sk
, SOCK_ZAPPED
);
393 bt_accept_unlink(sk
);
394 parent
->sk_data_ready(parent
, 0);
396 sk
->sk_state_change(sk
);
398 if (!(test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
) &&
399 test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)))
402 skb_queue_purge(&chan
->tx_q
);
404 if (chan
->mode
== L2CAP_MODE_ERTM
) {
405 struct srej_list
*l
, *tmp
;
407 __clear_retrans_timer(chan
);
408 __clear_monitor_timer(chan
);
409 __clear_ack_timer(chan
);
411 skb_queue_purge(&chan
->srej_q
);
413 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
420 static void l2cap_chan_cleanup_listen(struct sock
*parent
)
424 BT_DBG("parent %p", parent
);
426 /* Close not yet accepted channels */
427 while ((sk
= bt_accept_dequeue(parent
, NULL
))) {
428 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
429 __clear_chan_timer(chan
);
431 l2cap_chan_close(chan
, ECONNRESET
);
433 chan
->ops
->close(chan
->data
);
437 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
439 struct l2cap_conn
*conn
= chan
->conn
;
440 struct sock
*sk
= chan
->sk
;
442 BT_DBG("chan %p state %d socket %p", chan
, chan
->state
, sk
->sk_socket
);
444 switch (chan
->state
) {
446 l2cap_chan_cleanup_listen(sk
);
448 l2cap_state_change(chan
, BT_CLOSED
);
449 sock_set_flag(sk
, SOCK_ZAPPED
);
454 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
455 conn
->hcon
->type
== ACL_LINK
) {
456 __clear_chan_timer(chan
);
457 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
458 l2cap_send_disconn_req(conn
, chan
, reason
);
460 l2cap_chan_del(chan
, reason
);
464 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
465 conn
->hcon
->type
== ACL_LINK
) {
466 struct l2cap_conn_rsp rsp
;
469 if (bt_sk(sk
)->defer_setup
)
470 result
= L2CAP_CR_SEC_BLOCK
;
472 result
= L2CAP_CR_BAD_PSM
;
473 l2cap_state_change(chan
, BT_DISCONN
);
475 rsp
.scid
= cpu_to_le16(chan
->dcid
);
476 rsp
.dcid
= cpu_to_le16(chan
->scid
);
477 rsp
.result
= cpu_to_le16(result
);
478 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
479 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
483 l2cap_chan_del(chan
, reason
);
488 l2cap_chan_del(chan
, reason
);
492 sock_set_flag(sk
, SOCK_ZAPPED
);
497 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
499 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
500 switch (chan
->sec_level
) {
501 case BT_SECURITY_HIGH
:
502 return HCI_AT_DEDICATED_BONDING_MITM
;
503 case BT_SECURITY_MEDIUM
:
504 return HCI_AT_DEDICATED_BONDING
;
506 return HCI_AT_NO_BONDING
;
508 } else if (chan
->psm
== cpu_to_le16(0x0001)) {
509 if (chan
->sec_level
== BT_SECURITY_LOW
)
510 chan
->sec_level
= BT_SECURITY_SDP
;
512 if (chan
->sec_level
== BT_SECURITY_HIGH
)
513 return HCI_AT_NO_BONDING_MITM
;
515 return HCI_AT_NO_BONDING
;
517 switch (chan
->sec_level
) {
518 case BT_SECURITY_HIGH
:
519 return HCI_AT_GENERAL_BONDING_MITM
;
520 case BT_SECURITY_MEDIUM
:
521 return HCI_AT_GENERAL_BONDING
;
523 return HCI_AT_NO_BONDING
;
528 /* Service level security */
529 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
531 struct l2cap_conn
*conn
= chan
->conn
;
534 auth_type
= l2cap_get_auth_type(chan
);
536 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
539 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
543 /* Get next available identificator.
544 * 1 - 128 are used by kernel.
545 * 129 - 199 are reserved.
546 * 200 - 254 are used by utilities like l2ping, etc.
549 spin_lock(&conn
->lock
);
551 if (++conn
->tx_ident
> 128)
556 spin_unlock(&conn
->lock
);
561 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
563 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
566 BT_DBG("code 0x%2.2x", code
);
571 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
572 flags
= ACL_START_NO_FLUSH
;
576 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
577 skb
->priority
= HCI_PRIO_MAX
;
579 hci_send_acl(conn
->hchan
, skb
, flags
);
582 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
584 struct hci_conn
*hcon
= chan
->conn
->hcon
;
587 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
590 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
591 lmp_no_flush_capable(hcon
->hdev
))
592 flags
= ACL_START_NO_FLUSH
;
596 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
597 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
600 static inline void l2cap_send_sframe(struct l2cap_chan
*chan
, u32 control
)
603 struct l2cap_hdr
*lh
;
604 struct l2cap_conn
*conn
= chan
->conn
;
607 if (chan
->state
!= BT_CONNECTED
)
610 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
611 hlen
= L2CAP_EXT_HDR_SIZE
;
613 hlen
= L2CAP_ENH_HDR_SIZE
;
615 if (chan
->fcs
== L2CAP_FCS_CRC16
)
616 hlen
+= L2CAP_FCS_SIZE
;
618 BT_DBG("chan %p, control 0x%8.8x", chan
, control
);
620 count
= min_t(unsigned int, conn
->mtu
, hlen
);
622 control
|= __set_sframe(chan
);
624 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
625 control
|= __set_ctrl_final(chan
);
627 if (test_and_clear_bit(CONN_SEND_PBIT
, &chan
->conn_state
))
628 control
|= __set_ctrl_poll(chan
);
630 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
634 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
635 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
636 lh
->cid
= cpu_to_le16(chan
->dcid
);
638 __put_control(chan
, control
, skb_put(skb
, __ctrl_size(chan
)));
640 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
641 u16 fcs
= crc16(0, (u8
*)lh
, count
- L2CAP_FCS_SIZE
);
642 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
645 skb
->priority
= HCI_PRIO_MAX
;
646 l2cap_do_send(chan
, skb
);
649 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, u32 control
)
651 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
652 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
653 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
655 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
657 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
659 l2cap_send_sframe(chan
, control
);
662 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
664 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
667 static void l2cap_do_start(struct l2cap_chan
*chan
)
669 struct l2cap_conn
*conn
= chan
->conn
;
671 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
672 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
675 if (l2cap_chan_check_security(chan
) &&
676 __l2cap_no_conn_pending(chan
)) {
677 struct l2cap_conn_req req
;
678 req
.scid
= cpu_to_le16(chan
->scid
);
681 chan
->ident
= l2cap_get_ident(conn
);
682 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
684 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
688 struct l2cap_info_req req
;
689 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
691 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
692 conn
->info_ident
= l2cap_get_ident(conn
);
694 schedule_delayed_work(&conn
->info_timer
,
695 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
697 l2cap_send_cmd(conn
, conn
->info_ident
,
698 L2CAP_INFO_REQ
, sizeof(req
), &req
);
702 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
704 u32 local_feat_mask
= l2cap_feat_mask
;
706 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
709 case L2CAP_MODE_ERTM
:
710 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
711 case L2CAP_MODE_STREAMING
:
712 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
718 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
721 struct l2cap_disconn_req req
;
728 if (chan
->mode
== L2CAP_MODE_ERTM
) {
729 __clear_retrans_timer(chan
);
730 __clear_monitor_timer(chan
);
731 __clear_ack_timer(chan
);
734 req
.dcid
= cpu_to_le16(chan
->dcid
);
735 req
.scid
= cpu_to_le16(chan
->scid
);
736 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
737 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
739 l2cap_state_change(chan
, BT_DISCONN
);
743 /* ---- L2CAP connections ---- */
744 static void l2cap_conn_start(struct l2cap_conn
*conn
)
746 struct l2cap_chan
*chan
;
748 BT_DBG("conn %p", conn
);
752 list_for_each_entry_rcu(chan
, &conn
->chan_l
, list
) {
753 struct sock
*sk
= chan
->sk
;
757 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
762 if (chan
->state
== BT_CONNECT
) {
763 struct l2cap_conn_req req
;
765 if (!l2cap_chan_check_security(chan
) ||
766 !__l2cap_no_conn_pending(chan
)) {
771 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
772 && test_bit(CONF_STATE2_DEVICE
,
773 &chan
->conf_state
)) {
774 /* l2cap_chan_close() calls list_del(chan)
775 * so release the lock */
776 l2cap_chan_close(chan
, ECONNRESET
);
781 req
.scid
= cpu_to_le16(chan
->scid
);
784 chan
->ident
= l2cap_get_ident(conn
);
785 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
787 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
790 } else if (chan
->state
== BT_CONNECT2
) {
791 struct l2cap_conn_rsp rsp
;
793 rsp
.scid
= cpu_to_le16(chan
->dcid
);
794 rsp
.dcid
= cpu_to_le16(chan
->scid
);
796 if (l2cap_chan_check_security(chan
)) {
797 if (bt_sk(sk
)->defer_setup
) {
798 struct sock
*parent
= bt_sk(sk
)->parent
;
799 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
800 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
802 parent
->sk_data_ready(parent
, 0);
805 l2cap_state_change(chan
, BT_CONFIG
);
806 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
807 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
810 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
811 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
814 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
817 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
818 rsp
.result
!= L2CAP_CR_SUCCESS
) {
823 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
824 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
825 l2cap_build_conf_req(chan
, buf
), buf
);
826 chan
->num_conf_req
++;
835 /* Find socket with cid and source bdaddr.
836 * Returns closest match, locked.
838 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, __le16 cid
, bdaddr_t
*src
)
840 struct l2cap_chan
*c
, *c1
= NULL
;
842 read_lock(&chan_list_lock
);
844 list_for_each_entry(c
, &chan_list
, global_l
) {
845 struct sock
*sk
= c
->sk
;
847 if (state
&& c
->state
!= state
)
850 if (c
->scid
== cid
) {
852 if (!bacmp(&bt_sk(sk
)->src
, src
)) {
853 read_unlock(&chan_list_lock
);
858 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
863 read_unlock(&chan_list_lock
);
868 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
870 struct sock
*parent
, *sk
;
871 struct l2cap_chan
*chan
, *pchan
;
875 /* Check if we have socket listening on cid */
876 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
885 /* Check for backlog size */
886 if (sk_acceptq_is_full(parent
)) {
887 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
891 chan
= pchan
->ops
->new_connection(pchan
->data
);
897 hci_conn_hold(conn
->hcon
);
899 bacpy(&bt_sk(sk
)->src
, conn
->src
);
900 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
902 bt_accept_enqueue(parent
, sk
);
904 l2cap_chan_add(conn
, chan
);
906 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
908 l2cap_state_change(chan
, BT_CONNECTED
);
909 parent
->sk_data_ready(parent
, 0);
912 release_sock(parent
);
915 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
917 struct sock
*sk
= chan
->sk
;
918 struct sock
*parent
= bt_sk(sk
)->parent
;
920 BT_DBG("sk %p, parent %p", sk
, parent
);
922 chan
->conf_state
= 0;
923 __clear_chan_timer(chan
);
925 l2cap_state_change(chan
, BT_CONNECTED
);
926 sk
->sk_state_change(sk
);
929 parent
->sk_data_ready(parent
, 0);
932 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
934 struct l2cap_chan
*chan
;
936 BT_DBG("conn %p", conn
);
938 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
939 l2cap_le_conn_ready(conn
);
941 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
942 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
946 list_for_each_entry_rcu(chan
, &conn
->chan_l
, list
) {
947 struct sock
*sk
= chan
->sk
;
951 if (conn
->hcon
->type
== LE_LINK
) {
952 if (smp_conn_security(conn
, chan
->sec_level
))
953 l2cap_chan_ready(chan
);
955 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
956 __clear_chan_timer(chan
);
957 l2cap_state_change(chan
, BT_CONNECTED
);
958 sk
->sk_state_change(sk
);
960 } else if (chan
->state
== BT_CONNECT
)
961 l2cap_do_start(chan
);
969 /* Notify sockets that we cannot guaranty reliability anymore */
970 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
972 struct l2cap_chan
*chan
;
974 BT_DBG("conn %p", conn
);
978 list_for_each_entry_rcu(chan
, &conn
->chan_l
, list
) {
979 struct sock
*sk
= chan
->sk
;
981 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
988 static void l2cap_info_timeout(struct work_struct
*work
)
990 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
993 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
994 conn
->info_ident
= 0;
996 l2cap_conn_start(conn
);
999 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1001 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1002 struct l2cap_chan
*chan
, *l
;
1008 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1010 kfree_skb(conn
->rx_skb
);
1013 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1016 l2cap_chan_del(chan
, err
);
1018 chan
->ops
->close(chan
->data
);
1021 hci_chan_del(conn
->hchan
);
1023 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1024 cancel_delayed_work_sync(&conn
->info_timer
);
1026 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1027 cancel_delayed_work_sync(&conn
->security_timer
);
1028 smp_chan_destroy(conn
);
1031 hcon
->l2cap_data
= NULL
;
1035 static void security_timeout(struct work_struct
*work
)
1037 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1038 security_timer
.work
);
1040 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1043 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1045 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1046 struct hci_chan
*hchan
;
1051 hchan
= hci_chan_create(hcon
);
1055 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
1057 hci_chan_del(hchan
);
1061 hcon
->l2cap_data
= conn
;
1063 conn
->hchan
= hchan
;
1065 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1067 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
1068 conn
->mtu
= hcon
->hdev
->le_mtu
;
1070 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1072 conn
->src
= &hcon
->hdev
->bdaddr
;
1073 conn
->dst
= &hcon
->dst
;
1075 conn
->feat_mask
= 0;
1077 spin_lock_init(&conn
->lock
);
1079 INIT_LIST_HEAD(&conn
->chan_l
);
1081 if (hcon
->type
== LE_LINK
)
1082 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1084 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1086 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1091 /* ---- Socket interface ---- */
1093 /* Find socket with psm and source bdaddr.
1094 * Returns closest match.
1096 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
1098 struct l2cap_chan
*c
, *c1
= NULL
;
1100 read_lock(&chan_list_lock
);
1102 list_for_each_entry(c
, &chan_list
, global_l
) {
1103 struct sock
*sk
= c
->sk
;
1105 if (state
&& c
->state
!= state
)
1108 if (c
->psm
== psm
) {
1110 if (!bacmp(&bt_sk(sk
)->src
, src
)) {
1111 read_unlock(&chan_list_lock
);
1116 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
1121 read_unlock(&chan_list_lock
);
1126 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
, bdaddr_t
*dst
)
1128 struct sock
*sk
= chan
->sk
;
1129 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1130 struct l2cap_conn
*conn
;
1131 struct hci_conn
*hcon
;
1132 struct hci_dev
*hdev
;
1136 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
1139 hdev
= hci_get_route(dst
, src
);
1141 return -EHOSTUNREACH
;
1147 /* PSM must be odd and lsb of upper byte must be 0 */
1148 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1149 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1154 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1159 switch (chan
->mode
) {
1160 case L2CAP_MODE_BASIC
:
1162 case L2CAP_MODE_ERTM
:
1163 case L2CAP_MODE_STREAMING
:
1172 switch (sk
->sk_state
) {
1176 /* Already connecting */
1181 /* Already connected */
1195 /* Set destination address and psm */
1196 bacpy(&bt_sk(sk
)->dst
, dst
);
1200 auth_type
= l2cap_get_auth_type(chan
);
1202 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1203 hcon
= hci_connect(hdev
, LE_LINK
, dst
,
1204 chan
->sec_level
, auth_type
);
1206 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
1207 chan
->sec_level
, auth_type
);
1210 err
= PTR_ERR(hcon
);
1214 conn
= l2cap_conn_add(hcon
, 0);
1221 /* Update source addr of the socket */
1222 bacpy(src
, conn
->src
);
1224 l2cap_chan_add(conn
, chan
);
1226 l2cap_state_change(chan
, BT_CONNECT
);
1227 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1229 if (hcon
->state
== BT_CONNECTED
) {
1230 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1231 __clear_chan_timer(chan
);
1232 if (l2cap_chan_check_security(chan
))
1233 l2cap_state_change(chan
, BT_CONNECTED
);
1235 l2cap_do_start(chan
);
1241 hci_dev_unlock(hdev
);
1246 int __l2cap_wait_ack(struct sock
*sk
)
1248 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1249 DECLARE_WAITQUEUE(wait
, current
);
1253 add_wait_queue(sk_sleep(sk
), &wait
);
1254 set_current_state(TASK_INTERRUPTIBLE
);
1255 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1259 if (signal_pending(current
)) {
1260 err
= sock_intr_errno(timeo
);
1265 timeo
= schedule_timeout(timeo
);
1267 set_current_state(TASK_INTERRUPTIBLE
);
1269 err
= sock_error(sk
);
1273 set_current_state(TASK_RUNNING
);
1274 remove_wait_queue(sk_sleep(sk
), &wait
);
1278 static void l2cap_monitor_timeout(struct work_struct
*work
)
1280 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1281 monitor_timer
.work
);
1282 struct sock
*sk
= chan
->sk
;
1284 BT_DBG("chan %p", chan
);
1287 if (chan
->retry_count
>= chan
->remote_max_tx
) {
1288 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1293 chan
->retry_count
++;
1294 __set_monitor_timer(chan
);
1296 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1300 static void l2cap_retrans_timeout(struct work_struct
*work
)
1302 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1303 retrans_timer
.work
);
1304 struct sock
*sk
= chan
->sk
;
1306 BT_DBG("chan %p", chan
);
1309 chan
->retry_count
= 1;
1310 __set_monitor_timer(chan
);
1312 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
1314 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1318 static void l2cap_drop_acked_frames(struct l2cap_chan
*chan
)
1320 struct sk_buff
*skb
;
1322 while ((skb
= skb_peek(&chan
->tx_q
)) &&
1323 chan
->unacked_frames
) {
1324 if (bt_cb(skb
)->tx_seq
== chan
->expected_ack_seq
)
1327 skb
= skb_dequeue(&chan
->tx_q
);
1330 chan
->unacked_frames
--;
1333 if (!chan
->unacked_frames
)
1334 __clear_retrans_timer(chan
);
1337 static void l2cap_streaming_send(struct l2cap_chan
*chan
)
1339 struct sk_buff
*skb
;
1343 while ((skb
= skb_dequeue(&chan
->tx_q
))) {
1344 control
= __get_control(chan
, skb
->data
+ L2CAP_HDR_SIZE
);
1345 control
|= __set_txseq(chan
, chan
->next_tx_seq
);
1346 __put_control(chan
, control
, skb
->data
+ L2CAP_HDR_SIZE
);
1348 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1349 fcs
= crc16(0, (u8
*)skb
->data
,
1350 skb
->len
- L2CAP_FCS_SIZE
);
1351 put_unaligned_le16(fcs
,
1352 skb
->data
+ skb
->len
- L2CAP_FCS_SIZE
);
1355 l2cap_do_send(chan
, skb
);
1357 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1361 static void l2cap_retransmit_one_frame(struct l2cap_chan
*chan
, u16 tx_seq
)
1363 struct sk_buff
*skb
, *tx_skb
;
1367 skb
= skb_peek(&chan
->tx_q
);
1371 while (bt_cb(skb
)->tx_seq
!= tx_seq
) {
1372 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1375 skb
= skb_queue_next(&chan
->tx_q
, skb
);
1378 if (chan
->remote_max_tx
&&
1379 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1380 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1384 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1385 bt_cb(skb
)->retries
++;
1387 control
= __get_control(chan
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1388 control
&= __get_sar_mask(chan
);
1390 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1391 control
|= __set_ctrl_final(chan
);
1393 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1394 control
|= __set_txseq(chan
, tx_seq
);
1396 __put_control(chan
, control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1398 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1399 fcs
= crc16(0, (u8
*)tx_skb
->data
,
1400 tx_skb
->len
- L2CAP_FCS_SIZE
);
1401 put_unaligned_le16(fcs
,
1402 tx_skb
->data
+ tx_skb
->len
- L2CAP_FCS_SIZE
);
1405 l2cap_do_send(chan
, tx_skb
);
1408 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1410 struct sk_buff
*skb
, *tx_skb
;
1415 if (chan
->state
!= BT_CONNECTED
)
1418 while ((skb
= chan
->tx_send_head
) && (!l2cap_tx_window_full(chan
))) {
1420 if (chan
->remote_max_tx
&&
1421 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1422 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1426 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1428 bt_cb(skb
)->retries
++;
1430 control
= __get_control(chan
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1431 control
&= __get_sar_mask(chan
);
1433 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1434 control
|= __set_ctrl_final(chan
);
1436 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1437 control
|= __set_txseq(chan
, chan
->next_tx_seq
);
1439 __put_control(chan
, control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1441 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1442 fcs
= crc16(0, (u8
*)skb
->data
,
1443 tx_skb
->len
- L2CAP_FCS_SIZE
);
1444 put_unaligned_le16(fcs
, skb
->data
+
1445 tx_skb
->len
- L2CAP_FCS_SIZE
);
1448 l2cap_do_send(chan
, tx_skb
);
1450 __set_retrans_timer(chan
);
1452 bt_cb(skb
)->tx_seq
= chan
->next_tx_seq
;
1454 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1456 if (bt_cb(skb
)->retries
== 1) {
1457 chan
->unacked_frames
++;
1461 chan
->frames_sent
++;
1463 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1464 chan
->tx_send_head
= NULL
;
1466 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1472 static int l2cap_retransmit_frames(struct l2cap_chan
*chan
)
1476 if (!skb_queue_empty(&chan
->tx_q
))
1477 chan
->tx_send_head
= chan
->tx_q
.next
;
1479 chan
->next_tx_seq
= chan
->expected_ack_seq
;
1480 ret
= l2cap_ertm_send(chan
);
1484 static void __l2cap_send_ack(struct l2cap_chan
*chan
)
1488 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1490 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
1491 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
1492 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1493 l2cap_send_sframe(chan
, control
);
1497 if (l2cap_ertm_send(chan
) > 0)
1500 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
1501 l2cap_send_sframe(chan
, control
);
/* Acknowledge immediately, cancelling any pending delayed-ack timer
 * so we do not ack the same frames twice.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	__clear_ack_timer(chan);
	__l2cap_send_ack(chan);
}
1510 static void l2cap_send_srejtail(struct l2cap_chan
*chan
)
1512 struct srej_list
*tail
;
1515 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
1516 control
|= __set_ctrl_final(chan
);
1518 tail
= list_entry((&chan
->srej_l
)->prev
, struct srej_list
, list
);
1519 control
|= __set_reqseq(chan
, tail
->tx_seq
);
1521 l2cap_send_sframe(chan
, control
);
1524 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1526 struct l2cap_conn
*conn
= chan
->conn
;
1527 struct sk_buff
**frag
;
1530 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1536 /* Continuation fragments (no L2CAP header) */
1537 frag
= &skb_shinfo(skb
)->frag_list
;
1539 count
= min_t(unsigned int, conn
->mtu
, len
);
1541 *frag
= chan
->ops
->alloc_skb(chan
, count
,
1542 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1546 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1549 (*frag
)->priority
= skb
->priority
;
1554 frag
= &(*frag
)->next
;
1560 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
1561 struct msghdr
*msg
, size_t len
,
1564 struct l2cap_conn
*conn
= chan
->conn
;
1565 struct sk_buff
*skb
;
1566 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
1567 struct l2cap_hdr
*lh
;
1569 BT_DBG("chan %p len %d priority %u", chan
, (int)len
, priority
);
1571 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1573 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
1574 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1577 return ERR_PTR(err
);
1579 skb
->priority
= priority
;
1581 /* Create L2CAP header */
1582 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1583 lh
->cid
= cpu_to_le16(chan
->dcid
);
1584 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1585 put_unaligned_le16(chan
->psm
, skb_put(skb
, 2));
1587 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1588 if (unlikely(err
< 0)) {
1590 return ERR_PTR(err
);
1595 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
1596 struct msghdr
*msg
, size_t len
,
1599 struct l2cap_conn
*conn
= chan
->conn
;
1600 struct sk_buff
*skb
;
1601 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1602 struct l2cap_hdr
*lh
;
1604 BT_DBG("chan %p len %d", chan
, (int)len
);
1606 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1608 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
1609 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1612 return ERR_PTR(err
);
1614 skb
->priority
= priority
;
1616 /* Create L2CAP header */
1617 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1618 lh
->cid
= cpu_to_le16(chan
->dcid
);
1619 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1621 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1622 if (unlikely(err
< 0)) {
1624 return ERR_PTR(err
);
1629 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
1630 struct msghdr
*msg
, size_t len
,
1631 u32 control
, u16 sdulen
)
1633 struct l2cap_conn
*conn
= chan
->conn
;
1634 struct sk_buff
*skb
;
1635 int err
, count
, hlen
;
1636 struct l2cap_hdr
*lh
;
1638 BT_DBG("chan %p len %d", chan
, (int)len
);
1641 return ERR_PTR(-ENOTCONN
);
1643 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1644 hlen
= L2CAP_EXT_HDR_SIZE
;
1646 hlen
= L2CAP_ENH_HDR_SIZE
;
1649 hlen
+= L2CAP_SDULEN_SIZE
;
1651 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1652 hlen
+= L2CAP_FCS_SIZE
;
1654 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1656 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
1657 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1660 return ERR_PTR(err
);
1662 /* Create L2CAP header */
1663 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1664 lh
->cid
= cpu_to_le16(chan
->dcid
);
1665 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1667 __put_control(chan
, control
, skb_put(skb
, __ctrl_size(chan
)));
1670 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
1672 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1673 if (unlikely(err
< 0)) {
1675 return ERR_PTR(err
);
1678 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1679 put_unaligned_le16(0, skb_put(skb
, L2CAP_FCS_SIZE
));
1681 bt_cb(skb
)->retries
= 0;
1685 static int l2cap_sar_segment_sdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1687 struct sk_buff
*skb
;
1688 struct sk_buff_head sar_queue
;
1692 skb_queue_head_init(&sar_queue
);
1693 control
= __set_ctrl_sar(chan
, L2CAP_SAR_START
);
1694 skb
= l2cap_create_iframe_pdu(chan
, msg
, chan
->remote_mps
, control
, len
);
1696 return PTR_ERR(skb
);
1698 __skb_queue_tail(&sar_queue
, skb
);
1699 len
-= chan
->remote_mps
;
1700 size
+= chan
->remote_mps
;
1705 if (len
> chan
->remote_mps
) {
1706 control
= __set_ctrl_sar(chan
, L2CAP_SAR_CONTINUE
);
1707 buflen
= chan
->remote_mps
;
1709 control
= __set_ctrl_sar(chan
, L2CAP_SAR_END
);
1713 skb
= l2cap_create_iframe_pdu(chan
, msg
, buflen
, control
, 0);
1715 skb_queue_purge(&sar_queue
);
1716 return PTR_ERR(skb
);
1719 __skb_queue_tail(&sar_queue
, skb
);
1723 skb_queue_splice_tail(&sar_queue
, &chan
->tx_q
);
1724 if (chan
->tx_send_head
== NULL
)
1725 chan
->tx_send_head
= sar_queue
.next
;
1730 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
1733 struct sk_buff
*skb
;
1737 /* Connectionless channel */
1738 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
1739 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
1741 return PTR_ERR(skb
);
1743 l2cap_do_send(chan
, skb
);
1747 switch (chan
->mode
) {
1748 case L2CAP_MODE_BASIC
:
1749 /* Check outgoing MTU */
1750 if (len
> chan
->omtu
)
1753 /* Create a basic PDU */
1754 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
1756 return PTR_ERR(skb
);
1758 l2cap_do_send(chan
, skb
);
1762 case L2CAP_MODE_ERTM
:
1763 case L2CAP_MODE_STREAMING
:
1764 /* Entire SDU fits into one PDU */
1765 if (len
<= chan
->remote_mps
) {
1766 control
= __set_ctrl_sar(chan
, L2CAP_SAR_UNSEGMENTED
);
1767 skb
= l2cap_create_iframe_pdu(chan
, msg
, len
, control
,
1770 return PTR_ERR(skb
);
1772 __skb_queue_tail(&chan
->tx_q
, skb
);
1774 if (chan
->tx_send_head
== NULL
)
1775 chan
->tx_send_head
= skb
;
1778 /* Segment SDU into multiples PDUs */
1779 err
= l2cap_sar_segment_sdu(chan
, msg
, len
);
1784 if (chan
->mode
== L2CAP_MODE_STREAMING
) {
1785 l2cap_streaming_send(chan
);
1790 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
1791 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
1796 err
= l2cap_ertm_send(chan
);
1803 BT_DBG("bad state %1.1x", chan
->mode
);
1810 /* Copy frame to all raw sockets on that connection */
1811 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
1813 struct sk_buff
*nskb
;
1814 struct l2cap_chan
*chan
;
1816 BT_DBG("conn %p", conn
);
1820 list_for_each_entry_rcu(chan
, &conn
->chan_l
, list
) {
1821 struct sock
*sk
= chan
->sk
;
1822 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
1825 /* Don't send frame to the socket it came from */
1828 nskb
= skb_clone(skb
, GFP_ATOMIC
);
1832 if (chan
->ops
->recv(chan
->data
, nskb
))
1839 /* ---- L2CAP signalling commands ---- */
1840 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
1841 u8 code
, u8 ident
, u16 dlen
, void *data
)
1843 struct sk_buff
*skb
, **frag
;
1844 struct l2cap_cmd_hdr
*cmd
;
1845 struct l2cap_hdr
*lh
;
1848 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1849 conn
, code
, ident
, dlen
);
1851 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
1852 count
= min_t(unsigned int, conn
->mtu
, len
);
1854 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
1858 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1859 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
1861 if (conn
->hcon
->type
== LE_LINK
)
1862 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
1864 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
1866 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
1869 cmd
->len
= cpu_to_le16(dlen
);
1872 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
1873 memcpy(skb_put(skb
, count
), data
, count
);
1879 /* Continuation fragments (no L2CAP header) */
1880 frag
= &skb_shinfo(skb
)->frag_list
;
1882 count
= min_t(unsigned int, conn
->mtu
, len
);
1884 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
1888 memcpy(skb_put(*frag
, count
), data
, count
);
1893 frag
= &(*frag
)->next
;
1903 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
1905 struct l2cap_conf_opt
*opt
= *ptr
;
1908 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
1916 *val
= *((u8
*) opt
->val
);
1920 *val
= get_unaligned_le16(opt
->val
);
1924 *val
= get_unaligned_le32(opt
->val
);
1928 *val
= (unsigned long) opt
->val
;
1932 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
1936 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
1938 struct l2cap_conf_opt
*opt
= *ptr
;
1940 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
1947 *((u8
*) opt
->val
) = val
;
1951 put_unaligned_le16(val
, opt
->val
);
1955 put_unaligned_le32(val
, opt
->val
);
1959 memcpy(opt
->val
, (void *) val
, len
);
1963 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
1966 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
1968 struct l2cap_conf_efs efs
;
1970 switch (chan
->mode
) {
1971 case L2CAP_MODE_ERTM
:
1972 efs
.id
= chan
->local_id
;
1973 efs
.stype
= chan
->local_stype
;
1974 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
1975 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
1976 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
1977 efs
.flush_to
= cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO
);
1980 case L2CAP_MODE_STREAMING
:
1982 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
1983 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
1984 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
1993 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
1994 (unsigned long) &efs
);
1997 static void l2cap_ack_timeout(struct work_struct
*work
)
1999 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2002 BT_DBG("chan %p", chan
);
2004 lock_sock(chan
->sk
);
2005 __l2cap_send_ack(chan
);
2006 release_sock(chan
->sk
);
2008 l2cap_chan_put(chan
);
2011 static inline void l2cap_ertm_init(struct l2cap_chan
*chan
)
2013 chan
->expected_ack_seq
= 0;
2014 chan
->unacked_frames
= 0;
2015 chan
->buffer_seq
= 0;
2016 chan
->num_acked
= 0;
2017 chan
->frames_sent
= 0;
2019 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
2020 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
2021 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2023 skb_queue_head_init(&chan
->srej_q
);
2025 INIT_LIST_HEAD(&chan
->srej_l
);
2028 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2031 case L2CAP_MODE_STREAMING
:
2032 case L2CAP_MODE_ERTM
:
2033 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2037 return L2CAP_MODE_BASIC
;
2041 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
2043 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
2046 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
2048 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
2051 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
2053 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
2054 __l2cap_ews_supported(chan
)) {
2055 /* use extended control field */
2056 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2057 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2059 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
2060 L2CAP_DEFAULT_TX_WINDOW
);
2061 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2065 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2067 struct l2cap_conf_req
*req
= data
;
2068 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2069 void *ptr
= req
->data
;
2072 BT_DBG("chan %p", chan
);
2074 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2077 switch (chan
->mode
) {
2078 case L2CAP_MODE_STREAMING
:
2079 case L2CAP_MODE_ERTM
:
2080 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2083 if (__l2cap_efs_supported(chan
))
2084 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2088 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2093 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2094 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2096 switch (chan
->mode
) {
2097 case L2CAP_MODE_BASIC
:
2098 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2099 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2102 rfc
.mode
= L2CAP_MODE_BASIC
;
2104 rfc
.max_transmit
= 0;
2105 rfc
.retrans_timeout
= 0;
2106 rfc
.monitor_timeout
= 0;
2107 rfc
.max_pdu_size
= 0;
2109 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2110 (unsigned long) &rfc
);
2113 case L2CAP_MODE_ERTM
:
2114 rfc
.mode
= L2CAP_MODE_ERTM
;
2115 rfc
.max_transmit
= chan
->max_tx
;
2116 rfc
.retrans_timeout
= 0;
2117 rfc
.monitor_timeout
= 0;
2119 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2120 L2CAP_EXT_HDR_SIZE
-
2123 rfc
.max_pdu_size
= cpu_to_le16(size
);
2125 l2cap_txwin_setup(chan
);
2127 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2128 L2CAP_DEFAULT_TX_WINDOW
);
2130 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2131 (unsigned long) &rfc
);
2133 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2134 l2cap_add_opt_efs(&ptr
, chan
);
2136 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2139 if (chan
->fcs
== L2CAP_FCS_NONE
||
2140 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2141 chan
->fcs
= L2CAP_FCS_NONE
;
2142 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2145 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2146 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2150 case L2CAP_MODE_STREAMING
:
2151 rfc
.mode
= L2CAP_MODE_STREAMING
;
2153 rfc
.max_transmit
= 0;
2154 rfc
.retrans_timeout
= 0;
2155 rfc
.monitor_timeout
= 0;
2157 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2158 L2CAP_EXT_HDR_SIZE
-
2161 rfc
.max_pdu_size
= cpu_to_le16(size
);
2163 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2164 (unsigned long) &rfc
);
2166 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2167 l2cap_add_opt_efs(&ptr
, chan
);
2169 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2172 if (chan
->fcs
== L2CAP_FCS_NONE
||
2173 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2174 chan
->fcs
= L2CAP_FCS_NONE
;
2175 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2180 req
->dcid
= cpu_to_le16(chan
->dcid
);
2181 req
->flags
= cpu_to_le16(0);
2186 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
2188 struct l2cap_conf_rsp
*rsp
= data
;
2189 void *ptr
= rsp
->data
;
2190 void *req
= chan
->conf_req
;
2191 int len
= chan
->conf_len
;
2192 int type
, hint
, olen
;
2194 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2195 struct l2cap_conf_efs efs
;
2197 u16 mtu
= L2CAP_DEFAULT_MTU
;
2198 u16 result
= L2CAP_CONF_SUCCESS
;
2201 BT_DBG("chan %p", chan
);
2203 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2204 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2206 hint
= type
& L2CAP_CONF_HINT
;
2207 type
&= L2CAP_CONF_MASK
;
2210 case L2CAP_CONF_MTU
:
2214 case L2CAP_CONF_FLUSH_TO
:
2215 chan
->flush_to
= val
;
2218 case L2CAP_CONF_QOS
:
2221 case L2CAP_CONF_RFC
:
2222 if (olen
== sizeof(rfc
))
2223 memcpy(&rfc
, (void *) val
, olen
);
2226 case L2CAP_CONF_FCS
:
2227 if (val
== L2CAP_FCS_NONE
)
2228 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
2231 case L2CAP_CONF_EFS
:
2233 if (olen
== sizeof(efs
))
2234 memcpy(&efs
, (void *) val
, olen
);
2237 case L2CAP_CONF_EWS
:
2239 return -ECONNREFUSED
;
2241 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2242 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
2243 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2244 chan
->remote_tx_win
= val
;
2251 result
= L2CAP_CONF_UNKNOWN
;
2252 *((u8
*) ptr
++) = type
;
2257 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
2260 switch (chan
->mode
) {
2261 case L2CAP_MODE_STREAMING
:
2262 case L2CAP_MODE_ERTM
:
2263 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
2264 chan
->mode
= l2cap_select_mode(rfc
.mode
,
2265 chan
->conn
->feat_mask
);
2270 if (__l2cap_efs_supported(chan
))
2271 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2273 return -ECONNREFUSED
;
2276 if (chan
->mode
!= rfc
.mode
)
2277 return -ECONNREFUSED
;
2283 if (chan
->mode
!= rfc
.mode
) {
2284 result
= L2CAP_CONF_UNACCEPT
;
2285 rfc
.mode
= chan
->mode
;
2287 if (chan
->num_conf_rsp
== 1)
2288 return -ECONNREFUSED
;
2290 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2291 sizeof(rfc
), (unsigned long) &rfc
);
2294 if (result
== L2CAP_CONF_SUCCESS
) {
2295 /* Configure output options and let the other side know
2296 * which ones we don't like. */
2298 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2299 result
= L2CAP_CONF_UNACCEPT
;
2302 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
2304 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
2307 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
2308 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
2309 efs
.stype
!= chan
->local_stype
) {
2311 result
= L2CAP_CONF_UNACCEPT
;
2313 if (chan
->num_conf_req
>= 1)
2314 return -ECONNREFUSED
;
2316 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2318 (unsigned long) &efs
);
2320 /* Send PENDING Conf Rsp */
2321 result
= L2CAP_CONF_PENDING
;
2322 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
2327 case L2CAP_MODE_BASIC
:
2328 chan
->fcs
= L2CAP_FCS_NONE
;
2329 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2332 case L2CAP_MODE_ERTM
:
2333 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
2334 chan
->remote_tx_win
= rfc
.txwin_size
;
2336 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
2338 chan
->remote_max_tx
= rfc
.max_transmit
;
2340 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
2342 L2CAP_EXT_HDR_SIZE
-
2345 rfc
.max_pdu_size
= cpu_to_le16(size
);
2346 chan
->remote_mps
= size
;
2348 rfc
.retrans_timeout
=
2349 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
2350 rfc
.monitor_timeout
=
2351 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
2353 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2355 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2356 sizeof(rfc
), (unsigned long) &rfc
);
2358 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
2359 chan
->remote_id
= efs
.id
;
2360 chan
->remote_stype
= efs
.stype
;
2361 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
2362 chan
->remote_flush_to
=
2363 le32_to_cpu(efs
.flush_to
);
2364 chan
->remote_acc_lat
=
2365 le32_to_cpu(efs
.acc_lat
);
2366 chan
->remote_sdu_itime
=
2367 le32_to_cpu(efs
.sdu_itime
);
2368 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2369 sizeof(efs
), (unsigned long) &efs
);
2373 case L2CAP_MODE_STREAMING
:
2374 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
2376 L2CAP_EXT_HDR_SIZE
-
2379 rfc
.max_pdu_size
= cpu_to_le16(size
);
2380 chan
->remote_mps
= size
;
2382 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2384 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2385 sizeof(rfc
), (unsigned long) &rfc
);
2390 result
= L2CAP_CONF_UNACCEPT
;
2392 memset(&rfc
, 0, sizeof(rfc
));
2393 rfc
.mode
= chan
->mode
;
2396 if (result
== L2CAP_CONF_SUCCESS
)
2397 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
2399 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2400 rsp
->result
= cpu_to_le16(result
);
2401 rsp
->flags
= cpu_to_le16(0x0000);
2406 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
2408 struct l2cap_conf_req
*req
= data
;
2409 void *ptr
= req
->data
;
2412 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2413 struct l2cap_conf_efs efs
;
2415 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
2417 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2418 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2421 case L2CAP_CONF_MTU
:
2422 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2423 *result
= L2CAP_CONF_UNACCEPT
;
2424 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
2427 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2430 case L2CAP_CONF_FLUSH_TO
:
2431 chan
->flush_to
= val
;
2432 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2436 case L2CAP_CONF_RFC
:
2437 if (olen
== sizeof(rfc
))
2438 memcpy(&rfc
, (void *)val
, olen
);
2440 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
2441 rfc
.mode
!= chan
->mode
)
2442 return -ECONNREFUSED
;
2446 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2447 sizeof(rfc
), (unsigned long) &rfc
);
2450 case L2CAP_CONF_EWS
:
2451 chan
->tx_win
= min_t(u16
, val
,
2452 L2CAP_DEFAULT_EXT_WINDOW
);
2453 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2457 case L2CAP_CONF_EFS
:
2458 if (olen
== sizeof(efs
))
2459 memcpy(&efs
, (void *)val
, olen
);
2461 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
2462 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
2463 efs
.stype
!= chan
->local_stype
)
2464 return -ECONNREFUSED
;
2466 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2467 sizeof(efs
), (unsigned long) &efs
);
2472 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
2473 return -ECONNREFUSED
;
2475 chan
->mode
= rfc
.mode
;
2477 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
2479 case L2CAP_MODE_ERTM
:
2480 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2481 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2482 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2484 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
2485 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
2486 chan
->local_sdu_itime
=
2487 le32_to_cpu(efs
.sdu_itime
);
2488 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
2489 chan
->local_flush_to
=
2490 le32_to_cpu(efs
.flush_to
);
2494 case L2CAP_MODE_STREAMING
:
2495 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2499 req
->dcid
= cpu_to_le16(chan
->dcid
);
2500 req
->flags
= cpu_to_le16(0x0000);
2505 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
2507 struct l2cap_conf_rsp
*rsp
= data
;
2508 void *ptr
= rsp
->data
;
2510 BT_DBG("chan %p", chan
);
2512 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2513 rsp
->result
= cpu_to_le16(result
);
2514 rsp
->flags
= cpu_to_le16(flags
);
2519 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
2521 struct l2cap_conn_rsp rsp
;
2522 struct l2cap_conn
*conn
= chan
->conn
;
2525 rsp
.scid
= cpu_to_le16(chan
->dcid
);
2526 rsp
.dcid
= cpu_to_le16(chan
->scid
);
2527 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
2528 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
2529 l2cap_send_cmd(conn
, chan
->ident
,
2530 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2532 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
2535 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2536 l2cap_build_conf_req(chan
, buf
), buf
);
2537 chan
->num_conf_req
++;
2540 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
2544 struct l2cap_conf_rfc rfc
;
2546 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
2548 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
2551 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2552 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2555 case L2CAP_CONF_RFC
:
2556 if (olen
== sizeof(rfc
))
2557 memcpy(&rfc
, (void *)val
, olen
);
2562 /* Use sane default values in case a misbehaving remote device
2563 * did not send an RFC option.
2565 rfc
.mode
= chan
->mode
;
2566 rfc
.retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
2567 rfc
.monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
2568 rfc
.max_pdu_size
= cpu_to_le16(chan
->imtu
);
2570 BT_ERR("Expected RFC option was not found, using defaults");
2574 case L2CAP_MODE_ERTM
:
2575 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2576 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2577 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2579 case L2CAP_MODE_STREAMING
:
2580 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2584 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2586 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
2588 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
2591 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2592 cmd
->ident
== conn
->info_ident
) {
2593 cancel_delayed_work(&conn
->info_timer
);
2595 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2596 conn
->info_ident
= 0;
2598 l2cap_conn_start(conn
);
2604 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2606 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2607 struct l2cap_conn_rsp rsp
;
2608 struct l2cap_chan
*chan
= NULL
, *pchan
;
2609 struct sock
*parent
, *sk
= NULL
;
2610 int result
, status
= L2CAP_CS_NO_INFO
;
2612 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2613 __le16 psm
= req
->psm
;
2615 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2617 /* Check if we have socket listening on psm */
2618 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
);
2620 result
= L2CAP_CR_BAD_PSM
;
2628 /* Check if the ACL is secure enough (if not SDP) */
2629 if (psm
!= cpu_to_le16(0x0001) &&
2630 !hci_conn_check_link_mode(conn
->hcon
)) {
2631 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
2632 result
= L2CAP_CR_SEC_BLOCK
;
2636 result
= L2CAP_CR_NO_MEM
;
2638 /* Check for backlog size */
2639 if (sk_acceptq_is_full(parent
)) {
2640 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2644 chan
= pchan
->ops
->new_connection(pchan
->data
);
2650 /* Check if we already have channel with that dcid */
2651 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
2652 sock_set_flag(sk
, SOCK_ZAPPED
);
2653 chan
->ops
->close(chan
->data
);
2657 hci_conn_hold(conn
->hcon
);
2659 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2660 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2664 bt_accept_enqueue(parent
, sk
);
2666 l2cap_chan_add(conn
, chan
);
2670 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
2672 chan
->ident
= cmd
->ident
;
2674 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2675 if (l2cap_chan_check_security(chan
)) {
2676 if (bt_sk(sk
)->defer_setup
) {
2677 l2cap_state_change(chan
, BT_CONNECT2
);
2678 result
= L2CAP_CR_PEND
;
2679 status
= L2CAP_CS_AUTHOR_PEND
;
2680 parent
->sk_data_ready(parent
, 0);
2682 l2cap_state_change(chan
, BT_CONFIG
);
2683 result
= L2CAP_CR_SUCCESS
;
2684 status
= L2CAP_CS_NO_INFO
;
2687 l2cap_state_change(chan
, BT_CONNECT2
);
2688 result
= L2CAP_CR_PEND
;
2689 status
= L2CAP_CS_AUTHEN_PEND
;
2692 l2cap_state_change(chan
, BT_CONNECT2
);
2693 result
= L2CAP_CR_PEND
;
2694 status
= L2CAP_CS_NO_INFO
;
2698 release_sock(parent
);
2701 rsp
.scid
= cpu_to_le16(scid
);
2702 rsp
.dcid
= cpu_to_le16(dcid
);
2703 rsp
.result
= cpu_to_le16(result
);
2704 rsp
.status
= cpu_to_le16(status
);
2705 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2707 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2708 struct l2cap_info_req info
;
2709 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2711 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2712 conn
->info_ident
= l2cap_get_ident(conn
);
2714 schedule_delayed_work(&conn
->info_timer
,
2715 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2717 l2cap_send_cmd(conn
, conn
->info_ident
,
2718 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2721 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
2722 result
== L2CAP_CR_SUCCESS
) {
2724 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
2725 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2726 l2cap_build_conf_req(chan
, buf
), buf
);
2727 chan
->num_conf_req
++;
2733 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2735 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2736 u16 scid
, dcid
, result
, status
;
2737 struct l2cap_chan
*chan
;
2741 scid
= __le16_to_cpu(rsp
->scid
);
2742 dcid
= __le16_to_cpu(rsp
->dcid
);
2743 result
= __le16_to_cpu(rsp
->result
);
2744 status
= __le16_to_cpu(rsp
->status
);
2746 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2749 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2753 chan
= l2cap_get_chan_by_ident(conn
, cmd
->ident
);
2761 case L2CAP_CR_SUCCESS
:
2762 l2cap_state_change(chan
, BT_CONFIG
);
2765 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
2767 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
2770 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2771 l2cap_build_conf_req(chan
, req
), req
);
2772 chan
->num_conf_req
++;
2776 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
2780 l2cap_chan_del(chan
, ECONNREFUSED
);
2788 static inline void set_default_fcs(struct l2cap_chan
*chan
)
2790 /* FCS is enabled only in ERTM or streaming mode, if one or both
2793 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
2794 chan
->fcs
= L2CAP_FCS_NONE
;
2795 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
2796 chan
->fcs
= L2CAP_FCS_CRC16
;
2799 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2801 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2804 struct l2cap_chan
*chan
;
2808 dcid
= __le16_to_cpu(req
->dcid
);
2809 flags
= __le16_to_cpu(req
->flags
);
2811 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2813 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2819 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
2820 struct l2cap_cmd_rej_cid rej
;
2822 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
2823 rej
.scid
= cpu_to_le16(chan
->scid
);
2824 rej
.dcid
= cpu_to_le16(chan
->dcid
);
2826 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
2831 /* Reject if config buffer is too small. */
2832 len
= cmd_len
- sizeof(*req
);
2833 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
2834 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2835 l2cap_build_conf_rsp(chan
, rsp
,
2836 L2CAP_CONF_REJECT
, flags
), rsp
);
2841 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
2842 chan
->conf_len
+= len
;
2844 if (flags
& 0x0001) {
2845 /* Incomplete config. Send empty response. */
2846 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2847 l2cap_build_conf_rsp(chan
, rsp
,
2848 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2852 /* Complete config. */
2853 len
= l2cap_parse_conf_req(chan
, rsp
);
2855 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2859 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2860 chan
->num_conf_rsp
++;
2862 /* Reset config buffer. */
2865 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
2868 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
2869 set_default_fcs(chan
);
2871 l2cap_state_change(chan
, BT_CONNECTED
);
2873 chan
->next_tx_seq
= 0;
2874 chan
->expected_tx_seq
= 0;
2875 skb_queue_head_init(&chan
->tx_q
);
2876 if (chan
->mode
== L2CAP_MODE_ERTM
)
2877 l2cap_ertm_init(chan
);
2879 l2cap_chan_ready(chan
);
2883 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
2885 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2886 l2cap_build_conf_req(chan
, buf
), buf
);
2887 chan
->num_conf_req
++;
2890 /* Got Conf Rsp PENDING from remote side and asume we sent
2891 Conf Rsp PENDING in the code above */
2892 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
2893 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
2895 /* check compatibility */
2897 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
2898 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
2900 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2901 l2cap_build_conf_rsp(chan
, rsp
,
2902 L2CAP_CONF_SUCCESS
, 0x0000), rsp
);
2910 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2912 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2913 u16 scid
, flags
, result
;
2914 struct l2cap_chan
*chan
;
2916 int len
= cmd
->len
- sizeof(*rsp
);
2918 scid
= __le16_to_cpu(rsp
->scid
);
2919 flags
= __le16_to_cpu(rsp
->flags
);
2920 result
= __le16_to_cpu(rsp
->result
);
2922 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2923 scid
, flags
, result
);
2925 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2932 case L2CAP_CONF_SUCCESS
:
2933 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
2934 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
2937 case L2CAP_CONF_PENDING
:
2938 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
2940 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
2943 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
2946 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2950 /* check compatibility */
2952 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
2953 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
2955 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2956 l2cap_build_conf_rsp(chan
, buf
,
2957 L2CAP_CONF_SUCCESS
, 0x0000), buf
);
2961 case L2CAP_CONF_UNACCEPT
:
2962 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2965 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
2966 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2970 /* throw out any old stored conf requests */
2971 result
= L2CAP_CONF_SUCCESS
;
2972 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
2975 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2979 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2980 L2CAP_CONF_REQ
, len
, req
);
2981 chan
->num_conf_req
++;
2982 if (result
!= L2CAP_CONF_SUCCESS
)
2988 sk
->sk_err
= ECONNRESET
;
2989 __set_chan_timer(chan
,
2990 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT
));
2991 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2998 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3000 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3001 set_default_fcs(chan
);
3003 l2cap_state_change(chan
, BT_CONNECTED
);
3004 chan
->next_tx_seq
= 0;
3005 chan
->expected_tx_seq
= 0;
3006 skb_queue_head_init(&chan
->tx_q
);
3007 if (chan
->mode
== L2CAP_MODE_ERTM
)
3008 l2cap_ertm_init(chan
);
3010 l2cap_chan_ready(chan
);
3018 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3020 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3021 struct l2cap_disconn_rsp rsp
;
3023 struct l2cap_chan
*chan
;
3026 scid
= __le16_to_cpu(req
->scid
);
3027 dcid
= __le16_to_cpu(req
->dcid
);
3029 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3031 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3037 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3038 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3039 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3041 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3043 l2cap_chan_del(chan
, ECONNRESET
);
3046 chan
->ops
->close(chan
->data
);
3050 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3052 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3054 struct l2cap_chan
*chan
;
3057 scid
= __le16_to_cpu(rsp
->scid
);
3058 dcid
= __le16_to_cpu(rsp
->dcid
);
3060 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3062 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3068 l2cap_chan_del(chan
, 0);
3071 chan
->ops
->close(chan
->data
);
3075 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3077 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3080 type
= __le16_to_cpu(req
->type
);
3082 BT_DBG("type 0x%4.4x", type
);
3084 if (type
== L2CAP_IT_FEAT_MASK
) {
3086 u32 feat_mask
= l2cap_feat_mask
;
3087 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3088 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3089 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3091 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3094 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3095 | L2CAP_FEAT_EXT_WINDOW
;
3097 put_unaligned_le32(feat_mask
, rsp
->data
);
3098 l2cap_send_cmd(conn
, cmd
->ident
,
3099 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3100 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3102 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3105 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3107 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3109 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3110 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3111 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3112 l2cap_send_cmd(conn
, cmd
->ident
,
3113 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3115 struct l2cap_info_rsp rsp
;
3116 rsp
.type
= cpu_to_le16(type
);
3117 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3118 l2cap_send_cmd(conn
, cmd
->ident
,
3119 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3125 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3127 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3130 type
= __le16_to_cpu(rsp
->type
);
3131 result
= __le16_to_cpu(rsp
->result
);
3133 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3135 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3136 if (cmd
->ident
!= conn
->info_ident
||
3137 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3140 cancel_delayed_work(&conn
->info_timer
);
3142 if (result
!= L2CAP_IR_SUCCESS
) {
3143 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3144 conn
->info_ident
= 0;
3146 l2cap_conn_start(conn
);
3151 if (type
== L2CAP_IT_FEAT_MASK
) {
3152 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3154 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3155 struct l2cap_info_req req
;
3156 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3158 conn
->info_ident
= l2cap_get_ident(conn
);
3160 l2cap_send_cmd(conn
, conn
->info_ident
,
3161 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3163 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3164 conn
->info_ident
= 0;
3166 l2cap_conn_start(conn
);
3168 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3169 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3170 conn
->info_ident
= 0;
3172 l2cap_conn_start(conn
);
3178 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
3179 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3182 struct l2cap_create_chan_req
*req
= data
;
3183 struct l2cap_create_chan_rsp rsp
;
3186 if (cmd_len
!= sizeof(*req
))
3192 psm
= le16_to_cpu(req
->psm
);
3193 scid
= le16_to_cpu(req
->scid
);
3195 BT_DBG("psm %d, scid %d, amp_id %d", psm
, scid
, req
->amp_id
);
3197 /* Placeholder: Always reject */
3199 rsp
.scid
= cpu_to_le16(scid
);
3200 rsp
.result
= L2CAP_CR_NO_MEM
;
3201 rsp
.status
= L2CAP_CS_NO_INFO
;
3203 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
/* Handle an AMP Create Channel Response — identical processing to a
 * regular Connect Response, so just delegate.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3217 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
3218 u16 icid
, u16 result
)
3220 struct l2cap_move_chan_rsp rsp
;
3222 BT_DBG("icid %d, result %d", icid
, result
);
3224 rsp
.icid
= cpu_to_le16(icid
);
3225 rsp
.result
= cpu_to_le16(result
);
3227 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
3230 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
3231 struct l2cap_chan
*chan
, u16 icid
, u16 result
)
3233 struct l2cap_move_chan_cfm cfm
;
3236 BT_DBG("icid %d, result %d", icid
, result
);
3238 ident
= l2cap_get_ident(conn
);
3240 chan
->ident
= ident
;
3242 cfm
.icid
= cpu_to_le16(icid
);
3243 cfm
.result
= cpu_to_le16(result
);
3245 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
3248 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
3251 struct l2cap_move_chan_cfm_rsp rsp
;
3253 BT_DBG("icid %d", icid
);
3255 rsp
.icid
= cpu_to_le16(icid
);
3256 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
3259 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
3260 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3262 struct l2cap_move_chan_req
*req
= data
;
3264 u16 result
= L2CAP_MR_NOT_ALLOWED
;
3266 if (cmd_len
!= sizeof(*req
))
3269 icid
= le16_to_cpu(req
->icid
);
3271 BT_DBG("icid %d, dest_amp_id %d", icid
, req
->dest_amp_id
);
3276 /* Placeholder: Always refuse */
3277 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
3282 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
3283 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3285 struct l2cap_move_chan_rsp
*rsp
= data
;
3288 if (cmd_len
!= sizeof(*rsp
))
3291 icid
= le16_to_cpu(rsp
->icid
);
3292 result
= le16_to_cpu(rsp
->result
);
3294 BT_DBG("icid %d, result %d", icid
, result
);
3296 /* Placeholder: Always unconfirmed */
3297 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
3302 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
3303 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3305 struct l2cap_move_chan_cfm
*cfm
= data
;
3308 if (cmd_len
!= sizeof(*cfm
))
3311 icid
= le16_to_cpu(cfm
->icid
);
3312 result
= le16_to_cpu(cfm
->result
);
3314 BT_DBG("icid %d, result %d", icid
, result
);
3316 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
3321 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
3322 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3324 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
3327 if (cmd_len
!= sizeof(*rsp
))
3330 icid
= le16_to_cpu(rsp
->icid
);
3332 BT_DBG("icid %d", icid
);
3337 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
3342 if (min
> max
|| min
< 6 || max
> 3200)
3345 if (to_multiplier
< 10 || to_multiplier
> 3200)
3348 if (max
>= to_multiplier
* 8)
3351 max_latency
= (to_multiplier
* 8 / max
) - 1;
3352 if (latency
> 499 || latency
> max_latency
)
3358 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
3359 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3361 struct hci_conn
*hcon
= conn
->hcon
;
3362 struct l2cap_conn_param_update_req
*req
;
3363 struct l2cap_conn_param_update_rsp rsp
;
3364 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
3367 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
3370 cmd_len
= __le16_to_cpu(cmd
->len
);
3371 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
3374 req
= (struct l2cap_conn_param_update_req
*) data
;
3375 min
= __le16_to_cpu(req
->min
);
3376 max
= __le16_to_cpu(req
->max
);
3377 latency
= __le16_to_cpu(req
->latency
);
3378 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
3380 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3381 min
, max
, latency
, to_multiplier
);
3383 memset(&rsp
, 0, sizeof(rsp
));
3385 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
3387 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
3389 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
3391 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
3395 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
3400 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
3401 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3405 switch (cmd
->code
) {
3406 case L2CAP_COMMAND_REJ
:
3407 l2cap_command_rej(conn
, cmd
, data
);
3410 case L2CAP_CONN_REQ
:
3411 err
= l2cap_connect_req(conn
, cmd
, data
);
3414 case L2CAP_CONN_RSP
:
3415 err
= l2cap_connect_rsp(conn
, cmd
, data
);
3418 case L2CAP_CONF_REQ
:
3419 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
3422 case L2CAP_CONF_RSP
:
3423 err
= l2cap_config_rsp(conn
, cmd
, data
);
3426 case L2CAP_DISCONN_REQ
:
3427 err
= l2cap_disconnect_req(conn
, cmd
, data
);
3430 case L2CAP_DISCONN_RSP
:
3431 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
3434 case L2CAP_ECHO_REQ
:
3435 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3438 case L2CAP_ECHO_RSP
:
3441 case L2CAP_INFO_REQ
:
3442 err
= l2cap_information_req(conn
, cmd
, data
);
3445 case L2CAP_INFO_RSP
:
3446 err
= l2cap_information_rsp(conn
, cmd
, data
);
3449 case L2CAP_CREATE_CHAN_REQ
:
3450 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
3453 case L2CAP_CREATE_CHAN_RSP
:
3454 err
= l2cap_create_channel_rsp(conn
, cmd
, data
);
3457 case L2CAP_MOVE_CHAN_REQ
:
3458 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
3461 case L2CAP_MOVE_CHAN_RSP
:
3462 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
3465 case L2CAP_MOVE_CHAN_CFM
:
3466 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
3469 case L2CAP_MOVE_CHAN_CFM_RSP
:
3470 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
3474 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
3482 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
3483 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3485 switch (cmd
->code
) {
3486 case L2CAP_COMMAND_REJ
:
3489 case L2CAP_CONN_PARAM_UPDATE_REQ
:
3490 return l2cap_conn_param_update_req(conn
, cmd
, data
);
3492 case L2CAP_CONN_PARAM_UPDATE_RSP
:
3496 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
3501 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
3502 struct sk_buff
*skb
)
3504 u8
*data
= skb
->data
;
3506 struct l2cap_cmd_hdr cmd
;
3509 l2cap_raw_recv(conn
, skb
);
3511 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3513 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3514 data
+= L2CAP_CMD_HDR_SIZE
;
3515 len
-= L2CAP_CMD_HDR_SIZE
;
3517 cmd_len
= le16_to_cpu(cmd
.len
);
3519 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3521 if (cmd_len
> len
|| !cmd
.ident
) {
3522 BT_DBG("corrupted command");
3526 if (conn
->hcon
->type
== LE_LINK
)
3527 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
3529 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
3532 struct l2cap_cmd_rej_unk rej
;
3534 BT_ERR("Wrong link type (%d)", err
);
3536 /* FIXME: Map err to a valid reason */
3537 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
3538 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3548 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
3550 u16 our_fcs
, rcv_fcs
;
3553 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3554 hdr_size
= L2CAP_EXT_HDR_SIZE
;
3556 hdr_size
= L2CAP_ENH_HDR_SIZE
;
3558 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
3559 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
3560 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3561 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3563 if (our_fcs
!= rcv_fcs
)
3569 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
3573 chan
->frames_sent
= 0;
3575 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
3577 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
3578 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
3579 l2cap_send_sframe(chan
, control
);
3580 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3583 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
3584 l2cap_retransmit_frames(chan
);
3586 l2cap_ertm_send(chan
);
3588 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
3589 chan
->frames_sent
== 0) {
3590 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
3591 l2cap_send_sframe(chan
, control
);
3595 static int l2cap_add_to_srej_queue(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 tx_seq
, u8 sar
)
3597 struct sk_buff
*next_skb
;
3598 int tx_seq_offset
, next_tx_seq_offset
;
3600 bt_cb(skb
)->tx_seq
= tx_seq
;
3601 bt_cb(skb
)->sar
= sar
;
3603 next_skb
= skb_peek(&chan
->srej_q
);
3605 tx_seq_offset
= __seq_offset(chan
, tx_seq
, chan
->buffer_seq
);
3608 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3611 next_tx_seq_offset
= __seq_offset(chan
,
3612 bt_cb(next_skb
)->tx_seq
, chan
->buffer_seq
);
3614 if (next_tx_seq_offset
> tx_seq_offset
) {
3615 __skb_queue_before(&chan
->srej_q
, next_skb
, skb
);
3619 if (skb_queue_is_last(&chan
->srej_q
, next_skb
))
3622 next_skb
= skb_queue_next(&chan
->srej_q
, next_skb
);
3625 __skb_queue_tail(&chan
->srej_q
, skb
);
3630 static void append_skb_frag(struct sk_buff
*skb
,
3631 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
3633 /* skb->len reflects data in skb as well as all fragments
3634 * skb->data_len reflects only data in fragments
3636 if (!skb_has_frag_list(skb
))
3637 skb_shinfo(skb
)->frag_list
= new_frag
;
3639 new_frag
->next
= NULL
;
3641 (*last_frag
)->next
= new_frag
;
3642 *last_frag
= new_frag
;
3644 skb
->len
+= new_frag
->len
;
3645 skb
->data_len
+= new_frag
->len
;
3646 skb
->truesize
+= new_frag
->truesize
;
3649 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u32 control
)
3653 switch (__get_ctrl_sar(chan
, control
)) {
3654 case L2CAP_SAR_UNSEGMENTED
:
3658 err
= chan
->ops
->recv(chan
->data
, skb
);
3661 case L2CAP_SAR_START
:
3665 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
3666 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
3668 if (chan
->sdu_len
> chan
->imtu
) {
3673 if (skb
->len
>= chan
->sdu_len
)
3677 chan
->sdu_last_frag
= skb
;
3683 case L2CAP_SAR_CONTINUE
:
3687 append_skb_frag(chan
->sdu
, skb
,
3688 &chan
->sdu_last_frag
);
3691 if (chan
->sdu
->len
>= chan
->sdu_len
)
3701 append_skb_frag(chan
->sdu
, skb
,
3702 &chan
->sdu_last_frag
);
3705 if (chan
->sdu
->len
!= chan
->sdu_len
)
3708 err
= chan
->ops
->recv(chan
->data
, chan
->sdu
);
3711 /* Reassembly complete */
3713 chan
->sdu_last_frag
= NULL
;
3721 kfree_skb(chan
->sdu
);
3723 chan
->sdu_last_frag
= NULL
;
3730 static void l2cap_ertm_enter_local_busy(struct l2cap_chan
*chan
)
3732 BT_DBG("chan %p, Enter local busy", chan
);
3734 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
3736 __set_ack_timer(chan
);
3739 static void l2cap_ertm_exit_local_busy(struct l2cap_chan
*chan
)
3743 if (!test_bit(CONN_RNR_SENT
, &chan
->conn_state
))
3746 control
= __set_reqseq(chan
, chan
->buffer_seq
);
3747 control
|= __set_ctrl_poll(chan
);
3748 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
3749 l2cap_send_sframe(chan
, control
);
3750 chan
->retry_count
= 1;
3752 __clear_retrans_timer(chan
);
3753 __set_monitor_timer(chan
);
3755 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
3758 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
3759 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3761 BT_DBG("chan %p, Exit local busy", chan
);
3764 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
3766 if (chan
->mode
== L2CAP_MODE_ERTM
) {
3768 l2cap_ertm_enter_local_busy(chan
);
3770 l2cap_ertm_exit_local_busy(chan
);
3774 static void l2cap_check_srej_gap(struct l2cap_chan
*chan
, u16 tx_seq
)
3776 struct sk_buff
*skb
;
3779 while ((skb
= skb_peek(&chan
->srej_q
)) &&
3780 !test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
3783 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3786 skb
= skb_dequeue(&chan
->srej_q
);
3787 control
= __set_ctrl_sar(chan
, bt_cb(skb
)->sar
);
3788 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
3791 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3795 chan
->buffer_seq_srej
= __next_seq(chan
, chan
->buffer_seq_srej
);
3796 tx_seq
= __next_seq(chan
, tx_seq
);
3800 static void l2cap_resend_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
3802 struct srej_list
*l
, *tmp
;
3805 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
3806 if (l
->tx_seq
== tx_seq
) {
3811 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
3812 control
|= __set_reqseq(chan
, l
->tx_seq
);
3813 l2cap_send_sframe(chan
, control
);
3815 list_add_tail(&l
->list
, &chan
->srej_l
);
3819 static int l2cap_send_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
3821 struct srej_list
*new;
3824 while (tx_seq
!= chan
->expected_tx_seq
) {
3825 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
3826 control
|= __set_reqseq(chan
, chan
->expected_tx_seq
);
3827 l2cap_send_sframe(chan
, control
);
3829 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3833 new->tx_seq
= chan
->expected_tx_seq
;
3835 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
3837 list_add_tail(&new->list
, &chan
->srej_l
);
3840 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
3845 static inline int l2cap_data_channel_iframe(struct l2cap_chan
*chan
, u32 rx_control
, struct sk_buff
*skb
)
3847 u16 tx_seq
= __get_txseq(chan
, rx_control
);
3848 u16 req_seq
= __get_reqseq(chan
, rx_control
);
3849 u8 sar
= __get_ctrl_sar(chan
, rx_control
);
3850 int tx_seq_offset
, expected_tx_seq_offset
;
3851 int num_to_ack
= (chan
->tx_win
/6) + 1;
3854 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan
, skb
->len
,
3855 tx_seq
, rx_control
);
3857 if (__is_ctrl_final(chan
, rx_control
) &&
3858 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
3859 __clear_monitor_timer(chan
);
3860 if (chan
->unacked_frames
> 0)
3861 __set_retrans_timer(chan
);
3862 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
3865 chan
->expected_ack_seq
= req_seq
;
3866 l2cap_drop_acked_frames(chan
);
3868 tx_seq_offset
= __seq_offset(chan
, tx_seq
, chan
->buffer_seq
);
3870 /* invalid tx_seq */
3871 if (tx_seq_offset
>= chan
->tx_win
) {
3872 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3876 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
3877 if (!test_bit(CONN_RNR_SENT
, &chan
->conn_state
))
3878 l2cap_send_ack(chan
);
3882 if (tx_seq
== chan
->expected_tx_seq
)
3885 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
3886 struct srej_list
*first
;
3888 first
= list_first_entry(&chan
->srej_l
,
3889 struct srej_list
, list
);
3890 if (tx_seq
== first
->tx_seq
) {
3891 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3892 l2cap_check_srej_gap(chan
, tx_seq
);
3894 list_del(&first
->list
);
3897 if (list_empty(&chan
->srej_l
)) {
3898 chan
->buffer_seq
= chan
->buffer_seq_srej
;
3899 clear_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
3900 l2cap_send_ack(chan
);
3901 BT_DBG("chan %p, Exit SREJ_SENT", chan
);
3904 struct srej_list
*l
;
3906 /* duplicated tx_seq */
3907 if (l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
) < 0)
3910 list_for_each_entry(l
, &chan
->srej_l
, list
) {
3911 if (l
->tx_seq
== tx_seq
) {
3912 l2cap_resend_srejframe(chan
, tx_seq
);
3917 err
= l2cap_send_srejframe(chan
, tx_seq
);
3919 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3924 expected_tx_seq_offset
= __seq_offset(chan
,
3925 chan
->expected_tx_seq
, chan
->buffer_seq
);
3927 /* duplicated tx_seq */
3928 if (tx_seq_offset
< expected_tx_seq_offset
)
3931 set_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
3933 BT_DBG("chan %p, Enter SREJ", chan
);
3935 INIT_LIST_HEAD(&chan
->srej_l
);
3936 chan
->buffer_seq_srej
= chan
->buffer_seq
;
3938 __skb_queue_head_init(&chan
->srej_q
);
3939 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3941 /* Set P-bit only if there are some I-frames to ack. */
3942 if (__clear_ack_timer(chan
))
3943 set_bit(CONN_SEND_PBIT
, &chan
->conn_state
);
3945 err
= l2cap_send_srejframe(chan
, tx_seq
);
3947 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3954 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
3956 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
3957 bt_cb(skb
)->tx_seq
= tx_seq
;
3958 bt_cb(skb
)->sar
= sar
;
3959 __skb_queue_tail(&chan
->srej_q
, skb
);
3963 err
= l2cap_reassemble_sdu(chan
, skb
, rx_control
);
3964 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
3967 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3971 if (__is_ctrl_final(chan
, rx_control
)) {
3972 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
3973 l2cap_retransmit_frames(chan
);
3977 chan
->num_acked
= (chan
->num_acked
+ 1) % num_to_ack
;
3978 if (chan
->num_acked
== num_to_ack
- 1)
3979 l2cap_send_ack(chan
);
3981 __set_ack_timer(chan
);
3990 static inline void l2cap_data_channel_rrframe(struct l2cap_chan
*chan
, u32 rx_control
)
3992 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
,
3993 __get_reqseq(chan
, rx_control
), rx_control
);
3995 chan
->expected_ack_seq
= __get_reqseq(chan
, rx_control
);
3996 l2cap_drop_acked_frames(chan
);
3998 if (__is_ctrl_poll(chan
, rx_control
)) {
3999 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4000 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4001 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4002 (chan
->unacked_frames
> 0))
4003 __set_retrans_timer(chan
);
4005 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4006 l2cap_send_srejtail(chan
);
4008 l2cap_send_i_or_rr_or_rnr(chan
);
4011 } else if (__is_ctrl_final(chan
, rx_control
)) {
4012 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4014 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4015 l2cap_retransmit_frames(chan
);
4018 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4019 (chan
->unacked_frames
> 0))
4020 __set_retrans_timer(chan
);
4022 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4023 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
))
4024 l2cap_send_ack(chan
);
4026 l2cap_ertm_send(chan
);
4030 static inline void l2cap_data_channel_rejframe(struct l2cap_chan
*chan
, u32 rx_control
)
4032 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4034 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4036 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4038 chan
->expected_ack_seq
= tx_seq
;
4039 l2cap_drop_acked_frames(chan
);
4041 if (__is_ctrl_final(chan
, rx_control
)) {
4042 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4043 l2cap_retransmit_frames(chan
);
4045 l2cap_retransmit_frames(chan
);
4047 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
))
4048 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4051 static inline void l2cap_data_channel_srejframe(struct l2cap_chan
*chan
, u32 rx_control
)
4053 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4055 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4057 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4059 if (__is_ctrl_poll(chan
, rx_control
)) {
4060 chan
->expected_ack_seq
= tx_seq
;
4061 l2cap_drop_acked_frames(chan
);
4063 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4064 l2cap_retransmit_one_frame(chan
, tx_seq
);
4066 l2cap_ertm_send(chan
);
4068 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4069 chan
->srej_save_reqseq
= tx_seq
;
4070 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4072 } else if (__is_ctrl_final(chan
, rx_control
)) {
4073 if (test_bit(CONN_SREJ_ACT
, &chan
->conn_state
) &&
4074 chan
->srej_save_reqseq
== tx_seq
)
4075 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4077 l2cap_retransmit_one_frame(chan
, tx_seq
);
4079 l2cap_retransmit_one_frame(chan
, tx_seq
);
4080 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4081 chan
->srej_save_reqseq
= tx_seq
;
4082 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4087 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan
*chan
, u32 rx_control
)
4089 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4091 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4093 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4094 chan
->expected_ack_seq
= tx_seq
;
4095 l2cap_drop_acked_frames(chan
);
4097 if (__is_ctrl_poll(chan
, rx_control
))
4098 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4100 if (!test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4101 __clear_retrans_timer(chan
);
4102 if (__is_ctrl_poll(chan
, rx_control
))
4103 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_FINAL
);
4107 if (__is_ctrl_poll(chan
, rx_control
)) {
4108 l2cap_send_srejtail(chan
);
4110 rx_control
= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
4111 l2cap_send_sframe(chan
, rx_control
);
4115 static inline int l2cap_data_channel_sframe(struct l2cap_chan
*chan
, u32 rx_control
, struct sk_buff
*skb
)
4117 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan
, rx_control
, skb
->len
);
4119 if (__is_ctrl_final(chan
, rx_control
) &&
4120 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4121 __clear_monitor_timer(chan
);
4122 if (chan
->unacked_frames
> 0)
4123 __set_retrans_timer(chan
);
4124 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
4127 switch (__get_ctrl_super(chan
, rx_control
)) {
4128 case L2CAP_SUPER_RR
:
4129 l2cap_data_channel_rrframe(chan
, rx_control
);
4132 case L2CAP_SUPER_REJ
:
4133 l2cap_data_channel_rejframe(chan
, rx_control
);
4136 case L2CAP_SUPER_SREJ
:
4137 l2cap_data_channel_srejframe(chan
, rx_control
);
4140 case L2CAP_SUPER_RNR
:
4141 l2cap_data_channel_rnrframe(chan
, rx_control
);
4149 static int l2cap_ertm_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4153 int len
, next_tx_seq_offset
, req_seq_offset
;
4155 control
= __get_control(chan
, skb
->data
);
4156 skb_pull(skb
, __ctrl_size(chan
));
4160 * We can just drop the corrupted I-frame here.
4161 * Receiver will miss it and start proper recovery
4162 * procedures and ask retransmission.
4164 if (l2cap_check_fcs(chan
, skb
))
4167 if (__is_sar_start(chan
, control
) && !__is_sframe(chan
, control
))
4168 len
-= L2CAP_SDULEN_SIZE
;
4170 if (chan
->fcs
== L2CAP_FCS_CRC16
)
4171 len
-= L2CAP_FCS_SIZE
;
4173 if (len
> chan
->mps
) {
4174 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4178 req_seq
= __get_reqseq(chan
, control
);
4180 req_seq_offset
= __seq_offset(chan
, req_seq
, chan
->expected_ack_seq
);
4182 next_tx_seq_offset
= __seq_offset(chan
, chan
->next_tx_seq
,
4183 chan
->expected_ack_seq
);
4185 /* check for invalid req-seq */
4186 if (req_seq_offset
> next_tx_seq_offset
) {
4187 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4191 if (!__is_sframe(chan
, control
)) {
4193 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4197 l2cap_data_channel_iframe(chan
, control
, skb
);
4201 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4205 l2cap_data_channel_sframe(chan
, control
, skb
);
4215 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4217 struct l2cap_chan
*chan
;
4218 struct sock
*sk
= NULL
;
4223 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4225 BT_DBG("unknown cid 0x%4.4x", cid
);
4231 BT_DBG("chan %p, len %d", chan
, skb
->len
);
4233 if (chan
->state
!= BT_CONNECTED
)
4236 switch (chan
->mode
) {
4237 case L2CAP_MODE_BASIC
:
4238 /* If socket recv buffers overflows we drop data here
4239 * which is *bad* because L2CAP has to be reliable.
4240 * But we don't have any other choice. L2CAP doesn't
4241 * provide flow control mechanism. */
4243 if (chan
->imtu
< skb
->len
)
4246 if (!chan
->ops
->recv(chan
->data
, skb
))
4250 case L2CAP_MODE_ERTM
:
4251 l2cap_ertm_data_rcv(chan
, skb
);
4255 case L2CAP_MODE_STREAMING
:
4256 control
= __get_control(chan
, skb
->data
);
4257 skb_pull(skb
, __ctrl_size(chan
));
4260 if (l2cap_check_fcs(chan
, skb
))
4263 if (__is_sar_start(chan
, control
))
4264 len
-= L2CAP_SDULEN_SIZE
;
4266 if (chan
->fcs
== L2CAP_FCS_CRC16
)
4267 len
-= L2CAP_FCS_SIZE
;
4269 if (len
> chan
->mps
|| len
< 0 || __is_sframe(chan
, control
))
4272 tx_seq
= __get_txseq(chan
, control
);
4274 if (chan
->expected_tx_seq
!= tx_seq
) {
4275 /* Frame(s) missing - must discard partial SDU */
4276 kfree_skb(chan
->sdu
);
4278 chan
->sdu_last_frag
= NULL
;
4281 /* TODO: Notify userland of missing data */
4284 chan
->expected_tx_seq
= __next_seq(chan
, tx_seq
);
4286 if (l2cap_reassemble_sdu(chan
, skb
, control
) == -EMSGSIZE
)
4287 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4292 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
4306 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4308 struct sock
*sk
= NULL
;
4309 struct l2cap_chan
*chan
;
4311 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
);
4319 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4321 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
4324 if (chan
->imtu
< skb
->len
)
4327 if (!chan
->ops
->recv(chan
->data
, skb
))
4339 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, __le16 cid
, struct sk_buff
*skb
)
4341 struct sock
*sk
= NULL
;
4342 struct l2cap_chan
*chan
;
4344 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
);
4352 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4354 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
4357 if (chan
->imtu
< skb
->len
)
4360 if (!chan
->ops
->recv(chan
->data
, skb
))
4372 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4374 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4378 skb_pull(skb
, L2CAP_HDR_SIZE
);
4379 cid
= __le16_to_cpu(lh
->cid
);
4380 len
= __le16_to_cpu(lh
->len
);
4382 if (len
!= skb
->len
) {
4387 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4390 case L2CAP_CID_LE_SIGNALING
:
4391 case L2CAP_CID_SIGNALING
:
4392 l2cap_sig_channel(conn
, skb
);
4395 case L2CAP_CID_CONN_LESS
:
4396 psm
= get_unaligned_le16(skb
->data
);
4398 l2cap_conless_channel(conn
, psm
, skb
);
4401 case L2CAP_CID_LE_DATA
:
4402 l2cap_att_channel(conn
, cid
, skb
);
4406 if (smp_sig_channel(conn
, skb
))
4407 l2cap_conn_del(conn
->hcon
, EACCES
);
4411 l2cap_data_channel(conn
, cid
, skb
);
4416 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * l2cap_connect_ind - HCI callback: should we accept an incoming ACL
 * connection from @bdaddr on @hdev?
 * Scans the global channel list (under chan_list_lock) for listening
 * channels: lm1 accumulates the link mode for channels bound exactly to this
 * adapter's address, lm2 for wildcard (BDADDR_ANY) listeners; the
 * FLAG_ROLE_SWITCH channel flag additionally requests HCI_LM_MASTER.
 * NOTE(review): `exact` is initialized but no visible line sets it — the
 * statement that marks an exact address match appears to be elided from this
 * extraction (gap at original lines 4429-4432/4437); confirm against the
 * full file before relying on the lm1/lm2 selection below.
 */
4418 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
4420 int exact
= 0, lm1
= 0, lm2
= 0;
4421 struct l2cap_chan
*c
;
4423 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4425 /* Find listening sockets and check their link_mode */
4426 read_lock(&chan_list_lock
);
4427 list_for_each_entry(c
, &chan_list
, global_l
) {
4428 struct sock
*sk
= c
->sk
;
/* Only listening channels participate in the accept decision. */
4430 if (c
->state
!= BT_LISTEN
)
/* Channel bound to this adapter's own address. */
4433 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4434 lm1
|= HCI_LM_ACCEPT
;
4435 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4436 lm1
|= HCI_LM_MASTER
;
/* Wildcard listener (bound to any local address). */
4438 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4439 lm2
|= HCI_LM_ACCEPT
;
4440 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4441 lm2
|= HCI_LM_MASTER
;
4444 read_unlock(&chan_list_lock
);
/* Prefer the exact-address link mode when an exact match was seen. */
4446 return exact
? lm1
: lm2
;
/*
 * l2cap_connect_cfm - HCI callback: ACL connection attempt completed.
 * On success (implied by the elided status check between original lines 4453
 * and 4458) attach an L2CAP connection to the hci_conn and kick it ready;
 * on failure tear down with the HCI status translated to an errno.
 * NOTE(review): the `if (!status)` / `else` skeleton and the return are not
 * visible in this extraction — confirm against the full file.
 */
4449 int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4451 struct l2cap_conn
*conn
;
4453 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4456 conn
= l2cap_conn_add(hcon
, status
);
4458 l2cap_conn_ready(conn
);
4460 l2cap_conn_del(hcon
, bt_to_errno(status
));
/*
 * l2cap_disconn_ind - HCI callback: pick the disconnect reason to report.
 * Returns the connection's recorded disc_reason, or the generic
 * HCI_ERROR_REMOTE_USER_TERM fallback (the guard condition selecting between
 * the two returns — presumably a NULL/type check on conn, original lines
 * 4470-4471 — is elided from this extraction; confirm against the full file).
 */
4465 int l2cap_disconn_ind(struct hci_conn
*hcon
)
4467 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4469 BT_DBG("hcon %p", hcon
);
4472 return HCI_ERROR_REMOTE_USER_TERM
;
4473 return conn
->disc_reason
;
/*
 * l2cap_disconn_cfm - HCI callback: ACL link went down.
 * Tears down the L2CAP connection state attached to @hcon, translating the
 * raw HCI reason code to an errno for the channels being closed.
 */
4476 int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4478 BT_DBG("hcon %p reason %d", hcon
, reason
);
4480 l2cap_conn_del(hcon
, bt_to_errno(reason
));
/*
 * l2cap_check_encryption - react to an encryption change on a channel.
 * Only connection-oriented channels are affected. When encryption drops
 * (encrypt == 0x00): a BT_SECURITY_MEDIUM channel gets a grace timer
 * (L2CAP_ENC_TIMEOUT) to re-encrypt, a BT_SECURITY_HIGH channel is closed
 * immediately with ECONNREFUSED. When encryption is on (the elided `else`
 * branch, original line 4496): a MEDIUM channel just has its pending timer
 * cleared.
 */
4484 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
4486 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
4489 if (encrypt
== 0x00) {
4490 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
/* Give the link a window to re-establish encryption. */
4491 __clear_chan_timer(chan
);
4492 __set_chan_timer(chan
,
4493 msecs_to_jiffies(L2CAP_ENC_TIMEOUT
));
4494 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
/* High security never tolerates an unencrypted link. */
4495 l2cap_chan_close(chan
, ECONNREFUSED
);
4497 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
4498 __clear_chan_timer(chan
);
/*
 * l2cap_security_cfm - HCI callback: authentication/encryption completed.
 * For LE links: push SMP key distribution and cancel the security timer.
 * For BR/EDR: walk every channel on the connection (RCU list) and advance
 * its state machine according to @status/@encrypt:
 *   - LE data channel: success + encryption -> adopt hcon->sec_level and
 *     mark the channel ready;
 *   - established channels (BT_CONNECTED/BT_CONFIG): re-check encryption;
 *   - BT_CONNECT: security now satisfied, send the pending L2CAP_CONN_REQ;
 *   - BT_CONNECT2: answer the peer's connect request with success, pending
 *     (deferred setup) or security-block, then send L2CAP_CONN_RSP.
 * NOTE(review): this extraction elides many originals (locking lines,
 * the status-failure branches around 4530-4535/4544-4547, `else` keywords
 * before 4581, and the function tail after 4592) — the fragments below
 * preserve only the visible statements; confirm ordering against the full
 * file before modifying.
 */
4502 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4504 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4505 struct l2cap_chan
*chan
;
4510 BT_DBG("conn %p", conn
);
/* LE link: hand off to SMP and stop the pairing security timer. */
4512 if (hcon
->type
== LE_LINK
) {
4513 smp_distribute_keys(conn
, 0);
4514 cancel_delayed_work(&conn
->security_timer
);
4519 list_for_each_entry_rcu(chan
, &conn
->chan_l
, list
) {
4520 struct sock
*sk
= chan
->sk
;
4524 BT_DBG("chan->scid %d", chan
->scid
);
4526 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
4527 if (!status
&& encrypt
) {
4528 chan
->sec_level
= hcon
->sec_level
;
4529 l2cap_chan_ready(chan
);
4536 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
4541 if (!status
&& (chan
->state
== BT_CONNECTED
||
4542 chan
->state
== BT_CONFIG
)) {
4543 l2cap_check_encryption(chan
, encrypt
);
/* Outgoing connect was waiting on security: fire the CONN_REQ now. */
4548 if (chan
->state
== BT_CONNECT
) {
4550 struct l2cap_conn_req req
;
4551 req
.scid
= cpu_to_le16(chan
->scid
);
4552 req
.psm
= chan
->psm
;
4554 chan
->ident
= l2cap_get_ident(conn
);
4555 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
4557 l2cap_send_cmd(conn
, chan
->ident
,
4558 L2CAP_CONN_REQ
, sizeof(req
), &req
);
4560 __clear_chan_timer(chan
);
4561 __set_chan_timer(chan
,
4562 msecs_to_jiffies(L2CAP_DISC_TIMEOUT
));
/* Incoming connect was waiting on security: build the CONN_RSP. */
4564 } else if (chan
->state
== BT_CONNECT2
) {
4565 struct l2cap_conn_rsp rsp
;
4569 if (bt_sk(sk
)->defer_setup
) {
4570 struct sock
*parent
= bt_sk(sk
)->parent
;
4571 res
= L2CAP_CR_PEND
;
4572 stat
= L2CAP_CS_AUTHOR_PEND
;
/* Wake the listening socket so userspace can accept(). */
4574 parent
->sk_data_ready(parent
, 0);
4576 l2cap_state_change(chan
, BT_CONFIG
);
4577 res
= L2CAP_CR_SUCCESS
;
4578 stat
= L2CAP_CS_NO_INFO
;
/* Security failed: refuse the connection and arm disconnect timer. */
4581 l2cap_state_change(chan
, BT_DISCONN
);
4582 __set_chan_timer(chan
,
4583 msecs_to_jiffies(L2CAP_DISC_TIMEOUT
));
4584 res
= L2CAP_CR_SEC_BLOCK
;
4585 stat
= L2CAP_CS_NO_INFO
;
/* Note rsp.scid/dcid are swapped relative to our view, as the
 * peer's source CID is our destination CID. */
4588 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4589 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4590 rsp
.result
= cpu_to_le16(res
);
4591 rsp
.status
= cpu_to_le16(stat
);
4592 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
/*
 * l2cap_recv_acldata - HCI entry point: reassemble ACL fragments into L2CAP
 * frames.
 * Start fragments (!(flags & ACL_CONT)): parse the basic L2CAP header to get
 * the total frame length and CID; a fragment that already holds the whole
 * frame is dispatched directly via l2cap_recv_frame(), otherwise an rx_skb
 * of the full length is allocated and the fragment copied in, with rx_len
 * tracking the bytes still expected. Continuation fragments are appended to
 * rx_skb; when rx_len reaches zero the completed frame is dispatched.
 * All malformed cases (unexpected start/continuation, short header,
 * over-long frame or fragment, frame exceeding the channel's imtu) log via
 * BT_ERR and mark the connection unreliable (ECOMM).
 * NOTE(review): many originals are elided here (the drop/return paths, the
 * `else` introducing the continuation branch near 4681, allocation-failure
 * handling after 4673, chan_put/unlock calls) — confirm against the full
 * file.
 */
4604 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4606 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
/* First data on this link: create the L2CAP connection lazily. */
4609 conn
= l2cap_conn_add(hcon
, 0);
4614 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4616 if (!(flags
& ACL_CONT
)) {
4617 struct l2cap_hdr
*hdr
;
4618 struct l2cap_chan
*chan
;
/* A start frame while reassembly is in progress: drop partial data. */
4623 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4624 kfree_skb(conn
->rx_skb
);
4625 conn
->rx_skb
= NULL
;
4627 l2cap_conn_unreliable(conn
, ECOMM
);
4630 /* Start fragment always begin with Basic L2CAP header */
4631 if (skb
->len
< L2CAP_HDR_SIZE
) {
4632 BT_ERR("Frame is too short (len %d)", skb
->len
);
4633 l2cap_conn_unreliable(conn
, ECOMM
);
4637 hdr
= (struct l2cap_hdr
*) skb
->data
;
4638 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4639 cid
= __le16_to_cpu(hdr
->cid
);
4641 if (len
== skb
->len
) {
4642 /* Complete frame received */
4643 l2cap_recv_frame(conn
, skb
);
4647 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4649 if (skb
->len
> len
) {
4650 BT_ERR("Frame is too long (len %d, expected len %d)",
4652 l2cap_conn_unreliable(conn
, ECOMM
);
/* Pre-check the destination channel's MTU before buffering. */
4656 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4658 if (chan
&& chan
->sk
) {
4659 struct sock
*sk
= chan
->sk
;
4661 if (chan
->imtu
< len
- L2CAP_HDR_SIZE
) {
4662 BT_ERR("Frame exceeding recv MTU (len %d, "
4666 l2cap_conn_unreliable(conn
, ECOMM
);
4672 /* Allocate skb for the complete frame (with header) */
4673 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4677 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4679 conn
->rx_len
= len
- skb
->len
;
/* --- Continuation fragment path --- */
4681 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4683 if (!conn
->rx_len
) {
4684 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4685 l2cap_conn_unreliable(conn
, ECOMM
);
4689 if (skb
->len
> conn
->rx_len
) {
4690 BT_ERR("Fragment is too long (len %d, expected %d)",
4691 skb
->len
, conn
->rx_len
);
4692 kfree_skb(conn
->rx_skb
);
4693 conn
->rx_skb
= NULL
;
4695 l2cap_conn_unreliable(conn
, ECOMM
);
4699 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4701 conn
->rx_len
-= skb
->len
;
4703 if (!conn
->rx_len
) {
4704 /* Complete frame received */
4705 l2cap_recv_frame(conn
, conn
->rx_skb
);
4706 conn
->rx_skb
= NULL
;
/*
 * l2cap_debugfs_show - seq_file show callback for /sys/kernel/debug/.../l2cap.
 * Dumps one line per global channel (under chan_list_lock): source/dest
 * bdaddr, state, PSM (converted from little-endian), scid, dcid, imtu, omtu,
 * security level and mode.
 */
4715 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4717 struct l2cap_chan
*c
;
4719 read_lock(&chan_list_lock
);
4721 list_for_each_entry(c
, &chan_list
, global_l
) {
4722 struct sock
*sk
= c
->sk
;
4724 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4725 batostr(&bt_sk(sk
)->src
),
4726 batostr(&bt_sk(sk
)->dst
),
4727 c
->state
, __le16_to_cpu(c
->psm
),
4728 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
4729 c
->sec_level
, c
->mode
);
4732 read_unlock(&chan_list_lock
);
/*
 * l2cap_debugfs_open - standard single_open() wrapper binding the debugfs
 * file to l2cap_debugfs_show.
 */
4737 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4739 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
/*
 * File operations for the l2cap debugfs entry: seq_file boilerplate
 * (open -> single_open, llseek/release from seq_file helpers).
 * NOTE(review): the .read hook (original line 4744, presumably seq_read)
 * is elided from this extraction.
 */
4742 static const struct file_operations l2cap_debugfs_fops
= {
4743 .open
= l2cap_debugfs_open
,
4745 .llseek
= seq_lseek
,
4746 .release
= single_release
,
/* Dentry of the created debugfs file; removed again in l2cap_exit(). */
4749 static struct dentry
*l2cap_debugfs
;
/*
 * l2cap_init - module init: register the L2CAP socket family and create the
 * debugfs file under bt_debugfs. Creation failure of the debugfs file is
 * only logged, not fatal.
 * NOTE(review): error handling for l2cap_init_sockets() and the return
 * statements (original lines 4756-4759, 4764+) are elided from this
 * extraction.
 */
4751 int __init
l2cap_init(void)
4755 err
= l2cap_init_sockets();
4760 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4761 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4763 BT_ERR("Failed to create L2CAP debug file");
/*
 * l2cap_exit - module teardown: remove the debugfs entry and unregister
 * the L2CAP sockets (mirror of l2cap_init in reverse order).
 */
4769 void l2cap_exit(void)
4771 debugfs_remove(l2cap_debugfs
);
4772 l2cap_cleanup_sockets();
/* Module parameter: allow disabling Enhanced Retransmission Mode at load
 * time (writable at runtime via sysfs, mode 0644). */
4775 module_param(disable_ertm
, bool, 0644);
4776 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");