2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
63 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
65 static LIST_HEAD(chan_list
);
66 static DEFINE_RWLOCK(chan_list_lock
);
68 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
69 u8 code
, u8 ident
, u16 dlen
, void *data
);
70 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
72 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
73 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
74 struct l2cap_chan
*chan
, int err
);
76 /* ---- L2CAP channels ---- */
78 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
80 struct l2cap_chan
*c
, *r
= NULL
;
84 list_for_each_entry_rcu(c
, &conn
->chan_l
, list
) {
95 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
97 struct l2cap_chan
*c
, *r
= NULL
;
101 list_for_each_entry_rcu(c
, &conn
->chan_l
, list
) {
102 if (c
->scid
== cid
) {
112 /* Find channel with given SCID.
113 * Returns locked channel */
114 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
116 struct l2cap_chan
*c
;
118 c
= __l2cap_get_chan_by_scid(conn
, cid
);
124 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
126 struct l2cap_chan
*c
, *r
= NULL
;
130 list_for_each_entry_rcu(c
, &conn
->chan_l
, list
) {
131 if (c
->ident
== ident
) {
141 static inline struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
143 struct l2cap_chan
*c
;
145 c
= __l2cap_get_chan_by_ident(conn
, ident
);
151 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
153 struct l2cap_chan
*c
;
155 list_for_each_entry(c
, &chan_list
, global_l
) {
156 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
162 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
166 write_lock(&chan_list_lock
);
168 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
181 for (p
= 0x1001; p
< 0x1100; p
+= 2)
182 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
183 chan
->psm
= cpu_to_le16(p
);
184 chan
->sport
= cpu_to_le16(p
);
191 write_unlock(&chan_list_lock
);
195 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
197 write_lock(&chan_list_lock
);
201 write_unlock(&chan_list_lock
);
206 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
208 u16 cid
= L2CAP_CID_DYN_START
;
210 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
211 if (!__l2cap_get_chan_by_scid(conn
, cid
))
218 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
220 BT_DBG("%p %s -> %s", chan
, state_to_string(chan
->state
),
221 state_to_string(state
));
224 chan
->ops
->state_change(chan
->data
, state
);
227 static void l2cap_chan_timeout(struct work_struct
*work
)
229 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
231 struct sock
*sk
= chan
->sk
;
234 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
238 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
239 reason
= ECONNREFUSED
;
240 else if (chan
->state
== BT_CONNECT
&&
241 chan
->sec_level
!= BT_SECURITY_SDP
)
242 reason
= ECONNREFUSED
;
246 l2cap_chan_close(chan
, reason
);
250 chan
->ops
->close(chan
->data
);
251 l2cap_chan_put(chan
);
254 struct l2cap_chan
*l2cap_chan_create(struct sock
*sk
)
256 struct l2cap_chan
*chan
;
258 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
264 write_lock(&chan_list_lock
);
265 list_add(&chan
->global_l
, &chan_list
);
266 write_unlock(&chan_list_lock
);
268 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
270 chan
->state
= BT_OPEN
;
272 atomic_set(&chan
->refcnt
, 1);
274 BT_DBG("sk %p chan %p", sk
, chan
);
279 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
281 write_lock(&chan_list_lock
);
282 list_del(&chan
->global_l
);
283 write_unlock(&chan_list_lock
);
285 l2cap_chan_put(chan
);
288 static void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
290 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
291 chan
->psm
, chan
->dcid
);
293 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
297 switch (chan
->chan_type
) {
298 case L2CAP_CHAN_CONN_ORIENTED
:
299 if (conn
->hcon
->type
== LE_LINK
) {
301 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
302 chan
->scid
= L2CAP_CID_LE_DATA
;
303 chan
->dcid
= L2CAP_CID_LE_DATA
;
305 /* Alloc CID for connection-oriented socket */
306 chan
->scid
= l2cap_alloc_cid(conn
);
307 chan
->omtu
= L2CAP_DEFAULT_MTU
;
311 case L2CAP_CHAN_CONN_LESS
:
312 /* Connectionless socket */
313 chan
->scid
= L2CAP_CID_CONN_LESS
;
314 chan
->dcid
= L2CAP_CID_CONN_LESS
;
315 chan
->omtu
= L2CAP_DEFAULT_MTU
;
319 /* Raw socket can send/recv signalling messages only */
320 chan
->scid
= L2CAP_CID_SIGNALING
;
321 chan
->dcid
= L2CAP_CID_SIGNALING
;
322 chan
->omtu
= L2CAP_DEFAULT_MTU
;
325 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
326 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
327 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
328 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
329 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
330 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
332 l2cap_chan_hold(chan
);
334 list_add_rcu(&chan
->list
, &conn
->chan_l
);
338 * Must be called on the locked socket. */
339 static void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
341 struct sock
*sk
= chan
->sk
;
342 struct l2cap_conn
*conn
= chan
->conn
;
343 struct sock
*parent
= bt_sk(sk
)->parent
;
345 __clear_chan_timer(chan
);
347 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
350 /* Delete from channel list */
351 list_del_rcu(&chan
->list
);
354 l2cap_chan_put(chan
);
357 hci_conn_put(conn
->hcon
);
360 l2cap_state_change(chan
, BT_CLOSED
);
361 sock_set_flag(sk
, SOCK_ZAPPED
);
367 bt_accept_unlink(sk
);
368 parent
->sk_data_ready(parent
, 0);
370 sk
->sk_state_change(sk
);
372 if (!(test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
) &&
373 test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)))
376 skb_queue_purge(&chan
->tx_q
);
378 if (chan
->mode
== L2CAP_MODE_ERTM
) {
379 struct srej_list
*l
, *tmp
;
381 __clear_retrans_timer(chan
);
382 __clear_monitor_timer(chan
);
383 __clear_ack_timer(chan
);
385 skb_queue_purge(&chan
->srej_q
);
387 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
394 static void l2cap_chan_cleanup_listen(struct sock
*parent
)
398 BT_DBG("parent %p", parent
);
400 /* Close not yet accepted channels */
401 while ((sk
= bt_accept_dequeue(parent
, NULL
))) {
402 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
403 __clear_chan_timer(chan
);
405 l2cap_chan_close(chan
, ECONNRESET
);
407 chan
->ops
->close(chan
->data
);
411 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
413 struct l2cap_conn
*conn
= chan
->conn
;
414 struct sock
*sk
= chan
->sk
;
416 BT_DBG("chan %p state %s sk %p", chan
,
417 state_to_string(chan
->state
), sk
);
419 switch (chan
->state
) {
421 l2cap_chan_cleanup_listen(sk
);
423 l2cap_state_change(chan
, BT_CLOSED
);
424 sock_set_flag(sk
, SOCK_ZAPPED
);
429 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
430 conn
->hcon
->type
== ACL_LINK
) {
431 __clear_chan_timer(chan
);
432 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
433 l2cap_send_disconn_req(conn
, chan
, reason
);
435 l2cap_chan_del(chan
, reason
);
439 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
440 conn
->hcon
->type
== ACL_LINK
) {
441 struct l2cap_conn_rsp rsp
;
444 if (bt_sk(sk
)->defer_setup
)
445 result
= L2CAP_CR_SEC_BLOCK
;
447 result
= L2CAP_CR_BAD_PSM
;
448 l2cap_state_change(chan
, BT_DISCONN
);
450 rsp
.scid
= cpu_to_le16(chan
->dcid
);
451 rsp
.dcid
= cpu_to_le16(chan
->scid
);
452 rsp
.result
= cpu_to_le16(result
);
453 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
454 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
458 l2cap_chan_del(chan
, reason
);
463 l2cap_chan_del(chan
, reason
);
467 sock_set_flag(sk
, SOCK_ZAPPED
);
472 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
474 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
475 switch (chan
->sec_level
) {
476 case BT_SECURITY_HIGH
:
477 return HCI_AT_DEDICATED_BONDING_MITM
;
478 case BT_SECURITY_MEDIUM
:
479 return HCI_AT_DEDICATED_BONDING
;
481 return HCI_AT_NO_BONDING
;
483 } else if (chan
->psm
== cpu_to_le16(0x0001)) {
484 if (chan
->sec_level
== BT_SECURITY_LOW
)
485 chan
->sec_level
= BT_SECURITY_SDP
;
487 if (chan
->sec_level
== BT_SECURITY_HIGH
)
488 return HCI_AT_NO_BONDING_MITM
;
490 return HCI_AT_NO_BONDING
;
492 switch (chan
->sec_level
) {
493 case BT_SECURITY_HIGH
:
494 return HCI_AT_GENERAL_BONDING_MITM
;
495 case BT_SECURITY_MEDIUM
:
496 return HCI_AT_GENERAL_BONDING
;
498 return HCI_AT_NO_BONDING
;
503 /* Service level security */
504 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
506 struct l2cap_conn
*conn
= chan
->conn
;
509 auth_type
= l2cap_get_auth_type(chan
);
511 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
514 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
518 /* Get next available identifier.
519 * 1 - 128 are used by kernel.
520 * 129 - 199 are reserved.
521 * 200 - 254 are used by utilities like l2ping, etc.
524 spin_lock(&conn
->lock
);
526 if (++conn
->tx_ident
> 128)
531 spin_unlock(&conn
->lock
);
536 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
538 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
541 BT_DBG("code 0x%2.2x", code
);
546 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
547 flags
= ACL_START_NO_FLUSH
;
551 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
552 skb
->priority
= HCI_PRIO_MAX
;
554 hci_send_acl(conn
->hchan
, skb
, flags
);
557 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
559 struct hci_conn
*hcon
= chan
->conn
->hcon
;
562 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
565 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
566 lmp_no_flush_capable(hcon
->hdev
))
567 flags
= ACL_START_NO_FLUSH
;
571 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
572 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
575 static inline void l2cap_send_sframe(struct l2cap_chan
*chan
, u32 control
)
578 struct l2cap_hdr
*lh
;
579 struct l2cap_conn
*conn
= chan
->conn
;
582 if (chan
->state
!= BT_CONNECTED
)
585 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
586 hlen
= L2CAP_EXT_HDR_SIZE
;
588 hlen
= L2CAP_ENH_HDR_SIZE
;
590 if (chan
->fcs
== L2CAP_FCS_CRC16
)
591 hlen
+= L2CAP_FCS_SIZE
;
593 BT_DBG("chan %p, control 0x%8.8x", chan
, control
);
595 count
= min_t(unsigned int, conn
->mtu
, hlen
);
597 control
|= __set_sframe(chan
);
599 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
600 control
|= __set_ctrl_final(chan
);
602 if (test_and_clear_bit(CONN_SEND_PBIT
, &chan
->conn_state
))
603 control
|= __set_ctrl_poll(chan
);
605 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
609 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
610 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
611 lh
->cid
= cpu_to_le16(chan
->dcid
);
613 __put_control(chan
, control
, skb_put(skb
, __ctrl_size(chan
)));
615 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
616 u16 fcs
= crc16(0, (u8
*)lh
, count
- L2CAP_FCS_SIZE
);
617 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
620 skb
->priority
= HCI_PRIO_MAX
;
621 l2cap_do_send(chan
, skb
);
624 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, u32 control
)
626 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
627 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
628 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
630 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
632 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
634 l2cap_send_sframe(chan
, control
);
637 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
639 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
642 static void l2cap_do_start(struct l2cap_chan
*chan
)
644 struct l2cap_conn
*conn
= chan
->conn
;
646 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
647 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
650 if (l2cap_chan_check_security(chan
) &&
651 __l2cap_no_conn_pending(chan
)) {
652 struct l2cap_conn_req req
;
653 req
.scid
= cpu_to_le16(chan
->scid
);
656 chan
->ident
= l2cap_get_ident(conn
);
657 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
659 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
663 struct l2cap_info_req req
;
664 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
666 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
667 conn
->info_ident
= l2cap_get_ident(conn
);
669 schedule_delayed_work(&conn
->info_timer
,
670 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
672 l2cap_send_cmd(conn
, conn
->info_ident
,
673 L2CAP_INFO_REQ
, sizeof(req
), &req
);
677 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
679 u32 local_feat_mask
= l2cap_feat_mask
;
681 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
684 case L2CAP_MODE_ERTM
:
685 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
686 case L2CAP_MODE_STREAMING
:
687 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
693 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
696 struct l2cap_disconn_req req
;
703 if (chan
->mode
== L2CAP_MODE_ERTM
) {
704 __clear_retrans_timer(chan
);
705 __clear_monitor_timer(chan
);
706 __clear_ack_timer(chan
);
709 req
.dcid
= cpu_to_le16(chan
->dcid
);
710 req
.scid
= cpu_to_le16(chan
->scid
);
711 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
712 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
714 l2cap_state_change(chan
, BT_DISCONN
);
718 /* ---- L2CAP connections ---- */
719 static void l2cap_conn_start(struct l2cap_conn
*conn
)
721 struct l2cap_chan
*chan
;
723 BT_DBG("conn %p", conn
);
727 list_for_each_entry_rcu(chan
, &conn
->chan_l
, list
) {
728 struct sock
*sk
= chan
->sk
;
732 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
737 if (chan
->state
== BT_CONNECT
) {
738 struct l2cap_conn_req req
;
740 if (!l2cap_chan_check_security(chan
) ||
741 !__l2cap_no_conn_pending(chan
)) {
746 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
747 && test_bit(CONF_STATE2_DEVICE
,
748 &chan
->conf_state
)) {
749 /* l2cap_chan_close() calls list_del(chan)
750 * so release the lock */
751 l2cap_chan_close(chan
, ECONNRESET
);
756 req
.scid
= cpu_to_le16(chan
->scid
);
759 chan
->ident
= l2cap_get_ident(conn
);
760 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
762 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
765 } else if (chan
->state
== BT_CONNECT2
) {
766 struct l2cap_conn_rsp rsp
;
768 rsp
.scid
= cpu_to_le16(chan
->dcid
);
769 rsp
.dcid
= cpu_to_le16(chan
->scid
);
771 if (l2cap_chan_check_security(chan
)) {
772 if (bt_sk(sk
)->defer_setup
) {
773 struct sock
*parent
= bt_sk(sk
)->parent
;
774 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
775 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
777 parent
->sk_data_ready(parent
, 0);
780 l2cap_state_change(chan
, BT_CONFIG
);
781 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
782 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
785 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
786 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
789 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
792 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
793 rsp
.result
!= L2CAP_CR_SUCCESS
) {
798 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
799 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
800 l2cap_build_conf_req(chan
, buf
), buf
);
801 chan
->num_conf_req
++;
810 /* Find channel with given CID and source bdaddr.
811 * Returns closest match, locked.
813 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, __le16 cid
, bdaddr_t
*src
)
815 struct l2cap_chan
*c
, *c1
= NULL
;
817 read_lock(&chan_list_lock
);
819 list_for_each_entry(c
, &chan_list
, global_l
) {
820 struct sock
*sk
= c
->sk
;
822 if (state
&& c
->state
!= state
)
825 if (c
->scid
== cid
) {
827 if (!bacmp(&bt_sk(sk
)->src
, src
)) {
828 read_unlock(&chan_list_lock
);
833 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
838 read_unlock(&chan_list_lock
);
843 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
845 struct sock
*parent
, *sk
;
846 struct l2cap_chan
*chan
, *pchan
;
850 /* Check if we have socket listening on cid */
851 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
860 /* Check for backlog size */
861 if (sk_acceptq_is_full(parent
)) {
862 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
866 chan
= pchan
->ops
->new_connection(pchan
->data
);
872 hci_conn_hold(conn
->hcon
);
874 bacpy(&bt_sk(sk
)->src
, conn
->src
);
875 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
877 bt_accept_enqueue(parent
, sk
);
879 l2cap_chan_add(conn
, chan
);
881 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
883 l2cap_state_change(chan
, BT_CONNECTED
);
884 parent
->sk_data_ready(parent
, 0);
887 release_sock(parent
);
890 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
892 struct sock
*sk
= chan
->sk
;
893 struct sock
*parent
= bt_sk(sk
)->parent
;
895 BT_DBG("sk %p, parent %p", sk
, parent
);
897 chan
->conf_state
= 0;
898 __clear_chan_timer(chan
);
900 l2cap_state_change(chan
, BT_CONNECTED
);
901 sk
->sk_state_change(sk
);
904 parent
->sk_data_ready(parent
, 0);
907 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
909 struct l2cap_chan
*chan
;
911 BT_DBG("conn %p", conn
);
913 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
914 l2cap_le_conn_ready(conn
);
916 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
917 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
921 list_for_each_entry_rcu(chan
, &conn
->chan_l
, list
) {
922 struct sock
*sk
= chan
->sk
;
926 if (conn
->hcon
->type
== LE_LINK
) {
927 if (smp_conn_security(conn
, chan
->sec_level
))
928 l2cap_chan_ready(chan
);
930 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
931 __clear_chan_timer(chan
);
932 l2cap_state_change(chan
, BT_CONNECTED
);
933 sk
->sk_state_change(sk
);
935 } else if (chan
->state
== BT_CONNECT
)
936 l2cap_do_start(chan
);
944 /* Notify sockets that we cannot guarantee reliability anymore */
945 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
947 struct l2cap_chan
*chan
;
949 BT_DBG("conn %p", conn
);
953 list_for_each_entry_rcu(chan
, &conn
->chan_l
, list
) {
954 struct sock
*sk
= chan
->sk
;
956 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
963 static void l2cap_info_timeout(struct work_struct
*work
)
965 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
968 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
969 conn
->info_ident
= 0;
971 l2cap_conn_start(conn
);
974 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
976 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
977 struct l2cap_chan
*chan
, *l
;
983 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
985 kfree_skb(conn
->rx_skb
);
988 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
991 l2cap_chan_del(chan
, err
);
993 chan
->ops
->close(chan
->data
);
996 hci_chan_del(conn
->hchan
);
998 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
999 cancel_delayed_work_sync(&conn
->info_timer
);
1001 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1002 cancel_delayed_work_sync(&conn
->security_timer
);
1003 smp_chan_destroy(conn
);
1006 hcon
->l2cap_data
= NULL
;
1010 static void security_timeout(struct work_struct
*work
)
1012 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1013 security_timer
.work
);
1015 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1018 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1020 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1021 struct hci_chan
*hchan
;
1026 hchan
= hci_chan_create(hcon
);
1030 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
1032 hci_chan_del(hchan
);
1036 hcon
->l2cap_data
= conn
;
1038 conn
->hchan
= hchan
;
1040 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1042 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
1043 conn
->mtu
= hcon
->hdev
->le_mtu
;
1045 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1047 conn
->src
= &hcon
->hdev
->bdaddr
;
1048 conn
->dst
= &hcon
->dst
;
1050 conn
->feat_mask
= 0;
1052 spin_lock_init(&conn
->lock
);
1054 INIT_LIST_HEAD(&conn
->chan_l
);
1056 if (hcon
->type
== LE_LINK
)
1057 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1059 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1061 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1066 /* ---- Socket interface ---- */
1068 /* Find channel with given PSM and source bdaddr.
1069 * Returns closest match.
1071 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
1073 struct l2cap_chan
*c
, *c1
= NULL
;
1075 read_lock(&chan_list_lock
);
1077 list_for_each_entry(c
, &chan_list
, global_l
) {
1078 struct sock
*sk
= c
->sk
;
1080 if (state
&& c
->state
!= state
)
1083 if (c
->psm
== psm
) {
1085 if (!bacmp(&bt_sk(sk
)->src
, src
)) {
1086 read_unlock(&chan_list_lock
);
1091 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
1096 read_unlock(&chan_list_lock
);
1101 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
, bdaddr_t
*dst
)
1103 struct sock
*sk
= chan
->sk
;
1104 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1105 struct l2cap_conn
*conn
;
1106 struct hci_conn
*hcon
;
1107 struct hci_dev
*hdev
;
1111 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
1114 hdev
= hci_get_route(dst
, src
);
1116 return -EHOSTUNREACH
;
1122 /* PSM must be odd and lsb of upper byte must be 0 */
1123 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1124 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1129 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1134 switch (chan
->mode
) {
1135 case L2CAP_MODE_BASIC
:
1137 case L2CAP_MODE_ERTM
:
1138 case L2CAP_MODE_STREAMING
:
1147 switch (sk
->sk_state
) {
1151 /* Already connecting */
1156 /* Already connected */
1170 /* Set destination address and psm */
1171 bacpy(&bt_sk(sk
)->dst
, dst
);
1175 auth_type
= l2cap_get_auth_type(chan
);
1177 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1178 hcon
= hci_connect(hdev
, LE_LINK
, dst
,
1179 chan
->sec_level
, auth_type
);
1181 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
1182 chan
->sec_level
, auth_type
);
1185 err
= PTR_ERR(hcon
);
1189 conn
= l2cap_conn_add(hcon
, 0);
1196 /* Update source addr of the socket */
1197 bacpy(src
, conn
->src
);
1199 l2cap_chan_add(conn
, chan
);
1201 l2cap_state_change(chan
, BT_CONNECT
);
1202 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1204 if (hcon
->state
== BT_CONNECTED
) {
1205 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1206 __clear_chan_timer(chan
);
1207 if (l2cap_chan_check_security(chan
))
1208 l2cap_state_change(chan
, BT_CONNECTED
);
1210 l2cap_do_start(chan
);
1216 hci_dev_unlock(hdev
);
1221 int __l2cap_wait_ack(struct sock
*sk
)
1223 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1224 DECLARE_WAITQUEUE(wait
, current
);
1228 add_wait_queue(sk_sleep(sk
), &wait
);
1229 set_current_state(TASK_INTERRUPTIBLE
);
1230 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1234 if (signal_pending(current
)) {
1235 err
= sock_intr_errno(timeo
);
1240 timeo
= schedule_timeout(timeo
);
1242 set_current_state(TASK_INTERRUPTIBLE
);
1244 err
= sock_error(sk
);
1248 set_current_state(TASK_RUNNING
);
1249 remove_wait_queue(sk_sleep(sk
), &wait
);
1253 static void l2cap_monitor_timeout(struct work_struct
*work
)
1255 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1256 monitor_timer
.work
);
1257 struct sock
*sk
= chan
->sk
;
1259 BT_DBG("chan %p", chan
);
1262 if (chan
->retry_count
>= chan
->remote_max_tx
) {
1263 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1268 chan
->retry_count
++;
1269 __set_monitor_timer(chan
);
1271 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1275 static void l2cap_retrans_timeout(struct work_struct
*work
)
1277 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1278 retrans_timer
.work
);
1279 struct sock
*sk
= chan
->sk
;
1281 BT_DBG("chan %p", chan
);
1284 chan
->retry_count
= 1;
1285 __set_monitor_timer(chan
);
1287 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
1289 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1293 static void l2cap_drop_acked_frames(struct l2cap_chan
*chan
)
1295 struct sk_buff
*skb
;
1297 while ((skb
= skb_peek(&chan
->tx_q
)) &&
1298 chan
->unacked_frames
) {
1299 if (bt_cb(skb
)->tx_seq
== chan
->expected_ack_seq
)
1302 skb
= skb_dequeue(&chan
->tx_q
);
1305 chan
->unacked_frames
--;
1308 if (!chan
->unacked_frames
)
1309 __clear_retrans_timer(chan
);
1312 static void l2cap_streaming_send(struct l2cap_chan
*chan
)
1314 struct sk_buff
*skb
;
1318 while ((skb
= skb_dequeue(&chan
->tx_q
))) {
1319 control
= __get_control(chan
, skb
->data
+ L2CAP_HDR_SIZE
);
1320 control
|= __set_txseq(chan
, chan
->next_tx_seq
);
1321 __put_control(chan
, control
, skb
->data
+ L2CAP_HDR_SIZE
);
1323 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1324 fcs
= crc16(0, (u8
*)skb
->data
,
1325 skb
->len
- L2CAP_FCS_SIZE
);
1326 put_unaligned_le16(fcs
,
1327 skb
->data
+ skb
->len
- L2CAP_FCS_SIZE
);
1330 l2cap_do_send(chan
, skb
);
1332 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1336 static void l2cap_retransmit_one_frame(struct l2cap_chan
*chan
, u16 tx_seq
)
1338 struct sk_buff
*skb
, *tx_skb
;
1342 skb
= skb_peek(&chan
->tx_q
);
1346 while (bt_cb(skb
)->tx_seq
!= tx_seq
) {
1347 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1350 skb
= skb_queue_next(&chan
->tx_q
, skb
);
1353 if (chan
->remote_max_tx
&&
1354 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1355 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1359 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1360 bt_cb(skb
)->retries
++;
1362 control
= __get_control(chan
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1363 control
&= __get_sar_mask(chan
);
1365 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1366 control
|= __set_ctrl_final(chan
);
1368 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1369 control
|= __set_txseq(chan
, tx_seq
);
1371 __put_control(chan
, control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1373 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1374 fcs
= crc16(0, (u8
*)tx_skb
->data
,
1375 tx_skb
->len
- L2CAP_FCS_SIZE
);
1376 put_unaligned_le16(fcs
,
1377 tx_skb
->data
+ tx_skb
->len
- L2CAP_FCS_SIZE
);
1380 l2cap_do_send(chan
, tx_skb
);
1383 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1385 struct sk_buff
*skb
, *tx_skb
;
1390 if (chan
->state
!= BT_CONNECTED
)
1393 while ((skb
= chan
->tx_send_head
) && (!l2cap_tx_window_full(chan
))) {
1395 if (chan
->remote_max_tx
&&
1396 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1397 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1401 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1403 bt_cb(skb
)->retries
++;
1405 control
= __get_control(chan
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1406 control
&= __get_sar_mask(chan
);
1408 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1409 control
|= __set_ctrl_final(chan
);
1411 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1412 control
|= __set_txseq(chan
, chan
->next_tx_seq
);
1414 __put_control(chan
, control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1416 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1417 fcs
= crc16(0, (u8
*)skb
->data
,
1418 tx_skb
->len
- L2CAP_FCS_SIZE
);
1419 put_unaligned_le16(fcs
, skb
->data
+
1420 tx_skb
->len
- L2CAP_FCS_SIZE
);
1423 l2cap_do_send(chan
, tx_skb
);
1425 __set_retrans_timer(chan
);
1427 bt_cb(skb
)->tx_seq
= chan
->next_tx_seq
;
1429 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1431 if (bt_cb(skb
)->retries
== 1) {
1432 chan
->unacked_frames
++;
1435 __clear_ack_timer(chan
);
1438 chan
->frames_sent
++;
1440 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1441 chan
->tx_send_head
= NULL
;
1443 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1449 static int l2cap_retransmit_frames(struct l2cap_chan
*chan
)
1453 if (!skb_queue_empty(&chan
->tx_q
))
1454 chan
->tx_send_head
= chan
->tx_q
.next
;
1456 chan
->next_tx_seq
= chan
->expected_ack_seq
;
1457 ret
= l2cap_ertm_send(chan
);
1461 static void __l2cap_send_ack(struct l2cap_chan
*chan
)
1465 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1467 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
1468 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
1469 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1470 l2cap_send_sframe(chan
, control
);
1474 if (l2cap_ertm_send(chan
) > 0)
1477 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
1478 l2cap_send_sframe(chan
, control
);
1481 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1483 __clear_ack_timer(chan
);
1484 __l2cap_send_ack(chan
);
1487 static void l2cap_send_srejtail(struct l2cap_chan
*chan
)
1489 struct srej_list
*tail
;
1492 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
1493 control
|= __set_ctrl_final(chan
);
1495 tail
= list_entry((&chan
->srej_l
)->prev
, struct srej_list
, list
);
1496 control
|= __set_reqseq(chan
, tail
->tx_seq
);
1498 l2cap_send_sframe(chan
, control
);
1501 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1503 struct l2cap_conn
*conn
= chan
->conn
;
1504 struct sk_buff
**frag
;
1507 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1513 /* Continuation fragments (no L2CAP header) */
1514 frag
= &skb_shinfo(skb
)->frag_list
;
1516 count
= min_t(unsigned int, conn
->mtu
, len
);
1518 *frag
= chan
->ops
->alloc_skb(chan
, count
,
1519 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1523 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1526 (*frag
)->priority
= skb
->priority
;
1531 frag
= &(*frag
)->next
;
1537 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
1538 struct msghdr
*msg
, size_t len
,
1541 struct l2cap_conn
*conn
= chan
->conn
;
1542 struct sk_buff
*skb
;
1543 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
1544 struct l2cap_hdr
*lh
;
1546 BT_DBG("chan %p len %d priority %u", chan
, (int)len
, priority
);
1548 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1550 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
1551 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1554 return ERR_PTR(err
);
1556 skb
->priority
= priority
;
1558 /* Create L2CAP header */
1559 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1560 lh
->cid
= cpu_to_le16(chan
->dcid
);
1561 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1562 put_unaligned_le16(chan
->psm
, skb_put(skb
, 2));
1564 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1565 if (unlikely(err
< 0)) {
1567 return ERR_PTR(err
);
1572 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
1573 struct msghdr
*msg
, size_t len
,
1576 struct l2cap_conn
*conn
= chan
->conn
;
1577 struct sk_buff
*skb
;
1578 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1579 struct l2cap_hdr
*lh
;
1581 BT_DBG("chan %p len %d", chan
, (int)len
);
1583 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1585 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
1586 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1589 return ERR_PTR(err
);
1591 skb
->priority
= priority
;
1593 /* Create L2CAP header */
1594 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1595 lh
->cid
= cpu_to_le16(chan
->dcid
);
1596 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1598 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1599 if (unlikely(err
< 0)) {
1601 return ERR_PTR(err
);
1606 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
1607 struct msghdr
*msg
, size_t len
,
1608 u32 control
, u16 sdulen
)
1610 struct l2cap_conn
*conn
= chan
->conn
;
1611 struct sk_buff
*skb
;
1612 int err
, count
, hlen
;
1613 struct l2cap_hdr
*lh
;
1615 BT_DBG("chan %p len %d", chan
, (int)len
);
1618 return ERR_PTR(-ENOTCONN
);
1620 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1621 hlen
= L2CAP_EXT_HDR_SIZE
;
1623 hlen
= L2CAP_ENH_HDR_SIZE
;
1626 hlen
+= L2CAP_SDULEN_SIZE
;
1628 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1629 hlen
+= L2CAP_FCS_SIZE
;
1631 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1633 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
1634 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1637 return ERR_PTR(err
);
1639 /* Create L2CAP header */
1640 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1641 lh
->cid
= cpu_to_le16(chan
->dcid
);
1642 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1644 __put_control(chan
, control
, skb_put(skb
, __ctrl_size(chan
)));
1647 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
1649 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1650 if (unlikely(err
< 0)) {
1652 return ERR_PTR(err
);
1655 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1656 put_unaligned_le16(0, skb_put(skb
, L2CAP_FCS_SIZE
));
1658 bt_cb(skb
)->retries
= 0;
1662 static int l2cap_sar_segment_sdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1664 struct sk_buff
*skb
;
1665 struct sk_buff_head sar_queue
;
1669 skb_queue_head_init(&sar_queue
);
1670 control
= __set_ctrl_sar(chan
, L2CAP_SAR_START
);
1671 skb
= l2cap_create_iframe_pdu(chan
, msg
, chan
->remote_mps
, control
, len
);
1673 return PTR_ERR(skb
);
1675 __skb_queue_tail(&sar_queue
, skb
);
1676 len
-= chan
->remote_mps
;
1677 size
+= chan
->remote_mps
;
1682 if (len
> chan
->remote_mps
) {
1683 control
= __set_ctrl_sar(chan
, L2CAP_SAR_CONTINUE
);
1684 buflen
= chan
->remote_mps
;
1686 control
= __set_ctrl_sar(chan
, L2CAP_SAR_END
);
1690 skb
= l2cap_create_iframe_pdu(chan
, msg
, buflen
, control
, 0);
1692 skb_queue_purge(&sar_queue
);
1693 return PTR_ERR(skb
);
1696 __skb_queue_tail(&sar_queue
, skb
);
1700 skb_queue_splice_tail(&sar_queue
, &chan
->tx_q
);
1701 if (chan
->tx_send_head
== NULL
)
1702 chan
->tx_send_head
= sar_queue
.next
;
1707 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
1710 struct sk_buff
*skb
;
1714 /* Connectionless channel */
1715 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
1716 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
1718 return PTR_ERR(skb
);
1720 l2cap_do_send(chan
, skb
);
1724 switch (chan
->mode
) {
1725 case L2CAP_MODE_BASIC
:
1726 /* Check outgoing MTU */
1727 if (len
> chan
->omtu
)
1730 /* Create a basic PDU */
1731 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
1733 return PTR_ERR(skb
);
1735 l2cap_do_send(chan
, skb
);
1739 case L2CAP_MODE_ERTM
:
1740 case L2CAP_MODE_STREAMING
:
1741 /* Entire SDU fits into one PDU */
1742 if (len
<= chan
->remote_mps
) {
1743 control
= __set_ctrl_sar(chan
, L2CAP_SAR_UNSEGMENTED
);
1744 skb
= l2cap_create_iframe_pdu(chan
, msg
, len
, control
,
1747 return PTR_ERR(skb
);
1749 __skb_queue_tail(&chan
->tx_q
, skb
);
1751 if (chan
->tx_send_head
== NULL
)
1752 chan
->tx_send_head
= skb
;
1755 /* Segment SDU into multiples PDUs */
1756 err
= l2cap_sar_segment_sdu(chan
, msg
, len
);
1761 if (chan
->mode
== L2CAP_MODE_STREAMING
) {
1762 l2cap_streaming_send(chan
);
1767 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
1768 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
1773 err
= l2cap_ertm_send(chan
);
1780 BT_DBG("bad state %1.1x", chan
->mode
);
1787 /* Copy frame to all raw sockets on that connection */
1788 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
1790 struct sk_buff
*nskb
;
1791 struct l2cap_chan
*chan
;
1793 BT_DBG("conn %p", conn
);
1797 list_for_each_entry_rcu(chan
, &conn
->chan_l
, list
) {
1798 struct sock
*sk
= chan
->sk
;
1799 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
1802 /* Don't send frame to the socket it came from */
1805 nskb
= skb_clone(skb
, GFP_ATOMIC
);
1809 if (chan
->ops
->recv(chan
->data
, nskb
))
1816 /* ---- L2CAP signalling commands ---- */
1817 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
1818 u8 code
, u8 ident
, u16 dlen
, void *data
)
1820 struct sk_buff
*skb
, **frag
;
1821 struct l2cap_cmd_hdr
*cmd
;
1822 struct l2cap_hdr
*lh
;
1825 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1826 conn
, code
, ident
, dlen
);
1828 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
1829 count
= min_t(unsigned int, conn
->mtu
, len
);
1831 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
1835 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1836 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
1838 if (conn
->hcon
->type
== LE_LINK
)
1839 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
1841 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
1843 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
1846 cmd
->len
= cpu_to_le16(dlen
);
1849 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
1850 memcpy(skb_put(skb
, count
), data
, count
);
1856 /* Continuation fragments (no L2CAP header) */
1857 frag
= &skb_shinfo(skb
)->frag_list
;
1859 count
= min_t(unsigned int, conn
->mtu
, len
);
1861 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
1865 memcpy(skb_put(*frag
, count
), data
, count
);
1870 frag
= &(*frag
)->next
;
1880 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
1882 struct l2cap_conf_opt
*opt
= *ptr
;
1885 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
1893 *val
= *((u8
*) opt
->val
);
1897 *val
= get_unaligned_le16(opt
->val
);
1901 *val
= get_unaligned_le32(opt
->val
);
1905 *val
= (unsigned long) opt
->val
;
1909 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
1913 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
1915 struct l2cap_conf_opt
*opt
= *ptr
;
1917 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
1924 *((u8
*) opt
->val
) = val
;
1928 put_unaligned_le16(val
, opt
->val
);
1932 put_unaligned_le32(val
, opt
->val
);
1936 memcpy(opt
->val
, (void *) val
, len
);
1940 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
1943 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
1945 struct l2cap_conf_efs efs
;
1947 switch (chan
->mode
) {
1948 case L2CAP_MODE_ERTM
:
1949 efs
.id
= chan
->local_id
;
1950 efs
.stype
= chan
->local_stype
;
1951 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
1952 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
1953 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
1954 efs
.flush_to
= cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO
);
1957 case L2CAP_MODE_STREAMING
:
1959 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
1960 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
1961 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
1970 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
1971 (unsigned long) &efs
);
1974 static void l2cap_ack_timeout(struct work_struct
*work
)
1976 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1979 BT_DBG("chan %p", chan
);
1981 lock_sock(chan
->sk
);
1982 __l2cap_send_ack(chan
);
1983 release_sock(chan
->sk
);
1985 l2cap_chan_put(chan
);
1988 static inline void l2cap_ertm_init(struct l2cap_chan
*chan
)
1990 chan
->expected_ack_seq
= 0;
1991 chan
->unacked_frames
= 0;
1992 chan
->buffer_seq
= 0;
1993 chan
->num_acked
= 0;
1994 chan
->frames_sent
= 0;
1996 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
1997 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
1998 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2000 skb_queue_head_init(&chan
->srej_q
);
2002 INIT_LIST_HEAD(&chan
->srej_l
);
2005 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2008 case L2CAP_MODE_STREAMING
:
2009 case L2CAP_MODE_ERTM
:
2010 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2014 return L2CAP_MODE_BASIC
;
2018 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
2020 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
2023 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
2025 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
2028 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
2030 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
2031 __l2cap_ews_supported(chan
)) {
2032 /* use extended control field */
2033 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2034 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2036 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
2037 L2CAP_DEFAULT_TX_WINDOW
);
2038 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2042 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2044 struct l2cap_conf_req
*req
= data
;
2045 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2046 void *ptr
= req
->data
;
2049 BT_DBG("chan %p", chan
);
2051 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2054 switch (chan
->mode
) {
2055 case L2CAP_MODE_STREAMING
:
2056 case L2CAP_MODE_ERTM
:
2057 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2060 if (__l2cap_efs_supported(chan
))
2061 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2065 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2070 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2071 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2073 switch (chan
->mode
) {
2074 case L2CAP_MODE_BASIC
:
2075 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2076 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2079 rfc
.mode
= L2CAP_MODE_BASIC
;
2081 rfc
.max_transmit
= 0;
2082 rfc
.retrans_timeout
= 0;
2083 rfc
.monitor_timeout
= 0;
2084 rfc
.max_pdu_size
= 0;
2086 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2087 (unsigned long) &rfc
);
2090 case L2CAP_MODE_ERTM
:
2091 rfc
.mode
= L2CAP_MODE_ERTM
;
2092 rfc
.max_transmit
= chan
->max_tx
;
2093 rfc
.retrans_timeout
= 0;
2094 rfc
.monitor_timeout
= 0;
2096 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2097 L2CAP_EXT_HDR_SIZE
-
2100 rfc
.max_pdu_size
= cpu_to_le16(size
);
2102 l2cap_txwin_setup(chan
);
2104 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2105 L2CAP_DEFAULT_TX_WINDOW
);
2107 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2108 (unsigned long) &rfc
);
2110 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2111 l2cap_add_opt_efs(&ptr
, chan
);
2113 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2116 if (chan
->fcs
== L2CAP_FCS_NONE
||
2117 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2118 chan
->fcs
= L2CAP_FCS_NONE
;
2119 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2122 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2123 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2127 case L2CAP_MODE_STREAMING
:
2128 rfc
.mode
= L2CAP_MODE_STREAMING
;
2130 rfc
.max_transmit
= 0;
2131 rfc
.retrans_timeout
= 0;
2132 rfc
.monitor_timeout
= 0;
2134 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2135 L2CAP_EXT_HDR_SIZE
-
2138 rfc
.max_pdu_size
= cpu_to_le16(size
);
2140 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2141 (unsigned long) &rfc
);
2143 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2144 l2cap_add_opt_efs(&ptr
, chan
);
2146 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2149 if (chan
->fcs
== L2CAP_FCS_NONE
||
2150 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2151 chan
->fcs
= L2CAP_FCS_NONE
;
2152 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2157 req
->dcid
= cpu_to_le16(chan
->dcid
);
2158 req
->flags
= cpu_to_le16(0);
2163 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
2165 struct l2cap_conf_rsp
*rsp
= data
;
2166 void *ptr
= rsp
->data
;
2167 void *req
= chan
->conf_req
;
2168 int len
= chan
->conf_len
;
2169 int type
, hint
, olen
;
2171 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2172 struct l2cap_conf_efs efs
;
2174 u16 mtu
= L2CAP_DEFAULT_MTU
;
2175 u16 result
= L2CAP_CONF_SUCCESS
;
2178 BT_DBG("chan %p", chan
);
2180 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2181 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2183 hint
= type
& L2CAP_CONF_HINT
;
2184 type
&= L2CAP_CONF_MASK
;
2187 case L2CAP_CONF_MTU
:
2191 case L2CAP_CONF_FLUSH_TO
:
2192 chan
->flush_to
= val
;
2195 case L2CAP_CONF_QOS
:
2198 case L2CAP_CONF_RFC
:
2199 if (olen
== sizeof(rfc
))
2200 memcpy(&rfc
, (void *) val
, olen
);
2203 case L2CAP_CONF_FCS
:
2204 if (val
== L2CAP_FCS_NONE
)
2205 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
2208 case L2CAP_CONF_EFS
:
2210 if (olen
== sizeof(efs
))
2211 memcpy(&efs
, (void *) val
, olen
);
2214 case L2CAP_CONF_EWS
:
2216 return -ECONNREFUSED
;
2218 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2219 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
2220 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2221 chan
->remote_tx_win
= val
;
2228 result
= L2CAP_CONF_UNKNOWN
;
2229 *((u8
*) ptr
++) = type
;
2234 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
2237 switch (chan
->mode
) {
2238 case L2CAP_MODE_STREAMING
:
2239 case L2CAP_MODE_ERTM
:
2240 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
2241 chan
->mode
= l2cap_select_mode(rfc
.mode
,
2242 chan
->conn
->feat_mask
);
2247 if (__l2cap_efs_supported(chan
))
2248 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2250 return -ECONNREFUSED
;
2253 if (chan
->mode
!= rfc
.mode
)
2254 return -ECONNREFUSED
;
2260 if (chan
->mode
!= rfc
.mode
) {
2261 result
= L2CAP_CONF_UNACCEPT
;
2262 rfc
.mode
= chan
->mode
;
2264 if (chan
->num_conf_rsp
== 1)
2265 return -ECONNREFUSED
;
2267 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2268 sizeof(rfc
), (unsigned long) &rfc
);
2271 if (result
== L2CAP_CONF_SUCCESS
) {
2272 /* Configure output options and let the other side know
2273 * which ones we don't like. */
2275 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2276 result
= L2CAP_CONF_UNACCEPT
;
2279 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
2281 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
2284 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
2285 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
2286 efs
.stype
!= chan
->local_stype
) {
2288 result
= L2CAP_CONF_UNACCEPT
;
2290 if (chan
->num_conf_req
>= 1)
2291 return -ECONNREFUSED
;
2293 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2295 (unsigned long) &efs
);
2297 /* Send PENDING Conf Rsp */
2298 result
= L2CAP_CONF_PENDING
;
2299 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
2304 case L2CAP_MODE_BASIC
:
2305 chan
->fcs
= L2CAP_FCS_NONE
;
2306 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2309 case L2CAP_MODE_ERTM
:
2310 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
2311 chan
->remote_tx_win
= rfc
.txwin_size
;
2313 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
2315 chan
->remote_max_tx
= rfc
.max_transmit
;
2317 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
2319 L2CAP_EXT_HDR_SIZE
-
2322 rfc
.max_pdu_size
= cpu_to_le16(size
);
2323 chan
->remote_mps
= size
;
2325 rfc
.retrans_timeout
=
2326 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
2327 rfc
.monitor_timeout
=
2328 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
2330 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2332 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2333 sizeof(rfc
), (unsigned long) &rfc
);
2335 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
2336 chan
->remote_id
= efs
.id
;
2337 chan
->remote_stype
= efs
.stype
;
2338 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
2339 chan
->remote_flush_to
=
2340 le32_to_cpu(efs
.flush_to
);
2341 chan
->remote_acc_lat
=
2342 le32_to_cpu(efs
.acc_lat
);
2343 chan
->remote_sdu_itime
=
2344 le32_to_cpu(efs
.sdu_itime
);
2345 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2346 sizeof(efs
), (unsigned long) &efs
);
2350 case L2CAP_MODE_STREAMING
:
2351 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
2353 L2CAP_EXT_HDR_SIZE
-
2356 rfc
.max_pdu_size
= cpu_to_le16(size
);
2357 chan
->remote_mps
= size
;
2359 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2361 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2362 sizeof(rfc
), (unsigned long) &rfc
);
2367 result
= L2CAP_CONF_UNACCEPT
;
2369 memset(&rfc
, 0, sizeof(rfc
));
2370 rfc
.mode
= chan
->mode
;
2373 if (result
== L2CAP_CONF_SUCCESS
)
2374 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
2376 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2377 rsp
->result
= cpu_to_le16(result
);
2378 rsp
->flags
= cpu_to_le16(0x0000);
2383 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
2385 struct l2cap_conf_req
*req
= data
;
2386 void *ptr
= req
->data
;
2389 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2390 struct l2cap_conf_efs efs
;
2392 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
2394 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2395 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2398 case L2CAP_CONF_MTU
:
2399 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2400 *result
= L2CAP_CONF_UNACCEPT
;
2401 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
2404 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2407 case L2CAP_CONF_FLUSH_TO
:
2408 chan
->flush_to
= val
;
2409 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2413 case L2CAP_CONF_RFC
:
2414 if (olen
== sizeof(rfc
))
2415 memcpy(&rfc
, (void *)val
, olen
);
2417 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
2418 rfc
.mode
!= chan
->mode
)
2419 return -ECONNREFUSED
;
2423 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2424 sizeof(rfc
), (unsigned long) &rfc
);
2427 case L2CAP_CONF_EWS
:
2428 chan
->tx_win
= min_t(u16
, val
,
2429 L2CAP_DEFAULT_EXT_WINDOW
);
2430 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2434 case L2CAP_CONF_EFS
:
2435 if (olen
== sizeof(efs
))
2436 memcpy(&efs
, (void *)val
, olen
);
2438 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
2439 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
2440 efs
.stype
!= chan
->local_stype
)
2441 return -ECONNREFUSED
;
2443 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2444 sizeof(efs
), (unsigned long) &efs
);
2449 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
2450 return -ECONNREFUSED
;
2452 chan
->mode
= rfc
.mode
;
2454 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
2456 case L2CAP_MODE_ERTM
:
2457 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2458 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2459 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2461 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
2462 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
2463 chan
->local_sdu_itime
=
2464 le32_to_cpu(efs
.sdu_itime
);
2465 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
2466 chan
->local_flush_to
=
2467 le32_to_cpu(efs
.flush_to
);
2471 case L2CAP_MODE_STREAMING
:
2472 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2476 req
->dcid
= cpu_to_le16(chan
->dcid
);
2477 req
->flags
= cpu_to_le16(0x0000);
2482 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
2484 struct l2cap_conf_rsp
*rsp
= data
;
2485 void *ptr
= rsp
->data
;
2487 BT_DBG("chan %p", chan
);
2489 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2490 rsp
->result
= cpu_to_le16(result
);
2491 rsp
->flags
= cpu_to_le16(flags
);
2496 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
2498 struct l2cap_conn_rsp rsp
;
2499 struct l2cap_conn
*conn
= chan
->conn
;
2502 rsp
.scid
= cpu_to_le16(chan
->dcid
);
2503 rsp
.dcid
= cpu_to_le16(chan
->scid
);
2504 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
2505 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
2506 l2cap_send_cmd(conn
, chan
->ident
,
2507 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2509 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
2512 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2513 l2cap_build_conf_req(chan
, buf
), buf
);
2514 chan
->num_conf_req
++;
2517 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
2521 struct l2cap_conf_rfc rfc
;
2523 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
2525 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
2528 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2529 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2532 case L2CAP_CONF_RFC
:
2533 if (olen
== sizeof(rfc
))
2534 memcpy(&rfc
, (void *)val
, olen
);
2539 /* Use sane default values in case a misbehaving remote device
2540 * did not send an RFC option.
2542 rfc
.mode
= chan
->mode
;
2543 rfc
.retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
2544 rfc
.monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
2545 rfc
.max_pdu_size
= cpu_to_le16(chan
->imtu
);
2547 BT_ERR("Expected RFC option was not found, using defaults");
2551 case L2CAP_MODE_ERTM
:
2552 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2553 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2554 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2556 case L2CAP_MODE_STREAMING
:
2557 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2561 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2563 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
2565 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
2568 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2569 cmd
->ident
== conn
->info_ident
) {
2570 cancel_delayed_work(&conn
->info_timer
);
2572 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2573 conn
->info_ident
= 0;
2575 l2cap_conn_start(conn
);
2581 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2583 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2584 struct l2cap_conn_rsp rsp
;
2585 struct l2cap_chan
*chan
= NULL
, *pchan
;
2586 struct sock
*parent
, *sk
= NULL
;
2587 int result
, status
= L2CAP_CS_NO_INFO
;
2589 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2590 __le16 psm
= req
->psm
;
2592 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2594 /* Check if we have socket listening on psm */
2595 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
);
2597 result
= L2CAP_CR_BAD_PSM
;
2605 /* Check if the ACL is secure enough (if not SDP) */
2606 if (psm
!= cpu_to_le16(0x0001) &&
2607 !hci_conn_check_link_mode(conn
->hcon
)) {
2608 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
2609 result
= L2CAP_CR_SEC_BLOCK
;
2613 result
= L2CAP_CR_NO_MEM
;
2615 /* Check for backlog size */
2616 if (sk_acceptq_is_full(parent
)) {
2617 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2621 chan
= pchan
->ops
->new_connection(pchan
->data
);
2627 /* Check if we already have channel with that dcid */
2628 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
2629 sock_set_flag(sk
, SOCK_ZAPPED
);
2630 chan
->ops
->close(chan
->data
);
2634 hci_conn_hold(conn
->hcon
);
2636 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2637 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2641 bt_accept_enqueue(parent
, sk
);
2643 l2cap_chan_add(conn
, chan
);
2647 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
2649 chan
->ident
= cmd
->ident
;
2651 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2652 if (l2cap_chan_check_security(chan
)) {
2653 if (bt_sk(sk
)->defer_setup
) {
2654 l2cap_state_change(chan
, BT_CONNECT2
);
2655 result
= L2CAP_CR_PEND
;
2656 status
= L2CAP_CS_AUTHOR_PEND
;
2657 parent
->sk_data_ready(parent
, 0);
2659 l2cap_state_change(chan
, BT_CONFIG
);
2660 result
= L2CAP_CR_SUCCESS
;
2661 status
= L2CAP_CS_NO_INFO
;
2664 l2cap_state_change(chan
, BT_CONNECT2
);
2665 result
= L2CAP_CR_PEND
;
2666 status
= L2CAP_CS_AUTHEN_PEND
;
2669 l2cap_state_change(chan
, BT_CONNECT2
);
2670 result
= L2CAP_CR_PEND
;
2671 status
= L2CAP_CS_NO_INFO
;
2675 release_sock(parent
);
2678 rsp
.scid
= cpu_to_le16(scid
);
2679 rsp
.dcid
= cpu_to_le16(dcid
);
2680 rsp
.result
= cpu_to_le16(result
);
2681 rsp
.status
= cpu_to_le16(status
);
2682 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2684 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2685 struct l2cap_info_req info
;
2686 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2688 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2689 conn
->info_ident
= l2cap_get_ident(conn
);
2691 schedule_delayed_work(&conn
->info_timer
,
2692 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2694 l2cap_send_cmd(conn
, conn
->info_ident
,
2695 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2698 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
2699 result
== L2CAP_CR_SUCCESS
) {
2701 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
2702 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2703 l2cap_build_conf_req(chan
, buf
), buf
);
2704 chan
->num_conf_req
++;
2710 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2712 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2713 u16 scid
, dcid
, result
, status
;
2714 struct l2cap_chan
*chan
;
2718 scid
= __le16_to_cpu(rsp
->scid
);
2719 dcid
= __le16_to_cpu(rsp
->dcid
);
2720 result
= __le16_to_cpu(rsp
->result
);
2721 status
= __le16_to_cpu(rsp
->status
);
2723 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2726 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2730 chan
= l2cap_get_chan_by_ident(conn
, cmd
->ident
);
2738 case L2CAP_CR_SUCCESS
:
2739 l2cap_state_change(chan
, BT_CONFIG
);
2742 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
2744 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
2747 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2748 l2cap_build_conf_req(chan
, req
), req
);
2749 chan
->num_conf_req
++;
2753 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
2757 l2cap_chan_del(chan
, ECONNREFUSED
);
2765 static inline void set_default_fcs(struct l2cap_chan
*chan
)
2767 /* FCS is enabled only in ERTM or streaming mode, if one or both
2770 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
2771 chan
->fcs
= L2CAP_FCS_NONE
;
2772 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
2773 chan
->fcs
= L2CAP_FCS_CRC16
;
2776 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2778 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2781 struct l2cap_chan
*chan
;
2785 dcid
= __le16_to_cpu(req
->dcid
);
2786 flags
= __le16_to_cpu(req
->flags
);
2788 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2790 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2796 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
2797 struct l2cap_cmd_rej_cid rej
;
2799 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
2800 rej
.scid
= cpu_to_le16(chan
->scid
);
2801 rej
.dcid
= cpu_to_le16(chan
->dcid
);
2803 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
2808 /* Reject if config buffer is too small. */
2809 len
= cmd_len
- sizeof(*req
);
2810 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
2811 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2812 l2cap_build_conf_rsp(chan
, rsp
,
2813 L2CAP_CONF_REJECT
, flags
), rsp
);
2818 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
2819 chan
->conf_len
+= len
;
2821 if (flags
& 0x0001) {
2822 /* Incomplete config. Send empty response. */
2823 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2824 l2cap_build_conf_rsp(chan
, rsp
,
2825 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2829 /* Complete config. */
2830 len
= l2cap_parse_conf_req(chan
, rsp
);
2832 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2836 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2837 chan
->num_conf_rsp
++;
2839 /* Reset config buffer. */
2842 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
2845 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
2846 set_default_fcs(chan
);
2848 l2cap_state_change(chan
, BT_CONNECTED
);
2850 chan
->next_tx_seq
= 0;
2851 chan
->expected_tx_seq
= 0;
2852 skb_queue_head_init(&chan
->tx_q
);
2853 if (chan
->mode
== L2CAP_MODE_ERTM
)
2854 l2cap_ertm_init(chan
);
2856 l2cap_chan_ready(chan
);
2860 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
2862 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2863 l2cap_build_conf_req(chan
, buf
), buf
);
2864 chan
->num_conf_req
++;
2867 /* Got Conf Rsp PENDING from remote side and asume we sent
2868 Conf Rsp PENDING in the code above */
2869 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
2870 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
2872 /* check compatibility */
2874 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
2875 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
2877 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2878 l2cap_build_conf_rsp(chan
, rsp
,
2879 L2CAP_CONF_SUCCESS
, 0x0000), rsp
);
2887 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2889 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2890 u16 scid
, flags
, result
;
2891 struct l2cap_chan
*chan
;
2893 int len
= cmd
->len
- sizeof(*rsp
);
2895 scid
= __le16_to_cpu(rsp
->scid
);
2896 flags
= __le16_to_cpu(rsp
->flags
);
2897 result
= __le16_to_cpu(rsp
->result
);
2899 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2900 scid
, flags
, result
);
2902 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2909 case L2CAP_CONF_SUCCESS
:
2910 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
2911 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
2914 case L2CAP_CONF_PENDING
:
2915 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
2917 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
2920 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
2923 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2927 /* check compatibility */
2929 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
2930 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
2932 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2933 l2cap_build_conf_rsp(chan
, buf
,
2934 L2CAP_CONF_SUCCESS
, 0x0000), buf
);
2938 case L2CAP_CONF_UNACCEPT
:
2939 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2942 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
2943 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2947 /* throw out any old stored conf requests */
2948 result
= L2CAP_CONF_SUCCESS
;
2949 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
2952 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2956 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2957 L2CAP_CONF_REQ
, len
, req
);
2958 chan
->num_conf_req
++;
2959 if (result
!= L2CAP_CONF_SUCCESS
)
2965 sk
->sk_err
= ECONNRESET
;
2966 __set_chan_timer(chan
,
2967 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT
));
2968 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2975 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
2977 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
2978 set_default_fcs(chan
);
2980 l2cap_state_change(chan
, BT_CONNECTED
);
2981 chan
->next_tx_seq
= 0;
2982 chan
->expected_tx_seq
= 0;
2983 skb_queue_head_init(&chan
->tx_q
);
2984 if (chan
->mode
== L2CAP_MODE_ERTM
)
2985 l2cap_ertm_init(chan
);
2987 l2cap_chan_ready(chan
);
2995 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2997 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
2998 struct l2cap_disconn_rsp rsp
;
3000 struct l2cap_chan
*chan
;
3003 scid
= __le16_to_cpu(req
->scid
);
3004 dcid
= __le16_to_cpu(req
->dcid
);
3006 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3008 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3014 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3015 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3016 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3018 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3020 l2cap_chan_del(chan
, ECONNRESET
);
3023 chan
->ops
->close(chan
->data
);
3027 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3029 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3031 struct l2cap_chan
*chan
;
3034 scid
= __le16_to_cpu(rsp
->scid
);
3035 dcid
= __le16_to_cpu(rsp
->dcid
);
3037 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3039 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3045 l2cap_chan_del(chan
, 0);
3048 chan
->ops
->close(chan
->data
);
3052 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3054 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3057 type
= __le16_to_cpu(req
->type
);
3059 BT_DBG("type 0x%4.4x", type
);
3061 if (type
== L2CAP_IT_FEAT_MASK
) {
3063 u32 feat_mask
= l2cap_feat_mask
;
3064 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3065 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3066 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3068 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3071 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3072 | L2CAP_FEAT_EXT_WINDOW
;
3074 put_unaligned_le32(feat_mask
, rsp
->data
);
3075 l2cap_send_cmd(conn
, cmd
->ident
,
3076 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3077 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3079 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3082 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3084 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3086 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3087 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3088 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3089 l2cap_send_cmd(conn
, cmd
->ident
,
3090 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3092 struct l2cap_info_rsp rsp
;
3093 rsp
.type
= cpu_to_le16(type
);
3094 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3095 l2cap_send_cmd(conn
, cmd
->ident
,
3096 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3102 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3104 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3107 type
= __le16_to_cpu(rsp
->type
);
3108 result
= __le16_to_cpu(rsp
->result
);
3110 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3112 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3113 if (cmd
->ident
!= conn
->info_ident
||
3114 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3117 cancel_delayed_work(&conn
->info_timer
);
3119 if (result
!= L2CAP_IR_SUCCESS
) {
3120 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3121 conn
->info_ident
= 0;
3123 l2cap_conn_start(conn
);
3128 if (type
== L2CAP_IT_FEAT_MASK
) {
3129 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3131 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3132 struct l2cap_info_req req
;
3133 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3135 conn
->info_ident
= l2cap_get_ident(conn
);
3137 l2cap_send_cmd(conn
, conn
->info_ident
,
3138 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3140 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3141 conn
->info_ident
= 0;
3143 l2cap_conn_start(conn
);
3145 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3146 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3147 conn
->info_ident
= 0;
3149 l2cap_conn_start(conn
);
3155 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
3156 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3159 struct l2cap_create_chan_req
*req
= data
;
3160 struct l2cap_create_chan_rsp rsp
;
3163 if (cmd_len
!= sizeof(*req
))
3169 psm
= le16_to_cpu(req
->psm
);
3170 scid
= le16_to_cpu(req
->scid
);
3172 BT_DBG("psm %d, scid %d, amp_id %d", psm
, scid
, req
->amp_id
);
3174 /* Placeholder: Always reject */
3176 rsp
.scid
= cpu_to_le16(scid
);
3177 rsp
.result
= L2CAP_CR_NO_MEM
;
3178 rsp
.status
= L2CAP_CS_NO_INFO
;
3180 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
/* An AMP Create Channel Response carries the same payload semantics as
 * a Connect Response, so delegate to that handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3194 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
3195 u16 icid
, u16 result
)
3197 struct l2cap_move_chan_rsp rsp
;
3199 BT_DBG("icid %d, result %d", icid
, result
);
3201 rsp
.icid
= cpu_to_le16(icid
);
3202 rsp
.result
= cpu_to_le16(result
);
3204 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
3207 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
3208 struct l2cap_chan
*chan
, u16 icid
, u16 result
)
3210 struct l2cap_move_chan_cfm cfm
;
3213 BT_DBG("icid %d, result %d", icid
, result
);
3215 ident
= l2cap_get_ident(conn
);
3217 chan
->ident
= ident
;
3219 cfm
.icid
= cpu_to_le16(icid
);
3220 cfm
.result
= cpu_to_le16(result
);
3222 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
3225 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
3228 struct l2cap_move_chan_cfm_rsp rsp
;
3230 BT_DBG("icid %d", icid
);
3232 rsp
.icid
= cpu_to_le16(icid
);
3233 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
3236 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
3237 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3239 struct l2cap_move_chan_req
*req
= data
;
3241 u16 result
= L2CAP_MR_NOT_ALLOWED
;
3243 if (cmd_len
!= sizeof(*req
))
3246 icid
= le16_to_cpu(req
->icid
);
3248 BT_DBG("icid %d, dest_amp_id %d", icid
, req
->dest_amp_id
);
3253 /* Placeholder: Always refuse */
3254 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
3259 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
3260 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3262 struct l2cap_move_chan_rsp
*rsp
= data
;
3265 if (cmd_len
!= sizeof(*rsp
))
3268 icid
= le16_to_cpu(rsp
->icid
);
3269 result
= le16_to_cpu(rsp
->result
);
3271 BT_DBG("icid %d, result %d", icid
, result
);
3273 /* Placeholder: Always unconfirmed */
3274 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
3279 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
3280 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3282 struct l2cap_move_chan_cfm
*cfm
= data
;
3285 if (cmd_len
!= sizeof(*cfm
))
3288 icid
= le16_to_cpu(cfm
->icid
);
3289 result
= le16_to_cpu(cfm
->result
);
3291 BT_DBG("icid %d, result %d", icid
, result
);
3293 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
3298 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
3299 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3301 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
3304 if (cmd_len
!= sizeof(*rsp
))
3307 icid
= le16_to_cpu(rsp
->icid
);
3309 BT_DBG("icid %d", icid
);
3314 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
3319 if (min
> max
|| min
< 6 || max
> 3200)
3322 if (to_multiplier
< 10 || to_multiplier
> 3200)
3325 if (max
>= to_multiplier
* 8)
3328 max_latency
= (to_multiplier
* 8 / max
) - 1;
3329 if (latency
> 499 || latency
> max_latency
)
3335 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
3336 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3338 struct hci_conn
*hcon
= conn
->hcon
;
3339 struct l2cap_conn_param_update_req
*req
;
3340 struct l2cap_conn_param_update_rsp rsp
;
3341 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
3344 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
3347 cmd_len
= __le16_to_cpu(cmd
->len
);
3348 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
3351 req
= (struct l2cap_conn_param_update_req
*) data
;
3352 min
= __le16_to_cpu(req
->min
);
3353 max
= __le16_to_cpu(req
->max
);
3354 latency
= __le16_to_cpu(req
->latency
);
3355 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
3357 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3358 min
, max
, latency
, to_multiplier
);
3360 memset(&rsp
, 0, sizeof(rsp
));
3362 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
3364 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
3366 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
3368 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
3372 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
3377 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
3378 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3382 switch (cmd
->code
) {
3383 case L2CAP_COMMAND_REJ
:
3384 l2cap_command_rej(conn
, cmd
, data
);
3387 case L2CAP_CONN_REQ
:
3388 err
= l2cap_connect_req(conn
, cmd
, data
);
3391 case L2CAP_CONN_RSP
:
3392 err
= l2cap_connect_rsp(conn
, cmd
, data
);
3395 case L2CAP_CONF_REQ
:
3396 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
3399 case L2CAP_CONF_RSP
:
3400 err
= l2cap_config_rsp(conn
, cmd
, data
);
3403 case L2CAP_DISCONN_REQ
:
3404 err
= l2cap_disconnect_req(conn
, cmd
, data
);
3407 case L2CAP_DISCONN_RSP
:
3408 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
3411 case L2CAP_ECHO_REQ
:
3412 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3415 case L2CAP_ECHO_RSP
:
3418 case L2CAP_INFO_REQ
:
3419 err
= l2cap_information_req(conn
, cmd
, data
);
3422 case L2CAP_INFO_RSP
:
3423 err
= l2cap_information_rsp(conn
, cmd
, data
);
3426 case L2CAP_CREATE_CHAN_REQ
:
3427 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
3430 case L2CAP_CREATE_CHAN_RSP
:
3431 err
= l2cap_create_channel_rsp(conn
, cmd
, data
);
3434 case L2CAP_MOVE_CHAN_REQ
:
3435 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
3438 case L2CAP_MOVE_CHAN_RSP
:
3439 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
3442 case L2CAP_MOVE_CHAN_CFM
:
3443 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
3446 case L2CAP_MOVE_CHAN_CFM_RSP
:
3447 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
3451 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
3459 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
3460 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3462 switch (cmd
->code
) {
3463 case L2CAP_COMMAND_REJ
:
3466 case L2CAP_CONN_PARAM_UPDATE_REQ
:
3467 return l2cap_conn_param_update_req(conn
, cmd
, data
);
3469 case L2CAP_CONN_PARAM_UPDATE_RSP
:
3473 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
3478 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
3479 struct sk_buff
*skb
)
3481 u8
*data
= skb
->data
;
3483 struct l2cap_cmd_hdr cmd
;
3486 l2cap_raw_recv(conn
, skb
);
3488 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3490 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3491 data
+= L2CAP_CMD_HDR_SIZE
;
3492 len
-= L2CAP_CMD_HDR_SIZE
;
3494 cmd_len
= le16_to_cpu(cmd
.len
);
3496 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3498 if (cmd_len
> len
|| !cmd
.ident
) {
3499 BT_DBG("corrupted command");
3503 if (conn
->hcon
->type
== LE_LINK
)
3504 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
3506 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
3509 struct l2cap_cmd_rej_unk rej
;
3511 BT_ERR("Wrong link type (%d)", err
);
3513 /* FIXME: Map err to a valid reason */
3514 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
3515 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3525 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
3527 u16 our_fcs
, rcv_fcs
;
3530 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3531 hdr_size
= L2CAP_EXT_HDR_SIZE
;
3533 hdr_size
= L2CAP_ENH_HDR_SIZE
;
3535 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
3536 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
3537 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3538 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3540 if (our_fcs
!= rcv_fcs
)
3546 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
3550 chan
->frames_sent
= 0;
3552 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
3554 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
3555 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
3556 l2cap_send_sframe(chan
, control
);
3557 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3560 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
3561 l2cap_retransmit_frames(chan
);
3563 l2cap_ertm_send(chan
);
3565 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
3566 chan
->frames_sent
== 0) {
3567 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
3568 l2cap_send_sframe(chan
, control
);
3572 static int l2cap_add_to_srej_queue(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 tx_seq
, u8 sar
)
3574 struct sk_buff
*next_skb
;
3575 int tx_seq_offset
, next_tx_seq_offset
;
3577 bt_cb(skb
)->tx_seq
= tx_seq
;
3578 bt_cb(skb
)->sar
= sar
;
3580 next_skb
= skb_peek(&chan
->srej_q
);
3582 tx_seq_offset
= __seq_offset(chan
, tx_seq
, chan
->buffer_seq
);
3585 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3588 next_tx_seq_offset
= __seq_offset(chan
,
3589 bt_cb(next_skb
)->tx_seq
, chan
->buffer_seq
);
3591 if (next_tx_seq_offset
> tx_seq_offset
) {
3592 __skb_queue_before(&chan
->srej_q
, next_skb
, skb
);
3596 if (skb_queue_is_last(&chan
->srej_q
, next_skb
))
3599 next_skb
= skb_queue_next(&chan
->srej_q
, next_skb
);
3602 __skb_queue_tail(&chan
->srej_q
, skb
);
3607 static void append_skb_frag(struct sk_buff
*skb
,
3608 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
3610 /* skb->len reflects data in skb as well as all fragments
3611 * skb->data_len reflects only data in fragments
3613 if (!skb_has_frag_list(skb
))
3614 skb_shinfo(skb
)->frag_list
= new_frag
;
3616 new_frag
->next
= NULL
;
3618 (*last_frag
)->next
= new_frag
;
3619 *last_frag
= new_frag
;
3621 skb
->len
+= new_frag
->len
;
3622 skb
->data_len
+= new_frag
->len
;
3623 skb
->truesize
+= new_frag
->truesize
;
3626 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u32 control
)
3630 switch (__get_ctrl_sar(chan
, control
)) {
3631 case L2CAP_SAR_UNSEGMENTED
:
3635 err
= chan
->ops
->recv(chan
->data
, skb
);
3638 case L2CAP_SAR_START
:
3642 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
3643 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
3645 if (chan
->sdu_len
> chan
->imtu
) {
3650 if (skb
->len
>= chan
->sdu_len
)
3654 chan
->sdu_last_frag
= skb
;
3660 case L2CAP_SAR_CONTINUE
:
3664 append_skb_frag(chan
->sdu
, skb
,
3665 &chan
->sdu_last_frag
);
3668 if (chan
->sdu
->len
>= chan
->sdu_len
)
3678 append_skb_frag(chan
->sdu
, skb
,
3679 &chan
->sdu_last_frag
);
3682 if (chan
->sdu
->len
!= chan
->sdu_len
)
3685 err
= chan
->ops
->recv(chan
->data
, chan
->sdu
);
3688 /* Reassembly complete */
3690 chan
->sdu_last_frag
= NULL
;
3698 kfree_skb(chan
->sdu
);
3700 chan
->sdu_last_frag
= NULL
;
3707 static void l2cap_ertm_enter_local_busy(struct l2cap_chan
*chan
)
3709 BT_DBG("chan %p, Enter local busy", chan
);
3711 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
3713 __set_ack_timer(chan
);
3716 static void l2cap_ertm_exit_local_busy(struct l2cap_chan
*chan
)
3720 if (!test_bit(CONN_RNR_SENT
, &chan
->conn_state
))
3723 control
= __set_reqseq(chan
, chan
->buffer_seq
);
3724 control
|= __set_ctrl_poll(chan
);
3725 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
3726 l2cap_send_sframe(chan
, control
);
3727 chan
->retry_count
= 1;
3729 __clear_retrans_timer(chan
);
3730 __set_monitor_timer(chan
);
3732 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
3735 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
3736 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3738 BT_DBG("chan %p, Exit local busy", chan
);
3741 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
3743 if (chan
->mode
== L2CAP_MODE_ERTM
) {
3745 l2cap_ertm_enter_local_busy(chan
);
3747 l2cap_ertm_exit_local_busy(chan
);
3751 static void l2cap_check_srej_gap(struct l2cap_chan
*chan
, u16 tx_seq
)
3753 struct sk_buff
*skb
;
3756 while ((skb
= skb_peek(&chan
->srej_q
)) &&
3757 !test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
3760 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3763 skb
= skb_dequeue(&chan
->srej_q
);
3764 control
= __set_ctrl_sar(chan
, bt_cb(skb
)->sar
);
3765 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
3768 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3772 chan
->buffer_seq_srej
= __next_seq(chan
, chan
->buffer_seq_srej
);
3773 tx_seq
= __next_seq(chan
, tx_seq
);
3777 static void l2cap_resend_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
3779 struct srej_list
*l
, *tmp
;
3782 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
3783 if (l
->tx_seq
== tx_seq
) {
3788 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
3789 control
|= __set_reqseq(chan
, l
->tx_seq
);
3790 l2cap_send_sframe(chan
, control
);
3792 list_add_tail(&l
->list
, &chan
->srej_l
);
3796 static int l2cap_send_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
3798 struct srej_list
*new;
3801 while (tx_seq
!= chan
->expected_tx_seq
) {
3802 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
3803 control
|= __set_reqseq(chan
, chan
->expected_tx_seq
);
3804 l2cap_send_sframe(chan
, control
);
3806 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3810 new->tx_seq
= chan
->expected_tx_seq
;
3812 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
3814 list_add_tail(&new->list
, &chan
->srej_l
);
3817 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
3822 static inline int l2cap_data_channel_iframe(struct l2cap_chan
*chan
, u32 rx_control
, struct sk_buff
*skb
)
3824 u16 tx_seq
= __get_txseq(chan
, rx_control
);
3825 u16 req_seq
= __get_reqseq(chan
, rx_control
);
3826 u8 sar
= __get_ctrl_sar(chan
, rx_control
);
3827 int tx_seq_offset
, expected_tx_seq_offset
;
3828 int num_to_ack
= (chan
->tx_win
/6) + 1;
3831 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan
, skb
->len
,
3832 tx_seq
, rx_control
);
3834 if (__is_ctrl_final(chan
, rx_control
) &&
3835 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
3836 __clear_monitor_timer(chan
);
3837 if (chan
->unacked_frames
> 0)
3838 __set_retrans_timer(chan
);
3839 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
3842 chan
->expected_ack_seq
= req_seq
;
3843 l2cap_drop_acked_frames(chan
);
3845 tx_seq_offset
= __seq_offset(chan
, tx_seq
, chan
->buffer_seq
);
3847 /* invalid tx_seq */
3848 if (tx_seq_offset
>= chan
->tx_win
) {
3849 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3853 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
3854 if (!test_bit(CONN_RNR_SENT
, &chan
->conn_state
))
3855 l2cap_send_ack(chan
);
3859 if (tx_seq
== chan
->expected_tx_seq
)
3862 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
3863 struct srej_list
*first
;
3865 first
= list_first_entry(&chan
->srej_l
,
3866 struct srej_list
, list
);
3867 if (tx_seq
== first
->tx_seq
) {
3868 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3869 l2cap_check_srej_gap(chan
, tx_seq
);
3871 list_del(&first
->list
);
3874 if (list_empty(&chan
->srej_l
)) {
3875 chan
->buffer_seq
= chan
->buffer_seq_srej
;
3876 clear_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
3877 l2cap_send_ack(chan
);
3878 BT_DBG("chan %p, Exit SREJ_SENT", chan
);
3881 struct srej_list
*l
;
3883 /* duplicated tx_seq */
3884 if (l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
) < 0)
3887 list_for_each_entry(l
, &chan
->srej_l
, list
) {
3888 if (l
->tx_seq
== tx_seq
) {
3889 l2cap_resend_srejframe(chan
, tx_seq
);
3894 err
= l2cap_send_srejframe(chan
, tx_seq
);
3896 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3901 expected_tx_seq_offset
= __seq_offset(chan
,
3902 chan
->expected_tx_seq
, chan
->buffer_seq
);
3904 /* duplicated tx_seq */
3905 if (tx_seq_offset
< expected_tx_seq_offset
)
3908 set_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
3910 BT_DBG("chan %p, Enter SREJ", chan
);
3912 INIT_LIST_HEAD(&chan
->srej_l
);
3913 chan
->buffer_seq_srej
= chan
->buffer_seq
;
3915 __skb_queue_head_init(&chan
->srej_q
);
3916 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3918 /* Set P-bit only if there are some I-frames to ack. */
3919 if (__clear_ack_timer(chan
))
3920 set_bit(CONN_SEND_PBIT
, &chan
->conn_state
);
3922 err
= l2cap_send_srejframe(chan
, tx_seq
);
3924 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3931 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
3933 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
3934 bt_cb(skb
)->tx_seq
= tx_seq
;
3935 bt_cb(skb
)->sar
= sar
;
3936 __skb_queue_tail(&chan
->srej_q
, skb
);
3940 err
= l2cap_reassemble_sdu(chan
, skb
, rx_control
);
3941 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
3944 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3948 if (__is_ctrl_final(chan
, rx_control
)) {
3949 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
3950 l2cap_retransmit_frames(chan
);
3954 chan
->num_acked
= (chan
->num_acked
+ 1) % num_to_ack
;
3955 if (chan
->num_acked
== num_to_ack
- 1)
3956 l2cap_send_ack(chan
);
3958 __set_ack_timer(chan
);
3967 static inline void l2cap_data_channel_rrframe(struct l2cap_chan
*chan
, u32 rx_control
)
3969 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
,
3970 __get_reqseq(chan
, rx_control
), rx_control
);
3972 chan
->expected_ack_seq
= __get_reqseq(chan
, rx_control
);
3973 l2cap_drop_acked_frames(chan
);
3975 if (__is_ctrl_poll(chan
, rx_control
)) {
3976 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
3977 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
3978 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
3979 (chan
->unacked_frames
> 0))
3980 __set_retrans_timer(chan
);
3982 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
3983 l2cap_send_srejtail(chan
);
3985 l2cap_send_i_or_rr_or_rnr(chan
);
3988 } else if (__is_ctrl_final(chan
, rx_control
)) {
3989 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
3991 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
3992 l2cap_retransmit_frames(chan
);
3995 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
3996 (chan
->unacked_frames
> 0))
3997 __set_retrans_timer(chan
);
3999 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4000 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
))
4001 l2cap_send_ack(chan
);
4003 l2cap_ertm_send(chan
);
4007 static inline void l2cap_data_channel_rejframe(struct l2cap_chan
*chan
, u32 rx_control
)
4009 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4011 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4013 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4015 chan
->expected_ack_seq
= tx_seq
;
4016 l2cap_drop_acked_frames(chan
);
4018 if (__is_ctrl_final(chan
, rx_control
)) {
4019 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4020 l2cap_retransmit_frames(chan
);
4022 l2cap_retransmit_frames(chan
);
4024 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
))
4025 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4028 static inline void l2cap_data_channel_srejframe(struct l2cap_chan
*chan
, u32 rx_control
)
4030 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4032 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4034 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4036 if (__is_ctrl_poll(chan
, rx_control
)) {
4037 chan
->expected_ack_seq
= tx_seq
;
4038 l2cap_drop_acked_frames(chan
);
4040 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4041 l2cap_retransmit_one_frame(chan
, tx_seq
);
4043 l2cap_ertm_send(chan
);
4045 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4046 chan
->srej_save_reqseq
= tx_seq
;
4047 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4049 } else if (__is_ctrl_final(chan
, rx_control
)) {
4050 if (test_bit(CONN_SREJ_ACT
, &chan
->conn_state
) &&
4051 chan
->srej_save_reqseq
== tx_seq
)
4052 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4054 l2cap_retransmit_one_frame(chan
, tx_seq
);
4056 l2cap_retransmit_one_frame(chan
, tx_seq
);
4057 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4058 chan
->srej_save_reqseq
= tx_seq
;
4059 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4064 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan
*chan
, u32 rx_control
)
4066 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4068 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4070 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4071 chan
->expected_ack_seq
= tx_seq
;
4072 l2cap_drop_acked_frames(chan
);
4074 if (__is_ctrl_poll(chan
, rx_control
))
4075 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4077 if (!test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4078 __clear_retrans_timer(chan
);
4079 if (__is_ctrl_poll(chan
, rx_control
))
4080 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_FINAL
);
4084 if (__is_ctrl_poll(chan
, rx_control
)) {
4085 l2cap_send_srejtail(chan
);
4087 rx_control
= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
4088 l2cap_send_sframe(chan
, rx_control
);
4092 static inline int l2cap_data_channel_sframe(struct l2cap_chan
*chan
, u32 rx_control
, struct sk_buff
*skb
)
4094 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan
, rx_control
, skb
->len
);
4096 if (__is_ctrl_final(chan
, rx_control
) &&
4097 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4098 __clear_monitor_timer(chan
);
4099 if (chan
->unacked_frames
> 0)
4100 __set_retrans_timer(chan
);
4101 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
4104 switch (__get_ctrl_super(chan
, rx_control
)) {
4105 case L2CAP_SUPER_RR
:
4106 l2cap_data_channel_rrframe(chan
, rx_control
);
4109 case L2CAP_SUPER_REJ
:
4110 l2cap_data_channel_rejframe(chan
, rx_control
);
4113 case L2CAP_SUPER_SREJ
:
4114 l2cap_data_channel_srejframe(chan
, rx_control
);
4117 case L2CAP_SUPER_RNR
:
4118 l2cap_data_channel_rnrframe(chan
, rx_control
);
4126 static int l2cap_ertm_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4130 int len
, next_tx_seq_offset
, req_seq_offset
;
4132 control
= __get_control(chan
, skb
->data
);
4133 skb_pull(skb
, __ctrl_size(chan
));
4137 * We can just drop the corrupted I-frame here.
4138 * Receiver will miss it and start proper recovery
4139 * procedures and ask retransmission.
4141 if (l2cap_check_fcs(chan
, skb
))
4144 if (__is_sar_start(chan
, control
) && !__is_sframe(chan
, control
))
4145 len
-= L2CAP_SDULEN_SIZE
;
4147 if (chan
->fcs
== L2CAP_FCS_CRC16
)
4148 len
-= L2CAP_FCS_SIZE
;
4150 if (len
> chan
->mps
) {
4151 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4155 req_seq
= __get_reqseq(chan
, control
);
4157 req_seq_offset
= __seq_offset(chan
, req_seq
, chan
->expected_ack_seq
);
4159 next_tx_seq_offset
= __seq_offset(chan
, chan
->next_tx_seq
,
4160 chan
->expected_ack_seq
);
4162 /* check for invalid req-seq */
4163 if (req_seq_offset
> next_tx_seq_offset
) {
4164 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4168 if (!__is_sframe(chan
, control
)) {
4170 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4174 l2cap_data_channel_iframe(chan
, control
, skb
);
4178 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4182 l2cap_data_channel_sframe(chan
, control
, skb
);
4192 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4194 struct l2cap_chan
*chan
;
4195 struct sock
*sk
= NULL
;
4200 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4202 BT_DBG("unknown cid 0x%4.4x", cid
);
4208 BT_DBG("chan %p, len %d", chan
, skb
->len
);
4210 if (chan
->state
!= BT_CONNECTED
)
4213 switch (chan
->mode
) {
4214 case L2CAP_MODE_BASIC
:
4215 /* If socket recv buffers overflows we drop data here
4216 * which is *bad* because L2CAP has to be reliable.
4217 * But we don't have any other choice. L2CAP doesn't
4218 * provide flow control mechanism. */
4220 if (chan
->imtu
< skb
->len
)
4223 if (!chan
->ops
->recv(chan
->data
, skb
))
4227 case L2CAP_MODE_ERTM
:
4228 l2cap_ertm_data_rcv(chan
, skb
);
4232 case L2CAP_MODE_STREAMING
:
4233 control
= __get_control(chan
, skb
->data
);
4234 skb_pull(skb
, __ctrl_size(chan
));
4237 if (l2cap_check_fcs(chan
, skb
))
4240 if (__is_sar_start(chan
, control
))
4241 len
-= L2CAP_SDULEN_SIZE
;
4243 if (chan
->fcs
== L2CAP_FCS_CRC16
)
4244 len
-= L2CAP_FCS_SIZE
;
4246 if (len
> chan
->mps
|| len
< 0 || __is_sframe(chan
, control
))
4249 tx_seq
= __get_txseq(chan
, control
);
4251 if (chan
->expected_tx_seq
!= tx_seq
) {
4252 /* Frame(s) missing - must discard partial SDU */
4253 kfree_skb(chan
->sdu
);
4255 chan
->sdu_last_frag
= NULL
;
4258 /* TODO: Notify userland of missing data */
4261 chan
->expected_tx_seq
= __next_seq(chan
, tx_seq
);
4263 if (l2cap_reassemble_sdu(chan
, skb
, control
) == -EMSGSIZE
)
4264 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4269 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
4283 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4285 struct sock
*sk
= NULL
;
4286 struct l2cap_chan
*chan
;
4288 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
);
4296 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4298 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
4301 if (chan
->imtu
< skb
->len
)
4304 if (!chan
->ops
->recv(chan
->data
, skb
))
4316 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, __le16 cid
, struct sk_buff
*skb
)
4318 struct sock
*sk
= NULL
;
4319 struct l2cap_chan
*chan
;
4321 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
);
4329 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4331 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
4334 if (chan
->imtu
< skb
->len
)
4337 if (!chan
->ops
->recv(chan
->data
, skb
))
4349 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4351 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4355 skb_pull(skb
, L2CAP_HDR_SIZE
);
4356 cid
= __le16_to_cpu(lh
->cid
);
4357 len
= __le16_to_cpu(lh
->len
);
4359 if (len
!= skb
->len
) {
4364 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4367 case L2CAP_CID_LE_SIGNALING
:
4368 case L2CAP_CID_SIGNALING
:
4369 l2cap_sig_channel(conn
, skb
);
4372 case L2CAP_CID_CONN_LESS
:
4373 psm
= get_unaligned_le16(skb
->data
);
4375 l2cap_conless_channel(conn
, psm
, skb
);
4378 case L2CAP_CID_LE_DATA
:
4379 l2cap_att_channel(conn
, cid
, skb
);
4383 if (smp_sig_channel(conn
, skb
))
4384 l2cap_conn_del(conn
->hcon
, EACCES
);
4388 l2cap_data_channel(conn
, cid
, skb
);
4393 /* ---- L2CAP interface with lower layer (HCI) ---- */
4395 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
4397 int exact
= 0, lm1
= 0, lm2
= 0;
4398 struct l2cap_chan
*c
;
4400 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4402 /* Find listening sockets and check their link_mode */
4403 read_lock(&chan_list_lock
);
4404 list_for_each_entry(c
, &chan_list
, global_l
) {
4405 struct sock
*sk
= c
->sk
;
4407 if (c
->state
!= BT_LISTEN
)
4410 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4411 lm1
|= HCI_LM_ACCEPT
;
4412 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4413 lm1
|= HCI_LM_MASTER
;
4415 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4416 lm2
|= HCI_LM_ACCEPT
;
4417 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4418 lm2
|= HCI_LM_MASTER
;
4421 read_unlock(&chan_list_lock
);
4423 return exact
? lm1
: lm2
;
4426 int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4428 struct l2cap_conn
*conn
;
4430 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4433 conn
= l2cap_conn_add(hcon
, status
);
4435 l2cap_conn_ready(conn
);
4437 l2cap_conn_del(hcon
, bt_to_errno(status
));
4442 int l2cap_disconn_ind(struct hci_conn
*hcon
)
4444 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4446 BT_DBG("hcon %p", hcon
);
4449 return HCI_ERROR_REMOTE_USER_TERM
;
4450 return conn
->disc_reason
;
4453 int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4455 BT_DBG("hcon %p reason %d", hcon
, reason
);
4457 l2cap_conn_del(hcon
, bt_to_errno(reason
));
4461 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
4463 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
4466 if (encrypt
== 0x00) {
4467 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
4468 __clear_chan_timer(chan
);
4469 __set_chan_timer(chan
,
4470 msecs_to_jiffies(L2CAP_ENC_TIMEOUT
));
4471 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
4472 l2cap_chan_close(chan
, ECONNREFUSED
);
4474 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
4475 __clear_chan_timer(chan
);
4479 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4481 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4482 struct l2cap_chan
*chan
;
4487 BT_DBG("conn %p", conn
);
4489 if (hcon
->type
== LE_LINK
) {
4490 smp_distribute_keys(conn
, 0);
4491 cancel_delayed_work(&conn
->security_timer
);
4496 list_for_each_entry_rcu(chan
, &conn
->chan_l
, list
) {
4497 struct sock
*sk
= chan
->sk
;
4501 BT_DBG("chan->scid %d", chan
->scid
);
4503 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
4504 if (!status
&& encrypt
) {
4505 chan
->sec_level
= hcon
->sec_level
;
4506 l2cap_chan_ready(chan
);
4513 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
4518 if (!status
&& (chan
->state
== BT_CONNECTED
||
4519 chan
->state
== BT_CONFIG
)) {
4520 l2cap_check_encryption(chan
, encrypt
);
4525 if (chan
->state
== BT_CONNECT
) {
4527 struct l2cap_conn_req req
;
4528 req
.scid
= cpu_to_le16(chan
->scid
);
4529 req
.psm
= chan
->psm
;
4531 chan
->ident
= l2cap_get_ident(conn
);
4532 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
4534 l2cap_send_cmd(conn
, chan
->ident
,
4535 L2CAP_CONN_REQ
, sizeof(req
), &req
);
4537 __clear_chan_timer(chan
);
4538 __set_chan_timer(chan
,
4539 msecs_to_jiffies(L2CAP_DISC_TIMEOUT
));
4541 } else if (chan
->state
== BT_CONNECT2
) {
4542 struct l2cap_conn_rsp rsp
;
4546 if (bt_sk(sk
)->defer_setup
) {
4547 struct sock
*parent
= bt_sk(sk
)->parent
;
4548 res
= L2CAP_CR_PEND
;
4549 stat
= L2CAP_CS_AUTHOR_PEND
;
4551 parent
->sk_data_ready(parent
, 0);
4553 l2cap_state_change(chan
, BT_CONFIG
);
4554 res
= L2CAP_CR_SUCCESS
;
4555 stat
= L2CAP_CS_NO_INFO
;
4558 l2cap_state_change(chan
, BT_DISCONN
);
4559 __set_chan_timer(chan
,
4560 msecs_to_jiffies(L2CAP_DISC_TIMEOUT
));
4561 res
= L2CAP_CR_SEC_BLOCK
;
4562 stat
= L2CAP_CS_NO_INFO
;
4565 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4566 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4567 rsp
.result
= cpu_to_le16(res
);
4568 rsp
.status
= cpu_to_le16(stat
);
4569 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
4581 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4583 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4586 conn
= l2cap_conn_add(hcon
, 0);
4591 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4593 if (!(flags
& ACL_CONT
)) {
4594 struct l2cap_hdr
*hdr
;
4595 struct l2cap_chan
*chan
;
4600 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4601 kfree_skb(conn
->rx_skb
);
4602 conn
->rx_skb
= NULL
;
4604 l2cap_conn_unreliable(conn
, ECOMM
);
4607 /* Start fragment always begin with Basic L2CAP header */
4608 if (skb
->len
< L2CAP_HDR_SIZE
) {
4609 BT_ERR("Frame is too short (len %d)", skb
->len
);
4610 l2cap_conn_unreliable(conn
, ECOMM
);
4614 hdr
= (struct l2cap_hdr
*) skb
->data
;
4615 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4616 cid
= __le16_to_cpu(hdr
->cid
);
4618 if (len
== skb
->len
) {
4619 /* Complete frame received */
4620 l2cap_recv_frame(conn
, skb
);
4624 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4626 if (skb
->len
> len
) {
4627 BT_ERR("Frame is too long (len %d, expected len %d)",
4629 l2cap_conn_unreliable(conn
, ECOMM
);
4633 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4635 if (chan
&& chan
->sk
) {
4636 struct sock
*sk
= chan
->sk
;
4638 if (chan
->imtu
< len
- L2CAP_HDR_SIZE
) {
4639 BT_ERR("Frame exceeding recv MTU (len %d, "
4643 l2cap_conn_unreliable(conn
, ECOMM
);
4649 /* Allocate skb for the complete frame (with header) */
4650 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4654 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4656 conn
->rx_len
= len
- skb
->len
;
4658 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4660 if (!conn
->rx_len
) {
4661 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4662 l2cap_conn_unreliable(conn
, ECOMM
);
4666 if (skb
->len
> conn
->rx_len
) {
4667 BT_ERR("Fragment is too long (len %d, expected %d)",
4668 skb
->len
, conn
->rx_len
);
4669 kfree_skb(conn
->rx_skb
);
4670 conn
->rx_skb
= NULL
;
4672 l2cap_conn_unreliable(conn
, ECOMM
);
4676 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4678 conn
->rx_len
-= skb
->len
;
4680 if (!conn
->rx_len
) {
4681 /* Complete frame received */
4682 l2cap_recv_frame(conn
, conn
->rx_skb
);
4683 conn
->rx_skb
= NULL
;
4692 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4694 struct l2cap_chan
*c
;
4696 read_lock(&chan_list_lock
);
4698 list_for_each_entry(c
, &chan_list
, global_l
) {
4699 struct sock
*sk
= c
->sk
;
4701 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4702 batostr(&bt_sk(sk
)->src
),
4703 batostr(&bt_sk(sk
)->dst
),
4704 c
->state
, __le16_to_cpu(c
->psm
),
4705 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
4706 c
->sec_level
, c
->mode
);
4709 read_unlock(&chan_list_lock
);
4714 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4716 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4719 static const struct file_operations l2cap_debugfs_fops
= {
4720 .open
= l2cap_debugfs_open
,
4722 .llseek
= seq_lseek
,
4723 .release
= single_release
,
4726 static struct dentry
*l2cap_debugfs
;
4728 int __init
l2cap_init(void)
4732 err
= l2cap_init_sockets();
4737 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4738 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4740 BT_ERR("Failed to create L2CAP debug file");
4746 void l2cap_exit(void)
4748 debugfs_remove(l2cap_debugfs
);
4749 l2cap_cleanup_sockets();
4752 module_param(disable_ertm
, bool, 0644);
4753 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");