2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
61 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
62 static u8 l2cap_fixed_chan
[8] = { 0x02, };
64 static struct workqueue_struct
*_busy_wq
;
66 static LIST_HEAD(chan_list
);
67 static DEFINE_RWLOCK(chan_list_lock
);
69 static void l2cap_busy_work(struct work_struct
*work
);
71 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
72 u8 code
, u8 ident
, u16 dlen
, void *data
);
73 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
75 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
76 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
77 struct l2cap_chan
*chan
, int err
);
79 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
);
81 /* ---- L2CAP channels ---- */
83 static inline void chan_hold(struct l2cap_chan
*c
)
85 atomic_inc(&c
->refcnt
);
88 static inline void chan_put(struct l2cap_chan
*c
)
90 if (atomic_dec_and_test(&c
->refcnt
))
94 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
98 list_for_each_entry(c
, &conn
->chan_l
, list
) {
106 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
108 struct l2cap_chan
*c
;
110 list_for_each_entry(c
, &conn
->chan_l
, list
) {
117 /* Find channel with given SCID.
118 * Returns locked socket */
119 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
121 struct l2cap_chan
*c
;
123 read_lock(&conn
->chan_lock
);
124 c
= __l2cap_get_chan_by_scid(conn
, cid
);
127 read_unlock(&conn
->chan_lock
);
131 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
133 struct l2cap_chan
*c
;
135 list_for_each_entry(c
, &conn
->chan_l
, list
) {
136 if (c
->ident
== ident
)
142 static inline struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
144 struct l2cap_chan
*c
;
146 read_lock(&conn
->chan_lock
);
147 c
= __l2cap_get_chan_by_ident(conn
, ident
);
150 read_unlock(&conn
->chan_lock
);
154 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
156 struct l2cap_chan
*c
;
158 list_for_each_entry(c
, &chan_list
, global_l
) {
159 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
168 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
172 write_lock_bh(&chan_list_lock
);
174 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
187 for (p
= 0x1001; p
< 0x1100; p
+= 2)
188 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
189 chan
->psm
= cpu_to_le16(p
);
190 chan
->sport
= cpu_to_le16(p
);
197 write_unlock_bh(&chan_list_lock
);
201 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
203 write_lock_bh(&chan_list_lock
);
207 write_unlock_bh(&chan_list_lock
);
212 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
214 u16 cid
= L2CAP_CID_DYN_START
;
216 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
217 if (!__l2cap_get_chan_by_scid(conn
, cid
))
224 static void l2cap_set_timer(struct l2cap_chan
*chan
, struct timer_list
*timer
, long timeout
)
226 BT_DBG("chan %p state %d timeout %ld", chan
->sk
, chan
->state
, timeout
);
228 if (!mod_timer(timer
, jiffies
+ timeout
))
232 static void l2cap_clear_timer(struct l2cap_chan
*chan
, struct timer_list
*timer
)
234 BT_DBG("chan %p state %d", chan
, chan
->state
);
236 if (timer_pending(timer
) && del_timer(timer
))
240 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
243 chan
->ops
->state_change(chan
->data
, state
);
246 static void l2cap_chan_timeout(unsigned long arg
)
248 struct l2cap_chan
*chan
= (struct l2cap_chan
*) arg
;
249 struct sock
*sk
= chan
->sk
;
252 BT_DBG("chan %p state %d", chan
, chan
->state
);
256 if (sock_owned_by_user(sk
)) {
257 /* sk is owned by user. Try again later */
258 __set_chan_timer(chan
, HZ
/ 5);
264 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
265 reason
= ECONNREFUSED
;
266 else if (chan
->state
== BT_CONNECT
&&
267 chan
->sec_level
!= BT_SECURITY_SDP
)
268 reason
= ECONNREFUSED
;
272 l2cap_chan_close(chan
, reason
);
276 chan
->ops
->close(chan
->data
);
280 struct l2cap_chan
*l2cap_chan_create(struct sock
*sk
)
282 struct l2cap_chan
*chan
;
284 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
290 write_lock_bh(&chan_list_lock
);
291 list_add(&chan
->global_l
, &chan_list
);
292 write_unlock_bh(&chan_list_lock
);
294 setup_timer(&chan
->chan_timer
, l2cap_chan_timeout
, (unsigned long) chan
);
296 chan
->state
= BT_OPEN
;
298 atomic_set(&chan
->refcnt
, 1);
303 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
305 write_lock_bh(&chan_list_lock
);
306 list_del(&chan
->global_l
);
307 write_unlock_bh(&chan_list_lock
);
312 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
314 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
315 chan
->psm
, chan
->dcid
);
317 conn
->disc_reason
= 0x13;
321 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
322 if (conn
->hcon
->type
== LE_LINK
) {
324 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
325 chan
->scid
= L2CAP_CID_LE_DATA
;
326 chan
->dcid
= L2CAP_CID_LE_DATA
;
328 /* Alloc CID for connection-oriented socket */
329 chan
->scid
= l2cap_alloc_cid(conn
);
330 chan
->omtu
= L2CAP_DEFAULT_MTU
;
332 } else if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
333 /* Connectionless socket */
334 chan
->scid
= L2CAP_CID_CONN_LESS
;
335 chan
->dcid
= L2CAP_CID_CONN_LESS
;
336 chan
->omtu
= L2CAP_DEFAULT_MTU
;
338 /* Raw socket can send/recv signalling messages only */
339 chan
->scid
= L2CAP_CID_SIGNALING
;
340 chan
->dcid
= L2CAP_CID_SIGNALING
;
341 chan
->omtu
= L2CAP_DEFAULT_MTU
;
346 list_add(&chan
->list
, &conn
->chan_l
);
350 * Must be called on the locked socket. */
351 static void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
353 struct sock
*sk
= chan
->sk
;
354 struct l2cap_conn
*conn
= chan
->conn
;
355 struct sock
*parent
= bt_sk(sk
)->parent
;
357 __clear_chan_timer(chan
);
359 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
362 /* Delete from channel list */
363 write_lock_bh(&conn
->chan_lock
);
364 list_del(&chan
->list
);
365 write_unlock_bh(&conn
->chan_lock
);
369 hci_conn_put(conn
->hcon
);
372 l2cap_state_change(chan
, BT_CLOSED
);
373 sock_set_flag(sk
, SOCK_ZAPPED
);
379 bt_accept_unlink(sk
);
380 parent
->sk_data_ready(parent
, 0);
382 sk
->sk_state_change(sk
);
384 if (!(chan
->conf_state
& L2CAP_CONF_OUTPUT_DONE
&&
385 chan
->conf_state
& L2CAP_CONF_INPUT_DONE
))
388 skb_queue_purge(&chan
->tx_q
);
390 if (chan
->mode
== L2CAP_MODE_ERTM
) {
391 struct srej_list
*l
, *tmp
;
393 __clear_retrans_timer(chan
);
394 __clear_monitor_timer(chan
);
395 __clear_ack_timer(chan
);
397 skb_queue_purge(&chan
->srej_q
);
398 skb_queue_purge(&chan
->busy_q
);
400 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
407 static void l2cap_chan_cleanup_listen(struct sock
*parent
)
411 BT_DBG("parent %p", parent
);
413 /* Close not yet accepted channels */
414 while ((sk
= bt_accept_dequeue(parent
, NULL
))) {
415 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
416 __clear_chan_timer(chan
);
418 l2cap_chan_close(chan
, ECONNRESET
);
420 chan
->ops
->close(chan
->data
);
424 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
426 struct l2cap_conn
*conn
= chan
->conn
;
427 struct sock
*sk
= chan
->sk
;
429 BT_DBG("chan %p state %d socket %p", chan
, chan
->state
, sk
->sk_socket
);
431 switch (chan
->state
) {
433 l2cap_chan_cleanup_listen(sk
);
435 l2cap_state_change(chan
, BT_CLOSED
);
436 sock_set_flag(sk
, SOCK_ZAPPED
);
441 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
442 conn
->hcon
->type
== ACL_LINK
) {
443 __clear_chan_timer(chan
);
444 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
445 l2cap_send_disconn_req(conn
, chan
, reason
);
447 l2cap_chan_del(chan
, reason
);
451 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
452 conn
->hcon
->type
== ACL_LINK
) {
453 struct l2cap_conn_rsp rsp
;
456 if (bt_sk(sk
)->defer_setup
)
457 result
= L2CAP_CR_SEC_BLOCK
;
459 result
= L2CAP_CR_BAD_PSM
;
460 l2cap_state_change(chan
, BT_DISCONN
);
462 rsp
.scid
= cpu_to_le16(chan
->dcid
);
463 rsp
.dcid
= cpu_to_le16(chan
->scid
);
464 rsp
.result
= cpu_to_le16(result
);
465 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
466 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
470 l2cap_chan_del(chan
, reason
);
475 l2cap_chan_del(chan
, reason
);
479 sock_set_flag(sk
, SOCK_ZAPPED
);
484 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
486 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
487 switch (chan
->sec_level
) {
488 case BT_SECURITY_HIGH
:
489 return HCI_AT_DEDICATED_BONDING_MITM
;
490 case BT_SECURITY_MEDIUM
:
491 return HCI_AT_DEDICATED_BONDING
;
493 return HCI_AT_NO_BONDING
;
495 } else if (chan
->psm
== cpu_to_le16(0x0001)) {
496 if (chan
->sec_level
== BT_SECURITY_LOW
)
497 chan
->sec_level
= BT_SECURITY_SDP
;
499 if (chan
->sec_level
== BT_SECURITY_HIGH
)
500 return HCI_AT_NO_BONDING_MITM
;
502 return HCI_AT_NO_BONDING
;
504 switch (chan
->sec_level
) {
505 case BT_SECURITY_HIGH
:
506 return HCI_AT_GENERAL_BONDING_MITM
;
507 case BT_SECURITY_MEDIUM
:
508 return HCI_AT_GENERAL_BONDING
;
510 return HCI_AT_NO_BONDING
;
515 /* Service level security */
516 static inline int l2cap_check_security(struct l2cap_chan
*chan
)
518 struct l2cap_conn
*conn
= chan
->conn
;
521 auth_type
= l2cap_get_auth_type(chan
);
523 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
526 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
530 /* Get next available identifier.
531 * 1 - 128 are used by kernel.
532 * 129 - 199 are reserved.
533 * 200 - 254 are used by utilities like l2ping, etc.
536 spin_lock_bh(&conn
->lock
);
538 if (++conn
->tx_ident
> 128)
543 spin_unlock_bh(&conn
->lock
);
548 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
550 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
553 BT_DBG("code 0x%2.2x", code
);
558 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
559 flags
= ACL_START_NO_FLUSH
;
563 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
565 hci_send_acl(conn
->hcon
, skb
, flags
);
568 static inline void l2cap_send_sframe(struct l2cap_chan
*chan
, u16 control
)
571 struct l2cap_hdr
*lh
;
572 struct l2cap_conn
*conn
= chan
->conn
;
573 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
576 if (chan
->state
!= BT_CONNECTED
)
579 if (chan
->fcs
== L2CAP_FCS_CRC16
)
582 BT_DBG("chan %p, control 0x%2.2x", chan
, control
);
584 count
= min_t(unsigned int, conn
->mtu
, hlen
);
585 control
|= L2CAP_CTRL_FRAME_TYPE
;
587 if (chan
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
588 control
|= L2CAP_CTRL_FINAL
;
589 chan
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
592 if (chan
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
593 control
|= L2CAP_CTRL_POLL
;
594 chan
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
597 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
601 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
602 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
603 lh
->cid
= cpu_to_le16(chan
->dcid
);
604 put_unaligned_le16(control
, skb_put(skb
, 2));
606 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
607 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
608 put_unaligned_le16(fcs
, skb_put(skb
, 2));
611 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
612 flags
= ACL_START_NO_FLUSH
;
616 bt_cb(skb
)->force_active
= chan
->force_active
;
618 hci_send_acl(chan
->conn
->hcon
, skb
, flags
);
621 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, u16 control
)
623 if (chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
624 control
|= L2CAP_SUPER_RCV_NOT_READY
;
625 chan
->conn_state
|= L2CAP_CONN_RNR_SENT
;
627 control
|= L2CAP_SUPER_RCV_READY
;
629 control
|= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
631 l2cap_send_sframe(chan
, control
);
634 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
636 return !(chan
->conf_state
& L2CAP_CONF_CONNECT_PEND
);
639 static void l2cap_do_start(struct l2cap_chan
*chan
)
641 struct l2cap_conn
*conn
= chan
->conn
;
643 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
644 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
647 if (l2cap_check_security(chan
) &&
648 __l2cap_no_conn_pending(chan
)) {
649 struct l2cap_conn_req req
;
650 req
.scid
= cpu_to_le16(chan
->scid
);
653 chan
->ident
= l2cap_get_ident(conn
);
654 chan
->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
656 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
660 struct l2cap_info_req req
;
661 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
663 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
664 conn
->info_ident
= l2cap_get_ident(conn
);
666 mod_timer(&conn
->info_timer
, jiffies
+
667 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
669 l2cap_send_cmd(conn
, conn
->info_ident
,
670 L2CAP_INFO_REQ
, sizeof(req
), &req
);
674 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
676 u32 local_feat_mask
= l2cap_feat_mask
;
678 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
681 case L2CAP_MODE_ERTM
:
682 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
683 case L2CAP_MODE_STREAMING
:
684 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
690 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
693 struct l2cap_disconn_req req
;
700 if (chan
->mode
== L2CAP_MODE_ERTM
) {
701 __clear_retrans_timer(chan
);
702 __clear_monitor_timer(chan
);
703 __clear_ack_timer(chan
);
706 req
.dcid
= cpu_to_le16(chan
->dcid
);
707 req
.scid
= cpu_to_le16(chan
->scid
);
708 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
709 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
711 l2cap_state_change(chan
, BT_DISCONN
);
715 /* ---- L2CAP connections ---- */
716 static void l2cap_conn_start(struct l2cap_conn
*conn
)
718 struct l2cap_chan
*chan
, *tmp
;
720 BT_DBG("conn %p", conn
);
722 read_lock(&conn
->chan_lock
);
724 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
725 struct sock
*sk
= chan
->sk
;
729 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
734 if (chan
->state
== BT_CONNECT
) {
735 struct l2cap_conn_req req
;
737 if (!l2cap_check_security(chan
) ||
738 !__l2cap_no_conn_pending(chan
)) {
743 if (!l2cap_mode_supported(chan
->mode
,
745 && chan
->conf_state
&
746 L2CAP_CONF_STATE2_DEVICE
) {
747 /* l2cap_chan_close() calls list_del(chan)
748 * so release the lock */
749 read_unlock_bh(&conn
->chan_lock
);
750 l2cap_chan_close(chan
, ECONNRESET
);
751 read_lock_bh(&conn
->chan_lock
);
756 req
.scid
= cpu_to_le16(chan
->scid
);
759 chan
->ident
= l2cap_get_ident(conn
);
760 chan
->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
762 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
,
765 } else if (chan
->state
== BT_CONNECT2
) {
766 struct l2cap_conn_rsp rsp
;
768 rsp
.scid
= cpu_to_le16(chan
->dcid
);
769 rsp
.dcid
= cpu_to_le16(chan
->scid
);
771 if (l2cap_check_security(chan
)) {
772 if (bt_sk(sk
)->defer_setup
) {
773 struct sock
*parent
= bt_sk(sk
)->parent
;
774 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
775 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
776 parent
->sk_data_ready(parent
, 0);
779 l2cap_state_change(chan
, BT_CONFIG
);
780 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
781 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
784 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
785 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
788 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
791 if (chan
->conf_state
& L2CAP_CONF_REQ_SENT
||
792 rsp
.result
!= L2CAP_CR_SUCCESS
) {
797 chan
->conf_state
|= L2CAP_CONF_REQ_SENT
;
798 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
799 l2cap_build_conf_req(chan
, buf
), buf
);
800 chan
->num_conf_req
++;
806 read_unlock(&conn
->chan_lock
);
809 /* Find socket with cid and source bdaddr.
810 * Returns closest match, locked.
812 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, __le16 cid
, bdaddr_t
*src
)
814 struct l2cap_chan
*c
, *c1
= NULL
;
816 read_lock(&chan_list_lock
);
818 list_for_each_entry(c
, &chan_list
, global_l
) {
819 struct sock
*sk
= c
->sk
;
821 if (state
&& c
->state
!= state
)
824 if (c
->scid
== cid
) {
826 if (!bacmp(&bt_sk(sk
)->src
, src
)) {
827 read_unlock(&chan_list_lock
);
832 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
837 read_unlock(&chan_list_lock
);
842 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
844 struct sock
*parent
, *sk
;
845 struct l2cap_chan
*chan
, *pchan
;
849 /* Check if we have socket listening on cid */
850 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
857 bh_lock_sock(parent
);
859 /* Check for backlog size */
860 if (sk_acceptq_is_full(parent
)) {
861 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
865 chan
= pchan
->ops
->new_connection(pchan
->data
);
871 write_lock_bh(&conn
->chan_lock
);
873 hci_conn_hold(conn
->hcon
);
875 bacpy(&bt_sk(sk
)->src
, conn
->src
);
876 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
878 bt_accept_enqueue(parent
, sk
);
880 __l2cap_chan_add(conn
, chan
);
882 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
884 l2cap_state_change(chan
, BT_CONNECTED
);
885 parent
->sk_data_ready(parent
, 0);
887 write_unlock_bh(&conn
->chan_lock
);
890 bh_unlock_sock(parent
);
893 static void l2cap_chan_ready(struct sock
*sk
)
895 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
896 struct sock
*parent
= bt_sk(sk
)->parent
;
898 BT_DBG("sk %p, parent %p", sk
, parent
);
900 chan
->conf_state
= 0;
901 __clear_chan_timer(chan
);
903 sk
->sk_state
= BT_CONNECTED
;
904 sk
->sk_state_change(sk
);
907 parent
->sk_data_ready(parent
, 0);
910 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
912 struct l2cap_chan
*chan
;
914 BT_DBG("conn %p", conn
);
916 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
917 l2cap_le_conn_ready(conn
);
919 read_lock(&conn
->chan_lock
);
921 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
922 struct sock
*sk
= chan
->sk
;
926 if (conn
->hcon
->type
== LE_LINK
)
927 if (smp_conn_security(conn
, chan
->sec_level
))
928 l2cap_chan_ready(sk
);
930 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
931 __clear_chan_timer(chan
);
932 l2cap_state_change(chan
, BT_CONNECTED
);
933 sk
->sk_state_change(sk
);
935 } else if (chan
->state
== BT_CONNECT
)
936 l2cap_do_start(chan
);
941 read_unlock(&conn
->chan_lock
);
944 /* Notify sockets that we cannot guarantee reliability anymore */
945 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
947 struct l2cap_chan
*chan
;
949 BT_DBG("conn %p", conn
);
951 read_lock(&conn
->chan_lock
);
953 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
954 struct sock
*sk
= chan
->sk
;
956 if (chan
->force_reliable
)
960 read_unlock(&conn
->chan_lock
);
963 static void l2cap_info_timeout(unsigned long arg
)
965 struct l2cap_conn
*conn
= (void *) arg
;
967 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
968 conn
->info_ident
= 0;
970 l2cap_conn_start(conn
);
973 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
975 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
980 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
984 hcon
->l2cap_data
= conn
;
987 BT_DBG("hcon %p conn %p", hcon
, conn
);
989 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
990 conn
->mtu
= hcon
->hdev
->le_mtu
;
992 conn
->mtu
= hcon
->hdev
->acl_mtu
;
994 conn
->src
= &hcon
->hdev
->bdaddr
;
995 conn
->dst
= &hcon
->dst
;
999 spin_lock_init(&conn
->lock
);
1000 rwlock_init(&conn
->chan_lock
);
1002 INIT_LIST_HEAD(&conn
->chan_l
);
1004 if (hcon
->type
!= LE_LINK
)
1005 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
1006 (unsigned long) conn
);
1008 conn
->disc_reason
= 0x13;
1013 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1015 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1016 struct l2cap_chan
*chan
, *l
;
1022 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1024 kfree_skb(conn
->rx_skb
);
1027 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1030 l2cap_chan_del(chan
, err
);
1032 chan
->ops
->close(chan
->data
);
1035 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1036 del_timer_sync(&conn
->info_timer
);
1038 hcon
->l2cap_data
= NULL
;
1042 static inline void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
1044 write_lock_bh(&conn
->chan_lock
);
1045 __l2cap_chan_add(conn
, chan
);
1046 write_unlock_bh(&conn
->chan_lock
);
1049 /* ---- Socket interface ---- */
1051 /* Find socket with psm and source bdaddr.
1052 * Returns closest match.
1054 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
1056 struct l2cap_chan
*c
, *c1
= NULL
;
1058 read_lock(&chan_list_lock
);
1060 list_for_each_entry(c
, &chan_list
, global_l
) {
1061 struct sock
*sk
= c
->sk
;
1063 if (state
&& c
->state
!= state
)
1066 if (c
->psm
== psm
) {
1068 if (!bacmp(&bt_sk(sk
)->src
, src
)) {
1069 read_unlock(&chan_list_lock
);
1074 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
1079 read_unlock(&chan_list_lock
);
1084 int l2cap_chan_connect(struct l2cap_chan
*chan
)
1086 struct sock
*sk
= chan
->sk
;
1087 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1088 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
1089 struct l2cap_conn
*conn
;
1090 struct hci_conn
*hcon
;
1091 struct hci_dev
*hdev
;
1095 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
1098 hdev
= hci_get_route(dst
, src
);
1100 return -EHOSTUNREACH
;
1102 hci_dev_lock_bh(hdev
);
1104 auth_type
= l2cap_get_auth_type(chan
);
1106 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1107 hcon
= hci_connect(hdev
, LE_LINK
, dst
,
1108 chan
->sec_level
, auth_type
);
1110 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
1111 chan
->sec_level
, auth_type
);
1114 err
= PTR_ERR(hcon
);
1118 conn
= l2cap_conn_add(hcon
, 0);
1125 /* Update source addr of the socket */
1126 bacpy(src
, conn
->src
);
1128 l2cap_chan_add(conn
, chan
);
1130 l2cap_state_change(chan
, BT_CONNECT
);
1131 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1133 if (hcon
->state
== BT_CONNECTED
) {
1134 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1135 __clear_chan_timer(chan
);
1136 if (l2cap_check_security(chan
))
1137 l2cap_state_change(chan
, BT_CONNECTED
);
1139 l2cap_do_start(chan
);
1145 hci_dev_unlock_bh(hdev
);
1150 int __l2cap_wait_ack(struct sock
*sk
)
1152 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1153 DECLARE_WAITQUEUE(wait
, current
);
1157 add_wait_queue(sk_sleep(sk
), &wait
);
1158 while ((chan
->unacked_frames
> 0 && chan
->conn
)) {
1159 set_current_state(TASK_INTERRUPTIBLE
);
1164 if (signal_pending(current
)) {
1165 err
= sock_intr_errno(timeo
);
1170 timeo
= schedule_timeout(timeo
);
1173 err
= sock_error(sk
);
1177 set_current_state(TASK_RUNNING
);
1178 remove_wait_queue(sk_sleep(sk
), &wait
);
1182 static void l2cap_monitor_timeout(unsigned long arg
)
1184 struct l2cap_chan
*chan
= (void *) arg
;
1185 struct sock
*sk
= chan
->sk
;
1187 BT_DBG("chan %p", chan
);
1190 if (chan
->retry_count
>= chan
->remote_max_tx
) {
1191 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1196 chan
->retry_count
++;
1197 __set_monitor_timer(chan
);
1199 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1203 static void l2cap_retrans_timeout(unsigned long arg
)
1205 struct l2cap_chan
*chan
= (void *) arg
;
1206 struct sock
*sk
= chan
->sk
;
1208 BT_DBG("chan %p", chan
);
1211 chan
->retry_count
= 1;
1212 __set_monitor_timer(chan
);
1214 chan
->conn_state
|= L2CAP_CONN_WAIT_F
;
1216 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1220 static void l2cap_drop_acked_frames(struct l2cap_chan
*chan
)
1222 struct sk_buff
*skb
;
1224 while ((skb
= skb_peek(&chan
->tx_q
)) &&
1225 chan
->unacked_frames
) {
1226 if (bt_cb(skb
)->tx_seq
== chan
->expected_ack_seq
)
1229 skb
= skb_dequeue(&chan
->tx_q
);
1232 chan
->unacked_frames
--;
1235 if (!chan
->unacked_frames
)
1236 __clear_retrans_timer(chan
);
1239 void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
1241 struct hci_conn
*hcon
= chan
->conn
->hcon
;
1244 BT_DBG("chan %p, skb %p len %d", chan
, skb
, skb
->len
);
1246 if (!chan
->flushable
&& lmp_no_flush_capable(hcon
->hdev
))
1247 flags
= ACL_START_NO_FLUSH
;
1251 bt_cb(skb
)->force_active
= chan
->force_active
;
1252 hci_send_acl(hcon
, skb
, flags
);
1255 void l2cap_streaming_send(struct l2cap_chan
*chan
)
1257 struct sk_buff
*skb
;
1260 while ((skb
= skb_dequeue(&chan
->tx_q
))) {
1261 control
= get_unaligned_le16(skb
->data
+ L2CAP_HDR_SIZE
);
1262 control
|= chan
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1263 put_unaligned_le16(control
, skb
->data
+ L2CAP_HDR_SIZE
);
1265 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1266 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
- 2);
1267 put_unaligned_le16(fcs
, skb
->data
+ skb
->len
- 2);
1270 l2cap_do_send(chan
, skb
);
1272 chan
->next_tx_seq
= (chan
->next_tx_seq
+ 1) % 64;
1276 static void l2cap_retransmit_one_frame(struct l2cap_chan
*chan
, u8 tx_seq
)
1278 struct sk_buff
*skb
, *tx_skb
;
1281 skb
= skb_peek(&chan
->tx_q
);
1286 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1289 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1292 } while ((skb
= skb_queue_next(&chan
->tx_q
, skb
)));
1294 if (chan
->remote_max_tx
&&
1295 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1296 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1300 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1301 bt_cb(skb
)->retries
++;
1302 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1303 control
&= L2CAP_CTRL_SAR
;
1305 if (chan
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1306 control
|= L2CAP_CTRL_FINAL
;
1307 chan
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1310 control
|= (chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1311 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1313 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1315 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1316 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1317 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1320 l2cap_do_send(chan
, tx_skb
);
1323 int l2cap_ertm_send(struct l2cap_chan
*chan
)
1325 struct sk_buff
*skb
, *tx_skb
;
1329 if (chan
->state
!= BT_CONNECTED
)
1332 while ((skb
= chan
->tx_send_head
) && (!l2cap_tx_window_full(chan
))) {
1334 if (chan
->remote_max_tx
&&
1335 bt_cb(skb
)->retries
== chan
->remote_max_tx
) {
1336 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1340 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1342 bt_cb(skb
)->retries
++;
1344 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1345 control
&= L2CAP_CTRL_SAR
;
1347 if (chan
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1348 control
|= L2CAP_CTRL_FINAL
;
1349 chan
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1351 control
|= (chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1352 | (chan
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1353 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1356 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1357 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1358 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1361 l2cap_do_send(chan
, tx_skb
);
1363 __set_retrans_timer(chan
);
1365 bt_cb(skb
)->tx_seq
= chan
->next_tx_seq
;
1366 chan
->next_tx_seq
= (chan
->next_tx_seq
+ 1) % 64;
1368 if (bt_cb(skb
)->retries
== 1)
1369 chan
->unacked_frames
++;
1371 chan
->frames_sent
++;
1373 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1374 chan
->tx_send_head
= NULL
;
1376 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1384 static int l2cap_retransmit_frames(struct l2cap_chan
*chan
)
1388 if (!skb_queue_empty(&chan
->tx_q
))
1389 chan
->tx_send_head
= chan
->tx_q
.next
;
1391 chan
->next_tx_seq
= chan
->expected_ack_seq
;
1392 ret
= l2cap_ertm_send(chan
);
1396 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1400 control
|= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1402 if (chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
1403 control
|= L2CAP_SUPER_RCV_NOT_READY
;
1404 chan
->conn_state
|= L2CAP_CONN_RNR_SENT
;
1405 l2cap_send_sframe(chan
, control
);
1409 if (l2cap_ertm_send(chan
) > 0)
1412 control
|= L2CAP_SUPER_RCV_READY
;
1413 l2cap_send_sframe(chan
, control
);
1416 static void l2cap_send_srejtail(struct l2cap_chan
*chan
)
1418 struct srej_list
*tail
;
1421 control
= L2CAP_SUPER_SELECT_REJECT
;
1422 control
|= L2CAP_CTRL_FINAL
;
1424 tail
= list_entry((&chan
->srej_l
)->prev
, struct srej_list
, list
);
1425 control
|= tail
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1427 l2cap_send_sframe(chan
, control
);
1430 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1432 struct l2cap_conn
*conn
= l2cap_pi(sk
)->chan
->conn
;
1433 struct sk_buff
**frag
;
1436 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1442 /* Continuation fragments (no L2CAP header) */
1443 frag
= &skb_shinfo(skb
)->frag_list
;
1445 count
= min_t(unsigned int, conn
->mtu
, len
);
1447 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1450 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1456 frag
= &(*frag
)->next
;
1462 struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1464 struct sock
*sk
= chan
->sk
;
1465 struct l2cap_conn
*conn
= chan
->conn
;
1466 struct sk_buff
*skb
;
1467 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1468 struct l2cap_hdr
*lh
;
1470 BT_DBG("sk %p len %d", sk
, (int)len
);
1472 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1473 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1474 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1476 return ERR_PTR(err
);
1478 /* Create L2CAP header */
1479 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1480 lh
->cid
= cpu_to_le16(chan
->dcid
);
1481 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1482 put_unaligned_le16(chan
->psm
, skb_put(skb
, 2));
1484 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1485 if (unlikely(err
< 0)) {
1487 return ERR_PTR(err
);
1492 struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1494 struct sock
*sk
= chan
->sk
;
1495 struct l2cap_conn
*conn
= chan
->conn
;
1496 struct sk_buff
*skb
;
1497 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1498 struct l2cap_hdr
*lh
;
1500 BT_DBG("sk %p len %d", sk
, (int)len
);
1502 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1503 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1504 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1506 return ERR_PTR(err
);
1508 /* Create L2CAP header */
1509 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1510 lh
->cid
= cpu_to_le16(chan
->dcid
);
1511 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1513 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1514 if (unlikely(err
< 0)) {
1516 return ERR_PTR(err
);
1521 struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1523 struct sock
*sk
= chan
->sk
;
1524 struct l2cap_conn
*conn
= chan
->conn
;
1525 struct sk_buff
*skb
;
1526 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1527 struct l2cap_hdr
*lh
;
1529 BT_DBG("sk %p len %d", sk
, (int)len
);
1532 return ERR_PTR(-ENOTCONN
);
1537 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1540 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1541 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1542 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1544 return ERR_PTR(err
);
1546 /* Create L2CAP header */
1547 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1548 lh
->cid
= cpu_to_le16(chan
->dcid
);
1549 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1550 put_unaligned_le16(control
, skb_put(skb
, 2));
1552 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1554 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1555 if (unlikely(err
< 0)) {
1557 return ERR_PTR(err
);
1560 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1561 put_unaligned_le16(0, skb_put(skb
, 2));
1563 bt_cb(skb
)->retries
= 0;
1567 int l2cap_sar_segment_sdu(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1569 struct sk_buff
*skb
;
1570 struct sk_buff_head sar_queue
;
1574 skb_queue_head_init(&sar_queue
);
1575 control
= L2CAP_SDU_START
;
1576 skb
= l2cap_create_iframe_pdu(chan
, msg
, chan
->remote_mps
, control
, len
);
1578 return PTR_ERR(skb
);
1580 __skb_queue_tail(&sar_queue
, skb
);
1581 len
-= chan
->remote_mps
;
1582 size
+= chan
->remote_mps
;
1587 if (len
> chan
->remote_mps
) {
1588 control
= L2CAP_SDU_CONTINUE
;
1589 buflen
= chan
->remote_mps
;
1591 control
= L2CAP_SDU_END
;
1595 skb
= l2cap_create_iframe_pdu(chan
, msg
, buflen
, control
, 0);
1597 skb_queue_purge(&sar_queue
);
1598 return PTR_ERR(skb
);
1601 __skb_queue_tail(&sar_queue
, skb
);
1605 skb_queue_splice_tail(&sar_queue
, &chan
->tx_q
);
1606 if (chan
->tx_send_head
== NULL
)
1607 chan
->tx_send_head
= sar_queue
.next
;
1612 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
)
1614 struct sk_buff
*skb
;
1618 /* Connectionless channel */
1619 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
1620 skb
= l2cap_create_connless_pdu(chan
, msg
, len
);
1622 return PTR_ERR(skb
);
1624 l2cap_do_send(chan
, skb
);
1628 switch (chan
->mode
) {
1629 case L2CAP_MODE_BASIC
:
1630 /* Check outgoing MTU */
1631 if (len
> chan
->omtu
)
1634 /* Create a basic PDU */
1635 skb
= l2cap_create_basic_pdu(chan
, msg
, len
);
1637 return PTR_ERR(skb
);
1639 l2cap_do_send(chan
, skb
);
1643 case L2CAP_MODE_ERTM
:
1644 case L2CAP_MODE_STREAMING
:
1645 /* Entire SDU fits into one PDU */
1646 if (len
<= chan
->remote_mps
) {
1647 control
= L2CAP_SDU_UNSEGMENTED
;
1648 skb
= l2cap_create_iframe_pdu(chan
, msg
, len
, control
,
1651 return PTR_ERR(skb
);
1653 __skb_queue_tail(&chan
->tx_q
, skb
);
1655 if (chan
->tx_send_head
== NULL
)
1656 chan
->tx_send_head
= skb
;
1659 /* Segment SDU into multiples PDUs */
1660 err
= l2cap_sar_segment_sdu(chan
, msg
, len
);
1665 if (chan
->mode
== L2CAP_MODE_STREAMING
) {
1666 l2cap_streaming_send(chan
);
1671 if ((chan
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
1672 (chan
->conn_state
& L2CAP_CONN_WAIT_F
)) {
1677 err
= l2cap_ertm_send(chan
);
1684 BT_DBG("bad state %1.1x", chan
->mode
);
1691 /* Copy frame to all raw sockets on that connection */
1692 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
1694 struct sk_buff
*nskb
;
1695 struct l2cap_chan
*chan
;
1697 BT_DBG("conn %p", conn
);
1699 read_lock(&conn
->chan_lock
);
1700 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1701 struct sock
*sk
= chan
->sk
;
1702 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
1705 /* Don't send frame to the socket it came from */
1708 nskb
= skb_clone(skb
, GFP_ATOMIC
);
1712 if (chan
->ops
->recv(chan
->data
, nskb
))
1715 read_unlock(&conn
->chan_lock
);
1718 /* ---- L2CAP signalling commands ---- */
1719 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
1720 u8 code
, u8 ident
, u16 dlen
, void *data
)
1722 struct sk_buff
*skb
, **frag
;
1723 struct l2cap_cmd_hdr
*cmd
;
1724 struct l2cap_hdr
*lh
;
1727 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1728 conn
, code
, ident
, dlen
);
1730 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
1731 count
= min_t(unsigned int, conn
->mtu
, len
);
1733 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
1737 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1738 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
1740 if (conn
->hcon
->type
== LE_LINK
)
1741 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
1743 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
1745 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
1748 cmd
->len
= cpu_to_le16(dlen
);
1751 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
1752 memcpy(skb_put(skb
, count
), data
, count
);
1758 /* Continuation fragments (no L2CAP header) */
1759 frag
= &skb_shinfo(skb
)->frag_list
;
1761 count
= min_t(unsigned int, conn
->mtu
, len
);
1763 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
1767 memcpy(skb_put(*frag
, count
), data
, count
);
1772 frag
= &(*frag
)->next
;
1782 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
1784 struct l2cap_conf_opt
*opt
= *ptr
;
1787 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
1795 *val
= *((u8
*) opt
->val
);
1799 *val
= get_unaligned_le16(opt
->val
);
1803 *val
= get_unaligned_le32(opt
->val
);
1807 *val
= (unsigned long) opt
->val
;
1811 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
1815 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
1817 struct l2cap_conf_opt
*opt
= *ptr
;
1819 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
1826 *((u8
*) opt
->val
) = val
;
1830 put_unaligned_le16(val
, opt
->val
);
1834 put_unaligned_le32(val
, opt
->val
);
1838 memcpy(opt
->val
, (void *) val
, len
);
1842 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
1845 static void l2cap_ack_timeout(unsigned long arg
)
1847 struct l2cap_chan
*chan
= (void *) arg
;
1849 bh_lock_sock(chan
->sk
);
1850 l2cap_send_ack(chan
);
1851 bh_unlock_sock(chan
->sk
);
1854 static inline void l2cap_ertm_init(struct l2cap_chan
*chan
)
1856 struct sock
*sk
= chan
->sk
;
1858 chan
->expected_ack_seq
= 0;
1859 chan
->unacked_frames
= 0;
1860 chan
->buffer_seq
= 0;
1861 chan
->num_acked
= 0;
1862 chan
->frames_sent
= 0;
1864 setup_timer(&chan
->retrans_timer
, l2cap_retrans_timeout
,
1865 (unsigned long) chan
);
1866 setup_timer(&chan
->monitor_timer
, l2cap_monitor_timeout
,
1867 (unsigned long) chan
);
1868 setup_timer(&chan
->ack_timer
, l2cap_ack_timeout
, (unsigned long) chan
);
1870 skb_queue_head_init(&chan
->srej_q
);
1871 skb_queue_head_init(&chan
->busy_q
);
1873 INIT_LIST_HEAD(&chan
->srej_l
);
1875 INIT_WORK(&chan
->busy_work
, l2cap_busy_work
);
1877 sk
->sk_backlog_rcv
= l2cap_ertm_data_rcv
;
1880 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
1883 case L2CAP_MODE_STREAMING
:
1884 case L2CAP_MODE_ERTM
:
1885 if (l2cap_mode_supported(mode
, remote_feat_mask
))
1889 return L2CAP_MODE_BASIC
;
1893 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
1895 struct l2cap_conf_req
*req
= data
;
1896 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
1897 void *ptr
= req
->data
;
1899 BT_DBG("chan %p", chan
);
1901 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
1904 switch (chan
->mode
) {
1905 case L2CAP_MODE_STREAMING
:
1906 case L2CAP_MODE_ERTM
:
1907 if (chan
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)
1912 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
1917 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
1918 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
1920 switch (chan
->mode
) {
1921 case L2CAP_MODE_BASIC
:
1922 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
1923 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
1926 rfc
.mode
= L2CAP_MODE_BASIC
;
1928 rfc
.max_transmit
= 0;
1929 rfc
.retrans_timeout
= 0;
1930 rfc
.monitor_timeout
= 0;
1931 rfc
.max_pdu_size
= 0;
1933 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1934 (unsigned long) &rfc
);
1937 case L2CAP_MODE_ERTM
:
1938 rfc
.mode
= L2CAP_MODE_ERTM
;
1939 rfc
.txwin_size
= chan
->tx_win
;
1940 rfc
.max_transmit
= chan
->max_tx
;
1941 rfc
.retrans_timeout
= 0;
1942 rfc
.monitor_timeout
= 0;
1943 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
1944 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> chan
->conn
->mtu
- 10)
1945 rfc
.max_pdu_size
= cpu_to_le16(chan
->conn
->mtu
- 10);
1947 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1948 (unsigned long) &rfc
);
1950 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
1953 if (chan
->fcs
== L2CAP_FCS_NONE
||
1954 chan
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
1955 chan
->fcs
= L2CAP_FCS_NONE
;
1956 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
1960 case L2CAP_MODE_STREAMING
:
1961 rfc
.mode
= L2CAP_MODE_STREAMING
;
1963 rfc
.max_transmit
= 0;
1964 rfc
.retrans_timeout
= 0;
1965 rfc
.monitor_timeout
= 0;
1966 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
1967 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> chan
->conn
->mtu
- 10)
1968 rfc
.max_pdu_size
= cpu_to_le16(chan
->conn
->mtu
- 10);
1970 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
1971 (unsigned long) &rfc
);
1973 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
1976 if (chan
->fcs
== L2CAP_FCS_NONE
||
1977 chan
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
1978 chan
->fcs
= L2CAP_FCS_NONE
;
1979 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
1984 req
->dcid
= cpu_to_le16(chan
->dcid
);
1985 req
->flags
= cpu_to_le16(0);
1990 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
1992 struct l2cap_conf_rsp
*rsp
= data
;
1993 void *ptr
= rsp
->data
;
1994 void *req
= chan
->conf_req
;
1995 int len
= chan
->conf_len
;
1996 int type
, hint
, olen
;
1998 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
1999 u16 mtu
= L2CAP_DEFAULT_MTU
;
2000 u16 result
= L2CAP_CONF_SUCCESS
;
2002 BT_DBG("chan %p", chan
);
2004 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2005 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2007 hint
= type
& L2CAP_CONF_HINT
;
2008 type
&= L2CAP_CONF_MASK
;
2011 case L2CAP_CONF_MTU
:
2015 case L2CAP_CONF_FLUSH_TO
:
2016 chan
->flush_to
= val
;
2019 case L2CAP_CONF_QOS
:
2022 case L2CAP_CONF_RFC
:
2023 if (olen
== sizeof(rfc
))
2024 memcpy(&rfc
, (void *) val
, olen
);
2027 case L2CAP_CONF_FCS
:
2028 if (val
== L2CAP_FCS_NONE
)
2029 chan
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2037 result
= L2CAP_CONF_UNKNOWN
;
2038 *((u8
*) ptr
++) = type
;
2043 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
2046 switch (chan
->mode
) {
2047 case L2CAP_MODE_STREAMING
:
2048 case L2CAP_MODE_ERTM
:
2049 if (!(chan
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)) {
2050 chan
->mode
= l2cap_select_mode(rfc
.mode
,
2051 chan
->conn
->feat_mask
);
2055 if (chan
->mode
!= rfc
.mode
)
2056 return -ECONNREFUSED
;
2062 if (chan
->mode
!= rfc
.mode
) {
2063 result
= L2CAP_CONF_UNACCEPT
;
2064 rfc
.mode
= chan
->mode
;
2066 if (chan
->num_conf_rsp
== 1)
2067 return -ECONNREFUSED
;
2069 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2070 sizeof(rfc
), (unsigned long) &rfc
);
2074 if (result
== L2CAP_CONF_SUCCESS
) {
2075 /* Configure output options and let the other side know
2076 * which ones we don't like. */
2078 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2079 result
= L2CAP_CONF_UNACCEPT
;
2082 chan
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2084 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
2087 case L2CAP_MODE_BASIC
:
2088 chan
->fcs
= L2CAP_FCS_NONE
;
2089 chan
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2092 case L2CAP_MODE_ERTM
:
2093 chan
->remote_tx_win
= rfc
.txwin_size
;
2094 chan
->remote_max_tx
= rfc
.max_transmit
;
2096 if (le16_to_cpu(rfc
.max_pdu_size
) > chan
->conn
->mtu
- 10)
2097 rfc
.max_pdu_size
= cpu_to_le16(chan
->conn
->mtu
- 10);
2099 chan
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2101 rfc
.retrans_timeout
=
2102 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
2103 rfc
.monitor_timeout
=
2104 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
2106 chan
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2108 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2109 sizeof(rfc
), (unsigned long) &rfc
);
2113 case L2CAP_MODE_STREAMING
:
2114 if (le16_to_cpu(rfc
.max_pdu_size
) > chan
->conn
->mtu
- 10)
2115 rfc
.max_pdu_size
= cpu_to_le16(chan
->conn
->mtu
- 10);
2117 chan
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2119 chan
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2121 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2122 sizeof(rfc
), (unsigned long) &rfc
);
2127 result
= L2CAP_CONF_UNACCEPT
;
2129 memset(&rfc
, 0, sizeof(rfc
));
2130 rfc
.mode
= chan
->mode
;
2133 if (result
== L2CAP_CONF_SUCCESS
)
2134 chan
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2136 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2137 rsp
->result
= cpu_to_le16(result
);
2138 rsp
->flags
= cpu_to_le16(0x0000);
2143 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
2145 struct l2cap_conf_req
*req
= data
;
2146 void *ptr
= req
->data
;
2149 struct l2cap_conf_rfc rfc
;
2151 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
2153 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2154 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2157 case L2CAP_CONF_MTU
:
2158 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2159 *result
= L2CAP_CONF_UNACCEPT
;
2160 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
2163 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2166 case L2CAP_CONF_FLUSH_TO
:
2167 chan
->flush_to
= val
;
2168 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2172 case L2CAP_CONF_RFC
:
2173 if (olen
== sizeof(rfc
))
2174 memcpy(&rfc
, (void *)val
, olen
);
2176 if ((chan
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2177 rfc
.mode
!= chan
->mode
)
2178 return -ECONNREFUSED
;
2182 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2183 sizeof(rfc
), (unsigned long) &rfc
);
2188 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
2189 return -ECONNREFUSED
;
2191 chan
->mode
= rfc
.mode
;
2193 if (*result
== L2CAP_CONF_SUCCESS
) {
2195 case L2CAP_MODE_ERTM
:
2196 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2197 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2198 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2200 case L2CAP_MODE_STREAMING
:
2201 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2205 req
->dcid
= cpu_to_le16(chan
->dcid
);
2206 req
->flags
= cpu_to_le16(0x0000);
2211 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
2213 struct l2cap_conf_rsp
*rsp
= data
;
2214 void *ptr
= rsp
->data
;
2216 BT_DBG("chan %p", chan
);
2218 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2219 rsp
->result
= cpu_to_le16(result
);
2220 rsp
->flags
= cpu_to_le16(flags
);
2225 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
2227 struct l2cap_conn_rsp rsp
;
2228 struct l2cap_conn
*conn
= chan
->conn
;
2231 rsp
.scid
= cpu_to_le16(chan
->dcid
);
2232 rsp
.dcid
= cpu_to_le16(chan
->scid
);
2233 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
2234 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
2235 l2cap_send_cmd(conn
, chan
->ident
,
2236 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2238 if (chan
->conf_state
& L2CAP_CONF_REQ_SENT
)
2241 chan
->conf_state
|= L2CAP_CONF_REQ_SENT
;
2242 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2243 l2cap_build_conf_req(chan
, buf
), buf
);
2244 chan
->num_conf_req
++;
2247 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
2251 struct l2cap_conf_rfc rfc
;
2253 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
2255 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
2258 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2259 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2262 case L2CAP_CONF_RFC
:
2263 if (olen
== sizeof(rfc
))
2264 memcpy(&rfc
, (void *)val
, olen
);
2271 case L2CAP_MODE_ERTM
:
2272 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2273 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2274 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2276 case L2CAP_MODE_STREAMING
:
2277 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2281 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2283 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2285 if (rej
->reason
!= 0x0000)
2288 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2289 cmd
->ident
== conn
->info_ident
) {
2290 del_timer(&conn
->info_timer
);
2292 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2293 conn
->info_ident
= 0;
2295 l2cap_conn_start(conn
);
2301 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2303 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2304 struct l2cap_conn_rsp rsp
;
2305 struct l2cap_chan
*chan
= NULL
, *pchan
;
2306 struct sock
*parent
, *sk
= NULL
;
2307 int result
, status
= L2CAP_CS_NO_INFO
;
2309 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2310 __le16 psm
= req
->psm
;
2312 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2314 /* Check if we have socket listening on psm */
2315 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
);
2317 result
= L2CAP_CR_BAD_PSM
;
2323 bh_lock_sock(parent
);
2325 /* Check if the ACL is secure enough (if not SDP) */
2326 if (psm
!= cpu_to_le16(0x0001) &&
2327 !hci_conn_check_link_mode(conn
->hcon
)) {
2328 conn
->disc_reason
= 0x05;
2329 result
= L2CAP_CR_SEC_BLOCK
;
2333 result
= L2CAP_CR_NO_MEM
;
2335 /* Check for backlog size */
2336 if (sk_acceptq_is_full(parent
)) {
2337 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2341 chan
= pchan
->ops
->new_connection(pchan
->data
);
2347 write_lock_bh(&conn
->chan_lock
);
2349 /* Check if we already have channel with that dcid */
2350 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
2351 write_unlock_bh(&conn
->chan_lock
);
2352 sock_set_flag(sk
, SOCK_ZAPPED
);
2353 chan
->ops
->close(chan
->data
);
2357 hci_conn_hold(conn
->hcon
);
2359 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2360 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2364 bt_accept_enqueue(parent
, sk
);
2366 __l2cap_chan_add(conn
, chan
);
2370 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
2372 chan
->ident
= cmd
->ident
;
2374 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2375 if (l2cap_check_security(chan
)) {
2376 if (bt_sk(sk
)->defer_setup
) {
2377 l2cap_state_change(chan
, BT_CONNECT2
);
2378 result
= L2CAP_CR_PEND
;
2379 status
= L2CAP_CS_AUTHOR_PEND
;
2380 parent
->sk_data_ready(parent
, 0);
2382 l2cap_state_change(chan
, BT_CONFIG
);
2383 result
= L2CAP_CR_SUCCESS
;
2384 status
= L2CAP_CS_NO_INFO
;
2387 l2cap_state_change(chan
, BT_CONNECT2
);
2388 result
= L2CAP_CR_PEND
;
2389 status
= L2CAP_CS_AUTHEN_PEND
;
2392 l2cap_state_change(chan
, BT_CONNECT2
);
2393 result
= L2CAP_CR_PEND
;
2394 status
= L2CAP_CS_NO_INFO
;
2397 write_unlock_bh(&conn
->chan_lock
);
2400 bh_unlock_sock(parent
);
2403 rsp
.scid
= cpu_to_le16(scid
);
2404 rsp
.dcid
= cpu_to_le16(dcid
);
2405 rsp
.result
= cpu_to_le16(result
);
2406 rsp
.status
= cpu_to_le16(status
);
2407 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2409 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2410 struct l2cap_info_req info
;
2411 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2413 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2414 conn
->info_ident
= l2cap_get_ident(conn
);
2416 mod_timer(&conn
->info_timer
, jiffies
+
2417 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2419 l2cap_send_cmd(conn
, conn
->info_ident
,
2420 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2423 if (chan
&& !(chan
->conf_state
& L2CAP_CONF_REQ_SENT
) &&
2424 result
== L2CAP_CR_SUCCESS
) {
2426 chan
->conf_state
|= L2CAP_CONF_REQ_SENT
;
2427 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2428 l2cap_build_conf_req(chan
, buf
), buf
);
2429 chan
->num_conf_req
++;
2435 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2437 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2438 u16 scid
, dcid
, result
, status
;
2439 struct l2cap_chan
*chan
;
2443 scid
= __le16_to_cpu(rsp
->scid
);
2444 dcid
= __le16_to_cpu(rsp
->dcid
);
2445 result
= __le16_to_cpu(rsp
->result
);
2446 status
= __le16_to_cpu(rsp
->status
);
2448 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2451 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2455 chan
= l2cap_get_chan_by_ident(conn
, cmd
->ident
);
2463 case L2CAP_CR_SUCCESS
:
2464 l2cap_state_change(chan
, BT_CONFIG
);
2467 chan
->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
2469 if (chan
->conf_state
& L2CAP_CONF_REQ_SENT
)
2472 chan
->conf_state
|= L2CAP_CONF_REQ_SENT
;
2474 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2475 l2cap_build_conf_req(chan
, req
), req
);
2476 chan
->num_conf_req
++;
2480 chan
->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
2484 /* don't delete l2cap channel if sk is owned by user */
2485 if (sock_owned_by_user(sk
)) {
2486 l2cap_state_change(chan
, BT_DISCONN
);
2487 __clear_chan_timer(chan
);
2488 __set_chan_timer(chan
, HZ
/ 5);
2492 l2cap_chan_del(chan
, ECONNREFUSED
);
2500 static inline void set_default_fcs(struct l2cap_chan
*chan
)
2502 struct l2cap_pinfo
*pi
= l2cap_pi(chan
->sk
);
2504 /* FCS is enabled only in ERTM or streaming mode, if one or both
2507 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
2508 chan
->fcs
= L2CAP_FCS_NONE
;
2509 else if (!(pi
->chan
->conf_state
& L2CAP_CONF_NO_FCS_RECV
))
2510 chan
->fcs
= L2CAP_FCS_CRC16
;
2513 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2515 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
2518 struct l2cap_chan
*chan
;
2522 dcid
= __le16_to_cpu(req
->dcid
);
2523 flags
= __le16_to_cpu(req
->flags
);
2525 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
2527 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2533 if (chan
->state
!= BT_CONFIG
) {
2534 struct l2cap_cmd_rej rej
;
2536 rej
.reason
= cpu_to_le16(0x0002);
2537 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
2542 /* Reject if config buffer is too small. */
2543 len
= cmd_len
- sizeof(*req
);
2544 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
2545 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2546 l2cap_build_conf_rsp(chan
, rsp
,
2547 L2CAP_CONF_REJECT
, flags
), rsp
);
2552 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
2553 chan
->conf_len
+= len
;
2555 if (flags
& 0x0001) {
2556 /* Incomplete config. Send empty response. */
2557 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
2558 l2cap_build_conf_rsp(chan
, rsp
,
2559 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
2563 /* Complete config. */
2564 len
= l2cap_parse_conf_req(chan
, rsp
);
2566 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2570 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
2571 chan
->num_conf_rsp
++;
2573 /* Reset config buffer. */
2576 if (!(chan
->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
2579 if (chan
->conf_state
& L2CAP_CONF_INPUT_DONE
) {
2580 set_default_fcs(chan
);
2582 l2cap_state_change(chan
, BT_CONNECTED
);
2584 chan
->next_tx_seq
= 0;
2585 chan
->expected_tx_seq
= 0;
2586 skb_queue_head_init(&chan
->tx_q
);
2587 if (chan
->mode
== L2CAP_MODE_ERTM
)
2588 l2cap_ertm_init(chan
);
2590 l2cap_chan_ready(sk
);
2594 if (!(chan
->conf_state
& L2CAP_CONF_REQ_SENT
)) {
2596 chan
->conf_state
|= L2CAP_CONF_REQ_SENT
;
2597 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2598 l2cap_build_conf_req(chan
, buf
), buf
);
2599 chan
->num_conf_req
++;
2607 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2609 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
2610 u16 scid
, flags
, result
;
2611 struct l2cap_chan
*chan
;
2613 int len
= cmd
->len
- sizeof(*rsp
);
2615 scid
= __le16_to_cpu(rsp
->scid
);
2616 flags
= __le16_to_cpu(rsp
->flags
);
2617 result
= __le16_to_cpu(rsp
->result
);
2619 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2620 scid
, flags
, result
);
2622 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2629 case L2CAP_CONF_SUCCESS
:
2630 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
2633 case L2CAP_CONF_UNACCEPT
:
2634 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
2637 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
2638 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2642 /* throw out any old stored conf requests */
2643 result
= L2CAP_CONF_SUCCESS
;
2644 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
2647 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2651 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
2652 L2CAP_CONF_REQ
, len
, req
);
2653 chan
->num_conf_req
++;
2654 if (result
!= L2CAP_CONF_SUCCESS
)
2660 sk
->sk_err
= ECONNRESET
;
2661 __set_chan_timer(chan
, HZ
* 5);
2662 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
2669 chan
->conf_state
|= L2CAP_CONF_INPUT_DONE
;
2671 if (chan
->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
2672 set_default_fcs(chan
);
2674 l2cap_state_change(chan
, BT_CONNECTED
);
2675 chan
->next_tx_seq
= 0;
2676 chan
->expected_tx_seq
= 0;
2677 skb_queue_head_init(&chan
->tx_q
);
2678 if (chan
->mode
== L2CAP_MODE_ERTM
)
2679 l2cap_ertm_init(chan
);
2681 l2cap_chan_ready(sk
);
2689 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2691 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
2692 struct l2cap_disconn_rsp rsp
;
2694 struct l2cap_chan
*chan
;
2697 scid
= __le16_to_cpu(req
->scid
);
2698 dcid
= __le16_to_cpu(req
->dcid
);
2700 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
2702 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
2708 rsp
.dcid
= cpu_to_le16(chan
->scid
);
2709 rsp
.scid
= cpu_to_le16(chan
->dcid
);
2710 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
2712 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2714 /* don't delete l2cap channel if sk is owned by user */
2715 if (sock_owned_by_user(sk
)) {
2716 l2cap_state_change(chan
, BT_DISCONN
);
2717 __clear_chan_timer(chan
);
2718 __set_chan_timer(chan
, HZ
/ 5);
2723 l2cap_chan_del(chan
, ECONNRESET
);
2726 chan
->ops
->close(chan
->data
);
2730 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2732 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
2734 struct l2cap_chan
*chan
;
2737 scid
= __le16_to_cpu(rsp
->scid
);
2738 dcid
= __le16_to_cpu(rsp
->dcid
);
2740 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
2742 chan
= l2cap_get_chan_by_scid(conn
, scid
);
2748 /* don't delete l2cap channel if sk is owned by user */
2749 if (sock_owned_by_user(sk
)) {
2750 l2cap_state_change(chan
,BT_DISCONN
);
2751 __clear_chan_timer(chan
);
2752 __set_chan_timer(chan
, HZ
/ 5);
2757 l2cap_chan_del(chan
, 0);
2760 chan
->ops
->close(chan
->data
);
2764 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2766 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
2769 type
= __le16_to_cpu(req
->type
);
2771 BT_DBG("type 0x%4.4x", type
);
2773 if (type
== L2CAP_IT_FEAT_MASK
) {
2775 u32 feat_mask
= l2cap_feat_mask
;
2776 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2777 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2778 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2780 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
2782 put_unaligned_le32(feat_mask
, rsp
->data
);
2783 l2cap_send_cmd(conn
, cmd
->ident
,
2784 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2785 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2787 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
2788 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2789 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
2790 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
2791 l2cap_send_cmd(conn
, cmd
->ident
,
2792 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
2794 struct l2cap_info_rsp rsp
;
2795 rsp
.type
= cpu_to_le16(type
);
2796 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
2797 l2cap_send_cmd(conn
, cmd
->ident
,
2798 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
2804 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2806 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
2809 type
= __le16_to_cpu(rsp
->type
);
2810 result
= __le16_to_cpu(rsp
->result
);
2812 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
2814 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2815 if (cmd
->ident
!= conn
->info_ident
||
2816 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
2819 del_timer(&conn
->info_timer
);
2821 if (result
!= L2CAP_IR_SUCCESS
) {
2822 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2823 conn
->info_ident
= 0;
2825 l2cap_conn_start(conn
);
2830 if (type
== L2CAP_IT_FEAT_MASK
) {
2831 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
2833 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
2834 struct l2cap_info_req req
;
2835 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
2837 conn
->info_ident
= l2cap_get_ident(conn
);
2839 l2cap_send_cmd(conn
, conn
->info_ident
,
2840 L2CAP_INFO_REQ
, sizeof(req
), &req
);
2842 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2843 conn
->info_ident
= 0;
2845 l2cap_conn_start(conn
);
2847 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
2848 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2849 conn
->info_ident
= 0;
2851 l2cap_conn_start(conn
);
2857 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
2862 if (min
> max
|| min
< 6 || max
> 3200)
2865 if (to_multiplier
< 10 || to_multiplier
> 3200)
2868 if (max
>= to_multiplier
* 8)
2871 max_latency
= (to_multiplier
* 8 / max
) - 1;
2872 if (latency
> 499 || latency
> max_latency
)
2878 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
2879 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2881 struct hci_conn
*hcon
= conn
->hcon
;
2882 struct l2cap_conn_param_update_req
*req
;
2883 struct l2cap_conn_param_update_rsp rsp
;
2884 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
2887 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
2890 cmd_len
= __le16_to_cpu(cmd
->len
);
2891 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
2894 req
= (struct l2cap_conn_param_update_req
*) data
;
2895 min
= __le16_to_cpu(req
->min
);
2896 max
= __le16_to_cpu(req
->max
);
2897 latency
= __le16_to_cpu(req
->latency
);
2898 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
2900 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2901 min
, max
, latency
, to_multiplier
);
2903 memset(&rsp
, 0, sizeof(rsp
));
2905 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
2907 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
2909 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
2911 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
2915 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
2920 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
2921 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
2925 switch (cmd
->code
) {
2926 case L2CAP_COMMAND_REJ
:
2927 l2cap_command_rej(conn
, cmd
, data
);
2930 case L2CAP_CONN_REQ
:
2931 err
= l2cap_connect_req(conn
, cmd
, data
);
2934 case L2CAP_CONN_RSP
:
2935 err
= l2cap_connect_rsp(conn
, cmd
, data
);
2938 case L2CAP_CONF_REQ
:
2939 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
2942 case L2CAP_CONF_RSP
:
2943 err
= l2cap_config_rsp(conn
, cmd
, data
);
2946 case L2CAP_DISCONN_REQ
:
2947 err
= l2cap_disconnect_req(conn
, cmd
, data
);
2950 case L2CAP_DISCONN_RSP
:
2951 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
2954 case L2CAP_ECHO_REQ
:
2955 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
2958 case L2CAP_ECHO_RSP
:
2961 case L2CAP_INFO_REQ
:
2962 err
= l2cap_information_req(conn
, cmd
, data
);
2965 case L2CAP_INFO_RSP
:
2966 err
= l2cap_information_rsp(conn
, cmd
, data
);
2970 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
2978 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
2979 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2981 switch (cmd
->code
) {
2982 case L2CAP_COMMAND_REJ
:
2985 case L2CAP_CONN_PARAM_UPDATE_REQ
:
2986 return l2cap_conn_param_update_req(conn
, cmd
, data
);
2988 case L2CAP_CONN_PARAM_UPDATE_RSP
:
2992 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
2997 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
2998 struct sk_buff
*skb
)
3000 u8
*data
= skb
->data
;
3002 struct l2cap_cmd_hdr cmd
;
3005 l2cap_raw_recv(conn
, skb
);
3007 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3009 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3010 data
+= L2CAP_CMD_HDR_SIZE
;
3011 len
-= L2CAP_CMD_HDR_SIZE
;
3013 cmd_len
= le16_to_cpu(cmd
.len
);
3015 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3017 if (cmd_len
> len
|| !cmd
.ident
) {
3018 BT_DBG("corrupted command");
3022 if (conn
->hcon
->type
== LE_LINK
)
3023 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
3025 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
3028 struct l2cap_cmd_rej rej
;
3030 BT_ERR("Wrong link type (%d)", err
);
3032 /* FIXME: Map err to a valid reason */
3033 rej
.reason
= cpu_to_le16(0);
3034 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3044 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
3046 u16 our_fcs
, rcv_fcs
;
3047 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
3049 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
3050 skb_trim(skb
, skb
->len
- 2);
3051 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3052 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3054 if (our_fcs
!= rcv_fcs
)
3060 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
3064 chan
->frames_sent
= 0;
3066 control
|= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3068 if (chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3069 control
|= L2CAP_SUPER_RCV_NOT_READY
;
3070 l2cap_send_sframe(chan
, control
);
3071 chan
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3074 if (chan
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)
3075 l2cap_retransmit_frames(chan
);
3077 l2cap_ertm_send(chan
);
3079 if (!(chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) &&
3080 chan
->frames_sent
== 0) {
3081 control
|= L2CAP_SUPER_RCV_READY
;
3082 l2cap_send_sframe(chan
, control
);
3086 static int l2cap_add_to_srej_queue(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
3088 struct sk_buff
*next_skb
;
3089 int tx_seq_offset
, next_tx_seq_offset
;
3091 bt_cb(skb
)->tx_seq
= tx_seq
;
3092 bt_cb(skb
)->sar
= sar
;
3094 next_skb
= skb_peek(&chan
->srej_q
);
3096 __skb_queue_tail(&chan
->srej_q
, skb
);
3100 tx_seq_offset
= (tx_seq
- chan
->buffer_seq
) % 64;
3101 if (tx_seq_offset
< 0)
3102 tx_seq_offset
+= 64;
3105 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3108 next_tx_seq_offset
= (bt_cb(next_skb
)->tx_seq
-
3109 chan
->buffer_seq
) % 64;
3110 if (next_tx_seq_offset
< 0)
3111 next_tx_seq_offset
+= 64;
3113 if (next_tx_seq_offset
> tx_seq_offset
) {
3114 __skb_queue_before(&chan
->srej_q
, next_skb
, skb
);
3118 if (skb_queue_is_last(&chan
->srej_q
, next_skb
))
3121 } while ((next_skb
= skb_queue_next(&chan
->srej_q
, next_skb
)));
3123 __skb_queue_tail(&chan
->srej_q
, skb
);
3128 static int l2cap_ertm_reassembly_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 control
)
3130 struct sk_buff
*_skb
;
3133 switch (control
& L2CAP_CTRL_SAR
) {
3134 case L2CAP_SDU_UNSEGMENTED
:
3135 if (chan
->conn_state
& L2CAP_CONN_SAR_SDU
)
3138 return chan
->ops
->recv(chan
->data
, skb
);
3140 case L2CAP_SDU_START
:
3141 if (chan
->conn_state
& L2CAP_CONN_SAR_SDU
)
3144 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
3146 if (chan
->sdu_len
> chan
->imtu
)
3149 chan
->sdu
= bt_skb_alloc(chan
->sdu_len
, GFP_ATOMIC
);
3153 /* pull sdu_len bytes only after alloc, because of Local Busy
3154 * condition we have to be sure that this will be executed
3155 * only once, i.e., when alloc does not fail */
3158 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3160 chan
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3161 chan
->partial_sdu_len
= skb
->len
;
3164 case L2CAP_SDU_CONTINUE
:
3165 if (!(chan
->conn_state
& L2CAP_CONN_SAR_SDU
))
3171 chan
->partial_sdu_len
+= skb
->len
;
3172 if (chan
->partial_sdu_len
> chan
->sdu_len
)
3175 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3180 if (!(chan
->conn_state
& L2CAP_CONN_SAR_SDU
))
3186 if (!(chan
->conn_state
& L2CAP_CONN_SAR_RETRY
)) {
3187 chan
->partial_sdu_len
+= skb
->len
;
3189 if (chan
->partial_sdu_len
> chan
->imtu
)
3192 if (chan
->partial_sdu_len
!= chan
->sdu_len
)
3195 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3198 _skb
= skb_clone(chan
->sdu
, GFP_ATOMIC
);
3200 chan
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3204 err
= chan
->ops
->recv(chan
->data
, _skb
);
3207 chan
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3211 chan
->conn_state
&= ~L2CAP_CONN_SAR_RETRY
;
3212 chan
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3214 kfree_skb(chan
->sdu
);
3222 kfree_skb(chan
->sdu
);
3226 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3231 static int l2cap_try_push_rx_skb(struct l2cap_chan
*chan
)
3233 struct sk_buff
*skb
;
3237 while ((skb
= skb_dequeue(&chan
->busy_q
))) {
3238 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3239 err
= l2cap_ertm_reassembly_sdu(chan
, skb
, control
);
3241 skb_queue_head(&chan
->busy_q
, skb
);
3245 chan
->buffer_seq
= (chan
->buffer_seq
+ 1) % 64;
3248 if (!(chan
->conn_state
& L2CAP_CONN_RNR_SENT
))
3251 control
= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3252 control
|= L2CAP_SUPER_RCV_READY
| L2CAP_CTRL_POLL
;
3253 l2cap_send_sframe(chan
, control
);
3254 chan
->retry_count
= 1;
3256 __clear_retrans_timer(chan
);
3257 __set_monitor_timer(chan
);
3259 chan
->conn_state
|= L2CAP_CONN_WAIT_F
;
3262 chan
->conn_state
&= ~L2CAP_CONN_LOCAL_BUSY
;
3263 chan
->conn_state
&= ~L2CAP_CONN_RNR_SENT
;
3265 BT_DBG("chan %p, Exit local busy", chan
);
3270 static void l2cap_busy_work(struct work_struct
*work
)
3272 DECLARE_WAITQUEUE(wait
, current
);
3273 struct l2cap_chan
*chan
=
3274 container_of(work
, struct l2cap_chan
, busy_work
);
3275 struct sock
*sk
= chan
->sk
;
3276 int n_tries
= 0, timeo
= HZ
/5, err
;
3277 struct sk_buff
*skb
;
3281 add_wait_queue(sk_sleep(sk
), &wait
);
3282 while ((skb
= skb_peek(&chan
->busy_q
))) {
3283 set_current_state(TASK_INTERRUPTIBLE
);
3285 if (n_tries
++ > L2CAP_LOCAL_BUSY_TRIES
) {
3287 l2cap_send_disconn_req(chan
->conn
, chan
, EBUSY
);
3294 if (signal_pending(current
)) {
3295 err
= sock_intr_errno(timeo
);
3300 timeo
= schedule_timeout(timeo
);
3303 err
= sock_error(sk
);
3307 if (l2cap_try_push_rx_skb(chan
) == 0)
3311 set_current_state(TASK_RUNNING
);
3312 remove_wait_queue(sk_sleep(sk
), &wait
);
3317 static int l2cap_push_rx_skb(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 control
)
3321 if (chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3322 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3323 __skb_queue_tail(&chan
->busy_q
, skb
);
3324 return l2cap_try_push_rx_skb(chan
);
3329 err
= l2cap_ertm_reassembly_sdu(chan
, skb
, control
);
3331 chan
->buffer_seq
= (chan
->buffer_seq
+ 1) % 64;
3335 /* Busy Condition */
3336 BT_DBG("chan %p, Enter local busy", chan
);
3338 chan
->conn_state
|= L2CAP_CONN_LOCAL_BUSY
;
3339 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3340 __skb_queue_tail(&chan
->busy_q
, skb
);
3342 sctrl
= chan
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3343 sctrl
|= L2CAP_SUPER_RCV_NOT_READY
;
3344 l2cap_send_sframe(chan
, sctrl
);
3346 chan
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3348 __clear_ack_timer(chan
);
3350 queue_work(_busy_wq
, &chan
->busy_work
);
3355 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 control
)
3357 struct sk_buff
*_skb
;
3361 * TODO: We have to notify the userland if some data is lost with the
3365 switch (control
& L2CAP_CTRL_SAR
) {
3366 case L2CAP_SDU_UNSEGMENTED
:
3367 if (chan
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3368 kfree_skb(chan
->sdu
);
3372 err
= chan
->ops
->recv(chan
->data
, skb
);
3378 case L2CAP_SDU_START
:
3379 if (chan
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3380 kfree_skb(chan
->sdu
);
3384 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
3387 if (chan
->sdu_len
> chan
->imtu
) {
3392 chan
->sdu
= bt_skb_alloc(chan
->sdu_len
, GFP_ATOMIC
);
3398 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3400 chan
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3401 chan
->partial_sdu_len
= skb
->len
;
3405 case L2CAP_SDU_CONTINUE
:
3406 if (!(chan
->conn_state
& L2CAP_CONN_SAR_SDU
))
3409 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3411 chan
->partial_sdu_len
+= skb
->len
;
3412 if (chan
->partial_sdu_len
> chan
->sdu_len
)
3413 kfree_skb(chan
->sdu
);
3420 if (!(chan
->conn_state
& L2CAP_CONN_SAR_SDU
))
3423 memcpy(skb_put(chan
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3425 chan
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3426 chan
->partial_sdu_len
+= skb
->len
;
3428 if (chan
->partial_sdu_len
> chan
->imtu
)
3431 if (chan
->partial_sdu_len
== chan
->sdu_len
) {
3432 _skb
= skb_clone(chan
->sdu
, GFP_ATOMIC
);
3433 err
= chan
->ops
->recv(chan
->data
, _skb
);
3440 kfree_skb(chan
->sdu
);
3448 static void l2cap_check_srej_gap(struct l2cap_chan
*chan
, u8 tx_seq
)
3450 struct sk_buff
*skb
;
3453 while ((skb
= skb_peek(&chan
->srej_q
))) {
3454 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3457 skb
= skb_dequeue(&chan
->srej_q
);
3458 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3459 l2cap_ertm_reassembly_sdu(chan
, skb
, control
);
3460 chan
->buffer_seq_srej
=
3461 (chan
->buffer_seq_srej
+ 1) % 64;
3462 tx_seq
= (tx_seq
+ 1) % 64;
3466 static void l2cap_resend_srejframe(struct l2cap_chan
*chan
, u8 tx_seq
)
3468 struct srej_list
*l
, *tmp
;
3471 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
3472 if (l
->tx_seq
== tx_seq
) {
3477 control
= L2CAP_SUPER_SELECT_REJECT
;
3478 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3479 l2cap_send_sframe(chan
, control
);
3481 list_add_tail(&l
->list
, &chan
->srej_l
);
3485 static void l2cap_send_srejframe(struct l2cap_chan
*chan
, u8 tx_seq
)
3487 struct srej_list
*new;
3490 while (tx_seq
!= chan
->expected_tx_seq
) {
3491 control
= L2CAP_SUPER_SELECT_REJECT
;
3492 control
|= chan
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3493 l2cap_send_sframe(chan
, control
);
3495 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3496 new->tx_seq
= chan
->expected_tx_seq
;
3497 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3498 list_add_tail(&new->list
, &chan
->srej_l
);
3500 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3503 static inline int l2cap_data_channel_iframe(struct l2cap_chan
*chan
, u16 rx_control
, struct sk_buff
*skb
)
3505 u8 tx_seq
= __get_txseq(rx_control
);
3506 u8 req_seq
= __get_reqseq(rx_control
);
3507 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3508 int tx_seq_offset
, expected_tx_seq_offset
;
3509 int num_to_ack
= (chan
->tx_win
/6) + 1;
3512 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan
, skb
->len
,
3513 tx_seq
, rx_control
);
3515 if (L2CAP_CTRL_FINAL
& rx_control
&&
3516 chan
->conn_state
& L2CAP_CONN_WAIT_F
) {
3517 __clear_monitor_timer(chan
);
3518 if (chan
->unacked_frames
> 0)
3519 __set_retrans_timer(chan
);
3520 chan
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3523 chan
->expected_ack_seq
= req_seq
;
3524 l2cap_drop_acked_frames(chan
);
3526 if (tx_seq
== chan
->expected_tx_seq
)
3529 tx_seq_offset
= (tx_seq
- chan
->buffer_seq
) % 64;
3530 if (tx_seq_offset
< 0)
3531 tx_seq_offset
+= 64;
3533 /* invalid tx_seq */
3534 if (tx_seq_offset
>= chan
->tx_win
) {
3535 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3539 if (chan
->conn_state
& L2CAP_CONN_LOCAL_BUSY
)
3542 if (chan
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3543 struct srej_list
*first
;
3545 first
= list_first_entry(&chan
->srej_l
,
3546 struct srej_list
, list
);
3547 if (tx_seq
== first
->tx_seq
) {
3548 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3549 l2cap_check_srej_gap(chan
, tx_seq
);
3551 list_del(&first
->list
);
3554 if (list_empty(&chan
->srej_l
)) {
3555 chan
->buffer_seq
= chan
->buffer_seq_srej
;
3556 chan
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3557 l2cap_send_ack(chan
);
3558 BT_DBG("chan %p, Exit SREJ_SENT", chan
);
3561 struct srej_list
*l
;
3563 /* duplicated tx_seq */
3564 if (l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
) < 0)
3567 list_for_each_entry(l
, &chan
->srej_l
, list
) {
3568 if (l
->tx_seq
== tx_seq
) {
3569 l2cap_resend_srejframe(chan
, tx_seq
);
3573 l2cap_send_srejframe(chan
, tx_seq
);
3576 expected_tx_seq_offset
=
3577 (chan
->expected_tx_seq
- chan
->buffer_seq
) % 64;
3578 if (expected_tx_seq_offset
< 0)
3579 expected_tx_seq_offset
+= 64;
3581 /* duplicated tx_seq */
3582 if (tx_seq_offset
< expected_tx_seq_offset
)
3585 chan
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
3587 BT_DBG("chan %p, Enter SREJ", chan
);
3589 INIT_LIST_HEAD(&chan
->srej_l
);
3590 chan
->buffer_seq_srej
= chan
->buffer_seq
;
3592 __skb_queue_head_init(&chan
->srej_q
);
3593 __skb_queue_head_init(&chan
->busy_q
);
3594 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
3596 chan
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
3598 l2cap_send_srejframe(chan
, tx_seq
);
3600 __clear_ack_timer(chan
);
3605 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3607 if (chan
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3608 bt_cb(skb
)->tx_seq
= tx_seq
;
3609 bt_cb(skb
)->sar
= sar
;
3610 __skb_queue_tail(&chan
->srej_q
, skb
);
3614 err
= l2cap_push_rx_skb(chan
, skb
, rx_control
);
3618 if (rx_control
& L2CAP_CTRL_FINAL
) {
3619 if (chan
->conn_state
& L2CAP_CONN_REJ_ACT
)
3620 chan
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3622 l2cap_retransmit_frames(chan
);
3625 __set_ack_timer(chan
);
3627 chan
->num_acked
= (chan
->num_acked
+ 1) % num_to_ack
;
3628 if (chan
->num_acked
== num_to_ack
- 1)
3629 l2cap_send_ack(chan
);
3638 static inline void l2cap_data_channel_rrframe(struct l2cap_chan
*chan
, u16 rx_control
)
3640 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, __get_reqseq(rx_control
),
3643 chan
->expected_ack_seq
= __get_reqseq(rx_control
);
3644 l2cap_drop_acked_frames(chan
);
3646 if (rx_control
& L2CAP_CTRL_POLL
) {
3647 chan
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3648 if (chan
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3649 if ((chan
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3650 (chan
->unacked_frames
> 0))
3651 __set_retrans_timer(chan
);
3653 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3654 l2cap_send_srejtail(chan
);
3656 l2cap_send_i_or_rr_or_rnr(chan
);
3659 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3660 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3662 if (chan
->conn_state
& L2CAP_CONN_REJ_ACT
)
3663 chan
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3665 l2cap_retransmit_frames(chan
);
3668 if ((chan
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
3669 (chan
->unacked_frames
> 0))
3670 __set_retrans_timer(chan
);
3672 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3673 if (chan
->conn_state
& L2CAP_CONN_SREJ_SENT
)
3674 l2cap_send_ack(chan
);
3676 l2cap_ertm_send(chan
);
3680 static inline void l2cap_data_channel_rejframe(struct l2cap_chan
*chan
, u16 rx_control
)
3682 u8 tx_seq
= __get_reqseq(rx_control
);
3684 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, tx_seq
, rx_control
);
3686 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3688 chan
->expected_ack_seq
= tx_seq
;
3689 l2cap_drop_acked_frames(chan
);
3691 if (rx_control
& L2CAP_CTRL_FINAL
) {
3692 if (chan
->conn_state
& L2CAP_CONN_REJ_ACT
)
3693 chan
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3695 l2cap_retransmit_frames(chan
);
3697 l2cap_retransmit_frames(chan
);
3699 if (chan
->conn_state
& L2CAP_CONN_WAIT_F
)
3700 chan
->conn_state
|= L2CAP_CONN_REJ_ACT
;
3703 static inline void l2cap_data_channel_srejframe(struct l2cap_chan
*chan
, u16 rx_control
)
3705 u8 tx_seq
= __get_reqseq(rx_control
);
3707 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, tx_seq
, rx_control
);
3709 chan
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
3711 if (rx_control
& L2CAP_CTRL_POLL
) {
3712 chan
->expected_ack_seq
= tx_seq
;
3713 l2cap_drop_acked_frames(chan
);
3715 chan
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3716 l2cap_retransmit_one_frame(chan
, tx_seq
);
3718 l2cap_ertm_send(chan
);
3720 if (chan
->conn_state
& L2CAP_CONN_WAIT_F
) {
3721 chan
->srej_save_reqseq
= tx_seq
;
3722 chan
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3724 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
3725 if ((chan
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
3726 chan
->srej_save_reqseq
== tx_seq
)
3727 chan
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
3729 l2cap_retransmit_one_frame(chan
, tx_seq
);
3731 l2cap_retransmit_one_frame(chan
, tx_seq
);
3732 if (chan
->conn_state
& L2CAP_CONN_WAIT_F
) {
3733 chan
->srej_save_reqseq
= tx_seq
;
3734 chan
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
3739 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan
*chan
, u16 rx_control
)
3741 u8 tx_seq
= __get_reqseq(rx_control
);
3743 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan
, tx_seq
, rx_control
);
3745 chan
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
3746 chan
->expected_ack_seq
= tx_seq
;
3747 l2cap_drop_acked_frames(chan
);
3749 if (rx_control
& L2CAP_CTRL_POLL
)
3750 chan
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
3752 if (!(chan
->conn_state
& L2CAP_CONN_SREJ_SENT
)) {
3753 __clear_retrans_timer(chan
);
3754 if (rx_control
& L2CAP_CTRL_POLL
)
3755 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_FINAL
);
3759 if (rx_control
& L2CAP_CTRL_POLL
)
3760 l2cap_send_srejtail(chan
);
3762 l2cap_send_sframe(chan
, L2CAP_SUPER_RCV_READY
);
3765 static inline int l2cap_data_channel_sframe(struct l2cap_chan
*chan
, u16 rx_control
, struct sk_buff
*skb
)
3767 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan
, rx_control
, skb
->len
);
3769 if (L2CAP_CTRL_FINAL
& rx_control
&&
3770 chan
->conn_state
& L2CAP_CONN_WAIT_F
) {
3771 __clear_monitor_timer(chan
);
3772 if (chan
->unacked_frames
> 0)
3773 __set_retrans_timer(chan
);
3774 chan
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3777 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
3778 case L2CAP_SUPER_RCV_READY
:
3779 l2cap_data_channel_rrframe(chan
, rx_control
);
3782 case L2CAP_SUPER_REJECT
:
3783 l2cap_data_channel_rejframe(chan
, rx_control
);
3786 case L2CAP_SUPER_SELECT_REJECT
:
3787 l2cap_data_channel_srejframe(chan
, rx_control
);
3790 case L2CAP_SUPER_RCV_NOT_READY
:
3791 l2cap_data_channel_rnrframe(chan
, rx_control
);
3799 static int l2cap_ertm_data_rcv(struct sock
*sk
, struct sk_buff
*skb
)
3801 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
3804 int len
, next_tx_seq_offset
, req_seq_offset
;
3806 control
= get_unaligned_le16(skb
->data
);
3811 * We can just drop the corrupted I-frame here.
3812 * Receiver will miss it and start proper recovery
3813 * procedures and ask retransmission.
3815 if (l2cap_check_fcs(chan
, skb
))
3818 if (__is_sar_start(control
) && __is_iframe(control
))
3821 if (chan
->fcs
== L2CAP_FCS_CRC16
)
3824 if (len
> chan
->mps
) {
3825 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3829 req_seq
= __get_reqseq(control
);
3830 req_seq_offset
= (req_seq
- chan
->expected_ack_seq
) % 64;
3831 if (req_seq_offset
< 0)
3832 req_seq_offset
+= 64;
3834 next_tx_seq_offset
=
3835 (chan
->next_tx_seq
- chan
->expected_ack_seq
) % 64;
3836 if (next_tx_seq_offset
< 0)
3837 next_tx_seq_offset
+= 64;
3839 /* check for invalid req-seq */
3840 if (req_seq_offset
> next_tx_seq_offset
) {
3841 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3845 if (__is_iframe(control
)) {
3847 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3851 l2cap_data_channel_iframe(chan
, control
, skb
);
3855 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
3859 l2cap_data_channel_sframe(chan
, control
, skb
);
3869 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
3871 struct l2cap_chan
*chan
;
3872 struct sock
*sk
= NULL
;
3877 chan
= l2cap_get_chan_by_scid(conn
, cid
);
3879 BT_DBG("unknown cid 0x%4.4x", cid
);
3885 BT_DBG("chan %p, len %d", chan
, skb
->len
);
3887 if (chan
->state
!= BT_CONNECTED
)
3890 switch (chan
->mode
) {
3891 case L2CAP_MODE_BASIC
:
3892 /* If socket recv buffers overflows we drop data here
3893 * which is *bad* because L2CAP has to be reliable.
3894 * But we don't have any other choice. L2CAP doesn't
3895 * provide flow control mechanism. */
3897 if (chan
->imtu
< skb
->len
)
3900 if (!chan
->ops
->recv(chan
->data
, skb
))
3904 case L2CAP_MODE_ERTM
:
3905 if (!sock_owned_by_user(sk
)) {
3906 l2cap_ertm_data_rcv(sk
, skb
);
3908 if (sk_add_backlog(sk
, skb
))
3914 case L2CAP_MODE_STREAMING
:
3915 control
= get_unaligned_le16(skb
->data
);
3919 if (l2cap_check_fcs(chan
, skb
))
3922 if (__is_sar_start(control
))
3925 if (chan
->fcs
== L2CAP_FCS_CRC16
)
3928 if (len
> chan
->mps
|| len
< 0 || __is_sframe(control
))
3931 tx_seq
= __get_txseq(control
);
3933 if (chan
->expected_tx_seq
== tx_seq
)
3934 chan
->expected_tx_seq
= (chan
->expected_tx_seq
+ 1) % 64;
3936 chan
->expected_tx_seq
= (tx_seq
+ 1) % 64;
3938 l2cap_streaming_reassembly_sdu(chan
, skb
, control
);
3943 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
3957 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
3959 struct sock
*sk
= NULL
;
3960 struct l2cap_chan
*chan
;
3962 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
);
3970 BT_DBG("sk %p, len %d", sk
, skb
->len
);
3972 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
3975 if (l2cap_pi(sk
)->chan
->imtu
< skb
->len
)
3978 if (!chan
->ops
->recv(chan
->data
, skb
))
3990 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, __le16 cid
, struct sk_buff
*skb
)
3992 struct sock
*sk
= NULL
;
3993 struct l2cap_chan
*chan
;
3995 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
);
4003 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4005 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
4008 if (l2cap_pi(sk
)->chan
->imtu
< skb
->len
)
4011 if (!chan
->ops
->recv(chan
->data
, skb
))
4023 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4025 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4029 skb_pull(skb
, L2CAP_HDR_SIZE
);
4030 cid
= __le16_to_cpu(lh
->cid
);
4031 len
= __le16_to_cpu(lh
->len
);
4033 if (len
!= skb
->len
) {
4038 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4041 case L2CAP_CID_LE_SIGNALING
:
4042 case L2CAP_CID_SIGNALING
:
4043 l2cap_sig_channel(conn
, skb
);
4046 case L2CAP_CID_CONN_LESS
:
4047 psm
= get_unaligned_le16(skb
->data
);
4049 l2cap_conless_channel(conn
, psm
, skb
);
4052 case L2CAP_CID_LE_DATA
:
4053 l2cap_att_channel(conn
, cid
, skb
);
4057 if (smp_sig_channel(conn
, skb
))
4058 l2cap_conn_del(conn
->hcon
, EACCES
);
4062 l2cap_data_channel(conn
, cid
, skb
);
4067 /* ---- L2CAP interface with lower layer (HCI) ---- */
4069 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
4071 int exact
= 0, lm1
= 0, lm2
= 0;
4072 struct l2cap_chan
*c
;
4074 if (type
!= ACL_LINK
)
4077 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4079 /* Find listening sockets and check their link_mode */
4080 read_lock(&chan_list_lock
);
4081 list_for_each_entry(c
, &chan_list
, global_l
) {
4082 struct sock
*sk
= c
->sk
;
4084 if (c
->state
!= BT_LISTEN
)
4087 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4088 lm1
|= HCI_LM_ACCEPT
;
4090 lm1
|= HCI_LM_MASTER
;
4092 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4093 lm2
|= HCI_LM_ACCEPT
;
4095 lm2
|= HCI_LM_MASTER
;
4098 read_unlock(&chan_list_lock
);
4100 return exact
? lm1
: lm2
;
4103 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4105 struct l2cap_conn
*conn
;
4107 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4109 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
4113 conn
= l2cap_conn_add(hcon
, status
);
4115 l2cap_conn_ready(conn
);
4117 l2cap_conn_del(hcon
, bt_err(status
));
4122 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
4124 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4126 BT_DBG("hcon %p", hcon
);
4128 if ((hcon
->type
!= ACL_LINK
&& hcon
->type
!= LE_LINK
) || !conn
)
4131 return conn
->disc_reason
;
4134 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4136 BT_DBG("hcon %p reason %d", hcon
, reason
);
4138 if (!(hcon
->type
== ACL_LINK
|| hcon
->type
== LE_LINK
))
4141 l2cap_conn_del(hcon
, bt_err(reason
));
4146 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
4148 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
4151 if (encrypt
== 0x00) {
4152 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
4153 __clear_chan_timer(chan
);
4154 __set_chan_timer(chan
, HZ
* 5);
4155 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
4156 l2cap_chan_close(chan
, ECONNREFUSED
);
4158 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
4159 __clear_chan_timer(chan
);
4163 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4165 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4166 struct l2cap_chan
*chan
;
4171 BT_DBG("conn %p", conn
);
4173 read_lock(&conn
->chan_lock
);
4175 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
4176 struct sock
*sk
= chan
->sk
;
4180 BT_DBG("chan->scid %d", chan
->scid
);
4182 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
4183 if (!status
&& encrypt
) {
4184 chan
->sec_level
= hcon
->sec_level
;
4185 l2cap_chan_ready(sk
);
4192 if (chan
->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
4197 if (!status
&& (chan
->state
== BT_CONNECTED
||
4198 chan
->state
== BT_CONFIG
)) {
4199 l2cap_check_encryption(chan
, encrypt
);
4204 if (chan
->state
== BT_CONNECT
) {
4206 struct l2cap_conn_req req
;
4207 req
.scid
= cpu_to_le16(chan
->scid
);
4208 req
.psm
= chan
->psm
;
4210 chan
->ident
= l2cap_get_ident(conn
);
4211 chan
->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
4213 l2cap_send_cmd(conn
, chan
->ident
,
4214 L2CAP_CONN_REQ
, sizeof(req
), &req
);
4216 __clear_chan_timer(chan
);
4217 __set_chan_timer(chan
, HZ
/ 10);
4219 } else if (chan
->state
== BT_CONNECT2
) {
4220 struct l2cap_conn_rsp rsp
;
4224 l2cap_state_change(chan
, BT_CONFIG
);
4225 result
= L2CAP_CR_SUCCESS
;
4227 l2cap_state_change(chan
, BT_DISCONN
);
4228 __set_chan_timer(chan
, HZ
/ 10);
4229 result
= L2CAP_CR_SEC_BLOCK
;
4232 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4233 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4234 rsp
.result
= cpu_to_le16(result
);
4235 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4236 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
4243 read_unlock(&conn
->chan_lock
);
4248 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4250 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4253 conn
= l2cap_conn_add(hcon
, 0);
4258 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4260 if (!(flags
& ACL_CONT
)) {
4261 struct l2cap_hdr
*hdr
;
4262 struct l2cap_chan
*chan
;
4267 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4268 kfree_skb(conn
->rx_skb
);
4269 conn
->rx_skb
= NULL
;
4271 l2cap_conn_unreliable(conn
, ECOMM
);
4274 /* Start fragment always begin with Basic L2CAP header */
4275 if (skb
->len
< L2CAP_HDR_SIZE
) {
4276 BT_ERR("Frame is too short (len %d)", skb
->len
);
4277 l2cap_conn_unreliable(conn
, ECOMM
);
4281 hdr
= (struct l2cap_hdr
*) skb
->data
;
4282 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4283 cid
= __le16_to_cpu(hdr
->cid
);
4285 if (len
== skb
->len
) {
4286 /* Complete frame received */
4287 l2cap_recv_frame(conn
, skb
);
4291 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4293 if (skb
->len
> len
) {
4294 BT_ERR("Frame is too long (len %d, expected len %d)",
4296 l2cap_conn_unreliable(conn
, ECOMM
);
4300 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4302 if (chan
&& chan
->sk
) {
4303 struct sock
*sk
= chan
->sk
;
4305 if (chan
->imtu
< len
- L2CAP_HDR_SIZE
) {
4306 BT_ERR("Frame exceeding recv MTU (len %d, "
4310 l2cap_conn_unreliable(conn
, ECOMM
);
4316 /* Allocate skb for the complete frame (with header) */
4317 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4321 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4323 conn
->rx_len
= len
- skb
->len
;
4325 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4327 if (!conn
->rx_len
) {
4328 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4329 l2cap_conn_unreliable(conn
, ECOMM
);
4333 if (skb
->len
> conn
->rx_len
) {
4334 BT_ERR("Fragment is too long (len %d, expected %d)",
4335 skb
->len
, conn
->rx_len
);
4336 kfree_skb(conn
->rx_skb
);
4337 conn
->rx_skb
= NULL
;
4339 l2cap_conn_unreliable(conn
, ECOMM
);
4343 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4345 conn
->rx_len
-= skb
->len
;
4347 if (!conn
->rx_len
) {
4348 /* Complete frame received */
4349 l2cap_recv_frame(conn
, conn
->rx_skb
);
4350 conn
->rx_skb
= NULL
;
4359 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4361 struct l2cap_chan
*c
;
4363 read_lock_bh(&chan_list_lock
);
4365 list_for_each_entry(c
, &chan_list
, global_l
) {
4366 struct sock
*sk
= c
->sk
;
4368 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4369 batostr(&bt_sk(sk
)->src
),
4370 batostr(&bt_sk(sk
)->dst
),
4371 c
->state
, __le16_to_cpu(c
->psm
),
4372 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
4373 c
->sec_level
, c
->mode
);
4376 read_unlock_bh(&chan_list_lock
);
4381 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4383 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4386 static const struct file_operations l2cap_debugfs_fops
= {
4387 .open
= l2cap_debugfs_open
,
4389 .llseek
= seq_lseek
,
4390 .release
= single_release
,
4393 static struct dentry
*l2cap_debugfs
;
4395 static struct hci_proto l2cap_hci_proto
= {
4397 .id
= HCI_PROTO_L2CAP
,
4398 .connect_ind
= l2cap_connect_ind
,
4399 .connect_cfm
= l2cap_connect_cfm
,
4400 .disconn_ind
= l2cap_disconn_ind
,
4401 .disconn_cfm
= l2cap_disconn_cfm
,
4402 .security_cfm
= l2cap_security_cfm
,
4403 .recv_acldata
= l2cap_recv_acldata
4406 int __init
l2cap_init(void)
4410 err
= l2cap_init_sockets();
4414 _busy_wq
= create_singlethread_workqueue("l2cap");
4420 err
= hci_register_proto(&l2cap_hci_proto
);
4422 BT_ERR("L2CAP protocol registration failed");
4423 bt_sock_unregister(BTPROTO_L2CAP
);
4428 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4429 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4431 BT_ERR("Failed to create L2CAP debug file");
4437 destroy_workqueue(_busy_wq
);
4438 l2cap_cleanup_sockets();
4442 void l2cap_exit(void)
4444 debugfs_remove(l2cap_debugfs
);
4446 flush_workqueue(_busy_wq
);
4447 destroy_workqueue(_busy_wq
);
4449 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4450 BT_ERR("L2CAP protocol unregistration failed");
4452 l2cap_cleanup_sockets();
4455 module_param(disable_ertm
, bool, 0644);
4456 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");