2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
60 bool disable_ertm
= 1;
62 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
63 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
65 static LIST_HEAD(chan_list
);
66 static DEFINE_RWLOCK(chan_list_lock
);
68 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
69 u8 code
, u8 ident
, u16 dlen
, void *data
);
70 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
72 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
73 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
74 struct l2cap_chan
*chan
, int err
);
76 static int l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
77 struct sk_buff_head
*skbs
, u8 event
);
79 /* ---- L2CAP channels ---- */
81 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
85 list_for_each_entry(c
, &conn
->chan_l
, list
) {
92 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
96 list_for_each_entry(c
, &conn
->chan_l
, list
) {
103 /* Find channel with given SCID.
104 * Returns locked channel. */
105 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
107 struct l2cap_chan
*c
;
109 mutex_lock(&conn
->chan_lock
);
110 c
= __l2cap_get_chan_by_scid(conn
, cid
);
113 mutex_unlock(&conn
->chan_lock
);
118 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
120 struct l2cap_chan
*c
;
122 list_for_each_entry(c
, &conn
->chan_l
, list
) {
123 if (c
->ident
== ident
)
129 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
131 struct l2cap_chan
*c
;
133 list_for_each_entry(c
, &chan_list
, global_l
) {
134 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
140 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
144 write_lock(&chan_list_lock
);
146 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
159 for (p
= 0x1001; p
< 0x1100; p
+= 2)
160 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
161 chan
->psm
= cpu_to_le16(p
);
162 chan
->sport
= cpu_to_le16(p
);
169 write_unlock(&chan_list_lock
);
173 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
175 write_lock(&chan_list_lock
);
179 write_unlock(&chan_list_lock
);
184 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
186 u16 cid
= L2CAP_CID_DYN_START
;
188 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
189 if (!__l2cap_get_chan_by_scid(conn
, cid
))
196 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
198 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
199 state_to_string(state
));
202 chan
->ops
->state_change(chan
->data
, state
);
205 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
207 struct sock
*sk
= chan
->sk
;
210 __l2cap_state_change(chan
, state
);
214 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
216 struct sock
*sk
= chan
->sk
;
221 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
223 struct sock
*sk
= chan
->sk
;
226 __l2cap_chan_set_err(chan
, err
);
230 static void __set_retrans_timer(struct l2cap_chan
*chan
)
232 if (!delayed_work_pending(&chan
->monitor_timer
) &&
233 chan
->retrans_timeout
) {
234 l2cap_set_timer(chan
, &chan
->retrans_timer
,
235 msecs_to_jiffies(chan
->retrans_timeout
));
239 static void __set_monitor_timer(struct l2cap_chan
*chan
)
241 __clear_retrans_timer(chan
);
242 if (chan
->monitor_timeout
) {
243 l2cap_set_timer(chan
, &chan
->monitor_timer
,
244 msecs_to_jiffies(chan
->monitor_timeout
));
248 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
253 skb_queue_walk(head
, skb
) {
254 if (bt_cb(skb
)->control
.txseq
== seq
)
261 /* ---- L2CAP sequence number lists ---- */
263 /* For ERTM, ordered lists of sequence numbers must be tracked for
264 * SREJ requests that are received and for frames that are to be
265 * retransmitted. These seq_list functions implement a singly-linked
266 * list in an array, where membership in the list can also be checked
267 * in constant time. Items can also be added to the tail of the list
268 * and removed from the head in constant time, without further memory
272 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
274 size_t alloc_size
, i
;
276 /* Allocated size is a power of 2 to map sequence numbers
277 * (which may be up to 14 bits) in to a smaller array that is
278 * sized for the negotiated ERTM transmit windows.
280 alloc_size
= roundup_pow_of_two(size
);
282 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
286 seq_list
->mask
= alloc_size
- 1;
287 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
288 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
289 for (i
= 0; i
< alloc_size
; i
++)
290 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
295 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
297 kfree(seq_list
->list
);
300 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
303 /* Constant-time check for list membership */
304 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
307 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
309 u16 mask
= seq_list
->mask
;
311 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
312 /* In case someone tries to pop the head of an empty list */
313 return L2CAP_SEQ_LIST_CLEAR
;
314 } else if (seq_list
->head
== seq
) {
315 /* Head can be removed in constant time */
316 seq_list
->head
= seq_list
->list
[seq
& mask
];
317 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
319 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
320 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
321 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
324 /* Walk the list to find the sequence number */
325 u16 prev
= seq_list
->head
;
326 while (seq_list
->list
[prev
& mask
] != seq
) {
327 prev
= seq_list
->list
[prev
& mask
];
328 if (prev
== L2CAP_SEQ_LIST_TAIL
)
329 return L2CAP_SEQ_LIST_CLEAR
;
332 /* Unlink the number from the list and clear it */
333 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
334 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
335 if (seq_list
->tail
== seq
)
336 seq_list
->tail
= prev
;
341 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
343 /* Remove the head in constant time */
344 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
347 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
351 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
354 for (i
= 0; i
<= seq_list
->mask
; i
++)
355 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
357 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
358 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
361 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
363 u16 mask
= seq_list
->mask
;
365 /* All appends happen in constant time */
367 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
370 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
371 seq_list
->head
= seq
;
373 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
375 seq_list
->tail
= seq
;
376 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
379 static void l2cap_chan_timeout(struct work_struct
*work
)
381 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
383 struct l2cap_conn
*conn
= chan
->conn
;
386 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
388 mutex_lock(&conn
->chan_lock
);
389 l2cap_chan_lock(chan
);
391 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
392 reason
= ECONNREFUSED
;
393 else if (chan
->state
== BT_CONNECT
&&
394 chan
->sec_level
!= BT_SECURITY_SDP
)
395 reason
= ECONNREFUSED
;
399 l2cap_chan_close(chan
, reason
);
401 l2cap_chan_unlock(chan
);
403 chan
->ops
->close(chan
->data
);
404 mutex_unlock(&conn
->chan_lock
);
406 l2cap_chan_put(chan
);
409 struct l2cap_chan
*l2cap_chan_create(void)
411 struct l2cap_chan
*chan
;
413 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
417 mutex_init(&chan
->lock
);
419 write_lock(&chan_list_lock
);
420 list_add(&chan
->global_l
, &chan_list
);
421 write_unlock(&chan_list_lock
);
423 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
425 chan
->state
= BT_OPEN
;
427 atomic_set(&chan
->refcnt
, 1);
429 /* This flag is cleared in l2cap_chan_ready() */
430 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
432 BT_DBG("chan %p", chan
);
437 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
439 write_lock(&chan_list_lock
);
440 list_del(&chan
->global_l
);
441 write_unlock(&chan_list_lock
);
443 l2cap_chan_put(chan
);
446 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
448 chan
->fcs
= L2CAP_FCS_CRC16
;
449 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
450 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
451 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
452 chan
->sec_level
= BT_SECURITY_LOW
;
454 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
457 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
459 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
460 __le16_to_cpu(chan
->psm
), chan
->dcid
);
462 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
466 switch (chan
->chan_type
) {
467 case L2CAP_CHAN_CONN_ORIENTED
:
468 if (conn
->hcon
->type
== LE_LINK
) {
470 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
471 chan
->scid
= L2CAP_CID_LE_DATA
;
472 chan
->dcid
= L2CAP_CID_LE_DATA
;
474 /* Alloc CID for connection-oriented socket */
475 chan
->scid
= l2cap_alloc_cid(conn
);
476 chan
->omtu
= L2CAP_DEFAULT_MTU
;
480 case L2CAP_CHAN_CONN_LESS
:
481 /* Connectionless socket */
482 chan
->scid
= L2CAP_CID_CONN_LESS
;
483 chan
->dcid
= L2CAP_CID_CONN_LESS
;
484 chan
->omtu
= L2CAP_DEFAULT_MTU
;
488 /* Raw socket can send/recv signalling messages only */
489 chan
->scid
= L2CAP_CID_SIGNALING
;
490 chan
->dcid
= L2CAP_CID_SIGNALING
;
491 chan
->omtu
= L2CAP_DEFAULT_MTU
;
494 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
495 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
496 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
497 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
498 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
499 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
501 l2cap_chan_hold(chan
);
503 list_add(&chan
->list
, &conn
->chan_l
);
506 static void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
508 mutex_lock(&conn
->chan_lock
);
509 __l2cap_chan_add(conn
, chan
);
510 mutex_unlock(&conn
->chan_lock
);
513 static void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
515 struct sock
*sk
= chan
->sk
;
516 struct l2cap_conn
*conn
= chan
->conn
;
517 struct sock
*parent
= bt_sk(sk
)->parent
;
519 __clear_chan_timer(chan
);
521 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
524 /* Delete from channel list */
525 list_del(&chan
->list
);
527 l2cap_chan_put(chan
);
530 hci_conn_put(conn
->hcon
);
535 __l2cap_state_change(chan
, BT_CLOSED
);
536 sock_set_flag(sk
, SOCK_ZAPPED
);
539 __l2cap_chan_set_err(chan
, err
);
542 bt_accept_unlink(sk
);
543 parent
->sk_data_ready(parent
, 0);
545 sk
->sk_state_change(sk
);
549 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
552 skb_queue_purge(&chan
->tx_q
);
554 if (chan
->mode
== L2CAP_MODE_ERTM
) {
555 __clear_retrans_timer(chan
);
556 __clear_monitor_timer(chan
);
557 __clear_ack_timer(chan
);
559 skb_queue_purge(&chan
->srej_q
);
561 l2cap_seq_list_free(&chan
->srej_list
);
562 l2cap_seq_list_free(&chan
->retrans_list
);
566 static void l2cap_chan_cleanup_listen(struct sock
*parent
)
570 BT_DBG("parent %p", parent
);
572 /* Close not yet accepted channels */
573 while ((sk
= bt_accept_dequeue(parent
, NULL
))) {
574 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
576 l2cap_chan_lock(chan
);
577 __clear_chan_timer(chan
);
578 l2cap_chan_close(chan
, ECONNRESET
);
579 l2cap_chan_unlock(chan
);
581 chan
->ops
->close(chan
->data
);
585 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
587 struct l2cap_conn
*conn
= chan
->conn
;
588 struct sock
*sk
= chan
->sk
;
590 BT_DBG("chan %p state %s sk %p", chan
,
591 state_to_string(chan
->state
), sk
);
593 switch (chan
->state
) {
596 l2cap_chan_cleanup_listen(sk
);
598 __l2cap_state_change(chan
, BT_CLOSED
);
599 sock_set_flag(sk
, SOCK_ZAPPED
);
605 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
606 conn
->hcon
->type
== ACL_LINK
) {
607 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
608 l2cap_send_disconn_req(conn
, chan
, reason
);
610 l2cap_chan_del(chan
, reason
);
614 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
615 conn
->hcon
->type
== ACL_LINK
) {
616 struct l2cap_conn_rsp rsp
;
619 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
))
620 result
= L2CAP_CR_SEC_BLOCK
;
622 result
= L2CAP_CR_BAD_PSM
;
623 l2cap_state_change(chan
, BT_DISCONN
);
625 rsp
.scid
= cpu_to_le16(chan
->dcid
);
626 rsp
.dcid
= cpu_to_le16(chan
->scid
);
627 rsp
.result
= cpu_to_le16(result
);
628 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
629 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
633 l2cap_chan_del(chan
, reason
);
638 l2cap_chan_del(chan
, reason
);
643 sock_set_flag(sk
, SOCK_ZAPPED
);
649 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
651 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
652 switch (chan
->sec_level
) {
653 case BT_SECURITY_HIGH
:
654 return HCI_AT_DEDICATED_BONDING_MITM
;
655 case BT_SECURITY_MEDIUM
:
656 return HCI_AT_DEDICATED_BONDING
;
658 return HCI_AT_NO_BONDING
;
660 } else if (chan
->psm
== cpu_to_le16(0x0001)) {
661 if (chan
->sec_level
== BT_SECURITY_LOW
)
662 chan
->sec_level
= BT_SECURITY_SDP
;
664 if (chan
->sec_level
== BT_SECURITY_HIGH
)
665 return HCI_AT_NO_BONDING_MITM
;
667 return HCI_AT_NO_BONDING
;
669 switch (chan
->sec_level
) {
670 case BT_SECURITY_HIGH
:
671 return HCI_AT_GENERAL_BONDING_MITM
;
672 case BT_SECURITY_MEDIUM
:
673 return HCI_AT_GENERAL_BONDING
;
675 return HCI_AT_NO_BONDING
;
680 /* Service level security */
681 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
683 struct l2cap_conn
*conn
= chan
->conn
;
686 auth_type
= l2cap_get_auth_type(chan
);
688 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
691 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
695 /* Get next available identificator.
696 * 1 - 128 are used by kernel.
697 * 129 - 199 are reserved.
698 * 200 - 254 are used by utilities like l2ping, etc.
701 spin_lock(&conn
->lock
);
703 if (++conn
->tx_ident
> 128)
708 spin_unlock(&conn
->lock
);
713 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
715 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
718 BT_DBG("code 0x%2.2x", code
);
723 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
724 flags
= ACL_START_NO_FLUSH
;
728 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
729 skb
->priority
= HCI_PRIO_MAX
;
731 hci_send_acl(conn
->hchan
, skb
, flags
);
734 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
736 struct hci_conn
*hcon
= chan
->conn
->hcon
;
739 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
742 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
743 lmp_no_flush_capable(hcon
->hdev
))
744 flags
= ACL_START_NO_FLUSH
;
748 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
749 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
752 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
754 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
755 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
757 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
760 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
761 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
768 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
769 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
776 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
778 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
779 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
781 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
784 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
785 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
792 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
793 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
800 static inline void __unpack_control(struct l2cap_chan
*chan
,
803 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
804 __unpack_extended_control(get_unaligned_le32(skb
->data
),
805 &bt_cb(skb
)->control
);
806 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
808 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
809 &bt_cb(skb
)->control
);
810 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
814 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
818 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
819 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
821 if (control
->sframe
) {
822 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
823 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
824 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
826 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
827 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
833 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
837 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
838 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
840 if (control
->sframe
) {
841 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
842 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
843 packed
|= L2CAP_CTRL_FRAME_TYPE
;
845 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
846 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
852 static inline void __pack_control(struct l2cap_chan
*chan
,
853 struct l2cap_ctrl
*control
,
856 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
857 put_unaligned_le32(__pack_extended_control(control
),
858 skb
->data
+ L2CAP_HDR_SIZE
);
860 put_unaligned_le16(__pack_enhanced_control(control
),
861 skb
->data
+ L2CAP_HDR_SIZE
);
865 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
869 struct l2cap_hdr
*lh
;
872 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
873 hlen
= L2CAP_EXT_HDR_SIZE
;
875 hlen
= L2CAP_ENH_HDR_SIZE
;
877 if (chan
->fcs
== L2CAP_FCS_CRC16
)
878 hlen
+= L2CAP_FCS_SIZE
;
880 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
883 return ERR_PTR(-ENOMEM
);
885 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
886 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
887 lh
->cid
= cpu_to_le16(chan
->dcid
);
889 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
890 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
892 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
894 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
895 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
896 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
899 skb
->priority
= HCI_PRIO_MAX
;
903 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
904 struct l2cap_ctrl
*control
)
909 BT_DBG("chan %p, control %p", chan
, control
);
911 if (!control
->sframe
)
914 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
918 if (control
->super
== L2CAP_SUPER_RR
)
919 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
920 else if (control
->super
== L2CAP_SUPER_RNR
)
921 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
923 if (control
->super
!= L2CAP_SUPER_SREJ
) {
924 chan
->last_acked_seq
= control
->reqseq
;
925 __clear_ack_timer(chan
);
928 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
929 control
->final
, control
->poll
, control
->super
);
931 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
932 control_field
= __pack_extended_control(control
);
934 control_field
= __pack_enhanced_control(control
);
936 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
938 l2cap_do_send(chan
, skb
);
941 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
943 struct l2cap_ctrl control
;
945 BT_DBG("chan %p, poll %d", chan
, poll
);
947 memset(&control
, 0, sizeof(control
));
951 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
952 control
.super
= L2CAP_SUPER_RNR
;
954 control
.super
= L2CAP_SUPER_RR
;
956 control
.reqseq
= chan
->buffer_seq
;
957 l2cap_send_sframe(chan
, &control
);
960 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
962 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
965 static void l2cap_send_conn_req(struct l2cap_chan
*chan
)
967 struct l2cap_conn
*conn
= chan
->conn
;
968 struct l2cap_conn_req req
;
970 req
.scid
= cpu_to_le16(chan
->scid
);
973 chan
->ident
= l2cap_get_ident(conn
);
975 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
977 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
980 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
982 struct sock
*sk
= chan
->sk
;
987 parent
= bt_sk(sk
)->parent
;
989 BT_DBG("sk %p, parent %p", sk
, parent
);
991 /* This clears all conf flags, including CONF_NOT_COMPLETE */
992 chan
->conf_state
= 0;
993 __clear_chan_timer(chan
);
995 __l2cap_state_change(chan
, BT_CONNECTED
);
996 sk
->sk_state_change(sk
);
999 parent
->sk_data_ready(parent
, 0);
1004 static void l2cap_do_start(struct l2cap_chan
*chan
)
1006 struct l2cap_conn
*conn
= chan
->conn
;
1008 if (conn
->hcon
->type
== LE_LINK
) {
1009 l2cap_chan_ready(chan
);
1013 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1014 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1017 if (l2cap_chan_check_security(chan
) &&
1018 __l2cap_no_conn_pending(chan
))
1019 l2cap_send_conn_req(chan
);
1021 struct l2cap_info_req req
;
1022 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1024 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1025 conn
->info_ident
= l2cap_get_ident(conn
);
1027 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1029 l2cap_send_cmd(conn
, conn
->info_ident
,
1030 L2CAP_INFO_REQ
, sizeof(req
), &req
);
1034 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1036 u32 local_feat_mask
= l2cap_feat_mask
;
1038 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1041 case L2CAP_MODE_ERTM
:
1042 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1043 case L2CAP_MODE_STREAMING
:
1044 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1050 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
1052 struct sock
*sk
= chan
->sk
;
1053 struct l2cap_disconn_req req
;
1058 if (chan
->mode
== L2CAP_MODE_ERTM
) {
1059 __clear_retrans_timer(chan
);
1060 __clear_monitor_timer(chan
);
1061 __clear_ack_timer(chan
);
1064 req
.dcid
= cpu_to_le16(chan
->dcid
);
1065 req
.scid
= cpu_to_le16(chan
->scid
);
1066 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1067 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
1070 __l2cap_state_change(chan
, BT_DISCONN
);
1071 __l2cap_chan_set_err(chan
, err
);
1075 /* ---- L2CAP connections ---- */
1076 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1078 struct l2cap_chan
*chan
, *tmp
;
1080 BT_DBG("conn %p", conn
);
1082 mutex_lock(&conn
->chan_lock
);
1084 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1085 struct sock
*sk
= chan
->sk
;
1087 l2cap_chan_lock(chan
);
1089 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1090 l2cap_chan_unlock(chan
);
1094 if (chan
->state
== BT_CONNECT
) {
1095 if (!l2cap_chan_check_security(chan
) ||
1096 !__l2cap_no_conn_pending(chan
)) {
1097 l2cap_chan_unlock(chan
);
1101 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1102 && test_bit(CONF_STATE2_DEVICE
,
1103 &chan
->conf_state
)) {
1104 l2cap_chan_close(chan
, ECONNRESET
);
1105 l2cap_chan_unlock(chan
);
1109 l2cap_send_conn_req(chan
);
1111 } else if (chan
->state
== BT_CONNECT2
) {
1112 struct l2cap_conn_rsp rsp
;
1114 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1115 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1117 if (l2cap_chan_check_security(chan
)) {
1119 if (test_bit(BT_SK_DEFER_SETUP
,
1120 &bt_sk(sk
)->flags
)) {
1121 struct sock
*parent
= bt_sk(sk
)->parent
;
1122 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1123 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1125 parent
->sk_data_ready(parent
, 0);
1128 __l2cap_state_change(chan
, BT_CONFIG
);
1129 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1130 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1134 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1135 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1138 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1141 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1142 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1143 l2cap_chan_unlock(chan
);
1147 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1148 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1149 l2cap_build_conf_req(chan
, buf
), buf
);
1150 chan
->num_conf_req
++;
1153 l2cap_chan_unlock(chan
);
1156 mutex_unlock(&conn
->chan_lock
);
1159 /* Find socket with cid and source/destination bdaddr.
1160 * Returns closest match, locked.
1162 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1166 struct l2cap_chan
*c
, *c1
= NULL
;
1168 read_lock(&chan_list_lock
);
1170 list_for_each_entry(c
, &chan_list
, global_l
) {
1171 struct sock
*sk
= c
->sk
;
1173 if (state
&& c
->state
!= state
)
1176 if (c
->scid
== cid
) {
1177 int src_match
, dst_match
;
1178 int src_any
, dst_any
;
1181 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1182 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1183 if (src_match
&& dst_match
) {
1184 read_unlock(&chan_list_lock
);
1189 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1190 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1191 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1192 (src_any
&& dst_any
))
1197 read_unlock(&chan_list_lock
);
1202 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1204 struct sock
*parent
, *sk
;
1205 struct l2cap_chan
*chan
, *pchan
;
1209 /* Check if we have socket listening on cid */
1210 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1211 conn
->src
, conn
->dst
);
1219 /* Check for backlog size */
1220 if (sk_acceptq_is_full(parent
)) {
1221 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
1225 chan
= pchan
->ops
->new_connection(pchan
->data
);
1231 hci_conn_hold(conn
->hcon
);
1233 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1234 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1236 bt_accept_enqueue(parent
, sk
);
1238 l2cap_chan_add(conn
, chan
);
1240 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1242 __l2cap_state_change(chan
, BT_CONNECTED
);
1243 parent
->sk_data_ready(parent
, 0);
1246 release_sock(parent
);
1249 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1251 struct l2cap_chan
*chan
;
1253 BT_DBG("conn %p", conn
);
1255 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1256 l2cap_le_conn_ready(conn
);
1258 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1259 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
1261 mutex_lock(&conn
->chan_lock
);
1263 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1265 l2cap_chan_lock(chan
);
1267 if (conn
->hcon
->type
== LE_LINK
) {
1268 if (smp_conn_security(conn
, chan
->sec_level
))
1269 l2cap_chan_ready(chan
);
1271 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1272 struct sock
*sk
= chan
->sk
;
1273 __clear_chan_timer(chan
);
1275 __l2cap_state_change(chan
, BT_CONNECTED
);
1276 sk
->sk_state_change(sk
);
1279 } else if (chan
->state
== BT_CONNECT
)
1280 l2cap_do_start(chan
);
1282 l2cap_chan_unlock(chan
);
1285 mutex_unlock(&conn
->chan_lock
);
1288 /* Notify sockets that we cannot guaranty reliability anymore */
1289 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1291 struct l2cap_chan
*chan
;
1293 BT_DBG("conn %p", conn
);
1295 mutex_lock(&conn
->chan_lock
);
1297 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1298 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1299 __l2cap_chan_set_err(chan
, err
);
1302 mutex_unlock(&conn
->chan_lock
);
1305 static void l2cap_info_timeout(struct work_struct
*work
)
1307 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1310 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1311 conn
->info_ident
= 0;
1313 l2cap_conn_start(conn
);
1316 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1318 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1319 struct l2cap_chan
*chan
, *l
;
1324 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1326 kfree_skb(conn
->rx_skb
);
1328 mutex_lock(&conn
->chan_lock
);
1331 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1332 l2cap_chan_hold(chan
);
1333 l2cap_chan_lock(chan
);
1335 l2cap_chan_del(chan
, err
);
1337 l2cap_chan_unlock(chan
);
1339 chan
->ops
->close(chan
->data
);
1340 l2cap_chan_put(chan
);
1343 mutex_unlock(&conn
->chan_lock
);
1345 hci_chan_del(conn
->hchan
);
1347 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1348 cancel_delayed_work_sync(&conn
->info_timer
);
1350 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1351 cancel_delayed_work_sync(&conn
->security_timer
);
1352 smp_chan_destroy(conn
);
1355 hcon
->l2cap_data
= NULL
;
1359 static void security_timeout(struct work_struct
*work
)
1361 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1362 security_timer
.work
);
1364 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
/* Create (or return the existing) l2cap_conn for an HCI connection.
 * Allocates the HCI channel and the conn object, initialises locks,
 * the channel list and the per-link-type timer, and picks the MTU
 * from the LE or ACL controller limits.
 * Returns NULL on allocation failure.
 * NOTE(review): the existing-conn shortcut and allocation-failure
 * branches were lost in extraction and are restored here — confirm
 * against the original tree.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn) {
		hci_chan_del(hchan);
		return NULL;
	}

	hcon->l2cap_data = conn;
	conn->hcon = hcon;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links use the controller's LE MTU when one is advertised */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* LE uses the SMP security timer; BR/EDR uses the info timer */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
	else
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
1416 /* ---- Socket interface ---- */
1418 /* Find socket with psm and source / destination bdaddr.
1419 * Returns closest match.
/* Find a channel in the given state listening on 'psm', preferring an
 * exact src/dst address match and falling back to the closest
 * wildcard (BDADDR_ANY) match. Returns the match or NULL; the
 * global chan_list is walked under chan_list_lock.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
						   bdaddr_t *src, bdaddr_t *dst)
{
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		if (state && c->state != state)
			continue;

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match wins immediately */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);
				return c;
			}

			/* Closest match: remember any wildcard candidate
			 * and keep scanning for an exact one.
			 */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))
				c1 = c;
		}
	}

	read_unlock(&chan_list_lock);

	return c1;
}
/* Initiate an outgoing L2CAP connection on 'chan' to 'dst' (psm for
 * connection-oriented, cid for fixed channels). Validates the PSM
 * (must be odd with lsb of the upper byte clear), the channel mode
 * and socket state, then creates/reuses the HCI link (LE or ACL) and
 * attaches the channel to it.
 * Returns 0 on success or a negative errno.
 * NOTE(review): many error/goto paths were lost in extraction and are
 * restored here from the visible control flow — verify against the
 * original tree before relying on exact error codes.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels need either a PSM or a CID */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -ENOTSUPP;
		goto done;
	}

	switch (sk->sk_state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, dst);

	chan->psm = psm;
	chan->dcid = cid;

	auth_type = l2cap_get_auth_type(chan);

	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	if (hcon->type == LE_LINK) {
		err = 0;

		/* LE allows only a single data channel per link */
		if (!list_empty(&conn->chan_l)) {
			err = -EBUSY;
			hci_conn_put(hcon);
		}

		if (err)
			goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* chan_add takes conn->chan_lock, so drop the channel lock first
	 * to preserve lock ordering.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
/* Block (interruptibly) until all outstanding ERTM I-frames on the
 * socket's channel have been acked, the connection drops, a signal
 * arrives, or a socket error is raised. Returns 0 or a negative errno.
 * NOTE(review): the lock_sock/release_sock bracketing around
 * schedule_timeout was lost in extraction and is restored here —
 * confirm against the original tree.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so acks can be
		 * processed.
		 */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
/* Delayed-work handler for the ERTM monitor timer: feed a MONITOR_TO
 * event into the TX state machine. The work holds a channel ref that
 * is dropped on every exit path; if the connection is already gone,
 * just release and return.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, 0, 0, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
/* Delayed-work handler for the ERTM retransmission timer: feed a
 * RETRANS_TO event into the TX state machine. Mirrors
 * l2cap_monitor_timeout, including the ref drop on every exit path.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, 0, 0, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
/* Transmit a queue of segmented I-frames in streaming mode: no
 * retransmission, no acks — each frame gets the next txseq, optional
 * CRC16 FCS, and goes straight to the controller.
 * Returns 0, or -ENOTCONN if the channel is not connected.
 */
static int l2cap_streaming_send(struct l2cap_chan *chan,
				struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Streaming mode never acks, so reqseq is always 0 */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}

	return 0;
}
/* Transmit as many queued I-frames as the remote TX window allows
 * while the TX state machine is in XMIT. Each frame keeps its
 * original skb on tx_q for possible retransmission; a clone is what
 * actually goes to the controller. Returns the number of frames sent,
 * 0 if the remote is busy, or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggyback a pending F-bit on this frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Every I-frame acks everything received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head, or clear it at end of queue */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %d", (int)control->txseq);
	}

	BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent,
	       (int) chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
/* Retransmit every sequence number queued on retrans_list. Each frame
 * gets a refreshed reqseq/F-bit; frames that exceed max_tx retries
 * abort the connection. Cloned skbs are copied first because their
 * data is read-only.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
				seq);
			continue;
		}

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Refresh the ack and F-bit for the retransmission */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_ATOMIC);
		} else {
			tx_skb = skb_clone(skb, GFP_ATOMIC);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents: rewrite the control field in place */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* FCS covers the rewritten control field, so recompute it */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,
							L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
/* Retransmit the single frame identified by control->reqseq
 * (SREJ handling): queue it on retrans_list and flush.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
/* Retransmit every unacked frame starting from control->reqseq
 * (REJ handling): rebuild retrans_list from the tx queue and flush.
 * A poll request also schedules an F-bit on the next frame out.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Find the first frame at or after reqseq */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
				skb == chan->tx_send_head)
				break;
		}

		/* Queue everything up to (not including) tx_send_head */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
/* Acknowledge received I-frames: send RNR when locally busy,
 * otherwise try to piggyback the ack on outgoing I-frames, send an
 * explicit RR once the window is 3/4 full, or (re)arm the ack timer
 * to batch the ack.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
		chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the tx window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->tx_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
			threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		/* Anything still unacked gets batched by the ack timer */
		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
/* Copy 'len' bytes of user data from msg into skb, filling the head
 * with 'count' bytes and chaining MTU-sized continuation fragments on
 * skb's frag_list. Returns total bytes copied or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len  -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		(*frag)->priority = skb->priority;

		sent += count;
		len  -= count;

		/* Keep the head skb's accounting in sync with the chain */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, then the user payload. Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						struct msghdr *msg, size_t len,
						u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build a basic-mode (B-frame) PDU: L2CAP header followed by the
 * user payload, no control field or FCS. Returns the skb or an
 * ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
					      u32 priority)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
/* Build an ERTM/streaming I-frame PDU: L2CAP header, a zeroed control
 * field (filled in at transmit time), an optional SDU length for the
 * first segment, the payload, and room reserved for an optional FCS.
 * Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %d", chan, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	/* Header size depends on the negotiated control-field format */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		hlen = L2CAP_EXT_HDR_SIZE;
	else
		hlen = L2CAP_ENH_HDR_SIZE;

	/* Only the first segment of an SDU carries the SDU length */
	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
	return skb;
}
/* Split an SDU from user space into I-frame PDUs on seg_queue,
 * tagging each with the proper SAR value (UNSEGMENTED, or
 * START/CONTINUE/END). PDU size is bounded by the HCI MTU, the
 * BR/EDR maximum and the remote's MPS. Returns 0 or a negative errno
 * (seg_queue is purged on failure).
 * NOTE(review): the loop framing and SDU-length bookkeeping were
 * partially lost in extraction and are restored here — verify against
 * the original tree.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %d", chan, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXT_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the START frame carried the SDU length;
			 * later frames get that space back.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
/* Send user data on a channel, dispatching on channel type and mode:
 * connectionless PDU, basic-mode PDU, or ERTM/streaming segmentation
 * followed by the TX state machine / streaming sender.
 * Returns bytes sent or a negative errno.
 * NOTE(review): several error-return branches were lost in extraction
 * and are restored here — verify exact error codes against the
 * original tree.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
		    u32 priority)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			err = l2cap_tx(chan, 0, &seg_queue,
				       L2CAP_EV_DATA_REQUEST);
		else
			err = l2cap_streaming_send(chan, &seg_queue);

		if (!err)
			err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
/* Send an SREJ S-frame for every missing sequence number between
 * expected_tx_seq and the received txseq, remembering each on
 * srej_list; frames already buffered in srej_q are skipped.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %d", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
/* Re-send an SREJ for the most recently requested missing frame
 * (the tail of srej_list); no-op if the list is empty.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
		return;

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
}
/* Re-send SREJs for every outstanding missing frame except 'txseq',
 * rotating each popped sequence number back onto srej_list.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %d", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
/* Process an incoming ack (reqseq): free every transmitted frame up
 * to (but not including) reqseq from tx_q, advance expected_ack_seq,
 * and stop the retransmission timer once nothing is outstanding.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %d", chan, reqseq);

	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %d, unacked_frames %d",
		chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %d", (int) chan->unacked_frames);
}
/* Abort the SREJ_SENT receive state: roll expected_tx_seq back to
 * buffer_seq, discard outstanding SREJ bookkeeping and buffered
 * out-of-order frames, and return the RX state machine to RECV.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
/* ERTM TX state machine, XMIT state: queue and send new data, enter
 * or leave LOCAL_BUSY, process acks, and transition to WAIT_F on an
 * explicit poll or retransmission timeout. Returns 0 (err is kept
 * for symmetry with the WAIT_F handler).
 */
static int l2cap_tx_state_xmit(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff_head *skbs, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Sends RNR while locally busy */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* Poll the remote to resynchronise after RNR */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}

	return err;
}
2455 static int l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2456 struct l2cap_ctrl
*control
,
2457 struct sk_buff_head
*skbs
, u8 event
)
2461 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2465 case L2CAP_EV_DATA_REQUEST
:
2466 if (chan
->tx_send_head
== NULL
)
2467 chan
->tx_send_head
= skb_peek(skbs
);
2468 /* Queue data, but don't send. */
2469 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2471 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2472 BT_DBG("Enter LOCAL_BUSY");
2473 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2475 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2476 /* The SREJ_SENT state must be aborted if we are to
2477 * enter the LOCAL_BUSY state.
2479 l2cap_abort_rx_srej_sent(chan
);
2482 l2cap_send_ack(chan
);
2485 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2486 BT_DBG("Exit LOCAL_BUSY");
2487 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2489 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2490 struct l2cap_ctrl local_control
;
2491 memset(&local_control
, 0, sizeof(local_control
));
2492 local_control
.sframe
= 1;
2493 local_control
.super
= L2CAP_SUPER_RR
;
2494 local_control
.poll
= 1;
2495 local_control
.reqseq
= chan
->buffer_seq
;
2496 l2cap_send_sframe(chan
, &local_control
);
2498 chan
->retry_count
= 1;
2499 __set_monitor_timer(chan
);
2500 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2503 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2504 l2cap_process_reqseq(chan
, control
->reqseq
);
2508 case L2CAP_EV_RECV_FBIT
:
2509 if (control
&& control
->final
) {
2510 __clear_monitor_timer(chan
);
2511 if (chan
->unacked_frames
> 0)
2512 __set_retrans_timer(chan
);
2513 chan
->retry_count
= 0;
2514 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2515 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2518 case L2CAP_EV_EXPLICIT_POLL
:
2521 case L2CAP_EV_MONITOR_TO
:
2522 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2523 l2cap_send_rr_or_rnr(chan
, 1);
2524 __set_monitor_timer(chan
);
2525 chan
->retry_count
++;
2527 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
/* Dispatch an event (with optional control/skbs payload) to the
 * handler for the current ERTM TX state; unknown states ignore the
 * event. Returns the handler's result.
 */
static int l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		    struct sk_buff_head *skbs, u8 event)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	switch (chan->tx_state) {
	case L2CAP_TX_STATE_XMIT:
		err = l2cap_tx_state_xmit(chan, control, skbs, event);
		break;
	case L2CAP_TX_STATE_WAIT_F:
		err = l2cap_tx_state_wait_f(chan, control, skbs, event);
		break;
	default:
		/* Ignore event */
		break;
	}

	return err;
}
/* Forward a received frame's reqseq and F-bit to the TX state
 * machine (no data payload).
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, 0, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
/* Forward only a received frame's F-bit to the TX state machine
 * (used when the reqseq must not be processed).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, 0, L2CAP_EV_RECV_FBIT);
}
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* recv() takes ownership of nskb on success; free it
		 * ourselves on failure.
		 */
		if (chan->ops->recv(chan->data, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2603 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command PDU (L2CAP header + command
 * header + dlen bytes of payload), fragmenting payload beyond the
 * connection MTU onto the skb's frag_list. Returns the skb or NULL
 * on allocation failure.
 * NOTE(review): the cmd code/ident assignments and failure cleanup
 * were lost in extraction and are restored here — verify against the
 * original tree.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
	       conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE and BR/EDR use different fixed signalling CIDs */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code  = code;
	cmd->ident = ident;
	cmd->len   = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len  -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	kfree_skb(skb);
	return NULL;
}
/* Parse one configuration option at *ptr: return its type, length
 * and value (1/2/4-byte values are decoded little-endian; larger
 * values are returned as a pointer). Advances *ptr past the option
 * and returns the number of bytes consumed.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
/* Append one configuration option at *ptr: 1/2/4-byte values are
 * stored little-endian; larger values treat 'val' as a pointer to
 * copy 'len' bytes from. Advances *ptr past the written option.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
{
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

	opt->type = type;
	opt->len  = len;

	switch (len) {
	case 1:
		*((u8 *) opt->val)  = val;
		break;

	case 2:
		put_unaligned_le16(val, opt->val);
		break;

	case 4:
		put_unaligned_le32(val, opt->val);
		break;

	default:
		/* Variable-length option: 'val' is really a pointer */
		memcpy(opt->val, (void *) val, len);
		break;
	}

	*ptr += L2CAP_CONF_OPT_SIZE + len;
}
/* Append an Extended Flow Specification option describing this
 * channel's local QoS. ERTM advertises the channel's own service
 * parameters; streaming uses best-effort defaults; other modes add
 * nothing.
 * NOTE(review): the streaming-mode id/acc_lat/flush_to assignments
 * and the default return were lost in extraction and are restored
 * here — verify against the original tree.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id		= chan->local_id;
		efs.stype	= chan->local_stype;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to	= cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		efs.id		= 1;
		efs.stype	= L2CAP_SERV_BESTEFFORT;
		efs.msdu	= cpu_to_le16(chan->local_msdu);
		efs.sdu_itime	= cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat	= 0;
		efs.flush_to	= 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
/* Delayed-work handler for the ERTM ack timer: send any batched ack.
 * The work holds a channel ref, dropped after the ack is sent.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	l2cap_send_ack(chan);

	l2cap_chan_unlock(chan);

	l2cap_chan_put(chan);
}
/* Initialise per-channel ERTM state: reset all sequence counters and
 * reassembly state, initialise the tx queue, and — for ERTM mode
 * only — set the initial RX/TX states, the three timers, the SREJ
 * receive queue and the two sequence lists.
 * Returns 0 or a negative errno (srej_list is freed if retrans_list
 * allocation fails).
 */
static inline int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming mode needs only the counters above */
	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
/* Pick the channel mode to request: keep ERTM/streaming only when the
 * remote's feature mask supports it, otherwise fall back to basic.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
{
	switch (mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (l2cap_mode_supported(mode, remote_feat_mask))
			return mode;
		/* fall through */
	default:
		return L2CAP_MODE_BASIC;
	}
}
/* Extended Window Size is usable only when high-speed support is
 * enabled and the remote advertises the feature.
 */
static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
}
/* Extended Flow Specification is usable only when high-speed support
 * is enabled and the remote advertises the feature.
 */
static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
}
/* Choose the TX window/control-field format: enable the extended
 * control field when a window larger than the default is requested
 * and the remote supports extended windows, otherwise clamp the
 * window to the classic default.
 */
static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
{
	if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
	    __l2cap_ews_supported(chan)) {
		/* use extended control field */
		set_bit(FLAG_EXT_CTRL, &chan->flags);
		chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
	} else {
		chan->tx_win = min_t(u16, chan->tx_win,
				     L2CAP_DEFAULT_TX_WINDOW);
		chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	}
}
2854 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2856 struct l2cap_conf_req
*req
= data
;
2857 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2858 void *ptr
= req
->data
;
2861 BT_DBG("chan %p", chan
);
2863 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2866 switch (chan
->mode
) {
2867 case L2CAP_MODE_STREAMING
:
2868 case L2CAP_MODE_ERTM
:
2869 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2872 if (__l2cap_efs_supported(chan
))
2873 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2877 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2882 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2883 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2885 switch (chan
->mode
) {
2886 case L2CAP_MODE_BASIC
:
2887 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2888 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2891 rfc
.mode
= L2CAP_MODE_BASIC
;
2893 rfc
.max_transmit
= 0;
2894 rfc
.retrans_timeout
= 0;
2895 rfc
.monitor_timeout
= 0;
2896 rfc
.max_pdu_size
= 0;
2898 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2899 (unsigned long) &rfc
);
2902 case L2CAP_MODE_ERTM
:
2903 rfc
.mode
= L2CAP_MODE_ERTM
;
2904 rfc
.max_transmit
= chan
->max_tx
;
2905 rfc
.retrans_timeout
= 0;
2906 rfc
.monitor_timeout
= 0;
2908 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2909 L2CAP_EXT_HDR_SIZE
-
2912 rfc
.max_pdu_size
= cpu_to_le16(size
);
2914 l2cap_txwin_setup(chan
);
2916 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2917 L2CAP_DEFAULT_TX_WINDOW
);
2919 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2920 (unsigned long) &rfc
);
2922 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2923 l2cap_add_opt_efs(&ptr
, chan
);
2925 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2928 if (chan
->fcs
== L2CAP_FCS_NONE
||
2929 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2930 chan
->fcs
= L2CAP_FCS_NONE
;
2931 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2934 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2935 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2939 case L2CAP_MODE_STREAMING
:
2940 rfc
.mode
= L2CAP_MODE_STREAMING
;
2942 rfc
.max_transmit
= 0;
2943 rfc
.retrans_timeout
= 0;
2944 rfc
.monitor_timeout
= 0;
2946 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2947 L2CAP_EXT_HDR_SIZE
-
2950 rfc
.max_pdu_size
= cpu_to_le16(size
);
2952 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2953 (unsigned long) &rfc
);
2955 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2956 l2cap_add_opt_efs(&ptr
, chan
);
2958 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2961 if (chan
->fcs
== L2CAP_FCS_NONE
||
2962 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2963 chan
->fcs
= L2CAP_FCS_NONE
;
2964 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2969 req
->dcid
= cpu_to_le16(chan
->dcid
);
2970 req
->flags
= cpu_to_le16(0);
/* Parse the accumulated Configuration Request stored in chan->conf_req
 * and build the Configuration Response into @data.  Returns the number
 * of response bytes written, or -ECONNREFUSED when the requested
 * configuration cannot be negotiated at all.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	/* NOTE(review): efs is only filled in when the remote sends an EFS
	 * option of the right size; later reads assume FLAG_EFS_ENABLE
	 * implies a valid efs — confirm against the upstream source.
	 */
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
				   sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
						le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
						le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
						le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
3195 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
3197 struct l2cap_conf_req
*req
= data
;
3198 void *ptr
= req
->data
;
3201 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3202 struct l2cap_conf_efs efs
;
3204 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3206 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3207 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3210 case L2CAP_CONF_MTU
:
3211 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3212 *result
= L2CAP_CONF_UNACCEPT
;
3213 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3216 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3219 case L2CAP_CONF_FLUSH_TO
:
3220 chan
->flush_to
= val
;
3221 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3225 case L2CAP_CONF_RFC
:
3226 if (olen
== sizeof(rfc
))
3227 memcpy(&rfc
, (void *)val
, olen
);
3229 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3230 rfc
.mode
!= chan
->mode
)
3231 return -ECONNREFUSED
;
3235 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3236 sizeof(rfc
), (unsigned long) &rfc
);
3239 case L2CAP_CONF_EWS
:
3240 chan
->tx_win
= min_t(u16
, val
,
3241 L2CAP_DEFAULT_EXT_WINDOW
);
3242 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3246 case L2CAP_CONF_EFS
:
3247 if (olen
== sizeof(efs
))
3248 memcpy(&efs
, (void *)val
, olen
);
3250 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3251 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3252 efs
.stype
!= chan
->local_stype
)
3253 return -ECONNREFUSED
;
3255 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3256 sizeof(efs
), (unsigned long) &efs
);
3261 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3262 return -ECONNREFUSED
;
3264 chan
->mode
= rfc
.mode
;
3266 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3268 case L2CAP_MODE_ERTM
:
3269 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3270 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3271 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3273 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3274 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3275 chan
->local_sdu_itime
=
3276 le32_to_cpu(efs
.sdu_itime
);
3277 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3278 chan
->local_flush_to
=
3279 le32_to_cpu(efs
.flush_to
);
3283 case L2CAP_MODE_STREAMING
:
3284 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3288 req
->dcid
= cpu_to_le16(chan
->dcid
);
3289 req
->flags
= cpu_to_le16(0x0000);
3294 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
3296 struct l2cap_conf_rsp
*rsp
= data
;
3297 void *ptr
= rsp
->data
;
3299 BT_DBG("chan %p", chan
);
3301 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3302 rsp
->result
= cpu_to_le16(result
);
3303 rsp
->flags
= cpu_to_le16(flags
);
3308 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3310 struct l2cap_conn_rsp rsp
;
3311 struct l2cap_conn
*conn
= chan
->conn
;
3314 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3315 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3316 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3317 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3318 l2cap_send_cmd(conn
, chan
->ident
,
3319 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3321 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3324 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3325 l2cap_build_conf_req(chan
, buf
), buf
);
3326 chan
->num_conf_req
++;
3329 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3333 struct l2cap_conf_rfc rfc
;
3335 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3337 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3340 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3341 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3344 case L2CAP_CONF_RFC
:
3345 if (olen
== sizeof(rfc
))
3346 memcpy(&rfc
, (void *)val
, olen
);
3351 /* Use sane default values in case a misbehaving remote device
3352 * did not send an RFC option.
3354 rfc
.mode
= chan
->mode
;
3355 rfc
.retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3356 rfc
.monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3357 rfc
.max_pdu_size
= cpu_to_le16(chan
->imtu
);
3359 BT_ERR("Expected RFC option was not found, using defaults");
3363 case L2CAP_MODE_ERTM
:
3364 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3365 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3366 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3368 case L2CAP_MODE_STREAMING
:
3369 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3373 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3375 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3377 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3380 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3381 cmd
->ident
== conn
->info_ident
) {
3382 cancel_delayed_work(&conn
->info_timer
);
3384 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3385 conn
->info_ident
= 0;
3387 l2cap_conn_start(conn
);
3393 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3395 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3396 struct l2cap_conn_rsp rsp
;
3397 struct l2cap_chan
*chan
= NULL
, *pchan
;
3398 struct sock
*parent
, *sk
= NULL
;
3399 int result
, status
= L2CAP_CS_NO_INFO
;
3401 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3402 __le16 psm
= req
->psm
;
3404 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3406 /* Check if we have socket listening on psm */
3407 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
3409 result
= L2CAP_CR_BAD_PSM
;
3415 mutex_lock(&conn
->chan_lock
);
3418 /* Check if the ACL is secure enough (if not SDP) */
3419 if (psm
!= cpu_to_le16(0x0001) &&
3420 !hci_conn_check_link_mode(conn
->hcon
)) {
3421 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3422 result
= L2CAP_CR_SEC_BLOCK
;
3426 result
= L2CAP_CR_NO_MEM
;
3428 /* Check for backlog size */
3429 if (sk_acceptq_is_full(parent
)) {
3430 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
3434 chan
= pchan
->ops
->new_connection(pchan
->data
);
3440 /* Check if we already have channel with that dcid */
3441 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
3442 sock_set_flag(sk
, SOCK_ZAPPED
);
3443 chan
->ops
->close(chan
->data
);
3447 hci_conn_hold(conn
->hcon
);
3449 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3450 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3454 bt_accept_enqueue(parent
, sk
);
3456 __l2cap_chan_add(conn
, chan
);
3460 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3462 chan
->ident
= cmd
->ident
;
3464 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3465 if (l2cap_chan_check_security(chan
)) {
3466 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
)) {
3467 __l2cap_state_change(chan
, BT_CONNECT2
);
3468 result
= L2CAP_CR_PEND
;
3469 status
= L2CAP_CS_AUTHOR_PEND
;
3470 parent
->sk_data_ready(parent
, 0);
3472 __l2cap_state_change(chan
, BT_CONFIG
);
3473 result
= L2CAP_CR_SUCCESS
;
3474 status
= L2CAP_CS_NO_INFO
;
3477 __l2cap_state_change(chan
, BT_CONNECT2
);
3478 result
= L2CAP_CR_PEND
;
3479 status
= L2CAP_CS_AUTHEN_PEND
;
3482 __l2cap_state_change(chan
, BT_CONNECT2
);
3483 result
= L2CAP_CR_PEND
;
3484 status
= L2CAP_CS_NO_INFO
;
3488 release_sock(parent
);
3489 mutex_unlock(&conn
->chan_lock
);
3492 rsp
.scid
= cpu_to_le16(scid
);
3493 rsp
.dcid
= cpu_to_le16(dcid
);
3494 rsp
.result
= cpu_to_le16(result
);
3495 rsp
.status
= cpu_to_le16(status
);
3496 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3498 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3499 struct l2cap_info_req info
;
3500 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3502 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3503 conn
->info_ident
= l2cap_get_ident(conn
);
3505 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3507 l2cap_send_cmd(conn
, conn
->info_ident
,
3508 L2CAP_INFO_REQ
, sizeof(info
), &info
);
3511 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3512 result
== L2CAP_CR_SUCCESS
) {
3514 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3515 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3516 l2cap_build_conf_req(chan
, buf
), buf
);
3517 chan
->num_conf_req
++;
3523 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3525 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3526 u16 scid
, dcid
, result
, status
;
3527 struct l2cap_chan
*chan
;
3531 scid
= __le16_to_cpu(rsp
->scid
);
3532 dcid
= __le16_to_cpu(rsp
->dcid
);
3533 result
= __le16_to_cpu(rsp
->result
);
3534 status
= __le16_to_cpu(rsp
->status
);
3536 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3537 dcid
, scid
, result
, status
);
3539 mutex_lock(&conn
->chan_lock
);
3542 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3548 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3557 l2cap_chan_lock(chan
);
3560 case L2CAP_CR_SUCCESS
:
3561 l2cap_state_change(chan
, BT_CONFIG
);
3564 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3566 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3569 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3570 l2cap_build_conf_req(chan
, req
), req
);
3571 chan
->num_conf_req
++;
3575 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3579 l2cap_chan_del(chan
, ECONNREFUSED
);
3583 l2cap_chan_unlock(chan
);
3586 mutex_unlock(&conn
->chan_lock
);
3591 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3593 /* FCS is enabled only in ERTM or streaming mode, if one or both
3596 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3597 chan
->fcs
= L2CAP_FCS_NONE
;
3598 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3599 chan
->fcs
= L2CAP_FCS_CRC16
;
3602 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3604 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3607 struct l2cap_chan
*chan
;
3610 dcid
= __le16_to_cpu(req
->dcid
);
3611 flags
= __le16_to_cpu(req
->flags
);
3613 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3615 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3619 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3620 struct l2cap_cmd_rej_cid rej
;
3622 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3623 rej
.scid
= cpu_to_le16(chan
->scid
);
3624 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3626 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3631 /* Reject if config buffer is too small. */
3632 len
= cmd_len
- sizeof(*req
);
3633 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3634 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3635 l2cap_build_conf_rsp(chan
, rsp
,
3636 L2CAP_CONF_REJECT
, flags
), rsp
);
3641 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3642 chan
->conf_len
+= len
;
3644 if (flags
& 0x0001) {
3645 /* Incomplete config. Send empty response. */
3646 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3647 l2cap_build_conf_rsp(chan
, rsp
,
3648 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
3652 /* Complete config. */
3653 len
= l2cap_parse_conf_req(chan
, rsp
);
3655 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3659 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3660 chan
->num_conf_rsp
++;
3662 /* Reset config buffer. */
3665 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3668 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3669 set_default_fcs(chan
);
3671 l2cap_state_change(chan
, BT_CONNECTED
);
3673 if (chan
->mode
== L2CAP_MODE_ERTM
||
3674 chan
->mode
== L2CAP_MODE_STREAMING
)
3675 err
= l2cap_ertm_init(chan
);
3678 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3680 l2cap_chan_ready(chan
);
3685 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3687 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3688 l2cap_build_conf_req(chan
, buf
), buf
);
3689 chan
->num_conf_req
++;
3692 /* Got Conf Rsp PENDING from remote side and asume we sent
3693 Conf Rsp PENDING in the code above */
3694 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3695 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3697 /* check compatibility */
3699 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3700 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3702 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3703 l2cap_build_conf_rsp(chan
, rsp
,
3704 L2CAP_CONF_SUCCESS
, 0x0000), rsp
);
3708 l2cap_chan_unlock(chan
);
3712 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3714 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3715 u16 scid
, flags
, result
;
3716 struct l2cap_chan
*chan
;
3717 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3720 scid
= __le16_to_cpu(rsp
->scid
);
3721 flags
= __le16_to_cpu(rsp
->flags
);
3722 result
= __le16_to_cpu(rsp
->result
);
3724 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3727 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3732 case L2CAP_CONF_SUCCESS
:
3733 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3734 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3737 case L2CAP_CONF_PENDING
:
3738 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3740 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3743 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3746 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3750 /* check compatibility */
3752 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3753 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3755 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3756 l2cap_build_conf_rsp(chan
, buf
,
3757 L2CAP_CONF_SUCCESS
, 0x0000), buf
);
3761 case L2CAP_CONF_UNACCEPT
:
3762 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3765 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3766 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3770 /* throw out any old stored conf requests */
3771 result
= L2CAP_CONF_SUCCESS
;
3772 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3775 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3779 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3780 L2CAP_CONF_REQ
, len
, req
);
3781 chan
->num_conf_req
++;
3782 if (result
!= L2CAP_CONF_SUCCESS
)
3788 l2cap_chan_set_err(chan
, ECONNRESET
);
3790 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
3791 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3798 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3800 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3801 set_default_fcs(chan
);
3803 l2cap_state_change(chan
, BT_CONNECTED
);
3804 if (chan
->mode
== L2CAP_MODE_ERTM
||
3805 chan
->mode
== L2CAP_MODE_STREAMING
)
3806 err
= l2cap_ertm_init(chan
);
3809 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3811 l2cap_chan_ready(chan
);
3815 l2cap_chan_unlock(chan
);
3819 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3821 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3822 struct l2cap_disconn_rsp rsp
;
3824 struct l2cap_chan
*chan
;
3827 scid
= __le16_to_cpu(req
->scid
);
3828 dcid
= __le16_to_cpu(req
->dcid
);
3830 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3832 mutex_lock(&conn
->chan_lock
);
3834 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
3836 mutex_unlock(&conn
->chan_lock
);
3840 l2cap_chan_lock(chan
);
3844 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3845 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3846 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3849 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3852 l2cap_chan_hold(chan
);
3853 l2cap_chan_del(chan
, ECONNRESET
);
3855 l2cap_chan_unlock(chan
);
3857 chan
->ops
->close(chan
->data
);
3858 l2cap_chan_put(chan
);
3860 mutex_unlock(&conn
->chan_lock
);
3865 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3867 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3869 struct l2cap_chan
*chan
;
3871 scid
= __le16_to_cpu(rsp
->scid
);
3872 dcid
= __le16_to_cpu(rsp
->dcid
);
3874 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3876 mutex_lock(&conn
->chan_lock
);
3878 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3880 mutex_unlock(&conn
->chan_lock
);
3884 l2cap_chan_lock(chan
);
3886 l2cap_chan_hold(chan
);
3887 l2cap_chan_del(chan
, 0);
3889 l2cap_chan_unlock(chan
);
3891 chan
->ops
->close(chan
->data
);
3892 l2cap_chan_put(chan
);
3894 mutex_unlock(&conn
->chan_lock
);
3899 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3901 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3904 type
= __le16_to_cpu(req
->type
);
3906 BT_DBG("type 0x%4.4x", type
);
3908 if (type
== L2CAP_IT_FEAT_MASK
) {
3910 u32 feat_mask
= l2cap_feat_mask
;
3911 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3912 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3913 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3915 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3918 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3919 | L2CAP_FEAT_EXT_WINDOW
;
3921 put_unaligned_le32(feat_mask
, rsp
->data
);
3922 l2cap_send_cmd(conn
, cmd
->ident
,
3923 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3924 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3926 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3929 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3931 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3933 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3934 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3935 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3936 l2cap_send_cmd(conn
, cmd
->ident
,
3937 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3939 struct l2cap_info_rsp rsp
;
3940 rsp
.type
= cpu_to_le16(type
);
3941 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3942 l2cap_send_cmd(conn
, cmd
->ident
,
3943 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3949 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3951 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3954 type
= __le16_to_cpu(rsp
->type
);
3955 result
= __le16_to_cpu(rsp
->result
);
3957 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3959 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3960 if (cmd
->ident
!= conn
->info_ident
||
3961 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3964 cancel_delayed_work(&conn
->info_timer
);
3966 if (result
!= L2CAP_IR_SUCCESS
) {
3967 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3968 conn
->info_ident
= 0;
3970 l2cap_conn_start(conn
);
3976 case L2CAP_IT_FEAT_MASK
:
3977 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3979 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3980 struct l2cap_info_req req
;
3981 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3983 conn
->info_ident
= l2cap_get_ident(conn
);
3985 l2cap_send_cmd(conn
, conn
->info_ident
,
3986 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3988 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3989 conn
->info_ident
= 0;
3991 l2cap_conn_start(conn
);
3995 case L2CAP_IT_FIXED_CHAN
:
3996 conn
->fixed_chan_mask
= rsp
->data
[0];
3997 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3998 conn
->info_ident
= 0;
4000 l2cap_conn_start(conn
);
4007 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4008 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4011 struct l2cap_create_chan_req
*req
= data
;
4012 struct l2cap_create_chan_rsp rsp
;
4015 if (cmd_len
!= sizeof(*req
))
4021 psm
= le16_to_cpu(req
->psm
);
4022 scid
= le16_to_cpu(req
->scid
);
4024 BT_DBG("psm %d, scid %d, amp_id %d", psm
, scid
, req
->amp_id
);
4026 /* Placeholder: Always reject */
4028 rsp
.scid
= cpu_to_le16(scid
);
4029 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
4030 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4032 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
/* An AMP Create Channel Response carries the same payload as a normal
 * Connection Response, so reuse that handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					   struct l2cap_cmd_hdr *cmd,
					   void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4046 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
4047 u16 icid
, u16 result
)
4049 struct l2cap_move_chan_rsp rsp
;
4051 BT_DBG("icid %d, result %d", icid
, result
);
4053 rsp
.icid
= cpu_to_le16(icid
);
4054 rsp
.result
= cpu_to_le16(result
);
4056 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
4059 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
4060 struct l2cap_chan
*chan
, u16 icid
, u16 result
)
4062 struct l2cap_move_chan_cfm cfm
;
4065 BT_DBG("icid %d, result %d", icid
, result
);
4067 ident
= l2cap_get_ident(conn
);
4069 chan
->ident
= ident
;
4071 cfm
.icid
= cpu_to_le16(icid
);
4072 cfm
.result
= cpu_to_le16(result
);
4074 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
4077 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4080 struct l2cap_move_chan_cfm_rsp rsp
;
4082 BT_DBG("icid %d", icid
);
4084 rsp
.icid
= cpu_to_le16(icid
);
4085 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4088 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4089 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4091 struct l2cap_move_chan_req
*req
= data
;
4093 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4095 if (cmd_len
!= sizeof(*req
))
4098 icid
= le16_to_cpu(req
->icid
);
4100 BT_DBG("icid %d, dest_amp_id %d", icid
, req
->dest_amp_id
);
4105 /* Placeholder: Always refuse */
4106 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
4111 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
4112 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4114 struct l2cap_move_chan_rsp
*rsp
= data
;
4117 if (cmd_len
!= sizeof(*rsp
))
4120 icid
= le16_to_cpu(rsp
->icid
);
4121 result
= le16_to_cpu(rsp
->result
);
4123 BT_DBG("icid %d, result %d", icid
, result
);
4125 /* Placeholder: Always unconfirmed */
4126 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
4131 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
4132 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4134 struct l2cap_move_chan_cfm
*cfm
= data
;
4137 if (cmd_len
!= sizeof(*cfm
))
4140 icid
= le16_to_cpu(cfm
->icid
);
4141 result
= le16_to_cpu(cfm
->result
);
4143 BT_DBG("icid %d, result %d", icid
, result
);
4145 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
4150 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
4151 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4153 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
4156 if (cmd_len
!= sizeof(*rsp
))
4159 icid
= le16_to_cpu(rsp
->icid
);
4161 BT_DBG("icid %d", icid
);
4166 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
4171 if (min
> max
|| min
< 6 || max
> 3200)
4174 if (to_multiplier
< 10 || to_multiplier
> 3200)
4177 if (max
>= to_multiplier
* 8)
4180 max_latency
= (to_multiplier
* 8 / max
) - 1;
4181 if (latency
> 499 || latency
> max_latency
)
4187 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
4188 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4190 struct hci_conn
*hcon
= conn
->hcon
;
4191 struct l2cap_conn_param_update_req
*req
;
4192 struct l2cap_conn_param_update_rsp rsp
;
4193 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
4196 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
4199 cmd_len
= __le16_to_cpu(cmd
->len
);
4200 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
4203 req
= (struct l2cap_conn_param_update_req
*) data
;
4204 min
= __le16_to_cpu(req
->min
);
4205 max
= __le16_to_cpu(req
->max
);
4206 latency
= __le16_to_cpu(req
->latency
);
4207 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
4209 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4210 min
, max
, latency
, to_multiplier
);
4212 memset(&rsp
, 0, sizeof(rsp
));
4214 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
4216 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
4218 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
4220 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
4224 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
4229 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
4230 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
4234 switch (cmd
->code
) {
4235 case L2CAP_COMMAND_REJ
:
4236 l2cap_command_rej(conn
, cmd
, data
);
4239 case L2CAP_CONN_REQ
:
4240 err
= l2cap_connect_req(conn
, cmd
, data
);
4243 case L2CAP_CONN_RSP
:
4244 err
= l2cap_connect_rsp(conn
, cmd
, data
);
4247 case L2CAP_CONF_REQ
:
4248 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
4251 case L2CAP_CONF_RSP
:
4252 err
= l2cap_config_rsp(conn
, cmd
, data
);
4255 case L2CAP_DISCONN_REQ
:
4256 err
= l2cap_disconnect_req(conn
, cmd
, data
);
4259 case L2CAP_DISCONN_RSP
:
4260 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
4263 case L2CAP_ECHO_REQ
:
4264 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
4267 case L2CAP_ECHO_RSP
:
4270 case L2CAP_INFO_REQ
:
4271 err
= l2cap_information_req(conn
, cmd
, data
);
4274 case L2CAP_INFO_RSP
:
4275 err
= l2cap_information_rsp(conn
, cmd
, data
);
4278 case L2CAP_CREATE_CHAN_REQ
:
4279 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
4282 case L2CAP_CREATE_CHAN_RSP
:
4283 err
= l2cap_create_channel_rsp(conn
, cmd
, data
);
4286 case L2CAP_MOVE_CHAN_REQ
:
4287 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
4290 case L2CAP_MOVE_CHAN_RSP
:
4291 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
4294 case L2CAP_MOVE_CHAN_CFM
:
4295 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
4298 case L2CAP_MOVE_CHAN_CFM_RSP
:
4299 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
4303 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
4311 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
4312 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4314 switch (cmd
->code
) {
4315 case L2CAP_COMMAND_REJ
:
4318 case L2CAP_CONN_PARAM_UPDATE_REQ
:
4319 return l2cap_conn_param_update_req(conn
, cmd
, data
);
4321 case L2CAP_CONN_PARAM_UPDATE_RSP
:
4325 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
4330 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
4331 struct sk_buff
*skb
)
4333 u8
*data
= skb
->data
;
4335 struct l2cap_cmd_hdr cmd
;
4338 l2cap_raw_recv(conn
, skb
);
4340 while (len
>= L2CAP_CMD_HDR_SIZE
) {
4342 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
4343 data
+= L2CAP_CMD_HDR_SIZE
;
4344 len
-= L2CAP_CMD_HDR_SIZE
;
4346 cmd_len
= le16_to_cpu(cmd
.len
);
4348 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
4350 if (cmd_len
> len
|| !cmd
.ident
) {
4351 BT_DBG("corrupted command");
4355 if (conn
->hcon
->type
== LE_LINK
)
4356 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
4358 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
4361 struct l2cap_cmd_rej_unk rej
;
4363 BT_ERR("Wrong link type (%d)", err
);
4365 /* FIXME: Map err to a valid reason */
4366 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
4367 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4377 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4379 u16 our_fcs
, rcv_fcs
;
4382 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
4383 hdr_size
= L2CAP_EXT_HDR_SIZE
;
4385 hdr_size
= L2CAP_ENH_HDR_SIZE
;
4387 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
4388 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
4389 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
4390 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
4392 if (our_fcs
!= rcv_fcs
)
4398 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
4400 struct l2cap_ctrl control
;
4402 BT_DBG("chan %p", chan
);
4404 memset(&control
, 0, sizeof(control
));
4407 control
.reqseq
= chan
->buffer_seq
;
4408 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4410 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4411 control
.super
= L2CAP_SUPER_RNR
;
4412 l2cap_send_sframe(chan
, &control
);
4415 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4416 chan
->unacked_frames
> 0)
4417 __set_retrans_timer(chan
);
4419 /* Send pending iframes */
4420 l2cap_ertm_send(chan
);
4422 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
4423 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
4424 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4427 control
.super
= L2CAP_SUPER_RR
;
4428 l2cap_send_sframe(chan
, &control
);
4432 static void append_skb_frag(struct sk_buff
*skb
,
4433 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
4435 /* skb->len reflects data in skb as well as all fragments
4436 * skb->data_len reflects only data in fragments
4438 if (!skb_has_frag_list(skb
))
4439 skb_shinfo(skb
)->frag_list
= new_frag
;
4441 new_frag
->next
= NULL
;
4443 (*last_frag
)->next
= new_frag
;
4444 *last_frag
= new_frag
;
4446 skb
->len
+= new_frag
->len
;
4447 skb
->data_len
+= new_frag
->len
;
4448 skb
->truesize
+= new_frag
->truesize
;
4451 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
4452 struct l2cap_ctrl
*control
)
4456 switch (control
->sar
) {
4457 case L2CAP_SAR_UNSEGMENTED
:
4461 err
= chan
->ops
->recv(chan
->data
, skb
);
4464 case L2CAP_SAR_START
:
4468 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
4469 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
4471 if (chan
->sdu_len
> chan
->imtu
) {
4476 if (skb
->len
>= chan
->sdu_len
)
4480 chan
->sdu_last_frag
= skb
;
4486 case L2CAP_SAR_CONTINUE
:
4490 append_skb_frag(chan
->sdu
, skb
,
4491 &chan
->sdu_last_frag
);
4494 if (chan
->sdu
->len
>= chan
->sdu_len
)
4504 append_skb_frag(chan
->sdu
, skb
,
4505 &chan
->sdu_last_frag
);
4508 if (chan
->sdu
->len
!= chan
->sdu_len
)
4511 err
= chan
->ops
->recv(chan
->data
, chan
->sdu
);
4514 /* Reassembly complete */
4516 chan
->sdu_last_frag
= NULL
;
4524 kfree_skb(chan
->sdu
);
4526 chan
->sdu_last_frag
= NULL
;
4533 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
4537 if (chan
->mode
!= L2CAP_MODE_ERTM
)
4540 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
4541 l2cap_tx(chan
, 0, 0, event
);
4544 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
4547 /* Pass sequential frames to l2cap_reassemble_sdu()
4548 * until a gap is encountered.
4551 BT_DBG("chan %p", chan
);
4553 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4554 struct sk_buff
*skb
;
4555 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4556 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
4558 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
4563 skb_unlink(skb
, &chan
->srej_q
);
4564 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
4565 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
4570 if (skb_queue_empty(&chan
->srej_q
)) {
4571 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
4572 l2cap_send_ack(chan
);
4578 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
4579 struct l2cap_ctrl
*control
)
4581 struct sk_buff
*skb
;
4583 BT_DBG("chan %p, control %p", chan
, control
);
4585 if (control
->reqseq
== chan
->next_tx_seq
) {
4586 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4587 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4591 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4594 BT_DBG("Seq %d not available for retransmission",
4599 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4600 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4601 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4605 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4607 if (control
->poll
) {
4608 l2cap_pass_to_tx(chan
, control
);
4610 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4611 l2cap_retransmit(chan
, control
);
4612 l2cap_ertm_send(chan
);
4614 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4615 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4616 chan
->srej_save_reqseq
= control
->reqseq
;
4619 l2cap_pass_to_tx_fbit(chan
, control
);
4621 if (control
->final
) {
4622 if (chan
->srej_save_reqseq
!= control
->reqseq
||
4623 !test_and_clear_bit(CONN_SREJ_ACT
,
4625 l2cap_retransmit(chan
, control
);
4627 l2cap_retransmit(chan
, control
);
4628 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4629 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4630 chan
->srej_save_reqseq
= control
->reqseq
;
4636 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
4637 struct l2cap_ctrl
*control
)
4639 struct sk_buff
*skb
;
4641 BT_DBG("chan %p, control %p", chan
, control
);
4643 if (control
->reqseq
== chan
->next_tx_seq
) {
4644 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4645 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4649 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4651 if (chan
->max_tx
&& skb
&&
4652 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4653 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4654 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4658 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4660 l2cap_pass_to_tx(chan
, control
);
4662 if (control
->final
) {
4663 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4664 l2cap_retransmit_all(chan
, control
);
4666 l2cap_retransmit_all(chan
, control
);
4667 l2cap_ertm_send(chan
);
4668 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
4669 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4673 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
4675 BT_DBG("chan %p, txseq %d", chan
, txseq
);
4677 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
4678 chan
->expected_tx_seq
);
4680 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
4681 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4683 /* See notes below regarding "double poll" and
4686 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4687 BT_DBG("Invalid/Ignore - after SREJ");
4688 return L2CAP_TXSEQ_INVALID_IGNORE
;
4690 BT_DBG("Invalid - in window after SREJ sent");
4691 return L2CAP_TXSEQ_INVALID
;
4695 if (chan
->srej_list
.head
== txseq
) {
4696 BT_DBG("Expected SREJ");
4697 return L2CAP_TXSEQ_EXPECTED_SREJ
;
4700 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
4701 BT_DBG("Duplicate SREJ - txseq already stored");
4702 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
4705 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
4706 BT_DBG("Unexpected SREJ - not requested");
4707 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
4711 if (chan
->expected_tx_seq
== txseq
) {
4712 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4714 BT_DBG("Invalid - txseq outside tx window");
4715 return L2CAP_TXSEQ_INVALID
;
4718 return L2CAP_TXSEQ_EXPECTED
;
4722 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
4723 __seq_offset(chan
, chan
->expected_tx_seq
,
4724 chan
->last_acked_seq
)){
4725 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4726 return L2CAP_TXSEQ_DUPLICATE
;
4729 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
4730 /* A source of invalid packets is a "double poll" condition,
4731 * where delays cause us to send multiple poll packets. If
4732 * the remote stack receives and processes both polls,
4733 * sequence numbers can wrap around in such a way that a
4734 * resent frame has a sequence number that looks like new data
4735 * with a sequence gap. This would trigger an erroneous SREJ
4738 * Fortunately, this is impossible with a tx window that's
4739 * less than half of the maximum sequence number, which allows
4740 * invalid frames to be safely ignored.
4742 * With tx window sizes greater than half of the tx window
4743 * maximum, the frame is invalid and cannot be ignored. This
4744 * causes a disconnect.
4747 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4748 BT_DBG("Invalid/Ignore - txseq outside tx window");
4749 return L2CAP_TXSEQ_INVALID_IGNORE
;
4751 BT_DBG("Invalid - txseq outside tx window");
4752 return L2CAP_TXSEQ_INVALID
;
4755 BT_DBG("Unexpected - txseq indicates missing frames");
4756 return L2CAP_TXSEQ_UNEXPECTED
;
4760 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
4761 struct l2cap_ctrl
*control
,
4762 struct sk_buff
*skb
, u8 event
)
4765 bool skb_in_use
= 0;
4767 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4771 case L2CAP_EV_RECV_IFRAME
:
4772 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
4773 case L2CAP_TXSEQ_EXPECTED
:
4774 l2cap_pass_to_tx(chan
, control
);
4776 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4777 BT_DBG("Busy, discarding expected seq %d",
4782 chan
->expected_tx_seq
= __next_seq(chan
,
4785 chan
->buffer_seq
= chan
->expected_tx_seq
;
4788 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
4792 if (control
->final
) {
4793 if (!test_and_clear_bit(CONN_REJ_ACT
,
4794 &chan
->conn_state
)) {
4796 l2cap_retransmit_all(chan
, control
);
4797 l2cap_ertm_send(chan
);
4801 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
4802 l2cap_send_ack(chan
);
4804 case L2CAP_TXSEQ_UNEXPECTED
:
4805 l2cap_pass_to_tx(chan
, control
);
4807 /* Can't issue SREJ frames in the local busy state.
4808 * Drop this frame, it will be seen as missing
4809 * when local busy is exited.
4811 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4812 BT_DBG("Busy, discarding unexpected seq %d",
4817 /* There was a gap in the sequence, so an SREJ
4818 * must be sent for each missing frame. The
4819 * current frame is stored for later use.
4821 skb_queue_tail(&chan
->srej_q
, skb
);
4823 BT_DBG("Queued %p (queue len %d)", skb
,
4824 skb_queue_len(&chan
->srej_q
));
4826 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4827 l2cap_seq_list_clear(&chan
->srej_list
);
4828 l2cap_send_srej(chan
, control
->txseq
);
4830 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
4832 case L2CAP_TXSEQ_DUPLICATE
:
4833 l2cap_pass_to_tx(chan
, control
);
4835 case L2CAP_TXSEQ_INVALID_IGNORE
:
4837 case L2CAP_TXSEQ_INVALID
:
4839 l2cap_send_disconn_req(chan
->conn
, chan
,
4844 case L2CAP_EV_RECV_RR
:
4845 l2cap_pass_to_tx(chan
, control
);
4846 if (control
->final
) {
4847 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4849 if (!test_and_clear_bit(CONN_REJ_ACT
,
4850 &chan
->conn_state
)) {
4852 l2cap_retransmit_all(chan
, control
);
4855 l2cap_ertm_send(chan
);
4856 } else if (control
->poll
) {
4857 l2cap_send_i_or_rr_or_rnr(chan
);
4859 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4860 &chan
->conn_state
) &&
4861 chan
->unacked_frames
)
4862 __set_retrans_timer(chan
);
4864 l2cap_ertm_send(chan
);
4867 case L2CAP_EV_RECV_RNR
:
4868 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4869 l2cap_pass_to_tx(chan
, control
);
4870 if (control
&& control
->poll
) {
4871 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4872 l2cap_send_rr_or_rnr(chan
, 0);
4874 __clear_retrans_timer(chan
);
4875 l2cap_seq_list_clear(&chan
->retrans_list
);
4877 case L2CAP_EV_RECV_REJ
:
4878 l2cap_handle_rej(chan
, control
);
4880 case L2CAP_EV_RECV_SREJ
:
4881 l2cap_handle_srej(chan
, control
);
4887 if (skb
&& !skb_in_use
) {
4888 BT_DBG("Freeing %p", skb
);
4895 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
4896 struct l2cap_ctrl
*control
,
4897 struct sk_buff
*skb
, u8 event
)
4900 u16 txseq
= control
->txseq
;
4901 bool skb_in_use
= 0;
4903 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4907 case L2CAP_EV_RECV_IFRAME
:
4908 switch (l2cap_classify_txseq(chan
, txseq
)) {
4909 case L2CAP_TXSEQ_EXPECTED
:
4910 /* Keep frame for reassembly later */
4911 l2cap_pass_to_tx(chan
, control
);
4912 skb_queue_tail(&chan
->srej_q
, skb
);
4914 BT_DBG("Queued %p (queue len %d)", skb
,
4915 skb_queue_len(&chan
->srej_q
));
4917 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
4919 case L2CAP_TXSEQ_EXPECTED_SREJ
:
4920 l2cap_seq_list_pop(&chan
->srej_list
);
4922 l2cap_pass_to_tx(chan
, control
);
4923 skb_queue_tail(&chan
->srej_q
, skb
);
4925 BT_DBG("Queued %p (queue len %d)", skb
,
4926 skb_queue_len(&chan
->srej_q
));
4928 err
= l2cap_rx_queued_iframes(chan
);
4933 case L2CAP_TXSEQ_UNEXPECTED
:
4934 /* Got a frame that can't be reassembled yet.
4935 * Save it for later, and send SREJs to cover
4936 * the missing frames.
4938 skb_queue_tail(&chan
->srej_q
, skb
);
4940 BT_DBG("Queued %p (queue len %d)", skb
,
4941 skb_queue_len(&chan
->srej_q
));
4943 l2cap_pass_to_tx(chan
, control
);
4944 l2cap_send_srej(chan
, control
->txseq
);
4946 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
4947 /* This frame was requested with an SREJ, but
4948 * some expected retransmitted frames are
4949 * missing. Request retransmission of missing
4952 skb_queue_tail(&chan
->srej_q
, skb
);
4954 BT_DBG("Queued %p (queue len %d)", skb
,
4955 skb_queue_len(&chan
->srej_q
));
4957 l2cap_pass_to_tx(chan
, control
);
4958 l2cap_send_srej_list(chan
, control
->txseq
);
4960 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
4961 /* We've already queued this frame. Drop this copy. */
4962 l2cap_pass_to_tx(chan
, control
);
4964 case L2CAP_TXSEQ_DUPLICATE
:
4965 /* Expecting a later sequence number, so this frame
4966 * was already received. Ignore it completely.
4969 case L2CAP_TXSEQ_INVALID_IGNORE
:
4971 case L2CAP_TXSEQ_INVALID
:
4973 l2cap_send_disconn_req(chan
->conn
, chan
,
4978 case L2CAP_EV_RECV_RR
:
4979 l2cap_pass_to_tx(chan
, control
);
4980 if (control
->final
) {
4981 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4983 if (!test_and_clear_bit(CONN_REJ_ACT
,
4984 &chan
->conn_state
)) {
4986 l2cap_retransmit_all(chan
, control
);
4989 l2cap_ertm_send(chan
);
4990 } else if (control
->poll
) {
4991 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4992 &chan
->conn_state
) &&
4993 chan
->unacked_frames
) {
4994 __set_retrans_timer(chan
);
4997 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4998 l2cap_send_srej_tail(chan
);
5000 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5001 &chan
->conn_state
) &&
5002 chan
->unacked_frames
)
5003 __set_retrans_timer(chan
);
5005 l2cap_send_ack(chan
);
5008 case L2CAP_EV_RECV_RNR
:
5009 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5010 l2cap_pass_to_tx(chan
, control
);
5011 if (control
->poll
) {
5012 l2cap_send_srej_tail(chan
);
5014 struct l2cap_ctrl rr_control
;
5015 memset(&rr_control
, 0, sizeof(rr_control
));
5016 rr_control
.sframe
= 1;
5017 rr_control
.super
= L2CAP_SUPER_RR
;
5018 rr_control
.reqseq
= chan
->buffer_seq
;
5019 l2cap_send_sframe(chan
, &rr_control
);
5023 case L2CAP_EV_RECV_REJ
:
5024 l2cap_handle_rej(chan
, control
);
5026 case L2CAP_EV_RECV_SREJ
:
5027 l2cap_handle_srej(chan
, control
);
5031 if (skb
&& !skb_in_use
) {
5032 BT_DBG("Freeing %p", skb
);
5039 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
5041 /* Make sure reqseq is for a packet that has been sent but not acked */
5044 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
5045 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
5048 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5049 struct sk_buff
*skb
, u8 event
)
5053 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
5054 control
, skb
, event
, chan
->rx_state
);
5056 if (__valid_reqseq(chan
, control
->reqseq
)) {
5057 switch (chan
->rx_state
) {
5058 case L2CAP_RX_STATE_RECV
:
5059 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
5061 case L2CAP_RX_STATE_SREJ_SENT
:
5062 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
5070 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5071 control
->reqseq
, chan
->next_tx_seq
,
5072 chan
->expected_ack_seq
);
5073 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5079 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5080 struct sk_buff
*skb
)
5084 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
5087 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
5088 L2CAP_TXSEQ_EXPECTED
) {
5089 l2cap_pass_to_tx(chan
, control
);
5091 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
5092 __next_seq(chan
, chan
->buffer_seq
));
5094 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5096 l2cap_reassemble_sdu(chan
, skb
, control
);
5099 kfree_skb(chan
->sdu
);
5102 chan
->sdu_last_frag
= NULL
;
5106 BT_DBG("Freeing %p", skb
);
5111 chan
->last_acked_seq
= control
->txseq
;
5112 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
5117 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5119 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
5123 __unpack_control(chan
, skb
);
5128 * We can just drop the corrupted I-frame here.
5129 * Receiver will miss it and start proper recovery
5130 * procedures and ask for retransmission.
5132 if (l2cap_check_fcs(chan
, skb
))
5135 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
5136 len
-= L2CAP_SDULEN_SIZE
;
5138 if (chan
->fcs
== L2CAP_FCS_CRC16
)
5139 len
-= L2CAP_FCS_SIZE
;
5141 if (len
> chan
->mps
) {
5142 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5146 if (!control
->sframe
) {
5149 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5150 control
->sar
, control
->reqseq
, control
->final
,
5153 /* Validate F-bit - F=0 always valid, F=1 only
5154 * valid in TX WAIT_F
5156 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
5159 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
5160 event
= L2CAP_EV_RECV_IFRAME
;
5161 err
= l2cap_rx(chan
, control
, skb
, event
);
5163 err
= l2cap_stream_rx(chan
, control
, skb
);
5167 l2cap_send_disconn_req(chan
->conn
, chan
,
5170 const u8 rx_func_to_event
[4] = {
5171 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
5172 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
5175 /* Only I-frames are expected in streaming mode */
5176 if (chan
->mode
== L2CAP_MODE_STREAMING
)
5179 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5180 control
->reqseq
, control
->final
, control
->poll
,
5185 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5189 /* Validate F and P bits */
5190 if (control
->final
&& (control
->poll
||
5191 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
5194 event
= rx_func_to_event
[control
->super
];
5195 if (l2cap_rx(chan
, control
, skb
, event
))
5196 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5206 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
5208 struct l2cap_chan
*chan
;
5210 chan
= l2cap_get_chan_by_scid(conn
, cid
);
5212 BT_DBG("unknown cid 0x%4.4x", cid
);
5213 /* Drop packet and return */
5218 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5220 if (chan
->state
!= BT_CONNECTED
)
5223 switch (chan
->mode
) {
5224 case L2CAP_MODE_BASIC
:
5225 /* If socket recv buffers overflows we drop data here
5226 * which is *bad* because L2CAP has to be reliable.
5227 * But we don't have any other choice. L2CAP doesn't
5228 * provide flow control mechanism. */
5230 if (chan
->imtu
< skb
->len
)
5233 if (!chan
->ops
->recv(chan
->data
, skb
))
5237 case L2CAP_MODE_ERTM
:
5238 case L2CAP_MODE_STREAMING
:
5239 l2cap_data_rcv(chan
, skb
);
5243 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
5251 l2cap_chan_unlock(chan
);
5256 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
5258 struct l2cap_chan
*chan
;
5260 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
5264 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5266 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5269 if (chan
->imtu
< skb
->len
)
5272 if (!chan
->ops
->recv(chan
->data
, skb
))
5281 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
5282 struct sk_buff
*skb
)
5284 struct l2cap_chan
*chan
;
5286 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
5290 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5292 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5295 if (chan
->imtu
< skb
->len
)
5298 if (!chan
->ops
->recv(chan
->data
, skb
))
5307 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
5309 struct l2cap_hdr
*lh
= (void *) skb
->data
;
5313 skb_pull(skb
, L2CAP_HDR_SIZE
);
5314 cid
= __le16_to_cpu(lh
->cid
);
5315 len
= __le16_to_cpu(lh
->len
);
5317 if (len
!= skb
->len
) {
5322 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
5325 case L2CAP_CID_LE_SIGNALING
:
5326 case L2CAP_CID_SIGNALING
:
5327 l2cap_sig_channel(conn
, skb
);
5330 case L2CAP_CID_CONN_LESS
:
5331 psm
= get_unaligned((__le16
*) skb
->data
);
5333 l2cap_conless_channel(conn
, psm
, skb
);
5336 case L2CAP_CID_LE_DATA
:
5337 l2cap_att_channel(conn
, cid
, skb
);
5341 if (smp_sig_channel(conn
, skb
))
5342 l2cap_conn_del(conn
->hcon
, EACCES
);
5346 l2cap_data_channel(conn
, cid
, skb
);
5351 /* ---- L2CAP interface with lower layer (HCI) ---- */
5353 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
5355 int exact
= 0, lm1
= 0, lm2
= 0;
5356 struct l2cap_chan
*c
;
5358 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
5360 /* Find listening sockets and check their link_mode */
5361 read_lock(&chan_list_lock
);
5362 list_for_each_entry(c
, &chan_list
, global_l
) {
5363 struct sock
*sk
= c
->sk
;
5365 if (c
->state
!= BT_LISTEN
)
5368 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
5369 lm1
|= HCI_LM_ACCEPT
;
5370 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5371 lm1
|= HCI_LM_MASTER
;
5373 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
5374 lm2
|= HCI_LM_ACCEPT
;
5375 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5376 lm2
|= HCI_LM_MASTER
;
5379 read_unlock(&chan_list_lock
);
5381 return exact
? lm1
: lm2
;
5384 int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
5386 struct l2cap_conn
*conn
;
5388 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
5391 conn
= l2cap_conn_add(hcon
, status
);
5393 l2cap_conn_ready(conn
);
5395 l2cap_conn_del(hcon
, bt_to_errno(status
));
5400 int l2cap_disconn_ind(struct hci_conn
*hcon
)
5402 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5404 BT_DBG("hcon %p", hcon
);
5407 return HCI_ERROR_REMOTE_USER_TERM
;
5408 return conn
->disc_reason
;
5411 int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
5413 BT_DBG("hcon %p reason %d", hcon
, reason
);
5415 l2cap_conn_del(hcon
, bt_to_errno(reason
));
5419 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
5421 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
5424 if (encrypt
== 0x00) {
5425 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
5426 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
5427 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
5428 l2cap_chan_close(chan
, ECONNREFUSED
);
5430 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
5431 __clear_chan_timer(chan
);
5435 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
5437 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5438 struct l2cap_chan
*chan
;
5443 BT_DBG("conn %p", conn
);
5445 if (hcon
->type
== LE_LINK
) {
5446 if (!status
&& encrypt
)
5447 smp_distribute_keys(conn
, 0);
5448 cancel_delayed_work(&conn
->security_timer
);
5451 mutex_lock(&conn
->chan_lock
);
5453 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
5454 l2cap_chan_lock(chan
);
5456 BT_DBG("chan->scid %d", chan
->scid
);
5458 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
5459 if (!status
&& encrypt
) {
5460 chan
->sec_level
= hcon
->sec_level
;
5461 l2cap_chan_ready(chan
);
5464 l2cap_chan_unlock(chan
);
5468 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
5469 l2cap_chan_unlock(chan
);
5473 if (!status
&& (chan
->state
== BT_CONNECTED
||
5474 chan
->state
== BT_CONFIG
)) {
5475 struct sock
*sk
= chan
->sk
;
5477 clear_bit(BT_SK_SUSPEND
, &bt_sk(sk
)->flags
);
5478 sk
->sk_state_change(sk
);
5480 l2cap_check_encryption(chan
, encrypt
);
5481 l2cap_chan_unlock(chan
);
5485 if (chan
->state
== BT_CONNECT
) {
5487 l2cap_send_conn_req(chan
);
5489 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5491 } else if (chan
->state
== BT_CONNECT2
) {
5492 struct sock
*sk
= chan
->sk
;
5493 struct l2cap_conn_rsp rsp
;
5499 if (test_bit(BT_SK_DEFER_SETUP
,
5500 &bt_sk(sk
)->flags
)) {
5501 struct sock
*parent
= bt_sk(sk
)->parent
;
5502 res
= L2CAP_CR_PEND
;
5503 stat
= L2CAP_CS_AUTHOR_PEND
;
5505 parent
->sk_data_ready(parent
, 0);
5507 __l2cap_state_change(chan
, BT_CONFIG
);
5508 res
= L2CAP_CR_SUCCESS
;
5509 stat
= L2CAP_CS_NO_INFO
;
5512 __l2cap_state_change(chan
, BT_DISCONN
);
5513 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5514 res
= L2CAP_CR_SEC_BLOCK
;
5515 stat
= L2CAP_CS_NO_INFO
;
5520 rsp
.scid
= cpu_to_le16(chan
->dcid
);
5521 rsp
.dcid
= cpu_to_le16(chan
->scid
);
5522 rsp
.result
= cpu_to_le16(res
);
5523 rsp
.status
= cpu_to_le16(stat
);
5524 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
5528 l2cap_chan_unlock(chan
);
5531 mutex_unlock(&conn
->chan_lock
);
5536 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
5538 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5541 conn
= l2cap_conn_add(hcon
, 0);
5546 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
5548 if (!(flags
& ACL_CONT
)) {
5549 struct l2cap_hdr
*hdr
;
5553 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
5554 kfree_skb(conn
->rx_skb
);
5555 conn
->rx_skb
= NULL
;
5557 l2cap_conn_unreliable(conn
, ECOMM
);
5560 /* Start fragment always begin with Basic L2CAP header */
5561 if (skb
->len
< L2CAP_HDR_SIZE
) {
5562 BT_ERR("Frame is too short (len %d)", skb
->len
);
5563 l2cap_conn_unreliable(conn
, ECOMM
);
5567 hdr
= (struct l2cap_hdr
*) skb
->data
;
5568 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
5570 if (len
== skb
->len
) {
5571 /* Complete frame received */
5572 l2cap_recv_frame(conn
, skb
);
5576 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
5578 if (skb
->len
> len
) {
5579 BT_ERR("Frame is too long (len %d, expected len %d)",
5581 l2cap_conn_unreliable(conn
, ECOMM
);
5585 /* Allocate skb for the complete frame (with header) */
5586 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
5590 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5592 conn
->rx_len
= len
- skb
->len
;
5594 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
5596 if (!conn
->rx_len
) {
5597 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
5598 l2cap_conn_unreliable(conn
, ECOMM
);
5602 if (skb
->len
> conn
->rx_len
) {
5603 BT_ERR("Fragment is too long (len %d, expected %d)",
5604 skb
->len
, conn
->rx_len
);
5605 kfree_skb(conn
->rx_skb
);
5606 conn
->rx_skb
= NULL
;
5608 l2cap_conn_unreliable(conn
, ECOMM
);
5612 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5614 conn
->rx_len
-= skb
->len
;
5616 if (!conn
->rx_len
) {
5617 /* Complete frame received */
5618 l2cap_recv_frame(conn
, conn
->rx_skb
);
5619 conn
->rx_skb
= NULL
;
5628 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
5630 struct l2cap_chan
*c
;
5632 read_lock(&chan_list_lock
);
5634 list_for_each_entry(c
, &chan_list
, global_l
) {
5635 struct sock
*sk
= c
->sk
;
5637 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5638 batostr(&bt_sk(sk
)->src
),
5639 batostr(&bt_sk(sk
)->dst
),
5640 c
->state
, __le16_to_cpu(c
->psm
),
5641 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
5642 c
->sec_level
, c
->mode
);
5645 read_unlock(&chan_list_lock
);
5650 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
5652 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
5655 static const struct file_operations l2cap_debugfs_fops
= {
5656 .open
= l2cap_debugfs_open
,
5658 .llseek
= seq_lseek
,
5659 .release
= single_release
,
/* debugfs dentry for the "l2cap" file, removed again in l2cap_exit() */
static struct dentry *l2cap_debugfs;
5664 int __init
l2cap_init(void)
5668 err
= l2cap_init_sockets();
5673 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
5674 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
5676 BT_ERR("Failed to create L2CAP debug file");
5682 void l2cap_exit(void)
5684 debugfs_remove(l2cap_debugfs
);
5685 l2cap_cleanup_sockets();
5688 module_param(disable_ertm
, bool, 0644);
5689 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");