2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
44 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
45 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
47 static LIST_HEAD(chan_list
);
48 static DEFINE_RWLOCK(chan_list_lock
);
50 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
51 u8 code
, u8 ident
, u16 dlen
, void *data
);
52 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
54 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
55 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
56 struct l2cap_chan
*chan
, int err
);
58 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
59 struct sk_buff_head
*skbs
, u8 event
);
61 /* ---- L2CAP channels ---- */
63 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
67 list_for_each_entry(c
, &conn
->chan_l
, list
) {
74 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
78 list_for_each_entry(c
, &conn
->chan_l
, list
) {
85 /* Find channel with given SCID.
86 * Returns locked channel. */
87 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
91 mutex_lock(&conn
->chan_lock
);
92 c
= __l2cap_get_chan_by_scid(conn
, cid
);
95 mutex_unlock(&conn
->chan_lock
);
100 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
102 struct l2cap_chan
*c
;
104 list_for_each_entry(c
, &conn
->chan_l
, list
) {
105 if (c
->ident
== ident
)
111 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
113 struct l2cap_chan
*c
;
115 list_for_each_entry(c
, &chan_list
, global_l
) {
116 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
122 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
126 write_lock(&chan_list_lock
);
128 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
141 for (p
= 0x1001; p
< 0x1100; p
+= 2)
142 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
143 chan
->psm
= cpu_to_le16(p
);
144 chan
->sport
= cpu_to_le16(p
);
151 write_unlock(&chan_list_lock
);
155 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
157 write_lock(&chan_list_lock
);
161 write_unlock(&chan_list_lock
);
166 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
168 u16 cid
= L2CAP_CID_DYN_START
;
170 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
171 if (!__l2cap_get_chan_by_scid(conn
, cid
))
178 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
180 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
181 state_to_string(state
));
184 chan
->ops
->state_change(chan
, state
);
187 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
189 struct sock
*sk
= chan
->sk
;
192 __l2cap_state_change(chan
, state
);
196 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
198 struct sock
*sk
= chan
->sk
;
203 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
205 struct sock
*sk
= chan
->sk
;
208 __l2cap_chan_set_err(chan
, err
);
212 static void __set_retrans_timer(struct l2cap_chan
*chan
)
214 if (!delayed_work_pending(&chan
->monitor_timer
) &&
215 chan
->retrans_timeout
) {
216 l2cap_set_timer(chan
, &chan
->retrans_timer
,
217 msecs_to_jiffies(chan
->retrans_timeout
));
221 static void __set_monitor_timer(struct l2cap_chan
*chan
)
223 __clear_retrans_timer(chan
);
224 if (chan
->monitor_timeout
) {
225 l2cap_set_timer(chan
, &chan
->monitor_timer
,
226 msecs_to_jiffies(chan
->monitor_timeout
));
230 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
235 skb_queue_walk(head
, skb
) {
236 if (bt_cb(skb
)->control
.txseq
== seq
)
243 /* ---- L2CAP sequence number lists ---- */
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
254 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
256 size_t alloc_size
, i
;
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
262 alloc_size
= roundup_pow_of_two(size
);
264 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
268 seq_list
->mask
= alloc_size
- 1;
269 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
270 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
271 for (i
= 0; i
< alloc_size
; i
++)
272 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
279 kfree(seq_list
->list
);
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
285 /* Constant-time check for list membership */
286 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
289 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
291 u16 mask
= seq_list
->mask
;
293 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR
;
296 } else if (seq_list
->head
== seq
) {
297 /* Head can be removed in constant time */
298 seq_list
->head
= seq_list
->list
[seq
& mask
];
299 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
301 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
302 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
303 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
306 /* Walk the list to find the sequence number */
307 u16 prev
= seq_list
->head
;
308 while (seq_list
->list
[prev
& mask
] != seq
) {
309 prev
= seq_list
->list
[prev
& mask
];
310 if (prev
== L2CAP_SEQ_LIST_TAIL
)
311 return L2CAP_SEQ_LIST_CLEAR
;
314 /* Unlink the number from the list and clear it */
315 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
316 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
317 if (seq_list
->tail
== seq
)
318 seq_list
->tail
= prev
;
323 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
329 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
333 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
336 for (i
= 0; i
<= seq_list
->mask
; i
++)
337 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
339 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
340 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
343 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
345 u16 mask
= seq_list
->mask
;
347 /* All appends happen in constant time */
349 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
352 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
353 seq_list
->head
= seq
;
355 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
357 seq_list
->tail
= seq
;
358 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
361 static void l2cap_chan_timeout(struct work_struct
*work
)
363 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
365 struct l2cap_conn
*conn
= chan
->conn
;
368 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
370 mutex_lock(&conn
->chan_lock
);
371 l2cap_chan_lock(chan
);
373 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
374 reason
= ECONNREFUSED
;
375 else if (chan
->state
== BT_CONNECT
&&
376 chan
->sec_level
!= BT_SECURITY_SDP
)
377 reason
= ECONNREFUSED
;
381 l2cap_chan_close(chan
, reason
);
383 l2cap_chan_unlock(chan
);
385 chan
->ops
->close(chan
);
386 mutex_unlock(&conn
->chan_lock
);
388 l2cap_chan_put(chan
);
391 struct l2cap_chan
*l2cap_chan_create(void)
393 struct l2cap_chan
*chan
;
395 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
399 mutex_init(&chan
->lock
);
401 write_lock(&chan_list_lock
);
402 list_add(&chan
->global_l
, &chan_list
);
403 write_unlock(&chan_list_lock
);
405 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
407 chan
->state
= BT_OPEN
;
409 atomic_set(&chan
->refcnt
, 1);
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
414 BT_DBG("chan %p", chan
);
419 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
421 write_lock(&chan_list_lock
);
422 list_del(&chan
->global_l
);
423 write_unlock(&chan_list_lock
);
425 l2cap_chan_put(chan
);
428 void l2cap_chan_hold(struct l2cap_chan
*c
)
430 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->refcnt
));
432 atomic_inc(&c
->refcnt
);
435 void l2cap_chan_put(struct l2cap_chan
*c
)
437 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->refcnt
));
439 if (atomic_dec_and_test(&c
->refcnt
))
443 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
445 chan
->fcs
= L2CAP_FCS_CRC16
;
446 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
447 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
448 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
449 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
450 chan
->sec_level
= BT_SECURITY_LOW
;
452 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
455 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
457 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
458 __le16_to_cpu(chan
->psm
), chan
->dcid
);
460 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
464 switch (chan
->chan_type
) {
465 case L2CAP_CHAN_CONN_ORIENTED
:
466 if (conn
->hcon
->type
== LE_LINK
) {
468 chan
->omtu
= L2CAP_DEFAULT_MTU
;
469 chan
->scid
= L2CAP_CID_LE_DATA
;
470 chan
->dcid
= L2CAP_CID_LE_DATA
;
472 /* Alloc CID for connection-oriented socket */
473 chan
->scid
= l2cap_alloc_cid(conn
);
474 chan
->omtu
= L2CAP_DEFAULT_MTU
;
478 case L2CAP_CHAN_CONN_LESS
:
479 /* Connectionless socket */
480 chan
->scid
= L2CAP_CID_CONN_LESS
;
481 chan
->dcid
= L2CAP_CID_CONN_LESS
;
482 chan
->omtu
= L2CAP_DEFAULT_MTU
;
485 case L2CAP_CHAN_CONN_FIX_A2MP
:
486 chan
->scid
= L2CAP_CID_A2MP
;
487 chan
->dcid
= L2CAP_CID_A2MP
;
488 chan
->omtu
= L2CAP_A2MP_DEFAULT_MTU
;
489 chan
->imtu
= L2CAP_A2MP_DEFAULT_MTU
;
493 /* Raw socket can send/recv signalling messages only */
494 chan
->scid
= L2CAP_CID_SIGNALING
;
495 chan
->dcid
= L2CAP_CID_SIGNALING
;
496 chan
->omtu
= L2CAP_DEFAULT_MTU
;
499 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
500 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
501 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
502 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
503 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
504 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
506 l2cap_chan_hold(chan
);
508 list_add(&chan
->list
, &conn
->chan_l
);
511 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
513 mutex_lock(&conn
->chan_lock
);
514 __l2cap_chan_add(conn
, chan
);
515 mutex_unlock(&conn
->chan_lock
);
518 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
520 struct l2cap_conn
*conn
= chan
->conn
;
522 __clear_chan_timer(chan
);
524 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
527 /* Delete from channel list */
528 list_del(&chan
->list
);
530 l2cap_chan_put(chan
);
534 if (chan
->chan_type
!= L2CAP_CHAN_CONN_FIX_A2MP
)
535 hci_conn_put(conn
->hcon
);
538 if (chan
->ops
->teardown
)
539 chan
->ops
->teardown(chan
, err
);
541 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
545 case L2CAP_MODE_BASIC
:
548 case L2CAP_MODE_ERTM
:
549 __clear_retrans_timer(chan
);
550 __clear_monitor_timer(chan
);
551 __clear_ack_timer(chan
);
553 skb_queue_purge(&chan
->srej_q
);
555 l2cap_seq_list_free(&chan
->srej_list
);
556 l2cap_seq_list_free(&chan
->retrans_list
);
560 case L2CAP_MODE_STREAMING
:
561 skb_queue_purge(&chan
->tx_q
);
568 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
570 struct l2cap_conn
*conn
= chan
->conn
;
571 struct sock
*sk
= chan
->sk
;
573 BT_DBG("chan %p state %s sk %p", chan
,
574 state_to_string(chan
->state
), sk
);
576 switch (chan
->state
) {
578 if (chan
->ops
->teardown
)
579 chan
->ops
->teardown(chan
, 0);
584 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
585 conn
->hcon
->type
== ACL_LINK
) {
586 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
587 l2cap_send_disconn_req(conn
, chan
, reason
);
589 l2cap_chan_del(chan
, reason
);
593 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
594 conn
->hcon
->type
== ACL_LINK
) {
595 struct l2cap_conn_rsp rsp
;
598 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
))
599 result
= L2CAP_CR_SEC_BLOCK
;
601 result
= L2CAP_CR_BAD_PSM
;
602 l2cap_state_change(chan
, BT_DISCONN
);
604 rsp
.scid
= cpu_to_le16(chan
->dcid
);
605 rsp
.dcid
= cpu_to_le16(chan
->scid
);
606 rsp
.result
= cpu_to_le16(result
);
607 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
608 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
612 l2cap_chan_del(chan
, reason
);
617 l2cap_chan_del(chan
, reason
);
621 if (chan
->ops
->teardown
)
622 chan
->ops
->teardown(chan
, 0);
627 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
629 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
630 switch (chan
->sec_level
) {
631 case BT_SECURITY_HIGH
:
632 return HCI_AT_DEDICATED_BONDING_MITM
;
633 case BT_SECURITY_MEDIUM
:
634 return HCI_AT_DEDICATED_BONDING
;
636 return HCI_AT_NO_BONDING
;
638 } else if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_SDP
)) {
639 if (chan
->sec_level
== BT_SECURITY_LOW
)
640 chan
->sec_level
= BT_SECURITY_SDP
;
642 if (chan
->sec_level
== BT_SECURITY_HIGH
)
643 return HCI_AT_NO_BONDING_MITM
;
645 return HCI_AT_NO_BONDING
;
647 switch (chan
->sec_level
) {
648 case BT_SECURITY_HIGH
:
649 return HCI_AT_GENERAL_BONDING_MITM
;
650 case BT_SECURITY_MEDIUM
:
651 return HCI_AT_GENERAL_BONDING
;
653 return HCI_AT_NO_BONDING
;
658 /* Service level security */
659 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
661 struct l2cap_conn
*conn
= chan
->conn
;
664 auth_type
= l2cap_get_auth_type(chan
);
666 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
669 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
673 /* Get next available identificator.
674 * 1 - 128 are used by kernel.
675 * 129 - 199 are reserved.
676 * 200 - 254 are used by utilities like l2ping, etc.
679 spin_lock(&conn
->lock
);
681 if (++conn
->tx_ident
> 128)
686 spin_unlock(&conn
->lock
);
691 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
693 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
696 BT_DBG("code 0x%2.2x", code
);
701 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
702 flags
= ACL_START_NO_FLUSH
;
706 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
707 skb
->priority
= HCI_PRIO_MAX
;
709 hci_send_acl(conn
->hchan
, skb
, flags
);
712 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
714 struct hci_conn
*hcon
= chan
->conn
->hcon
;
717 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
720 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
721 lmp_no_flush_capable(hcon
->hdev
))
722 flags
= ACL_START_NO_FLUSH
;
726 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
727 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
730 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
732 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
733 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
735 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
738 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
739 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
746 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
747 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
754 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
756 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
757 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
759 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
762 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
763 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
770 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
771 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
778 static inline void __unpack_control(struct l2cap_chan
*chan
,
781 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
782 __unpack_extended_control(get_unaligned_le32(skb
->data
),
783 &bt_cb(skb
)->control
);
784 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
786 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
787 &bt_cb(skb
)->control
);
788 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
792 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
796 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
797 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
799 if (control
->sframe
) {
800 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
801 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
802 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
804 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
805 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
811 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
815 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
816 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
818 if (control
->sframe
) {
819 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
820 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
821 packed
|= L2CAP_CTRL_FRAME_TYPE
;
823 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
824 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
830 static inline void __pack_control(struct l2cap_chan
*chan
,
831 struct l2cap_ctrl
*control
,
834 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
835 put_unaligned_le32(__pack_extended_control(control
),
836 skb
->data
+ L2CAP_HDR_SIZE
);
838 put_unaligned_le16(__pack_enhanced_control(control
),
839 skb
->data
+ L2CAP_HDR_SIZE
);
843 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
845 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
846 return L2CAP_EXT_HDR_SIZE
;
848 return L2CAP_ENH_HDR_SIZE
;
851 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
855 struct l2cap_hdr
*lh
;
856 int hlen
= __ertm_hdr_size(chan
);
858 if (chan
->fcs
== L2CAP_FCS_CRC16
)
859 hlen
+= L2CAP_FCS_SIZE
;
861 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
864 return ERR_PTR(-ENOMEM
);
866 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
867 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
868 lh
->cid
= cpu_to_le16(chan
->dcid
);
870 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
871 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
873 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
875 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
876 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
877 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
880 skb
->priority
= HCI_PRIO_MAX
;
884 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
885 struct l2cap_ctrl
*control
)
890 BT_DBG("chan %p, control %p", chan
, control
);
892 if (!control
->sframe
)
895 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
899 if (control
->super
== L2CAP_SUPER_RR
)
900 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
901 else if (control
->super
== L2CAP_SUPER_RNR
)
902 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
904 if (control
->super
!= L2CAP_SUPER_SREJ
) {
905 chan
->last_acked_seq
= control
->reqseq
;
906 __clear_ack_timer(chan
);
909 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
910 control
->final
, control
->poll
, control
->super
);
912 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
913 control_field
= __pack_extended_control(control
);
915 control_field
= __pack_enhanced_control(control
);
917 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
919 l2cap_do_send(chan
, skb
);
922 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
924 struct l2cap_ctrl control
;
926 BT_DBG("chan %p, poll %d", chan
, poll
);
928 memset(&control
, 0, sizeof(control
));
932 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
933 control
.super
= L2CAP_SUPER_RNR
;
935 control
.super
= L2CAP_SUPER_RR
;
937 control
.reqseq
= chan
->buffer_seq
;
938 l2cap_send_sframe(chan
, &control
);
941 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
943 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
946 static void l2cap_send_conn_req(struct l2cap_chan
*chan
)
948 struct l2cap_conn
*conn
= chan
->conn
;
949 struct l2cap_conn_req req
;
951 req
.scid
= cpu_to_le16(chan
->scid
);
954 chan
->ident
= l2cap_get_ident(conn
);
956 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
958 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
961 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
963 /* This clears all conf flags, including CONF_NOT_COMPLETE */
964 chan
->conf_state
= 0;
965 __clear_chan_timer(chan
);
967 chan
->state
= BT_CONNECTED
;
969 chan
->ops
->ready(chan
);
972 static void l2cap_do_start(struct l2cap_chan
*chan
)
974 struct l2cap_conn
*conn
= chan
->conn
;
976 if (conn
->hcon
->type
== LE_LINK
) {
977 l2cap_chan_ready(chan
);
981 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
982 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
985 if (l2cap_chan_check_security(chan
) &&
986 __l2cap_no_conn_pending(chan
))
987 l2cap_send_conn_req(chan
);
989 struct l2cap_info_req req
;
990 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
992 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
993 conn
->info_ident
= l2cap_get_ident(conn
);
995 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
997 l2cap_send_cmd(conn
, conn
->info_ident
,
998 L2CAP_INFO_REQ
, sizeof(req
), &req
);
1002 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1004 u32 local_feat_mask
= l2cap_feat_mask
;
1006 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1009 case L2CAP_MODE_ERTM
:
1010 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1011 case L2CAP_MODE_STREAMING
:
1012 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1018 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
1020 struct sock
*sk
= chan
->sk
;
1021 struct l2cap_disconn_req req
;
1026 if (chan
->mode
== L2CAP_MODE_ERTM
) {
1027 __clear_retrans_timer(chan
);
1028 __clear_monitor_timer(chan
);
1029 __clear_ack_timer(chan
);
1032 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1033 __l2cap_state_change(chan
, BT_DISCONN
);
1037 req
.dcid
= cpu_to_le16(chan
->dcid
);
1038 req
.scid
= cpu_to_le16(chan
->scid
);
1039 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1040 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
1043 __l2cap_state_change(chan
, BT_DISCONN
);
1044 __l2cap_chan_set_err(chan
, err
);
1048 /* ---- L2CAP connections ---- */
1049 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1051 struct l2cap_chan
*chan
, *tmp
;
1053 BT_DBG("conn %p", conn
);
1055 mutex_lock(&conn
->chan_lock
);
1057 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1058 struct sock
*sk
= chan
->sk
;
1060 l2cap_chan_lock(chan
);
1062 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1063 l2cap_chan_unlock(chan
);
1067 if (chan
->state
== BT_CONNECT
) {
1068 if (!l2cap_chan_check_security(chan
) ||
1069 !__l2cap_no_conn_pending(chan
)) {
1070 l2cap_chan_unlock(chan
);
1074 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1075 && test_bit(CONF_STATE2_DEVICE
,
1076 &chan
->conf_state
)) {
1077 l2cap_chan_close(chan
, ECONNRESET
);
1078 l2cap_chan_unlock(chan
);
1082 l2cap_send_conn_req(chan
);
1084 } else if (chan
->state
== BT_CONNECT2
) {
1085 struct l2cap_conn_rsp rsp
;
1087 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1088 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1090 if (l2cap_chan_check_security(chan
)) {
1092 if (test_bit(BT_SK_DEFER_SETUP
,
1093 &bt_sk(sk
)->flags
)) {
1094 struct sock
*parent
= bt_sk(sk
)->parent
;
1095 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1096 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1098 parent
->sk_data_ready(parent
, 0);
1101 __l2cap_state_change(chan
, BT_CONFIG
);
1102 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
1103 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
1107 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1108 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1111 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1114 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1115 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1116 l2cap_chan_unlock(chan
);
1120 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1121 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1122 l2cap_build_conf_req(chan
, buf
), buf
);
1123 chan
->num_conf_req
++;
1126 l2cap_chan_unlock(chan
);
1129 mutex_unlock(&conn
->chan_lock
);
1132 /* Find socket with cid and source/destination bdaddr.
1133 * Returns closest match, locked.
1135 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1139 struct l2cap_chan
*c
, *c1
= NULL
;
1141 read_lock(&chan_list_lock
);
1143 list_for_each_entry(c
, &chan_list
, global_l
) {
1144 struct sock
*sk
= c
->sk
;
1146 if (state
&& c
->state
!= state
)
1149 if (c
->scid
== cid
) {
1150 int src_match
, dst_match
;
1151 int src_any
, dst_any
;
1154 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1155 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1156 if (src_match
&& dst_match
) {
1157 read_unlock(&chan_list_lock
);
1162 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1163 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1164 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1165 (src_any
&& dst_any
))
1170 read_unlock(&chan_list_lock
);
1175 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1177 struct sock
*parent
, *sk
;
1178 struct l2cap_chan
*chan
, *pchan
;
1182 /* Check if we have socket listening on cid */
1183 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1184 conn
->src
, conn
->dst
);
1192 chan
= pchan
->ops
->new_connection(pchan
);
1198 hci_conn_hold(conn
->hcon
);
1200 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1201 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1203 bt_accept_enqueue(parent
, sk
);
1205 l2cap_chan_add(conn
, chan
);
1207 l2cap_chan_ready(chan
);
1210 release_sock(parent
);
1213 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1215 struct l2cap_chan
*chan
;
1217 BT_DBG("conn %p", conn
);
1219 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1220 l2cap_le_conn_ready(conn
);
1222 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1223 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
1225 mutex_lock(&conn
->chan_lock
);
1227 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1229 l2cap_chan_lock(chan
);
1231 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1232 l2cap_chan_unlock(chan
);
1236 if (conn
->hcon
->type
== LE_LINK
) {
1237 if (smp_conn_security(conn
, chan
->sec_level
))
1238 l2cap_chan_ready(chan
);
1240 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1241 struct sock
*sk
= chan
->sk
;
1242 __clear_chan_timer(chan
);
1244 __l2cap_state_change(chan
, BT_CONNECTED
);
1245 sk
->sk_state_change(sk
);
1248 } else if (chan
->state
== BT_CONNECT
)
1249 l2cap_do_start(chan
);
1251 l2cap_chan_unlock(chan
);
1254 mutex_unlock(&conn
->chan_lock
);
1257 /* Notify sockets that we cannot guaranty reliability anymore */
1258 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1260 struct l2cap_chan
*chan
;
1262 BT_DBG("conn %p", conn
);
1264 mutex_lock(&conn
->chan_lock
);
1266 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1267 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1268 __l2cap_chan_set_err(chan
, err
);
1271 mutex_unlock(&conn
->chan_lock
);
1274 static void l2cap_info_timeout(struct work_struct
*work
)
1276 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1279 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1280 conn
->info_ident
= 0;
1282 l2cap_conn_start(conn
);
1285 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1287 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1288 struct l2cap_chan
*chan
, *l
;
1293 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1295 kfree_skb(conn
->rx_skb
);
1297 mutex_lock(&conn
->chan_lock
);
1300 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1301 l2cap_chan_hold(chan
);
1302 l2cap_chan_lock(chan
);
1304 l2cap_chan_del(chan
, err
);
1306 l2cap_chan_unlock(chan
);
1308 chan
->ops
->close(chan
);
1309 l2cap_chan_put(chan
);
1312 mutex_unlock(&conn
->chan_lock
);
1314 hci_chan_del(conn
->hchan
);
1316 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1317 cancel_delayed_work_sync(&conn
->info_timer
);
1319 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1320 cancel_delayed_work_sync(&conn
->security_timer
);
1321 smp_chan_destroy(conn
);
1324 hcon
->l2cap_data
= NULL
;
1328 static void security_timeout(struct work_struct
*work
)
1330 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1331 security_timer
.work
);
1333 BT_DBG("conn %p", conn
);
1335 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &conn
->hcon
->flags
)) {
1336 smp_chan_destroy(conn
);
1337 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
/* Allocate and initialise the l2cap_conn for an HCI link.
 *
 * Creates the underlying hci_chan, allocates the conn atomically (may run
 * in non-sleepable context), picks the MTU from the LE or ACL transport,
 * and arms either the SMP security timer (LE) or the info-request timer
 * (BR/EDR).
 *
 * NOTE(review): damaged extraction — the existing-conn/status early
 * return, the !hchan and !conn error paths, "conn->hcon = hcon;", the
 * else keywords and the final "return conn;" are missing here. Confirm
 * against upstream.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	hchan = hci_chan_create(hcon);

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
		/* Allocation failed: give the hci_chan back. */
		hci_chan_del(hchan);

	hcon->l2cap_data = conn;
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* Prefer the LE MTU when it is configured and this is an LE link;
	 * otherwise fall back to the ACL MTU.
	 */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* LE links pair via SMP; BR/EDR links exchange an info request. */
	if (hcon->type == LE_LINK)
		INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
		INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	/* Default disconnect reason until something more specific is set. */
	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1390 /* ---- Socket interface ---- */
1392 /* Find socket with psm and source / destination bdaddr.
1393 * Returns closest match.
/* Find a channel in the given state bound to this PSM, preferring an
 * exact source/destination address match and falling back to the
 * closest wildcard (BDADDR_ANY) match.
 *
 * NOTE(review): damaged extraction — the src/dst parameters on the
 * signature's second line, the "continue;", the exact-match "return c;",
 * the "c1 = c;" fallback assignment and the final "return c1;" are
 * missing here. Confirm against upstream.
 */
static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
	struct l2cap_chan *c, *c1 = NULL;

	read_lock(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		/* state == 0 means "any state". */
		if (state && c->state != state)

		if (c->psm == psm) {
			int src_match, dst_match;
			int src_any, dst_any;

			/* Exact match wins immediately. */
			src_match = !bacmp(&bt_sk(sk)->src, src);
			dst_match = !bacmp(&bt_sk(sk)->dst, dst);
			if (src_match && dst_match) {
				read_unlock(&chan_list_lock);

			/* Closest match: remember any wildcard candidate. */
			src_any = !bacmp(&bt_sk(sk)->src, BDADDR_ANY);
			dst_any = !bacmp(&bt_sk(sk)->dst, BDADDR_ANY);
			if ((src_match && dst_any) || (src_any && dst_match) ||
			    (src_any && dst_any))

	read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection on @chan toward @dst.
 *
 * Validates the PSM/CID/mode combination, resolves an HCI route, creates
 * (or reuses) the ACL or LE baseband link, attaches the channel to the
 * l2cap_conn and kicks off the L2CAP-level connect (or completes it
 * directly for connectionless channels on an already-up link).
 *
 * Locking: takes hdev lock and the channel lock; note the unlock/add/lock
 * dance around l2cap_chan_add to respect conn->chan_lock ordering.
 *
 * NOTE(review): damaged extraction — error returns/gotos, hci_dev_lock,
 * the err/auth_type declarations, state-switch bodies and the final
 * "return err;" are missing here. Confirm against upstream.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;

	BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src), batostr(dst),
	       dst_type, __le16_to_cpu(chan->psm));

	hdev = hci_get_route(dst, src);
		return -EHOSTUNREACH;

	l2cap_chan_lock(chan);

	/* PSM must be odd and lsb of upper byte must be 0 */
	if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {

	/* Connection-oriented channels need either a PSM or a fixed CID. */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:

	switch (chan->state) {
		/* Already connecting */
		/* Already connected */

	/* Set destination address and psm */
	bacpy(&bt_sk(sk)->dst, dst);

	auth_type = l2cap_get_auth_type(chan);

	/* LE data channels connect over LE_LINK, everything else ACL. */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst, dst_type,
				   chan->sec_level, auth_type);
		hcon = hci_connect(hdev, ACL_LINK, dst, dst_type,
				   chan->sec_level, auth_type);

		err = PTR_ERR(hcon);

	conn = l2cap_conn_add(hcon, 0);

	if (hcon->type == LE_LINK) {
		/* LE allows only one channel per connection. */
		if (!list_empty(&conn->chan_l)) {

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	/* chan_add takes conn->chan_lock; drop the channel lock first
	 * to preserve lock ordering, then re-take it.
	 */
	l2cap_chan_unlock(chan);
	l2cap_chan_add(conn, chan);
	l2cap_chan_lock(chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* No L2CAP-level handshake needed. */
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
			l2cap_do_start(chan);

	l2cap_chan_unlock(chan);
	hci_dev_unlock(hdev);
/* Sleep (interruptibly) until every transmitted ERTM I-frame has been
 * acknowledged, the connection drops, or a signal/socket error occurs.
 * Returns 0 on success, -EINTR style codes on signal, or the socket error.
 *
 * NOTE(review): damaged extraction — the err/timeo declarations, the
 * timeout re-initialisation, the "break"s and the final "return err;"
 * are missing here. Confirm against upstream.
 */
int __l2cap_wait_ack(struct sock *sk)
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	/* Loop while frames remain unacked and the connection is alive. */
	while (chan->unacked_frames > 0 && chan->conn) {

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);

		/* schedule_timeout returns the remaining jiffies. */
		timeo = schedule_timeout(timeo);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
1606 static void l2cap_monitor_timeout(struct work_struct
*work
)
1608 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1609 monitor_timer
.work
);
1611 BT_DBG("chan %p", chan
);
1613 l2cap_chan_lock(chan
);
1616 l2cap_chan_unlock(chan
);
1617 l2cap_chan_put(chan
);
1621 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1623 l2cap_chan_unlock(chan
);
1624 l2cap_chan_put(chan
);
1627 static void l2cap_retrans_timeout(struct work_struct
*work
)
1629 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1630 retrans_timer
.work
);
1632 BT_DBG("chan %p", chan
);
1634 l2cap_chan_lock(chan
);
1637 l2cap_chan_unlock(chan
);
1638 l2cap_chan_put(chan
);
1642 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1643 l2cap_chan_unlock(chan
);
1644 l2cap_chan_put(chan
);
1647 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1648 struct sk_buff_head
*skbs
)
1650 struct sk_buff
*skb
;
1651 struct l2cap_ctrl
*control
;
1653 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1655 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1657 while (!skb_queue_empty(&chan
->tx_q
)) {
1659 skb
= skb_dequeue(&chan
->tx_q
);
1661 bt_cb(skb
)->control
.retries
= 1;
1662 control
= &bt_cb(skb
)->control
;
1664 control
->reqseq
= 0;
1665 control
->txseq
= chan
->next_tx_seq
;
1667 __pack_control(chan
, control
, skb
);
1669 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1670 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1671 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1674 l2cap_do_send(chan
, skb
);
1676 BT_DBG("Sent txseq %u", control
->txseq
);
1678 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1679 chan
->frames_sent
++;
/* Transmit as many pending ERTM I-frames as the remote window allows.
 * Frames are cloned before sending so the original stays queued for
 * possible retransmission. Returns the number of frames sent.
 *
 * NOTE(review): damaged extraction — the "int sent = 0;" declaration,
 * the early returns, the F-bit assignment under CONN_SEND_FBIT, the
 * clone-failure break, "sent++;", the else keyword and "return sent;"
 * are missing here. Confirm against upstream.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)

	/* Remote signalled RNR: hold all transmissions. */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))

	/* Send while data is pending, the remote window has room, and the
	 * TX state machine is in its normal transmit state.
	 */
	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))

		/* Piggy-back an ack for everything received so far. */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));

		/* Clone after data has been modified. Data is assumed to be
		 * read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;

		/* Advance tx_send_head, or park it at the queue end. */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));
/* Retransmit every sequence number queued on chan->retrans_list.
 * Each frame's stored control header is refreshed (reqseq / F-bit),
 * rewritten in place in the skb, re-checksummed, and resent. Enforces
 * the max_tx retry limit by disconnecting the channel.
 *
 * NOTE(review): damaged extraction — the "u16 seq;" declaration, early
 * return, the !skb continue path, the F-bit assignment, the else before
 * skb_clone, the !tx_skb error return and the loop braces are missing
 * here. Confirm against upstream.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;

	BT_DBG("chan %p", chan);

	/* Remote signalled RNR: hold all retransmissions. */
	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
			BT_DBG("Error: Can't retransmit seq %d, frame missing",

		bt_cb(skb)->control.retries++;
		control = bt_cb(skb)->control;

		/* max_tx == 0 means unlimited retries. */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->control.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);

		/* Refresh the ack we piggy-back on the retransmission. */
		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy before patching the header.
			 */
			tx_skb = skb_copy(skb, GFP_ATOMIC);
			tx_skb = skb_clone(skb, GFP_ATOMIC);

			l2cap_seq_list_clear(&chan->retrans_list);

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);

		/* FCS covers the patched header, so recompute it. */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data, tx_skb->len);
			put_unaligned_le16(fcs, skb_put(tx_skb,

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
1826 static void l2cap_retransmit(struct l2cap_chan
*chan
,
1827 struct l2cap_ctrl
*control
)
1829 BT_DBG("chan %p, control %p", chan
, control
);
1831 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
1832 l2cap_ertm_resend(chan
);
/* Retransmit every unacked I-frame starting at control->reqseq: rebuild
 * the retransmission list from the tx queue (stopping at tx_send_head,
 * which marks the first never-sent frame) and drain it.
 *
 * NOTE(review): damaged extraction — the poll guard around the F-bit
 * set, the early return, the "break"s terminating both queue walks and
 * the closing braces are missing here. Confirm against upstream.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	/* Start from a clean list; it is rebuilt below. */
	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))

	if (chan->unacked_frames) {
		/* Find the first frame at or after reqseq that was sent. */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->control.txseq == control->reqseq ||
			    skb == chan->tx_send_head)

		/* Queue everything sent from there up to tx_send_head. */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->control.txseq);

		l2cap_ertm_resend(chan);
/* Acknowledge received I-frames. Sends RNR when locally busy, otherwise
 * tries to piggy-back the ack on pending I-frames; failing that, sends
 * an explicit RR once ~3/4 of the ack window is outstanding, or re-arms
 * the ack timer to batch acks.
 *
 * NOTE(review): damaged extraction — the "int threshold;" declaration,
 * the "control.sframe = 1;" lines, the threshold right-shift, several
 * returns and braces are missing here. Confirm against upstream.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));

	/* Locally busy while receiving normally: tell the peer via RNR. */
	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);

			/* Batch further acks behind the ack timer. */
			__set_ack_timer(chan);
/* Copy @len bytes of user iovec data into @skb, spilling anything beyond
 * the first @count bytes into MTU-sized continuation fragments chained on
 * skb's frag_list. Returns the total copied length or a negative errno.
 *
 * NOTE(review): damaged extraction — the "int sent = 0;" accumulator,
 * the -EFAULT returns, the "while (len)" loop header, the IS_ERR check
 * on tmp, the len/sent bookkeeping and "return sent;" are missing here.
 * Confirm against upstream.
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;

	/* First chunk lands in the head skb itself. */
	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		/* Fragment allocation delegated to the channel owner. */
		tmp = chan->ops->alloc_skb(chan, count,
					   msg->msg_flags & MSG_DONTWAIT);
			return PTR_ERR(tmp);

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))

		(*frag)->priority = skb->priority;

		/* Keep the head skb's accounting consistent. */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM +
 * payload copied from the user iovec. Returns the skb or an ERR_PTR.
 *
 * NOTE(review): damaged extraction — the "u32 priority" parameter line,
 * the IS_ERR check after alloc_skb, the kfree_skb on the error path and
 * the final "return skb;" are missing here. Confirm against upstream.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len,
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu priority %u", chan, len, priority);

	/* Head skb holds at most one MTU; the rest goes into fragments. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header + payload copied
 * from the user iovec. Returns the skb or an ERR_PTR.
 *
 * NOTE(review): damaged extraction — the "u32 priority" parameter line,
 * the "int err, count;" declaration, the IS_ERR check after alloc_skb,
 * the kfree_skb on the error path and the final "return skb;" are
 * missing here. Confirm against upstream.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len,
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, count + L2CAP_HDR_SIZE,
				   msg->msg_flags & MSG_DONTWAIT);

	skb->priority = priority;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, placeholder control
 * field (filled in at transmit time), optional SDU-length field for the
 * first segment, payload, and room reserved for the FCS.
 * Returns the skb or an ERR_PTR.
 *
 * NOTE(review): damaged extraction — the "u16 sdulen" parameter line,
 * the !conn check before -ENOTCONN, the "if (sdulen)" guards, the else
 * for the enhanced control field, the IS_ERR check after alloc_skb, the
 * kfree_skb error path and the final "return skb;" are missing here.
 * Confirm against upstream.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

		return ERR_PTR(-ENOTCONN);

	/* Header size depends on ext control / SDU-length / FCS options. */
	hlen = __ertm_hdr_size(chan);

		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, count + hlen,
				   msg->msg_flags & MSG_DONTWAIT);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);

	bt_cb(skb)->control.fcs = chan->fcs;
	bt_cb(skb)->control.retries = 0;
/* Split an SDU from the user iovec into I-frame PDUs and queue them on
 * @seg_queue, tagging each with its SAR role (unsegmented, start,
 * continue, end). Returns 0 or a negative errno.
 *
 * NOTE(review): damaged extraction — the sar/pdu_len/sdu_len locals,
 * the EXT_CTRL pdu_len adjustment, the "while (len > 0)" loop header,
 * the IS_ERR check on skb, the sdu_len/len bookkeeping and the final
 * "return 0;" are missing here. Confirm against upstream.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
	struct sk_buff *skb;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sar = L2CAP_SAR_START;
		/* First segment carries the 2-byte SDU length field. */
		pdu_len -= L2CAP_SDULEN_SIZE;

		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

			/* Drop everything built so far on failure. */
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

			/* Later segments have no SDU length field. */
			pdu_len += L2CAP_SDULEN_SIZE;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			sar = L2CAP_SAR_CONTINUE;
/* Entry point for sending user data on a channel. Dispatches on channel
 * type/mode: connectionless G-frames, basic-mode B-frames, or segmented
 * ERTM/streaming I-frames. Returns bytes sent or a negative errno.
 *
 * NOTE(review): damaged extraction — the "u32 priority" parameter, the
 * "int err;" declaration, IS_ERR checks, -EMSGSIZE returns, "err = len;"
 * successes, breaks/default and the final "return err;" are missing
 * here. Confirm against upstream.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
	struct sk_buff *skb;
	struct sk_buff_head seg_queue;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len, priority);
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len, priority);
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
			l2cap_streaming_send(chan, &seg_queue);

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);

		BT_DBG("bad state %1.1x", chan->mode);
/* Send an SREJ S-frame for every sequence number between the expected
 * txseq and the one actually received, skipping frames already buffered
 * out of order, and record each on the srej_list.
 *
 * NOTE(review): damaged extraction — the "u16 seq;" declaration, the
 * "control.sframe = 1;" line after memset and the loop braces are
 * missing here. Confirm against upstream.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
	struct l2cap_ctrl control;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		/* Only SREJ frames not already held in the srej queue. */
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);

	chan->expected_tx_seq = __next_seq(chan, txseq);
/* Re-send an SREJ for the most recent entry on the srej_list (e.g. after
 * a poll), doing nothing when the list is empty.
 *
 * NOTE(review): damaged extraction — the "return;" after the empty-list
 * guard and the "control.sframe = 1;" line after memset are missing
 * here. Confirm against upstream.
 */
static void l2cap_send_srej_tail(struct l2cap_chan *chan)
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	/* Nothing outstanding to selectively reject. */
	if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)

	memset(&control, 0, sizeof(control));
	control.super = L2CAP_SUPER_SREJ;
	control.reqseq = chan->srej_list.tail;
	l2cap_send_sframe(chan, &control);
/* Re-send SREJs for everything still on the srej_list, rotating each
 * popped entry back to the tail, stopping once @txseq is reached or one
 * full pass has been made.
 *
 * NOTE(review): damaged extraction — the seq/initial_head declarations,
 * "control.sframe = 1;", the "do {" keyword and the loop braces are
 * missing here. Confirm against upstream.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
	struct l2cap_ctrl control;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		/* Rotate: put the entry back at the tail. */
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
2298 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2300 struct sk_buff
*acked_skb
;
2303 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2305 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2308 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2309 chan
->expected_ack_seq
, chan
->unacked_frames
);
2311 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2312 ackseq
= __next_seq(chan
, ackseq
)) {
2314 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2316 skb_unlink(acked_skb
, &chan
->tx_q
);
2317 kfree_skb(acked_skb
);
2318 chan
->unacked_frames
--;
2322 chan
->expected_ack_seq
= reqseq
;
2324 if (chan
->unacked_frames
== 0)
2325 __clear_retrans_timer(chan
);
2327 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2330 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2332 BT_DBG("chan %p", chan
);
2334 chan
->expected_tx_seq
= chan
->buffer_seq
;
2335 l2cap_seq_list_clear(&chan
->srej_list
);
2336 skb_queue_purge(&chan
->srej_q
);
2337 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
/* ERTM TX state machine, XMIT state: handle data requests, local-busy
 * transitions, acks, explicit polls and retransmission timeouts. Poll
 * events move the machine to WAIT_F until the peer's final bit arrives.
 *
 * NOTE(review): damaged extraction — the "switch (event) {" header, the
 * "break;" statements, the "event" argument on the BT_DBG line and the
 * closing braces are missing here. Confirm against upstream.
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,

	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);

		/* Sends the RNR while locally busy. */
		l2cap_send_ack(chan);

	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;

			/* An RNR went out earlier: poll with RR to resync. */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;

	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
	case L2CAP_EV_RETRANS_TO:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
/* ERTM TX state machine, WAIT_F state: a poll is outstanding, so data is
 * queued but not sent until the peer's final bit arrives. Monitor
 * timeouts re-poll up to max_tx times, then disconnect.
 *
 * NOTE(review): damaged extraction — the "switch (event) {" header, the
 * "break;"s, the "event" argument on the BT_DBG line and closing braces
 * are missing here. Note RECV_REQSEQ_AND_FBIT falls through to
 * RECV_FBIT in this state. Confirm against upstream.
 */
static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff_head *skbs, u8 event)
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,

	case L2CAP_EV_DATA_REQUEST:
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, &chan->tx_q);
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);

		/* Sends the RNR while locally busy. */
		l2cap_send_ack(chan);

	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			struct l2cap_ctrl local_control;
			/* An RNR went out earlier: poll with RR to resync. */
			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;

	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		l2cap_process_reqseq(chan, control->reqseq);

	case L2CAP_EV_RECV_FBIT:
		if (control && control->final) {
			__clear_monitor_timer(chan);
			if (chan->unacked_frames > 0)
				__set_retrans_timer(chan);
			chan->retry_count = 0;
			chan->tx_state = L2CAP_TX_STATE_XMIT;
			/* NOTE(review): upstream format string is buggy —
			 * "0x2.2%x" should read "0x%2.2x".
			 */
			BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
	case L2CAP_EV_EXPLICIT_POLL:
		/* Ignore */
	case L2CAP_EV_MONITOR_TO:
		/* max_tx == 0 means poll forever. */
		if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
			l2cap_send_rr_or_rnr(chan, 1);
			__set_monitor_timer(chan);
			chan->retry_count++;
			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Dispatch a TX event to the handler for the current ERTM TX state.
 *
 * NOTE(review): damaged extraction — the "break;" statements, the
 * default case and the closing braces are missing here. Confirm against
 * upstream.
 */
static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event)
	BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
	       chan, control, skbs, event, chan->tx_state);

	switch (chan->tx_state) {
	case L2CAP_TX_STATE_XMIT:
		l2cap_tx_state_xmit(chan, control, skbs, event);
	case L2CAP_TX_STATE_WAIT_F:
		l2cap_tx_state_wait_f(chan, control, skbs, event);
2509 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2510 struct l2cap_ctrl
*control
)
2512 BT_DBG("chan %p, control %p", chan
, control
);
2513 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2516 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2517 struct l2cap_ctrl
*control
)
2519 BT_DBG("chan %p, control %p", chan
, control
);
2520 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
/* Copy frame to all raw sockets on that connection */
/* Clones @skb to every RAW-type channel on @conn (sniffer-style
 * delivery), skipping the originating socket, freeing the clone if the
 * channel's recv callback rejects it.
 *
 * NOTE(review): damaged extraction — the "continue;" statements, the
 * originating-socket comparison, the !nskb check and the kfree_skb on
 * recv failure are missing here. Confirm against upstream.
 */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)

		/* Don't send frame to the socket it came from */

		/* Each raw channel gets its own clone of the frame. */
		nskb = skb_clone(skb, GFP_ATOMIC);

		if (chan->ops->recv(chan, nskb))

	mutex_unlock(&conn->chan_lock);
2552 /* ---- L2CAP signalling commands ---- */
/* Build an L2CAP signalling command skb: L2CAP header on the signalling
 * CID (LE or BR/EDR), command header, then @dlen bytes of @data, with
 * anything beyond one MTU chained as continuation fragments.
 *
 * NOTE(review): damaged extraction — the len/count declarations, the
 * !skb goto-fail paths, "cmd->code = code;" / "cmd->ident = ident;",
 * the len/data advance bookkeeping, the "while (len)" loop header, the
 * fail label with kfree_skb and "return skb;" are missing here.
 * Confirm against upstream.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling CID differs between LE and BR/EDR transports. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING);
		lh->cid = __constant_cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->len = cpu_to_le16(dlen);

		/* First chunk of payload into the head skb. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);

		memcpy(skb_put(*frag, count), data, count);

		frag = &(*frag)->next;
/* Parse one configuration option at *ptr, returning its type/length and
 * value (inline for 1/2/4-byte options, a pointer for longer ones), and
 * advance *ptr past it.
 *
 * NOTE(review): damaged extraction — the "int len;" declaration, the
 * *type/*olen assignments, the "switch (opt->len)" skeleton, the
 * "*ptr += len;" advance and the "return len;" are missing here.
 * Confirm against upstream.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
	struct l2cap_conf_opt *opt = *ptr;

	len = L2CAP_CONF_OPT_SIZE + opt->len;

		/* 1-byte option value. */
		*val = *((u8 *) opt->val);

		/* 2-byte option value. */
		*val = get_unaligned_le16(opt->val);

		/* 4-byte option value. */
		*val = get_unaligned_le32(opt->val);

		/* Variable-length option: hand back a pointer. */
		*val = (unsigned long) opt->val;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type, length, value) at *ptr and
 * advance *ptr past it. Values of 1/2/4 bytes are stored inline;
 * longer values are memcpy'd from the pointer passed in @val.
 *
 * NOTE(review): damaged extraction — the "opt->type = type;" and
 * "opt->len = len;" stores and the "switch (len)" skeleton with breaks
 * are missing here. Confirm against upstream.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);

		*((u8 *) opt->val) = val;

		put_unaligned_le16(val, opt->val);

		put_unaligned_le32(val, opt->val);

		/* Long option: val is really a pointer to the data. */
		memcpy(opt->val, (void *) val, len);

	*ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification option describing this channel:
 * guaranteed parameters for ERTM, best-effort defaults for streaming.
 *
 * NOTE(review): damaged extraction — the streaming-case id assignment,
 * its zero acc_lat/flush_to lines, the "break;"s and the default-case
 * early return are missing here. Confirm against upstream.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);

	case L2CAP_MODE_STREAMING:
		/* Streaming always negotiates best-effort service. */
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
/* Ack timer expired: send the RR/RNR that was being batched, if any
 * received frames are still unacknowledged. Drops the reference taken
 * when the timer was armed.
 *
 * NOTE(review): damaged extraction — the "ack_timer.work);" tail of the
 * container_of, the frames_to_ack declaration and the "if
 * (frames_to_ack)" guard before the send are missing here. Confirm
 * against upstream.
 */
static void l2cap_ack_timeout(struct work_struct *work)
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
/* Reset the channel's sequence-number and queue state for a (re)connect.
 * For ERTM mode additionally arms the three delayed-work timers and
 * allocates the SREJ and retransmission sequence lists (freeing the
 * first if the second allocation fails).
 *
 * NOTE(review): damaged extraction — the "int err;" declaration, the
 * sdu/sdu_len resets, the "return 0;" for non-ERTM modes, the "if (err
 * < 0) return err;" after the first seq_list_init and the final
 * "return err;" are missing here. Confirm against upstream.
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu_last_frag = NULL;

	skb_queue_head_init(&chan->tx_q);

	/* Streaming/basic modes need none of the ERTM machinery below. */
	if (chan->mode != L2CAP_MODE_ERTM)

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
		/* Unwind the first list on failure. */
		l2cap_seq_list_free(&chan->srej_list);
/* Keep the requested ERTM/streaming mode only if the remote's feature
 * mask supports it; otherwise fall back to basic mode.
 *
 * NOTE(review): damaged extraction — the "switch (mode) {" header, the
 * "return mode;" when supported and the default case are missing here.
 * Confirm against upstream.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (l2cap_mode_supported(mode, remote_feat_mask))

		return L2CAP_MODE_BASIC;
2783 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
2785 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
2788 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
2790 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
2793 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
2795 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
2796 __l2cap_ews_supported(chan
)) {
2797 /* use extended control field */
2798 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2799 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2801 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
2802 L2CAP_DEFAULT_TX_WINDOW
);
2803 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2805 chan
->ack_win
= chan
->tx_win
;
/* Build an outgoing configuration request into @data: MTU option when
 * non-default, then mode-specific RFC, EFS, FCS and EWS options for
 * basic / ERTM / streaming modes. Returns the request length.
 *
 * NOTE(review): damaged extraction — the "u16 size;" declaration, the
 * "goto done;" jumps, the second operand of the size computations, the
 * EWS window value argument, the "break;"s, the "done:" label and the
 * final "return ptr - data;" are missing here. Confirm against
 * upstream.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* Only the first request may renegotiate the mode below. */
	if (chan->num_conf_req || chan->num_conf_rsp)

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))

		if (__l2cap_efs_supported(chan))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* Fall back if the remote lacks support for this mode. */
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);

	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* RFC option is omitted when the remote knows neither
		 * ERTM nor streaming.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE -
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))

		if (chan->fcs == L2CAP_FCS_NONE ||
		    test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = __constant_cpu_to_le16(0);
2930 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
2932 struct l2cap_conf_rsp
*rsp
= data
;
2933 void *ptr
= rsp
->data
;
2934 void *req
= chan
->conf_req
;
2935 int len
= chan
->conf_len
;
2936 int type
, hint
, olen
;
2938 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2939 struct l2cap_conf_efs efs
;
2941 u16 mtu
= L2CAP_DEFAULT_MTU
;
2942 u16 result
= L2CAP_CONF_SUCCESS
;
2945 BT_DBG("chan %p", chan
);
2947 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2948 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2950 hint
= type
& L2CAP_CONF_HINT
;
2951 type
&= L2CAP_CONF_MASK
;
2954 case L2CAP_CONF_MTU
:
2958 case L2CAP_CONF_FLUSH_TO
:
2959 chan
->flush_to
= val
;
2962 case L2CAP_CONF_QOS
:
2965 case L2CAP_CONF_RFC
:
2966 if (olen
== sizeof(rfc
))
2967 memcpy(&rfc
, (void *) val
, olen
);
2970 case L2CAP_CONF_FCS
:
2971 if (val
== L2CAP_FCS_NONE
)
2972 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
2975 case L2CAP_CONF_EFS
:
2977 if (olen
== sizeof(efs
))
2978 memcpy(&efs
, (void *) val
, olen
);
2981 case L2CAP_CONF_EWS
:
2983 return -ECONNREFUSED
;
2985 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2986 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
2987 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2988 chan
->remote_tx_win
= val
;
2995 result
= L2CAP_CONF_UNKNOWN
;
2996 *((u8
*) ptr
++) = type
;
3001 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
3004 switch (chan
->mode
) {
3005 case L2CAP_MODE_STREAMING
:
3006 case L2CAP_MODE_ERTM
:
3007 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
3008 chan
->mode
= l2cap_select_mode(rfc
.mode
,
3009 chan
->conn
->feat_mask
);
3014 if (__l2cap_efs_supported(chan
))
3015 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3017 return -ECONNREFUSED
;
3020 if (chan
->mode
!= rfc
.mode
)
3021 return -ECONNREFUSED
;
3027 if (chan
->mode
!= rfc
.mode
) {
3028 result
= L2CAP_CONF_UNACCEPT
;
3029 rfc
.mode
= chan
->mode
;
3031 if (chan
->num_conf_rsp
== 1)
3032 return -ECONNREFUSED
;
3034 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3035 sizeof(rfc
), (unsigned long) &rfc
);
3038 if (result
== L2CAP_CONF_SUCCESS
) {
3039 /* Configure output options and let the other side know
3040 * which ones we don't like. */
3042 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
3043 result
= L2CAP_CONF_UNACCEPT
;
3046 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
3048 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
3051 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3052 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3053 efs
.stype
!= chan
->local_stype
) {
3055 result
= L2CAP_CONF_UNACCEPT
;
3057 if (chan
->num_conf_req
>= 1)
3058 return -ECONNREFUSED
;
3060 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3062 (unsigned long) &efs
);
3064 /* Send PENDING Conf Rsp */
3065 result
= L2CAP_CONF_PENDING
;
3066 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3071 case L2CAP_MODE_BASIC
:
3072 chan
->fcs
= L2CAP_FCS_NONE
;
3073 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3076 case L2CAP_MODE_ERTM
:
3077 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
3078 chan
->remote_tx_win
= rfc
.txwin_size
;
3080 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
3082 chan
->remote_max_tx
= rfc
.max_transmit
;
3084 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3086 L2CAP_EXT_HDR_SIZE
-
3089 rfc
.max_pdu_size
= cpu_to_le16(size
);
3090 chan
->remote_mps
= size
;
3092 rfc
.retrans_timeout
=
3093 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3094 rfc
.monitor_timeout
=
3095 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3097 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3099 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3100 sizeof(rfc
), (unsigned long) &rfc
);
3102 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3103 chan
->remote_id
= efs
.id
;
3104 chan
->remote_stype
= efs
.stype
;
3105 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3106 chan
->remote_flush_to
=
3107 le32_to_cpu(efs
.flush_to
);
3108 chan
->remote_acc_lat
=
3109 le32_to_cpu(efs
.acc_lat
);
3110 chan
->remote_sdu_itime
=
3111 le32_to_cpu(efs
.sdu_itime
);
3112 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3113 sizeof(efs
), (unsigned long) &efs
);
3117 case L2CAP_MODE_STREAMING
:
3118 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3120 L2CAP_EXT_HDR_SIZE
-
3123 rfc
.max_pdu_size
= cpu_to_le16(size
);
3124 chan
->remote_mps
= size
;
3126 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3128 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3129 sizeof(rfc
), (unsigned long) &rfc
);
3134 result
= L2CAP_CONF_UNACCEPT
;
3136 memset(&rfc
, 0, sizeof(rfc
));
3137 rfc
.mode
= chan
->mode
;
3140 if (result
== L2CAP_CONF_SUCCESS
)
3141 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3143 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3144 rsp
->result
= cpu_to_le16(result
);
3145 rsp
->flags
= __constant_cpu_to_le16(0);
3150 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
3152 struct l2cap_conf_req
*req
= data
;
3153 void *ptr
= req
->data
;
3156 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3157 struct l2cap_conf_efs efs
;
3159 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3161 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3162 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3165 case L2CAP_CONF_MTU
:
3166 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3167 *result
= L2CAP_CONF_UNACCEPT
;
3168 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3171 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3174 case L2CAP_CONF_FLUSH_TO
:
3175 chan
->flush_to
= val
;
3176 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3180 case L2CAP_CONF_RFC
:
3181 if (olen
== sizeof(rfc
))
3182 memcpy(&rfc
, (void *)val
, olen
);
3184 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3185 rfc
.mode
!= chan
->mode
)
3186 return -ECONNREFUSED
;
3190 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3191 sizeof(rfc
), (unsigned long) &rfc
);
3194 case L2CAP_CONF_EWS
:
3195 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3196 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3200 case L2CAP_CONF_EFS
:
3201 if (olen
== sizeof(efs
))
3202 memcpy(&efs
, (void *)val
, olen
);
3204 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3205 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3206 efs
.stype
!= chan
->local_stype
)
3207 return -ECONNREFUSED
;
3209 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3210 sizeof(efs
), (unsigned long) &efs
);
3215 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3216 return -ECONNREFUSED
;
3218 chan
->mode
= rfc
.mode
;
3220 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3222 case L2CAP_MODE_ERTM
:
3223 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3224 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3225 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3226 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3227 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3230 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3231 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3232 chan
->local_sdu_itime
=
3233 le32_to_cpu(efs
.sdu_itime
);
3234 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3235 chan
->local_flush_to
=
3236 le32_to_cpu(efs
.flush_to
);
3240 case L2CAP_MODE_STREAMING
:
3241 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3245 req
->dcid
= cpu_to_le16(chan
->dcid
);
3246 req
->flags
= __constant_cpu_to_le16(0);
3251 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
3253 struct l2cap_conf_rsp
*rsp
= data
;
3254 void *ptr
= rsp
->data
;
3256 BT_DBG("chan %p", chan
);
3258 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3259 rsp
->result
= cpu_to_le16(result
);
3260 rsp
->flags
= cpu_to_le16(flags
);
3265 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3267 struct l2cap_conn_rsp rsp
;
3268 struct l2cap_conn
*conn
= chan
->conn
;
3271 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3272 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3273 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3274 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3275 l2cap_send_cmd(conn
, chan
->ident
,
3276 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3278 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3281 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3282 l2cap_build_conf_req(chan
, buf
), buf
);
3283 chan
->num_conf_req
++;
3286 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3290 /* Use sane default values in case a misbehaving remote device
3291 * did not send an RFC or extended window size option.
3293 u16 txwin_ext
= chan
->ack_win
;
3294 struct l2cap_conf_rfc rfc
= {
3296 .retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3297 .monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3298 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3299 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3302 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3304 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3307 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3308 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3311 case L2CAP_CONF_RFC
:
3312 if (olen
== sizeof(rfc
))
3313 memcpy(&rfc
, (void *)val
, olen
);
3315 case L2CAP_CONF_EWS
:
3322 case L2CAP_MODE_ERTM
:
3323 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3324 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3325 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3326 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3327 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3329 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3332 case L2CAP_MODE_STREAMING
:
3333 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3337 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3339 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3341 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3344 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3345 cmd
->ident
== conn
->info_ident
) {
3346 cancel_delayed_work(&conn
->info_timer
);
3348 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3349 conn
->info_ident
= 0;
3351 l2cap_conn_start(conn
);
3357 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3359 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3360 struct l2cap_conn_rsp rsp
;
3361 struct l2cap_chan
*chan
= NULL
, *pchan
;
3362 struct sock
*parent
, *sk
= NULL
;
3363 int result
, status
= L2CAP_CS_NO_INFO
;
3365 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3366 __le16 psm
= req
->psm
;
3368 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3370 /* Check if we have socket listening on psm */
3371 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
3373 result
= L2CAP_CR_BAD_PSM
;
3379 mutex_lock(&conn
->chan_lock
);
3382 /* Check if the ACL is secure enough (if not SDP) */
3383 if (psm
!= __constant_cpu_to_le16(L2CAP_PSM_SDP
) &&
3384 !hci_conn_check_link_mode(conn
->hcon
)) {
3385 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3386 result
= L2CAP_CR_SEC_BLOCK
;
3390 result
= L2CAP_CR_NO_MEM
;
3392 /* Check if we already have channel with that dcid */
3393 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3396 chan
= pchan
->ops
->new_connection(pchan
);
3402 hci_conn_hold(conn
->hcon
);
3404 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3405 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3409 bt_accept_enqueue(parent
, sk
);
3411 __l2cap_chan_add(conn
, chan
);
3415 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3417 chan
->ident
= cmd
->ident
;
3419 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3420 if (l2cap_chan_check_security(chan
)) {
3421 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
)) {
3422 __l2cap_state_change(chan
, BT_CONNECT2
);
3423 result
= L2CAP_CR_PEND
;
3424 status
= L2CAP_CS_AUTHOR_PEND
;
3425 parent
->sk_data_ready(parent
, 0);
3427 __l2cap_state_change(chan
, BT_CONFIG
);
3428 result
= L2CAP_CR_SUCCESS
;
3429 status
= L2CAP_CS_NO_INFO
;
3432 __l2cap_state_change(chan
, BT_CONNECT2
);
3433 result
= L2CAP_CR_PEND
;
3434 status
= L2CAP_CS_AUTHEN_PEND
;
3437 __l2cap_state_change(chan
, BT_CONNECT2
);
3438 result
= L2CAP_CR_PEND
;
3439 status
= L2CAP_CS_NO_INFO
;
3443 release_sock(parent
);
3444 mutex_unlock(&conn
->chan_lock
);
3447 rsp
.scid
= cpu_to_le16(scid
);
3448 rsp
.dcid
= cpu_to_le16(dcid
);
3449 rsp
.result
= cpu_to_le16(result
);
3450 rsp
.status
= cpu_to_le16(status
);
3451 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3453 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3454 struct l2cap_info_req info
;
3455 info
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3457 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3458 conn
->info_ident
= l2cap_get_ident(conn
);
3460 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3462 l2cap_send_cmd(conn
, conn
->info_ident
,
3463 L2CAP_INFO_REQ
, sizeof(info
), &info
);
3466 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3467 result
== L2CAP_CR_SUCCESS
) {
3469 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3470 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3471 l2cap_build_conf_req(chan
, buf
), buf
);
3472 chan
->num_conf_req
++;
3478 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3480 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3481 u16 scid
, dcid
, result
, status
;
3482 struct l2cap_chan
*chan
;
3486 scid
= __le16_to_cpu(rsp
->scid
);
3487 dcid
= __le16_to_cpu(rsp
->dcid
);
3488 result
= __le16_to_cpu(rsp
->result
);
3489 status
= __le16_to_cpu(rsp
->status
);
3491 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3492 dcid
, scid
, result
, status
);
3494 mutex_lock(&conn
->chan_lock
);
3497 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3503 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3512 l2cap_chan_lock(chan
);
3515 case L2CAP_CR_SUCCESS
:
3516 l2cap_state_change(chan
, BT_CONFIG
);
3519 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3521 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3524 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3525 l2cap_build_conf_req(chan
, req
), req
);
3526 chan
->num_conf_req
++;
3530 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3534 l2cap_chan_del(chan
, ECONNREFUSED
);
3538 l2cap_chan_unlock(chan
);
3541 mutex_unlock(&conn
->chan_lock
);
3546 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3548 /* FCS is enabled only in ERTM or streaming mode, if one or both
3551 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3552 chan
->fcs
= L2CAP_FCS_NONE
;
3553 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3554 chan
->fcs
= L2CAP_FCS_CRC16
;
3557 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3559 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3562 struct l2cap_chan
*chan
;
3565 dcid
= __le16_to_cpu(req
->dcid
);
3566 flags
= __le16_to_cpu(req
->flags
);
3568 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3570 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3574 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3575 struct l2cap_cmd_rej_cid rej
;
3577 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3578 rej
.scid
= cpu_to_le16(chan
->scid
);
3579 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3581 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3586 /* Reject if config buffer is too small. */
3587 len
= cmd_len
- sizeof(*req
);
3588 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3589 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3590 l2cap_build_conf_rsp(chan
, rsp
,
3591 L2CAP_CONF_REJECT
, flags
), rsp
);
3596 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3597 chan
->conf_len
+= len
;
3599 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
3600 /* Incomplete config. Send empty response. */
3601 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3602 l2cap_build_conf_rsp(chan
, rsp
,
3603 L2CAP_CONF_SUCCESS
, flags
), rsp
);
3607 /* Complete config. */
3608 len
= l2cap_parse_conf_req(chan
, rsp
);
3610 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3614 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3615 chan
->num_conf_rsp
++;
3617 /* Reset config buffer. */
3620 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3623 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3624 set_default_fcs(chan
);
3626 if (chan
->mode
== L2CAP_MODE_ERTM
||
3627 chan
->mode
== L2CAP_MODE_STREAMING
)
3628 err
= l2cap_ertm_init(chan
);
3631 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3633 l2cap_chan_ready(chan
);
3638 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3640 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3641 l2cap_build_conf_req(chan
, buf
), buf
);
3642 chan
->num_conf_req
++;
3645 /* Got Conf Rsp PENDING from remote side and asume we sent
3646 Conf Rsp PENDING in the code above */
3647 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3648 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3650 /* check compatibility */
3652 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3653 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3655 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3656 l2cap_build_conf_rsp(chan
, rsp
,
3657 L2CAP_CONF_SUCCESS
, flags
), rsp
);
3661 l2cap_chan_unlock(chan
);
3665 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3667 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3668 u16 scid
, flags
, result
;
3669 struct l2cap_chan
*chan
;
3670 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3673 scid
= __le16_to_cpu(rsp
->scid
);
3674 flags
= __le16_to_cpu(rsp
->flags
);
3675 result
= __le16_to_cpu(rsp
->result
);
3677 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3680 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3685 case L2CAP_CONF_SUCCESS
:
3686 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3687 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3690 case L2CAP_CONF_PENDING
:
3691 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3693 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3696 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3699 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3703 /* check compatibility */
3705 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3706 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3708 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3709 l2cap_build_conf_rsp(chan
, buf
,
3710 L2CAP_CONF_SUCCESS
, 0x0000), buf
);
3714 case L2CAP_CONF_UNACCEPT
:
3715 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3718 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3719 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3723 /* throw out any old stored conf requests */
3724 result
= L2CAP_CONF_SUCCESS
;
3725 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3728 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3732 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3733 L2CAP_CONF_REQ
, len
, req
);
3734 chan
->num_conf_req
++;
3735 if (result
!= L2CAP_CONF_SUCCESS
)
3741 l2cap_chan_set_err(chan
, ECONNRESET
);
3743 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
3744 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3748 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
3751 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3753 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3754 set_default_fcs(chan
);
3756 if (chan
->mode
== L2CAP_MODE_ERTM
||
3757 chan
->mode
== L2CAP_MODE_STREAMING
)
3758 err
= l2cap_ertm_init(chan
);
3761 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3763 l2cap_chan_ready(chan
);
3767 l2cap_chan_unlock(chan
);
3771 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3773 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3774 struct l2cap_disconn_rsp rsp
;
3776 struct l2cap_chan
*chan
;
3779 scid
= __le16_to_cpu(req
->scid
);
3780 dcid
= __le16_to_cpu(req
->dcid
);
3782 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3784 mutex_lock(&conn
->chan_lock
);
3786 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
3788 mutex_unlock(&conn
->chan_lock
);
3792 l2cap_chan_lock(chan
);
3796 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3797 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3798 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3801 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3804 l2cap_chan_hold(chan
);
3805 l2cap_chan_del(chan
, ECONNRESET
);
3807 l2cap_chan_unlock(chan
);
3809 chan
->ops
->close(chan
);
3810 l2cap_chan_put(chan
);
3812 mutex_unlock(&conn
->chan_lock
);
3817 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3819 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3821 struct l2cap_chan
*chan
;
3823 scid
= __le16_to_cpu(rsp
->scid
);
3824 dcid
= __le16_to_cpu(rsp
->dcid
);
3826 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3828 mutex_lock(&conn
->chan_lock
);
3830 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3832 mutex_unlock(&conn
->chan_lock
);
3836 l2cap_chan_lock(chan
);
3838 l2cap_chan_hold(chan
);
3839 l2cap_chan_del(chan
, 0);
3841 l2cap_chan_unlock(chan
);
3843 chan
->ops
->close(chan
);
3844 l2cap_chan_put(chan
);
3846 mutex_unlock(&conn
->chan_lock
);
3851 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3853 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3856 type
= __le16_to_cpu(req
->type
);
3858 BT_DBG("type 0x%4.4x", type
);
3860 if (type
== L2CAP_IT_FEAT_MASK
) {
3862 u32 feat_mask
= l2cap_feat_mask
;
3863 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3864 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3865 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
3867 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3870 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3871 | L2CAP_FEAT_EXT_WINDOW
;
3873 put_unaligned_le32(feat_mask
, rsp
->data
);
3874 l2cap_send_cmd(conn
, cmd
->ident
,
3875 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3876 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3878 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3881 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3883 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3885 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3886 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
3887 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3888 l2cap_send_cmd(conn
, cmd
->ident
,
3889 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3891 struct l2cap_info_rsp rsp
;
3892 rsp
.type
= cpu_to_le16(type
);
3893 rsp
.result
= __constant_cpu_to_le16(L2CAP_IR_NOTSUPP
);
3894 l2cap_send_cmd(conn
, cmd
->ident
,
3895 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3901 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3903 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3906 type
= __le16_to_cpu(rsp
->type
);
3907 result
= __le16_to_cpu(rsp
->result
);
3909 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3911 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3912 if (cmd
->ident
!= conn
->info_ident
||
3913 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3916 cancel_delayed_work(&conn
->info_timer
);
3918 if (result
!= L2CAP_IR_SUCCESS
) {
3919 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3920 conn
->info_ident
= 0;
3922 l2cap_conn_start(conn
);
3928 case L2CAP_IT_FEAT_MASK
:
3929 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3931 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3932 struct l2cap_info_req req
;
3933 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3935 conn
->info_ident
= l2cap_get_ident(conn
);
3937 l2cap_send_cmd(conn
, conn
->info_ident
,
3938 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3940 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3941 conn
->info_ident
= 0;
3943 l2cap_conn_start(conn
);
3947 case L2CAP_IT_FIXED_CHAN
:
3948 conn
->fixed_chan_mask
= rsp
->data
[0];
3949 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3950 conn
->info_ident
= 0;
3952 l2cap_conn_start(conn
);
3959 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
3960 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3963 struct l2cap_create_chan_req
*req
= data
;
3964 struct l2cap_create_chan_rsp rsp
;
3967 if (cmd_len
!= sizeof(*req
))
3973 psm
= le16_to_cpu(req
->psm
);
3974 scid
= le16_to_cpu(req
->scid
);
3976 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
3978 /* Placeholder: Always reject */
3980 rsp
.scid
= cpu_to_le16(scid
);
3981 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
3982 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3984 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
3990 static inline int l2cap_create_channel_rsp(struct l2cap_conn
*conn
,
3991 struct l2cap_cmd_hdr
*cmd
, void *data
)
3993 BT_DBG("conn %p", conn
);
3995 return l2cap_connect_rsp(conn
, cmd
, data
);
3998 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
3999 u16 icid
, u16 result
)
4001 struct l2cap_move_chan_rsp rsp
;
4003 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4005 rsp
.icid
= cpu_to_le16(icid
);
4006 rsp
.result
= cpu_to_le16(result
);
4008 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
4011 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
4012 struct l2cap_chan
*chan
,
4013 u16 icid
, u16 result
)
4015 struct l2cap_move_chan_cfm cfm
;
4018 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4020 ident
= l2cap_get_ident(conn
);
4022 chan
->ident
= ident
;
4024 cfm
.icid
= cpu_to_le16(icid
);
4025 cfm
.result
= cpu_to_le16(result
);
4027 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
4030 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4033 struct l2cap_move_chan_cfm_rsp rsp
;
4035 BT_DBG("icid 0x%4.4x", icid
);
4037 rsp
.icid
= cpu_to_le16(icid
);
4038 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4041 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4042 struct l2cap_cmd_hdr
*cmd
,
4043 u16 cmd_len
, void *data
)
4045 struct l2cap_move_chan_req
*req
= data
;
4047 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4049 if (cmd_len
!= sizeof(*req
))
4052 icid
= le16_to_cpu(req
->icid
);
4054 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4059 /* Placeholder: Always refuse */
4060 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
4065 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
4066 struct l2cap_cmd_hdr
*cmd
,
4067 u16 cmd_len
, void *data
)
4069 struct l2cap_move_chan_rsp
*rsp
= data
;
4072 if (cmd_len
!= sizeof(*rsp
))
4075 icid
= le16_to_cpu(rsp
->icid
);
4076 result
= le16_to_cpu(rsp
->result
);
4078 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4080 /* Placeholder: Always unconfirmed */
4081 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
4086 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
4087 struct l2cap_cmd_hdr
*cmd
,
4088 u16 cmd_len
, void *data
)
4090 struct l2cap_move_chan_cfm
*cfm
= data
;
4093 if (cmd_len
!= sizeof(*cfm
))
4096 icid
= le16_to_cpu(cfm
->icid
);
4097 result
= le16_to_cpu(cfm
->result
);
4099 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4101 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
4106 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
4107 struct l2cap_cmd_hdr
*cmd
,
4108 u16 cmd_len
, void *data
)
4110 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
4113 if (cmd_len
!= sizeof(*rsp
))
4116 icid
= le16_to_cpu(rsp
->icid
);
4118 BT_DBG("icid 0x%4.4x", icid
);
4123 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
4128 if (min
> max
|| min
< 6 || max
> 3200)
4131 if (to_multiplier
< 10 || to_multiplier
> 3200)
4134 if (max
>= to_multiplier
* 8)
4137 max_latency
= (to_multiplier
* 8 / max
) - 1;
4138 if (latency
> 499 || latency
> max_latency
)
4144 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
4145 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4147 struct hci_conn
*hcon
= conn
->hcon
;
4148 struct l2cap_conn_param_update_req
*req
;
4149 struct l2cap_conn_param_update_rsp rsp
;
4150 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
4153 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
4156 cmd_len
= __le16_to_cpu(cmd
->len
);
4157 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
4160 req
= (struct l2cap_conn_param_update_req
*) data
;
4161 min
= __le16_to_cpu(req
->min
);
4162 max
= __le16_to_cpu(req
->max
);
4163 latency
= __le16_to_cpu(req
->latency
);
4164 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
4166 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4167 min
, max
, latency
, to_multiplier
);
4169 memset(&rsp
, 0, sizeof(rsp
));
4171 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
4173 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
4175 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
4177 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
4181 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
4186 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
4187 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
4191 switch (cmd
->code
) {
4192 case L2CAP_COMMAND_REJ
:
4193 l2cap_command_rej(conn
, cmd
, data
);
4196 case L2CAP_CONN_REQ
:
4197 err
= l2cap_connect_req(conn
, cmd
, data
);
4200 case L2CAP_CONN_RSP
:
4201 err
= l2cap_connect_rsp(conn
, cmd
, data
);
4204 case L2CAP_CONF_REQ
:
4205 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
4208 case L2CAP_CONF_RSP
:
4209 err
= l2cap_config_rsp(conn
, cmd
, data
);
4212 case L2CAP_DISCONN_REQ
:
4213 err
= l2cap_disconnect_req(conn
, cmd
, data
);
4216 case L2CAP_DISCONN_RSP
:
4217 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
4220 case L2CAP_ECHO_REQ
:
4221 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
4224 case L2CAP_ECHO_RSP
:
4227 case L2CAP_INFO_REQ
:
4228 err
= l2cap_information_req(conn
, cmd
, data
);
4231 case L2CAP_INFO_RSP
:
4232 err
= l2cap_information_rsp(conn
, cmd
, data
);
4235 case L2CAP_CREATE_CHAN_REQ
:
4236 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
4239 case L2CAP_CREATE_CHAN_RSP
:
4240 err
= l2cap_create_channel_rsp(conn
, cmd
, data
);
4243 case L2CAP_MOVE_CHAN_REQ
:
4244 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
4247 case L2CAP_MOVE_CHAN_RSP
:
4248 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
4251 case L2CAP_MOVE_CHAN_CFM
:
4252 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
4255 case L2CAP_MOVE_CHAN_CFM_RSP
:
4256 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
4260 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
4268 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
4269 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4271 switch (cmd
->code
) {
4272 case L2CAP_COMMAND_REJ
:
4275 case L2CAP_CONN_PARAM_UPDATE_REQ
:
4276 return l2cap_conn_param_update_req(conn
, cmd
, data
);
4278 case L2CAP_CONN_PARAM_UPDATE_RSP
:
4282 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
4287 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
4288 struct sk_buff
*skb
)
4290 u8
*data
= skb
->data
;
4292 struct l2cap_cmd_hdr cmd
;
4295 l2cap_raw_recv(conn
, skb
);
4297 while (len
>= L2CAP_CMD_HDR_SIZE
) {
4299 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
4300 data
+= L2CAP_CMD_HDR_SIZE
;
4301 len
-= L2CAP_CMD_HDR_SIZE
;
4303 cmd_len
= le16_to_cpu(cmd
.len
);
4305 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
4307 if (cmd_len
> len
|| !cmd
.ident
) {
4308 BT_DBG("corrupted command");
4312 if (conn
->hcon
->type
== LE_LINK
)
4313 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
4315 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
4318 struct l2cap_cmd_rej_unk rej
;
4320 BT_ERR("Wrong link type (%d)", err
);
4322 /* FIXME: Map err to a valid reason */
4323 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
4324 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4334 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4336 u16 our_fcs
, rcv_fcs
;
4339 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
4340 hdr_size
= L2CAP_EXT_HDR_SIZE
;
4342 hdr_size
= L2CAP_ENH_HDR_SIZE
;
4344 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
4345 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
4346 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
4347 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
4349 if (our_fcs
!= rcv_fcs
)
4355 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
4357 struct l2cap_ctrl control
;
4359 BT_DBG("chan %p", chan
);
4361 memset(&control
, 0, sizeof(control
));
4364 control
.reqseq
= chan
->buffer_seq
;
4365 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4367 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4368 control
.super
= L2CAP_SUPER_RNR
;
4369 l2cap_send_sframe(chan
, &control
);
4372 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4373 chan
->unacked_frames
> 0)
4374 __set_retrans_timer(chan
);
4376 /* Send pending iframes */
4377 l2cap_ertm_send(chan
);
4379 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
4380 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
4381 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4384 control
.super
= L2CAP_SUPER_RR
;
4385 l2cap_send_sframe(chan
, &control
);
4389 static void append_skb_frag(struct sk_buff
*skb
,
4390 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
4392 /* skb->len reflects data in skb as well as all fragments
4393 * skb->data_len reflects only data in fragments
4395 if (!skb_has_frag_list(skb
))
4396 skb_shinfo(skb
)->frag_list
= new_frag
;
4398 new_frag
->next
= NULL
;
4400 (*last_frag
)->next
= new_frag
;
4401 *last_frag
= new_frag
;
4403 skb
->len
+= new_frag
->len
;
4404 skb
->data_len
+= new_frag
->len
;
4405 skb
->truesize
+= new_frag
->truesize
;
4408 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
4409 struct l2cap_ctrl
*control
)
4413 switch (control
->sar
) {
4414 case L2CAP_SAR_UNSEGMENTED
:
4418 err
= chan
->ops
->recv(chan
, skb
);
4421 case L2CAP_SAR_START
:
4425 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
4426 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
4428 if (chan
->sdu_len
> chan
->imtu
) {
4433 if (skb
->len
>= chan
->sdu_len
)
4437 chan
->sdu_last_frag
= skb
;
4443 case L2CAP_SAR_CONTINUE
:
4447 append_skb_frag(chan
->sdu
, skb
,
4448 &chan
->sdu_last_frag
);
4451 if (chan
->sdu
->len
>= chan
->sdu_len
)
4461 append_skb_frag(chan
->sdu
, skb
,
4462 &chan
->sdu_last_frag
);
4465 if (chan
->sdu
->len
!= chan
->sdu_len
)
4468 err
= chan
->ops
->recv(chan
, chan
->sdu
);
4471 /* Reassembly complete */
4473 chan
->sdu_last_frag
= NULL
;
4481 kfree_skb(chan
->sdu
);
4483 chan
->sdu_last_frag
= NULL
;
4490 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
4494 if (chan
->mode
!= L2CAP_MODE_ERTM
)
4497 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
4498 l2cap_tx(chan
, NULL
, NULL
, event
);
4501 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
4504 /* Pass sequential frames to l2cap_reassemble_sdu()
4505 * until a gap is encountered.
4508 BT_DBG("chan %p", chan
);
4510 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4511 struct sk_buff
*skb
;
4512 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4513 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
4515 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
4520 skb_unlink(skb
, &chan
->srej_q
);
4521 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
4522 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
4527 if (skb_queue_empty(&chan
->srej_q
)) {
4528 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
4529 l2cap_send_ack(chan
);
4535 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
4536 struct l2cap_ctrl
*control
)
4538 struct sk_buff
*skb
;
4540 BT_DBG("chan %p, control %p", chan
, control
);
4542 if (control
->reqseq
== chan
->next_tx_seq
) {
4543 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4544 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4548 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4551 BT_DBG("Seq %d not available for retransmission",
4556 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4557 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4558 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4562 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4564 if (control
->poll
) {
4565 l2cap_pass_to_tx(chan
, control
);
4567 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4568 l2cap_retransmit(chan
, control
);
4569 l2cap_ertm_send(chan
);
4571 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4572 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4573 chan
->srej_save_reqseq
= control
->reqseq
;
4576 l2cap_pass_to_tx_fbit(chan
, control
);
4578 if (control
->final
) {
4579 if (chan
->srej_save_reqseq
!= control
->reqseq
||
4580 !test_and_clear_bit(CONN_SREJ_ACT
,
4582 l2cap_retransmit(chan
, control
);
4584 l2cap_retransmit(chan
, control
);
4585 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4586 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4587 chan
->srej_save_reqseq
= control
->reqseq
;
4593 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
4594 struct l2cap_ctrl
*control
)
4596 struct sk_buff
*skb
;
4598 BT_DBG("chan %p, control %p", chan
, control
);
4600 if (control
->reqseq
== chan
->next_tx_seq
) {
4601 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4602 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4606 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4608 if (chan
->max_tx
&& skb
&&
4609 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4610 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4611 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4615 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4617 l2cap_pass_to_tx(chan
, control
);
4619 if (control
->final
) {
4620 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4621 l2cap_retransmit_all(chan
, control
);
4623 l2cap_retransmit_all(chan
, control
);
4624 l2cap_ertm_send(chan
);
4625 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
4626 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4630 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
4632 BT_DBG("chan %p, txseq %d", chan
, txseq
);
4634 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
4635 chan
->expected_tx_seq
);
4637 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
4638 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4640 /* See notes below regarding "double poll" and
4643 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4644 BT_DBG("Invalid/Ignore - after SREJ");
4645 return L2CAP_TXSEQ_INVALID_IGNORE
;
4647 BT_DBG("Invalid - in window after SREJ sent");
4648 return L2CAP_TXSEQ_INVALID
;
4652 if (chan
->srej_list
.head
== txseq
) {
4653 BT_DBG("Expected SREJ");
4654 return L2CAP_TXSEQ_EXPECTED_SREJ
;
4657 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
4658 BT_DBG("Duplicate SREJ - txseq already stored");
4659 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
4662 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
4663 BT_DBG("Unexpected SREJ - not requested");
4664 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
4668 if (chan
->expected_tx_seq
== txseq
) {
4669 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4671 BT_DBG("Invalid - txseq outside tx window");
4672 return L2CAP_TXSEQ_INVALID
;
4675 return L2CAP_TXSEQ_EXPECTED
;
4679 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
4680 __seq_offset(chan
, chan
->expected_tx_seq
,
4681 chan
->last_acked_seq
)){
4682 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4683 return L2CAP_TXSEQ_DUPLICATE
;
4686 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
4687 /* A source of invalid packets is a "double poll" condition,
4688 * where delays cause us to send multiple poll packets. If
4689 * the remote stack receives and processes both polls,
4690 * sequence numbers can wrap around in such a way that a
4691 * resent frame has a sequence number that looks like new data
4692 * with a sequence gap. This would trigger an erroneous SREJ
4695 * Fortunately, this is impossible with a tx window that's
4696 * less than half of the maximum sequence number, which allows
4697 * invalid frames to be safely ignored.
4699 * With tx window sizes greater than half of the tx window
4700 * maximum, the frame is invalid and cannot be ignored. This
4701 * causes a disconnect.
4704 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4705 BT_DBG("Invalid/Ignore - txseq outside tx window");
4706 return L2CAP_TXSEQ_INVALID_IGNORE
;
4708 BT_DBG("Invalid - txseq outside tx window");
4709 return L2CAP_TXSEQ_INVALID
;
4712 BT_DBG("Unexpected - txseq indicates missing frames");
4713 return L2CAP_TXSEQ_UNEXPECTED
;
4717 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
4718 struct l2cap_ctrl
*control
,
4719 struct sk_buff
*skb
, u8 event
)
4722 bool skb_in_use
= 0;
4724 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4728 case L2CAP_EV_RECV_IFRAME
:
4729 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
4730 case L2CAP_TXSEQ_EXPECTED
:
4731 l2cap_pass_to_tx(chan
, control
);
4733 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4734 BT_DBG("Busy, discarding expected seq %d",
4739 chan
->expected_tx_seq
= __next_seq(chan
,
4742 chan
->buffer_seq
= chan
->expected_tx_seq
;
4745 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
4749 if (control
->final
) {
4750 if (!test_and_clear_bit(CONN_REJ_ACT
,
4751 &chan
->conn_state
)) {
4753 l2cap_retransmit_all(chan
, control
);
4754 l2cap_ertm_send(chan
);
4758 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
4759 l2cap_send_ack(chan
);
4761 case L2CAP_TXSEQ_UNEXPECTED
:
4762 l2cap_pass_to_tx(chan
, control
);
4764 /* Can't issue SREJ frames in the local busy state.
4765 * Drop this frame, it will be seen as missing
4766 * when local busy is exited.
4768 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4769 BT_DBG("Busy, discarding unexpected seq %d",
4774 /* There was a gap in the sequence, so an SREJ
4775 * must be sent for each missing frame. The
4776 * current frame is stored for later use.
4778 skb_queue_tail(&chan
->srej_q
, skb
);
4780 BT_DBG("Queued %p (queue len %d)", skb
,
4781 skb_queue_len(&chan
->srej_q
));
4783 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4784 l2cap_seq_list_clear(&chan
->srej_list
);
4785 l2cap_send_srej(chan
, control
->txseq
);
4787 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
4789 case L2CAP_TXSEQ_DUPLICATE
:
4790 l2cap_pass_to_tx(chan
, control
);
4792 case L2CAP_TXSEQ_INVALID_IGNORE
:
4794 case L2CAP_TXSEQ_INVALID
:
4796 l2cap_send_disconn_req(chan
->conn
, chan
,
4801 case L2CAP_EV_RECV_RR
:
4802 l2cap_pass_to_tx(chan
, control
);
4803 if (control
->final
) {
4804 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4806 if (!test_and_clear_bit(CONN_REJ_ACT
,
4807 &chan
->conn_state
)) {
4809 l2cap_retransmit_all(chan
, control
);
4812 l2cap_ertm_send(chan
);
4813 } else if (control
->poll
) {
4814 l2cap_send_i_or_rr_or_rnr(chan
);
4816 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4817 &chan
->conn_state
) &&
4818 chan
->unacked_frames
)
4819 __set_retrans_timer(chan
);
4821 l2cap_ertm_send(chan
);
4824 case L2CAP_EV_RECV_RNR
:
4825 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4826 l2cap_pass_to_tx(chan
, control
);
4827 if (control
&& control
->poll
) {
4828 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4829 l2cap_send_rr_or_rnr(chan
, 0);
4831 __clear_retrans_timer(chan
);
4832 l2cap_seq_list_clear(&chan
->retrans_list
);
4834 case L2CAP_EV_RECV_REJ
:
4835 l2cap_handle_rej(chan
, control
);
4837 case L2CAP_EV_RECV_SREJ
:
4838 l2cap_handle_srej(chan
, control
);
4844 if (skb
&& !skb_in_use
) {
4845 BT_DBG("Freeing %p", skb
);
4852 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
4853 struct l2cap_ctrl
*control
,
4854 struct sk_buff
*skb
, u8 event
)
4857 u16 txseq
= control
->txseq
;
4858 bool skb_in_use
= 0;
4860 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4864 case L2CAP_EV_RECV_IFRAME
:
4865 switch (l2cap_classify_txseq(chan
, txseq
)) {
4866 case L2CAP_TXSEQ_EXPECTED
:
4867 /* Keep frame for reassembly later */
4868 l2cap_pass_to_tx(chan
, control
);
4869 skb_queue_tail(&chan
->srej_q
, skb
);
4871 BT_DBG("Queued %p (queue len %d)", skb
,
4872 skb_queue_len(&chan
->srej_q
));
4874 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
4876 case L2CAP_TXSEQ_EXPECTED_SREJ
:
4877 l2cap_seq_list_pop(&chan
->srej_list
);
4879 l2cap_pass_to_tx(chan
, control
);
4880 skb_queue_tail(&chan
->srej_q
, skb
);
4882 BT_DBG("Queued %p (queue len %d)", skb
,
4883 skb_queue_len(&chan
->srej_q
));
4885 err
= l2cap_rx_queued_iframes(chan
);
4890 case L2CAP_TXSEQ_UNEXPECTED
:
4891 /* Got a frame that can't be reassembled yet.
4892 * Save it for later, and send SREJs to cover
4893 * the missing frames.
4895 skb_queue_tail(&chan
->srej_q
, skb
);
4897 BT_DBG("Queued %p (queue len %d)", skb
,
4898 skb_queue_len(&chan
->srej_q
));
4900 l2cap_pass_to_tx(chan
, control
);
4901 l2cap_send_srej(chan
, control
->txseq
);
4903 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
4904 /* This frame was requested with an SREJ, but
4905 * some expected retransmitted frames are
4906 * missing. Request retransmission of missing
4909 skb_queue_tail(&chan
->srej_q
, skb
);
4911 BT_DBG("Queued %p (queue len %d)", skb
,
4912 skb_queue_len(&chan
->srej_q
));
4914 l2cap_pass_to_tx(chan
, control
);
4915 l2cap_send_srej_list(chan
, control
->txseq
);
4917 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
4918 /* We've already queued this frame. Drop this copy. */
4919 l2cap_pass_to_tx(chan
, control
);
4921 case L2CAP_TXSEQ_DUPLICATE
:
4922 /* Expecting a later sequence number, so this frame
4923 * was already received. Ignore it completely.
4926 case L2CAP_TXSEQ_INVALID_IGNORE
:
4928 case L2CAP_TXSEQ_INVALID
:
4930 l2cap_send_disconn_req(chan
->conn
, chan
,
4935 case L2CAP_EV_RECV_RR
:
4936 l2cap_pass_to_tx(chan
, control
);
4937 if (control
->final
) {
4938 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4940 if (!test_and_clear_bit(CONN_REJ_ACT
,
4941 &chan
->conn_state
)) {
4943 l2cap_retransmit_all(chan
, control
);
4946 l2cap_ertm_send(chan
);
4947 } else if (control
->poll
) {
4948 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4949 &chan
->conn_state
) &&
4950 chan
->unacked_frames
) {
4951 __set_retrans_timer(chan
);
4954 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4955 l2cap_send_srej_tail(chan
);
4957 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4958 &chan
->conn_state
) &&
4959 chan
->unacked_frames
)
4960 __set_retrans_timer(chan
);
4962 l2cap_send_ack(chan
);
4965 case L2CAP_EV_RECV_RNR
:
4966 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4967 l2cap_pass_to_tx(chan
, control
);
4968 if (control
->poll
) {
4969 l2cap_send_srej_tail(chan
);
4971 struct l2cap_ctrl rr_control
;
4972 memset(&rr_control
, 0, sizeof(rr_control
));
4973 rr_control
.sframe
= 1;
4974 rr_control
.super
= L2CAP_SUPER_RR
;
4975 rr_control
.reqseq
= chan
->buffer_seq
;
4976 l2cap_send_sframe(chan
, &rr_control
);
4980 case L2CAP_EV_RECV_REJ
:
4981 l2cap_handle_rej(chan
, control
);
4983 case L2CAP_EV_RECV_SREJ
:
4984 l2cap_handle_srej(chan
, control
);
4988 if (skb
&& !skb_in_use
) {
4989 BT_DBG("Freeing %p", skb
);
4996 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
4998 /* Make sure reqseq is for a packet that has been sent but not acked */
5001 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
5002 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
5005 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5006 struct sk_buff
*skb
, u8 event
)
5010 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
5011 control
, skb
, event
, chan
->rx_state
);
5013 if (__valid_reqseq(chan
, control
->reqseq
)) {
5014 switch (chan
->rx_state
) {
5015 case L2CAP_RX_STATE_RECV
:
5016 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
5018 case L2CAP_RX_STATE_SREJ_SENT
:
5019 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
5027 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5028 control
->reqseq
, chan
->next_tx_seq
,
5029 chan
->expected_ack_seq
);
5030 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5036 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5037 struct sk_buff
*skb
)
5041 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
5044 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
5045 L2CAP_TXSEQ_EXPECTED
) {
5046 l2cap_pass_to_tx(chan
, control
);
5048 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
5049 __next_seq(chan
, chan
->buffer_seq
));
5051 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5053 l2cap_reassemble_sdu(chan
, skb
, control
);
5056 kfree_skb(chan
->sdu
);
5059 chan
->sdu_last_frag
= NULL
;
5063 BT_DBG("Freeing %p", skb
);
5068 chan
->last_acked_seq
= control
->txseq
;
5069 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
5074 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5076 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
5080 __unpack_control(chan
, skb
);
5085 * We can just drop the corrupted I-frame here.
5086 * Receiver will miss it and start proper recovery
5087 * procedures and ask for retransmission.
5089 if (l2cap_check_fcs(chan
, skb
))
5092 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
5093 len
-= L2CAP_SDULEN_SIZE
;
5095 if (chan
->fcs
== L2CAP_FCS_CRC16
)
5096 len
-= L2CAP_FCS_SIZE
;
5098 if (len
> chan
->mps
) {
5099 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5103 if (!control
->sframe
) {
5106 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5107 control
->sar
, control
->reqseq
, control
->final
,
5110 /* Validate F-bit - F=0 always valid, F=1 only
5111 * valid in TX WAIT_F
5113 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
5116 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
5117 event
= L2CAP_EV_RECV_IFRAME
;
5118 err
= l2cap_rx(chan
, control
, skb
, event
);
5120 err
= l2cap_stream_rx(chan
, control
, skb
);
5124 l2cap_send_disconn_req(chan
->conn
, chan
,
5127 const u8 rx_func_to_event
[4] = {
5128 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
5129 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
5132 /* Only I-frames are expected in streaming mode */
5133 if (chan
->mode
== L2CAP_MODE_STREAMING
)
5136 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5137 control
->reqseq
, control
->final
, control
->poll
,
5142 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5146 /* Validate F and P bits */
5147 if (control
->final
&& (control
->poll
||
5148 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
5151 event
= rx_func_to_event
[control
->super
];
5152 if (l2cap_rx(chan
, control
, skb
, event
))
5153 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5163 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
5164 struct sk_buff
*skb
)
5166 struct l2cap_chan
*chan
;
5168 chan
= l2cap_get_chan_by_scid(conn
, cid
);
5170 if (cid
== L2CAP_CID_A2MP
) {
5171 chan
= a2mp_channel_create(conn
, skb
);
5177 l2cap_chan_lock(chan
);
5179 BT_DBG("unknown cid 0x%4.4x", cid
);
5180 /* Drop packet and return */
5186 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5188 if (chan
->state
!= BT_CONNECTED
)
5191 switch (chan
->mode
) {
5192 case L2CAP_MODE_BASIC
:
5193 /* If socket recv buffers overflows we drop data here
5194 * which is *bad* because L2CAP has to be reliable.
5195 * But we don't have any other choice. L2CAP doesn't
5196 * provide flow control mechanism. */
5198 if (chan
->imtu
< skb
->len
)
5201 if (!chan
->ops
->recv(chan
, skb
))
5205 case L2CAP_MODE_ERTM
:
5206 case L2CAP_MODE_STREAMING
:
5207 l2cap_data_rcv(chan
, skb
);
5211 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
5219 l2cap_chan_unlock(chan
);
5222 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
5223 struct sk_buff
*skb
)
5225 struct l2cap_chan
*chan
;
5227 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
5231 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5233 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5236 if (chan
->imtu
< skb
->len
)
5239 if (!chan
->ops
->recv(chan
, skb
))
5246 static void l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
5247 struct sk_buff
*skb
)
5249 struct l2cap_chan
*chan
;
5251 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
5255 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5257 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5260 if (chan
->imtu
< skb
->len
)
5263 if (!chan
->ops
->recv(chan
, skb
))
5270 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
5272 struct l2cap_hdr
*lh
= (void *) skb
->data
;
5276 skb_pull(skb
, L2CAP_HDR_SIZE
);
5277 cid
= __le16_to_cpu(lh
->cid
);
5278 len
= __le16_to_cpu(lh
->len
);
5280 if (len
!= skb
->len
) {
5285 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
5288 case L2CAP_CID_LE_SIGNALING
:
5289 case L2CAP_CID_SIGNALING
:
5290 l2cap_sig_channel(conn
, skb
);
5293 case L2CAP_CID_CONN_LESS
:
5294 psm
= get_unaligned((__le16
*) skb
->data
);
5295 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
5296 l2cap_conless_channel(conn
, psm
, skb
);
5299 case L2CAP_CID_LE_DATA
:
5300 l2cap_att_channel(conn
, cid
, skb
);
5304 if (smp_sig_channel(conn
, skb
))
5305 l2cap_conn_del(conn
->hcon
, EACCES
);
5309 l2cap_data_channel(conn
, cid
, skb
);
5314 /* ---- L2CAP interface with lower layer (HCI) ---- */
5316 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
5318 int exact
= 0, lm1
= 0, lm2
= 0;
5319 struct l2cap_chan
*c
;
5321 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
5323 /* Find listening sockets and check their link_mode */
5324 read_lock(&chan_list_lock
);
5325 list_for_each_entry(c
, &chan_list
, global_l
) {
5326 struct sock
*sk
= c
->sk
;
5328 if (c
->state
!= BT_LISTEN
)
5331 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
5332 lm1
|= HCI_LM_ACCEPT
;
5333 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5334 lm1
|= HCI_LM_MASTER
;
5336 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
5337 lm2
|= HCI_LM_ACCEPT
;
5338 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5339 lm2
|= HCI_LM_MASTER
;
5342 read_unlock(&chan_list_lock
);
5344 return exact
? lm1
: lm2
;
5347 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
5349 struct l2cap_conn
*conn
;
5351 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
5354 conn
= l2cap_conn_add(hcon
, status
);
5356 l2cap_conn_ready(conn
);
5358 l2cap_conn_del(hcon
, bt_to_errno(status
));
5362 int l2cap_disconn_ind(struct hci_conn
*hcon
)
5364 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5366 BT_DBG("hcon %p", hcon
);
5369 return HCI_ERROR_REMOTE_USER_TERM
;
5370 return conn
->disc_reason
;
5373 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
5375 BT_DBG("hcon %p reason %d", hcon
, reason
);
5377 l2cap_conn_del(hcon
, bt_to_errno(reason
));
5380 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
5382 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
5385 if (encrypt
== 0x00) {
5386 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
5387 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
5388 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
5389 l2cap_chan_close(chan
, ECONNREFUSED
);
5391 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
5392 __clear_chan_timer(chan
);
5396 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
5398 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5399 struct l2cap_chan
*chan
;
5404 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
5406 if (hcon
->type
== LE_LINK
) {
5407 if (!status
&& encrypt
)
5408 smp_distribute_keys(conn
, 0);
5409 cancel_delayed_work(&conn
->security_timer
);
5412 mutex_lock(&conn
->chan_lock
);
5414 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
5415 l2cap_chan_lock(chan
);
5417 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
5418 state_to_string(chan
->state
));
5420 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
5421 l2cap_chan_unlock(chan
);
5425 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
5426 if (!status
&& encrypt
) {
5427 chan
->sec_level
= hcon
->sec_level
;
5428 l2cap_chan_ready(chan
);
5431 l2cap_chan_unlock(chan
);
5435 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
5436 l2cap_chan_unlock(chan
);
5440 if (!status
&& (chan
->state
== BT_CONNECTED
||
5441 chan
->state
== BT_CONFIG
)) {
5442 struct sock
*sk
= chan
->sk
;
5444 clear_bit(BT_SK_SUSPEND
, &bt_sk(sk
)->flags
);
5445 sk
->sk_state_change(sk
);
5447 l2cap_check_encryption(chan
, encrypt
);
5448 l2cap_chan_unlock(chan
);
5452 if (chan
->state
== BT_CONNECT
) {
5454 l2cap_send_conn_req(chan
);
5456 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5458 } else if (chan
->state
== BT_CONNECT2
) {
5459 struct sock
*sk
= chan
->sk
;
5460 struct l2cap_conn_rsp rsp
;
5466 if (test_bit(BT_SK_DEFER_SETUP
,
5467 &bt_sk(sk
)->flags
)) {
5468 struct sock
*parent
= bt_sk(sk
)->parent
;
5469 res
= L2CAP_CR_PEND
;
5470 stat
= L2CAP_CS_AUTHOR_PEND
;
5472 parent
->sk_data_ready(parent
, 0);
5474 __l2cap_state_change(chan
, BT_CONFIG
);
5475 res
= L2CAP_CR_SUCCESS
;
5476 stat
= L2CAP_CS_NO_INFO
;
5479 __l2cap_state_change(chan
, BT_DISCONN
);
5480 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5481 res
= L2CAP_CR_SEC_BLOCK
;
5482 stat
= L2CAP_CS_NO_INFO
;
5487 rsp
.scid
= cpu_to_le16(chan
->dcid
);
5488 rsp
.dcid
= cpu_to_le16(chan
->scid
);
5489 rsp
.result
= cpu_to_le16(res
);
5490 rsp
.status
= cpu_to_le16(stat
);
5491 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
5494 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
5495 res
== L2CAP_CR_SUCCESS
) {
5497 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
5498 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
5500 l2cap_build_conf_req(chan
, buf
),
5502 chan
->num_conf_req
++;
5506 l2cap_chan_unlock(chan
);
5509 mutex_unlock(&conn
->chan_lock
);
5514 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
5516 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5519 conn
= l2cap_conn_add(hcon
, 0);
5524 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
5526 if (!(flags
& ACL_CONT
)) {
5527 struct l2cap_hdr
*hdr
;
5531 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
5532 kfree_skb(conn
->rx_skb
);
5533 conn
->rx_skb
= NULL
;
5535 l2cap_conn_unreliable(conn
, ECOMM
);
5538 /* Start fragment always begin with Basic L2CAP header */
5539 if (skb
->len
< L2CAP_HDR_SIZE
) {
5540 BT_ERR("Frame is too short (len %d)", skb
->len
);
5541 l2cap_conn_unreliable(conn
, ECOMM
);
5545 hdr
= (struct l2cap_hdr
*) skb
->data
;
5546 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
5548 if (len
== skb
->len
) {
5549 /* Complete frame received */
5550 l2cap_recv_frame(conn
, skb
);
5554 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
5556 if (skb
->len
> len
) {
5557 BT_ERR("Frame is too long (len %d, expected len %d)",
5559 l2cap_conn_unreliable(conn
, ECOMM
);
5563 /* Allocate skb for the complete frame (with header) */
5564 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
5568 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5570 conn
->rx_len
= len
- skb
->len
;
5572 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
5574 if (!conn
->rx_len
) {
5575 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
5576 l2cap_conn_unreliable(conn
, ECOMM
);
5580 if (skb
->len
> conn
->rx_len
) {
5581 BT_ERR("Fragment is too long (len %d, expected %d)",
5582 skb
->len
, conn
->rx_len
);
5583 kfree_skb(conn
->rx_skb
);
5584 conn
->rx_skb
= NULL
;
5586 l2cap_conn_unreliable(conn
, ECOMM
);
5590 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5592 conn
->rx_len
-= skb
->len
;
5594 if (!conn
->rx_len
) {
5595 /* Complete frame received */
5596 l2cap_recv_frame(conn
, conn
->rx_skb
);
5597 conn
->rx_skb
= NULL
;
5606 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
5608 struct l2cap_chan
*c
;
5610 read_lock(&chan_list_lock
);
5612 list_for_each_entry(c
, &chan_list
, global_l
) {
5613 struct sock
*sk
= c
->sk
;
5615 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5616 batostr(&bt_sk(sk
)->src
),
5617 batostr(&bt_sk(sk
)->dst
),
5618 c
->state
, __le16_to_cpu(c
->psm
),
5619 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
5620 c
->sec_level
, c
->mode
);
5623 read_unlock(&chan_list_lock
);
5628 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
5630 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
5633 static const struct file_operations l2cap_debugfs_fops
= {
5634 .open
= l2cap_debugfs_open
,
5636 .llseek
= seq_lseek
,
5637 .release
= single_release
,
/* Dentry of the debugfs "l2cap" file, removed again in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
5642 int __init
l2cap_init(void)
5646 err
= l2cap_init_sockets();
5651 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
5652 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
5654 BT_ERR("Failed to create L2CAP debug file");
5660 void l2cap_exit(void)
5662 debugfs_remove(l2cap_debugfs
);
5663 l2cap_cleanup_sockets();
5666 module_param(disable_ertm
, bool, 0644);
5667 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");