2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
60 bool disable_ertm
= 1;
62 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
63 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
65 static LIST_HEAD(chan_list
);
66 static DEFINE_RWLOCK(chan_list_lock
);
68 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
69 u8 code
, u8 ident
, u16 dlen
, void *data
);
70 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
72 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
73 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
74 struct l2cap_chan
*chan
, int err
);
76 static int l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
77 struct sk_buff_head
*skbs
, u8 event
);
79 /* ---- L2CAP channels ---- */
81 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
85 list_for_each_entry(c
, &conn
->chan_l
, list
) {
92 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
96 list_for_each_entry(c
, &conn
->chan_l
, list
) {
103 /* Find channel with given SCID.
104 * Returns locked channel. */
105 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
107 struct l2cap_chan
*c
;
109 mutex_lock(&conn
->chan_lock
);
110 c
= __l2cap_get_chan_by_scid(conn
, cid
);
113 mutex_unlock(&conn
->chan_lock
);
118 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
120 struct l2cap_chan
*c
;
122 list_for_each_entry(c
, &conn
->chan_l
, list
) {
123 if (c
->ident
== ident
)
129 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
131 struct l2cap_chan
*c
;
133 list_for_each_entry(c
, &chan_list
, global_l
) {
134 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
140 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
144 write_lock(&chan_list_lock
);
146 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
159 for (p
= 0x1001; p
< 0x1100; p
+= 2)
160 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
161 chan
->psm
= cpu_to_le16(p
);
162 chan
->sport
= cpu_to_le16(p
);
169 write_unlock(&chan_list_lock
);
173 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
175 write_lock(&chan_list_lock
);
179 write_unlock(&chan_list_lock
);
184 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
186 u16 cid
= L2CAP_CID_DYN_START
;
188 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
189 if (!__l2cap_get_chan_by_scid(conn
, cid
))
196 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
198 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
199 state_to_string(state
));
202 chan
->ops
->state_change(chan
->data
, state
);
205 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
207 struct sock
*sk
= chan
->sk
;
210 __l2cap_state_change(chan
, state
);
214 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
216 struct sock
*sk
= chan
->sk
;
221 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
223 struct sock
*sk
= chan
->sk
;
226 __l2cap_chan_set_err(chan
, err
);
230 static void __set_retrans_timer(struct l2cap_chan
*chan
)
232 if (!delayed_work_pending(&chan
->monitor_timer
) &&
233 chan
->retrans_timeout
) {
234 l2cap_set_timer(chan
, &chan
->retrans_timer
,
235 msecs_to_jiffies(chan
->retrans_timeout
));
239 static void __set_monitor_timer(struct l2cap_chan
*chan
)
241 __clear_retrans_timer(chan
);
242 if (chan
->monitor_timeout
) {
243 l2cap_set_timer(chan
, &chan
->monitor_timer
,
244 msecs_to_jiffies(chan
->monitor_timeout
));
248 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
253 skb_queue_walk(head
, skb
) {
254 if (bt_cb(skb
)->control
.txseq
== seq
)
261 /* ---- L2CAP sequence number lists ---- */
263 /* For ERTM, ordered lists of sequence numbers must be tracked for
264 * SREJ requests that are received and for frames that are to be
265 * retransmitted. These seq_list functions implement a singly-linked
266 * list in an array, where membership in the list can also be checked
267 * in constant time. Items can also be added to the tail of the list
268 * and removed from the head in constant time, without further memory
272 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
274 size_t alloc_size
, i
;
276 /* Allocated size is a power of 2 to map sequence numbers
277 * (which may be up to 14 bits) in to a smaller array that is
278 * sized for the negotiated ERTM transmit windows.
280 alloc_size
= roundup_pow_of_two(size
);
282 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
286 seq_list
->mask
= alloc_size
- 1;
287 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
288 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
289 for (i
= 0; i
< alloc_size
; i
++)
290 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
295 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
297 kfree(seq_list
->list
);
300 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
303 /* Constant-time check for list membership */
304 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
307 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
309 u16 mask
= seq_list
->mask
;
311 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
312 /* In case someone tries to pop the head of an empty list */
313 return L2CAP_SEQ_LIST_CLEAR
;
314 } else if (seq_list
->head
== seq
) {
315 /* Head can be removed in constant time */
316 seq_list
->head
= seq_list
->list
[seq
& mask
];
317 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
319 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
320 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
321 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
324 /* Walk the list to find the sequence number */
325 u16 prev
= seq_list
->head
;
326 while (seq_list
->list
[prev
& mask
] != seq
) {
327 prev
= seq_list
->list
[prev
& mask
];
328 if (prev
== L2CAP_SEQ_LIST_TAIL
)
329 return L2CAP_SEQ_LIST_CLEAR
;
332 /* Unlink the number from the list and clear it */
333 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
334 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
335 if (seq_list
->tail
== seq
)
336 seq_list
->tail
= prev
;
341 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
343 /* Remove the head in constant time */
344 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
347 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
351 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
354 for (i
= 0; i
<= seq_list
->mask
; i
++)
355 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
357 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
358 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
361 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
363 u16 mask
= seq_list
->mask
;
365 /* All appends happen in constant time */
367 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
370 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
371 seq_list
->head
= seq
;
373 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
375 seq_list
->tail
= seq
;
376 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
379 static void l2cap_chan_timeout(struct work_struct
*work
)
381 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
383 struct l2cap_conn
*conn
= chan
->conn
;
386 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
388 mutex_lock(&conn
->chan_lock
);
389 l2cap_chan_lock(chan
);
391 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
392 reason
= ECONNREFUSED
;
393 else if (chan
->state
== BT_CONNECT
&&
394 chan
->sec_level
!= BT_SECURITY_SDP
)
395 reason
= ECONNREFUSED
;
399 l2cap_chan_close(chan
, reason
);
401 l2cap_chan_unlock(chan
);
403 chan
->ops
->close(chan
->data
);
404 mutex_unlock(&conn
->chan_lock
);
406 l2cap_chan_put(chan
);
409 struct l2cap_chan
*l2cap_chan_create(void)
411 struct l2cap_chan
*chan
;
413 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
417 mutex_init(&chan
->lock
);
419 write_lock(&chan_list_lock
);
420 list_add(&chan
->global_l
, &chan_list
);
421 write_unlock(&chan_list_lock
);
423 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
425 chan
->state
= BT_OPEN
;
427 atomic_set(&chan
->refcnt
, 1);
429 /* This flag is cleared in l2cap_chan_ready() */
430 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
432 BT_DBG("chan %p", chan
);
437 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
439 write_lock(&chan_list_lock
);
440 list_del(&chan
->global_l
);
441 write_unlock(&chan_list_lock
);
443 l2cap_chan_put(chan
);
446 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
448 chan
->fcs
= L2CAP_FCS_CRC16
;
449 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
450 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
451 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
452 chan
->sec_level
= BT_SECURITY_LOW
;
454 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
457 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
459 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
460 __le16_to_cpu(chan
->psm
), chan
->dcid
);
462 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
466 switch (chan
->chan_type
) {
467 case L2CAP_CHAN_CONN_ORIENTED
:
468 if (conn
->hcon
->type
== LE_LINK
) {
470 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
471 chan
->scid
= L2CAP_CID_LE_DATA
;
472 chan
->dcid
= L2CAP_CID_LE_DATA
;
474 /* Alloc CID for connection-oriented socket */
475 chan
->scid
= l2cap_alloc_cid(conn
);
476 chan
->omtu
= L2CAP_DEFAULT_MTU
;
480 case L2CAP_CHAN_CONN_LESS
:
481 /* Connectionless socket */
482 chan
->scid
= L2CAP_CID_CONN_LESS
;
483 chan
->dcid
= L2CAP_CID_CONN_LESS
;
484 chan
->omtu
= L2CAP_DEFAULT_MTU
;
488 /* Raw socket can send/recv signalling messages only */
489 chan
->scid
= L2CAP_CID_SIGNALING
;
490 chan
->dcid
= L2CAP_CID_SIGNALING
;
491 chan
->omtu
= L2CAP_DEFAULT_MTU
;
494 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
495 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
496 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
497 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
498 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
499 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
501 l2cap_chan_hold(chan
);
503 list_add(&chan
->list
, &conn
->chan_l
);
506 static void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
508 mutex_lock(&conn
->chan_lock
);
509 __l2cap_chan_add(conn
, chan
);
510 mutex_unlock(&conn
->chan_lock
);
513 static void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
515 struct sock
*sk
= chan
->sk
;
516 struct l2cap_conn
*conn
= chan
->conn
;
517 struct sock
*parent
= bt_sk(sk
)->parent
;
519 __clear_chan_timer(chan
);
521 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
524 /* Delete from channel list */
525 list_del(&chan
->list
);
527 l2cap_chan_put(chan
);
530 hci_conn_put(conn
->hcon
);
535 __l2cap_state_change(chan
, BT_CLOSED
);
536 sock_set_flag(sk
, SOCK_ZAPPED
);
539 __l2cap_chan_set_err(chan
, err
);
542 bt_accept_unlink(sk
);
543 parent
->sk_data_ready(parent
, 0);
545 sk
->sk_state_change(sk
);
549 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
552 skb_queue_purge(&chan
->tx_q
);
554 if (chan
->mode
== L2CAP_MODE_ERTM
) {
555 __clear_retrans_timer(chan
);
556 __clear_monitor_timer(chan
);
557 __clear_ack_timer(chan
);
559 skb_queue_purge(&chan
->srej_q
);
561 l2cap_seq_list_free(&chan
->srej_list
);
562 l2cap_seq_list_free(&chan
->retrans_list
);
566 static void l2cap_chan_cleanup_listen(struct sock
*parent
)
570 BT_DBG("parent %p", parent
);
572 /* Close not yet accepted channels */
573 while ((sk
= bt_accept_dequeue(parent
, NULL
))) {
574 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
576 l2cap_chan_lock(chan
);
577 __clear_chan_timer(chan
);
578 l2cap_chan_close(chan
, ECONNRESET
);
579 l2cap_chan_unlock(chan
);
581 chan
->ops
->close(chan
->data
);
585 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
587 struct l2cap_conn
*conn
= chan
->conn
;
588 struct sock
*sk
= chan
->sk
;
590 BT_DBG("chan %p state %s sk %p", chan
,
591 state_to_string(chan
->state
), sk
);
593 switch (chan
->state
) {
596 l2cap_chan_cleanup_listen(sk
);
598 __l2cap_state_change(chan
, BT_CLOSED
);
599 sock_set_flag(sk
, SOCK_ZAPPED
);
605 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
606 conn
->hcon
->type
== ACL_LINK
) {
607 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
608 l2cap_send_disconn_req(conn
, chan
, reason
);
610 l2cap_chan_del(chan
, reason
);
614 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
615 conn
->hcon
->type
== ACL_LINK
) {
616 struct l2cap_conn_rsp rsp
;
619 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
))
620 result
= L2CAP_CR_SEC_BLOCK
;
622 result
= L2CAP_CR_BAD_PSM
;
623 l2cap_state_change(chan
, BT_DISCONN
);
625 rsp
.scid
= cpu_to_le16(chan
->dcid
);
626 rsp
.dcid
= cpu_to_le16(chan
->scid
);
627 rsp
.result
= cpu_to_le16(result
);
628 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
629 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
633 l2cap_chan_del(chan
, reason
);
638 l2cap_chan_del(chan
, reason
);
643 sock_set_flag(sk
, SOCK_ZAPPED
);
649 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
651 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
652 switch (chan
->sec_level
) {
653 case BT_SECURITY_HIGH
:
654 return HCI_AT_DEDICATED_BONDING_MITM
;
655 case BT_SECURITY_MEDIUM
:
656 return HCI_AT_DEDICATED_BONDING
;
658 return HCI_AT_NO_BONDING
;
660 } else if (chan
->psm
== cpu_to_le16(0x0001)) {
661 if (chan
->sec_level
== BT_SECURITY_LOW
)
662 chan
->sec_level
= BT_SECURITY_SDP
;
664 if (chan
->sec_level
== BT_SECURITY_HIGH
)
665 return HCI_AT_NO_BONDING_MITM
;
667 return HCI_AT_NO_BONDING
;
669 switch (chan
->sec_level
) {
670 case BT_SECURITY_HIGH
:
671 return HCI_AT_GENERAL_BONDING_MITM
;
672 case BT_SECURITY_MEDIUM
:
673 return HCI_AT_GENERAL_BONDING
;
675 return HCI_AT_NO_BONDING
;
680 /* Service level security */
681 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
683 struct l2cap_conn
*conn
= chan
->conn
;
686 auth_type
= l2cap_get_auth_type(chan
);
688 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
691 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
695 /* Get next available identificator.
696 * 1 - 128 are used by kernel.
697 * 129 - 199 are reserved.
698 * 200 - 254 are used by utilities like l2ping, etc.
701 spin_lock(&conn
->lock
);
703 if (++conn
->tx_ident
> 128)
708 spin_unlock(&conn
->lock
);
713 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
715 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
718 BT_DBG("code 0x%2.2x", code
);
723 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
724 flags
= ACL_START_NO_FLUSH
;
728 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
729 skb
->priority
= HCI_PRIO_MAX
;
731 hci_send_acl(conn
->hchan
, skb
, flags
);
734 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
736 struct hci_conn
*hcon
= chan
->conn
->hcon
;
739 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
742 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
743 lmp_no_flush_capable(hcon
->hdev
))
744 flags
= ACL_START_NO_FLUSH
;
748 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
749 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
752 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
754 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
755 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
757 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
760 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
761 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
768 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
769 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
776 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
778 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
779 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
781 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
784 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
785 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
792 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
793 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
800 static inline void __unpack_control(struct l2cap_chan
*chan
,
803 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
804 __unpack_extended_control(get_unaligned_le32(skb
->data
),
805 &bt_cb(skb
)->control
);
806 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
808 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
809 &bt_cb(skb
)->control
);
810 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
814 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
818 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
819 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
821 if (control
->sframe
) {
822 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
823 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
824 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
826 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
827 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
833 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
837 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
838 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
840 if (control
->sframe
) {
841 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
842 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
843 packed
|= L2CAP_CTRL_FRAME_TYPE
;
845 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
846 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
852 static inline void __pack_control(struct l2cap_chan
*chan
,
853 struct l2cap_ctrl
*control
,
856 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
857 put_unaligned_le32(__pack_extended_control(control
),
858 skb
->data
+ L2CAP_HDR_SIZE
);
860 put_unaligned_le16(__pack_enhanced_control(control
),
861 skb
->data
+ L2CAP_HDR_SIZE
);
865 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
869 struct l2cap_hdr
*lh
;
872 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
873 hlen
= L2CAP_EXT_HDR_SIZE
;
875 hlen
= L2CAP_ENH_HDR_SIZE
;
877 if (chan
->fcs
== L2CAP_FCS_CRC16
)
878 hlen
+= L2CAP_FCS_SIZE
;
880 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
883 return ERR_PTR(-ENOMEM
);
885 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
886 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
887 lh
->cid
= cpu_to_le16(chan
->dcid
);
889 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
890 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
892 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
894 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
895 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
896 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
899 skb
->priority
= HCI_PRIO_MAX
;
903 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
904 struct l2cap_ctrl
*control
)
909 BT_DBG("chan %p, control %p", chan
, control
);
911 if (!control
->sframe
)
914 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
918 if (control
->super
== L2CAP_SUPER_RR
)
919 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
920 else if (control
->super
== L2CAP_SUPER_RNR
)
921 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
923 if (control
->super
!= L2CAP_SUPER_SREJ
) {
924 chan
->last_acked_seq
= control
->reqseq
;
925 __clear_ack_timer(chan
);
928 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
929 control
->final
, control
->poll
, control
->super
);
931 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
932 control_field
= __pack_extended_control(control
);
934 control_field
= __pack_enhanced_control(control
);
936 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
938 l2cap_do_send(chan
, skb
);
941 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
943 struct l2cap_ctrl control
;
945 BT_DBG("chan %p, poll %d", chan
, poll
);
947 memset(&control
, 0, sizeof(control
));
951 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
952 control
.super
= L2CAP_SUPER_RNR
;
954 control
.super
= L2CAP_SUPER_RR
;
956 control
.reqseq
= chan
->buffer_seq
;
957 l2cap_send_sframe(chan
, &control
);
960 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
962 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
965 static void l2cap_send_conn_req(struct l2cap_chan
*chan
)
967 struct l2cap_conn
*conn
= chan
->conn
;
968 struct l2cap_conn_req req
;
970 req
.scid
= cpu_to_le16(chan
->scid
);
973 chan
->ident
= l2cap_get_ident(conn
);
975 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
977 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
980 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
982 struct sock
*sk
= chan
->sk
;
987 parent
= bt_sk(sk
)->parent
;
989 BT_DBG("sk %p, parent %p", sk
, parent
);
991 /* This clears all conf flags, including CONF_NOT_COMPLETE */
992 chan
->conf_state
= 0;
993 __clear_chan_timer(chan
);
995 __l2cap_state_change(chan
, BT_CONNECTED
);
996 sk
->sk_state_change(sk
);
999 parent
->sk_data_ready(parent
, 0);
1004 static void l2cap_do_start(struct l2cap_chan
*chan
)
1006 struct l2cap_conn
*conn
= chan
->conn
;
1008 if (conn
->hcon
->type
== LE_LINK
) {
1009 l2cap_chan_ready(chan
);
1013 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1014 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1017 if (l2cap_chan_check_security(chan
) &&
1018 __l2cap_no_conn_pending(chan
))
1019 l2cap_send_conn_req(chan
);
1021 struct l2cap_info_req req
;
1022 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1024 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1025 conn
->info_ident
= l2cap_get_ident(conn
);
1027 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1029 l2cap_send_cmd(conn
, conn
->info_ident
,
1030 L2CAP_INFO_REQ
, sizeof(req
), &req
);
1034 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1036 u32 local_feat_mask
= l2cap_feat_mask
;
1038 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1041 case L2CAP_MODE_ERTM
:
1042 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1043 case L2CAP_MODE_STREAMING
:
1044 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1050 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
1052 struct sock
*sk
= chan
->sk
;
1053 struct l2cap_disconn_req req
;
1058 if (chan
->mode
== L2CAP_MODE_ERTM
) {
1059 __clear_retrans_timer(chan
);
1060 __clear_monitor_timer(chan
);
1061 __clear_ack_timer(chan
);
1064 req
.dcid
= cpu_to_le16(chan
->dcid
);
1065 req
.scid
= cpu_to_le16(chan
->scid
);
1066 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1067 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
1070 __l2cap_state_change(chan
, BT_DISCONN
);
1071 __l2cap_chan_set_err(chan
, err
);
1075 /* ---- L2CAP connections ---- */
1076 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1078 struct l2cap_chan
*chan
, *tmp
;
1080 BT_DBG("conn %p", conn
);
1082 mutex_lock(&conn
->chan_lock
);
1084 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1085 struct sock
*sk
= chan
->sk
;
1087 l2cap_chan_lock(chan
);
1089 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1090 l2cap_chan_unlock(chan
);
1094 if (chan
->state
== BT_CONNECT
) {
1095 if (!l2cap_chan_check_security(chan
) ||
1096 !__l2cap_no_conn_pending(chan
)) {
1097 l2cap_chan_unlock(chan
);
1101 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1102 && test_bit(CONF_STATE2_DEVICE
,
1103 &chan
->conf_state
)) {
1104 l2cap_chan_close(chan
, ECONNRESET
);
1105 l2cap_chan_unlock(chan
);
1109 l2cap_send_conn_req(chan
);
1111 } else if (chan
->state
== BT_CONNECT2
) {
1112 struct l2cap_conn_rsp rsp
;
1114 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1115 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1117 if (l2cap_chan_check_security(chan
)) {
1119 if (test_bit(BT_SK_DEFER_SETUP
,
1120 &bt_sk(sk
)->flags
)) {
1121 struct sock
*parent
= bt_sk(sk
)->parent
;
1122 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1123 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1125 parent
->sk_data_ready(parent
, 0);
1128 __l2cap_state_change(chan
, BT_CONFIG
);
1129 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1130 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1134 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1135 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1138 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1141 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1142 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1143 l2cap_chan_unlock(chan
);
1147 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1148 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1149 l2cap_build_conf_req(chan
, buf
), buf
);
1150 chan
->num_conf_req
++;
1153 l2cap_chan_unlock(chan
);
1156 mutex_unlock(&conn
->chan_lock
);
1159 /* Find socket with cid and source/destination bdaddr.
1160 * Returns closest match, locked.
1162 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1166 struct l2cap_chan
*c
, *c1
= NULL
;
1168 read_lock(&chan_list_lock
);
1170 list_for_each_entry(c
, &chan_list
, global_l
) {
1171 struct sock
*sk
= c
->sk
;
1173 if (state
&& c
->state
!= state
)
1176 if (c
->scid
== cid
) {
1177 int src_match
, dst_match
;
1178 int src_any
, dst_any
;
1181 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1182 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1183 if (src_match
&& dst_match
) {
1184 read_unlock(&chan_list_lock
);
1189 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1190 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1191 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1192 (src_any
&& dst_any
))
1197 read_unlock(&chan_list_lock
);
1202 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1204 struct sock
*parent
, *sk
;
1205 struct l2cap_chan
*chan
, *pchan
;
1209 /* Check if we have socket listening on cid */
1210 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1211 conn
->src
, conn
->dst
);
1219 /* Check for backlog size */
1220 if (sk_acceptq_is_full(parent
)) {
1221 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
1225 chan
= pchan
->ops
->new_connection(pchan
->data
);
1231 hci_conn_hold(conn
->hcon
);
1233 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1234 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1236 bt_accept_enqueue(parent
, sk
);
1238 l2cap_chan_add(conn
, chan
);
1240 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1242 __l2cap_state_change(chan
, BT_CONNECTED
);
1243 parent
->sk_data_ready(parent
, 0);
1246 release_sock(parent
);
1249 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1251 struct l2cap_chan
*chan
;
1253 BT_DBG("conn %p", conn
);
1255 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1256 l2cap_le_conn_ready(conn
);
1258 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1259 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
1261 mutex_lock(&conn
->chan_lock
);
1263 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1265 l2cap_chan_lock(chan
);
1267 if (conn
->hcon
->type
== LE_LINK
) {
1268 if (smp_conn_security(conn
, chan
->sec_level
))
1269 l2cap_chan_ready(chan
);
1271 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1272 struct sock
*sk
= chan
->sk
;
1273 __clear_chan_timer(chan
);
1275 __l2cap_state_change(chan
, BT_CONNECTED
);
1276 sk
->sk_state_change(sk
);
1279 } else if (chan
->state
== BT_CONNECT
)
1280 l2cap_do_start(chan
);
1282 l2cap_chan_unlock(chan
);
1285 mutex_unlock(&conn
->chan_lock
);
1288 /* Notify sockets that we cannot guaranty reliability anymore */
1289 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1291 struct l2cap_chan
*chan
;
1293 BT_DBG("conn %p", conn
);
1295 mutex_lock(&conn
->chan_lock
);
1297 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1298 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1299 __l2cap_chan_set_err(chan
, err
);
1302 mutex_unlock(&conn
->chan_lock
);
1305 static void l2cap_info_timeout(struct work_struct
*work
)
1307 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1310 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1311 conn
->info_ident
= 0;
1313 l2cap_conn_start(conn
);
1316 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1318 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1319 struct l2cap_chan
*chan
, *l
;
1324 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1326 kfree_skb(conn
->rx_skb
);
1328 mutex_lock(&conn
->chan_lock
);
1331 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1332 l2cap_chan_hold(chan
);
1333 l2cap_chan_lock(chan
);
1335 l2cap_chan_del(chan
, err
);
1337 l2cap_chan_unlock(chan
);
1339 chan
->ops
->close(chan
->data
);
1340 l2cap_chan_put(chan
);
1343 mutex_unlock(&conn
->chan_lock
);
1345 hci_chan_del(conn
->hchan
);
1347 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1348 cancel_delayed_work_sync(&conn
->info_timer
);
1350 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1351 cancel_delayed_work_sync(&conn
->security_timer
);
1352 smp_chan_destroy(conn
);
1355 hcon
->l2cap_data
= NULL
;
1359 static void security_timeout(struct work_struct
*work
)
1361 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1362 security_timer
.work
);
1364 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1367 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1369 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1370 struct hci_chan
*hchan
;
1375 hchan
= hci_chan_create(hcon
);
1379 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
1381 hci_chan_del(hchan
);
1385 hcon
->l2cap_data
= conn
;
1387 conn
->hchan
= hchan
;
1389 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1391 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
1392 conn
->mtu
= hcon
->hdev
->le_mtu
;
1394 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1396 conn
->src
= &hcon
->hdev
->bdaddr
;
1397 conn
->dst
= &hcon
->dst
;
1399 conn
->feat_mask
= 0;
1401 spin_lock_init(&conn
->lock
);
1402 mutex_init(&conn
->chan_lock
);
1404 INIT_LIST_HEAD(&conn
->chan_l
);
1406 if (hcon
->type
== LE_LINK
)
1407 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1409 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1411 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1416 /* ---- Socket interface ---- */
1418 /* Find socket with psm and source / destination bdaddr.
1419 * Returns closest match.
1421 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1425 struct l2cap_chan
*c
, *c1
= NULL
;
1427 read_lock(&chan_list_lock
);
1429 list_for_each_entry(c
, &chan_list
, global_l
) {
1430 struct sock
*sk
= c
->sk
;
1432 if (state
&& c
->state
!= state
)
1435 if (c
->psm
== psm
) {
1436 int src_match
, dst_match
;
1437 int src_any
, dst_any
;
1440 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1441 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1442 if (src_match
&& dst_match
) {
1443 read_unlock(&chan_list_lock
);
1448 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1449 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1450 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1451 (src_any
&& dst_any
))
1456 read_unlock(&chan_list_lock
);
1461 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1462 bdaddr_t
*dst
, u8 dst_type
)
1464 struct sock
*sk
= chan
->sk
;
1465 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1466 struct l2cap_conn
*conn
;
1467 struct hci_conn
*hcon
;
1468 struct hci_dev
*hdev
;
1472 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src
), batostr(dst
),
1473 dst_type
, __le16_to_cpu(chan
->psm
));
1475 hdev
= hci_get_route(dst
, src
);
1477 return -EHOSTUNREACH
;
1481 l2cap_chan_lock(chan
);
1483 /* PSM must be odd and lsb of upper byte must be 0 */
1484 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1485 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1490 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1495 switch (chan
->mode
) {
1496 case L2CAP_MODE_BASIC
:
1498 case L2CAP_MODE_ERTM
:
1499 case L2CAP_MODE_STREAMING
:
1510 switch (sk
->sk_state
) {
1514 /* Already connecting */
1520 /* Already connected */
1536 /* Set destination address and psm */
1537 bacpy(&bt_sk(sk
)->dst
, dst
);
1544 auth_type
= l2cap_get_auth_type(chan
);
1546 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1547 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1548 chan
->sec_level
, auth_type
);
1550 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1551 chan
->sec_level
, auth_type
);
1554 err
= PTR_ERR(hcon
);
1558 conn
= l2cap_conn_add(hcon
, 0);
1565 if (hcon
->type
== LE_LINK
) {
1568 if (!list_empty(&conn
->chan_l
)) {
1577 /* Update source addr of the socket */
1578 bacpy(src
, conn
->src
);
1580 l2cap_chan_unlock(chan
);
1581 l2cap_chan_add(conn
, chan
);
1582 l2cap_chan_lock(chan
);
1584 l2cap_state_change(chan
, BT_CONNECT
);
1585 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1587 if (hcon
->state
== BT_CONNECTED
) {
1588 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1589 __clear_chan_timer(chan
);
1590 if (l2cap_chan_check_security(chan
))
1591 l2cap_state_change(chan
, BT_CONNECTED
);
1593 l2cap_do_start(chan
);
1599 l2cap_chan_unlock(chan
);
1600 hci_dev_unlock(hdev
);
1605 int __l2cap_wait_ack(struct sock
*sk
)
1607 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1608 DECLARE_WAITQUEUE(wait
, current
);
1612 add_wait_queue(sk_sleep(sk
), &wait
);
1613 set_current_state(TASK_INTERRUPTIBLE
);
1614 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1618 if (signal_pending(current
)) {
1619 err
= sock_intr_errno(timeo
);
1624 timeo
= schedule_timeout(timeo
);
1626 set_current_state(TASK_INTERRUPTIBLE
);
1628 err
= sock_error(sk
);
1632 set_current_state(TASK_RUNNING
);
1633 remove_wait_queue(sk_sleep(sk
), &wait
);
1637 static void l2cap_monitor_timeout(struct work_struct
*work
)
1639 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1640 monitor_timer
.work
);
1642 BT_DBG("chan %p", chan
);
1644 l2cap_chan_lock(chan
);
1647 l2cap_chan_unlock(chan
);
1648 l2cap_chan_put(chan
);
1652 l2cap_tx(chan
, 0, 0, L2CAP_EV_MONITOR_TO
);
1654 l2cap_chan_unlock(chan
);
1655 l2cap_chan_put(chan
);
1658 static void l2cap_retrans_timeout(struct work_struct
*work
)
1660 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1661 retrans_timer
.work
);
1663 BT_DBG("chan %p", chan
);
1665 l2cap_chan_lock(chan
);
1668 l2cap_chan_unlock(chan
);
1669 l2cap_chan_put(chan
);
1673 l2cap_tx(chan
, 0, 0, L2CAP_EV_RETRANS_TO
);
1674 l2cap_chan_unlock(chan
);
1675 l2cap_chan_put(chan
);
1678 static int l2cap_streaming_send(struct l2cap_chan
*chan
,
1679 struct sk_buff_head
*skbs
)
1681 struct sk_buff
*skb
;
1682 struct l2cap_ctrl
*control
;
1684 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1686 if (chan
->state
!= BT_CONNECTED
)
1689 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1691 while (!skb_queue_empty(&chan
->tx_q
)) {
1693 skb
= skb_dequeue(&chan
->tx_q
);
1695 bt_cb(skb
)->control
.retries
= 1;
1696 control
= &bt_cb(skb
)->control
;
1698 control
->reqseq
= 0;
1699 control
->txseq
= chan
->next_tx_seq
;
1701 __pack_control(chan
, control
, skb
);
1703 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1704 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1705 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1708 l2cap_do_send(chan
, skb
);
1710 BT_DBG("Sent txseq %d", (int)control
->txseq
);
1712 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1713 chan
->frames_sent
++;
1719 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1721 struct sk_buff
*skb
, *tx_skb
;
1722 struct l2cap_ctrl
*control
;
1725 BT_DBG("chan %p", chan
);
1727 if (chan
->state
!= BT_CONNECTED
)
1730 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1733 while (chan
->tx_send_head
&&
1734 chan
->unacked_frames
< chan
->remote_tx_win
&&
1735 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1737 skb
= chan
->tx_send_head
;
1739 bt_cb(skb
)->control
.retries
= 1;
1740 control
= &bt_cb(skb
)->control
;
1742 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1745 control
->reqseq
= chan
->buffer_seq
;
1746 chan
->last_acked_seq
= chan
->buffer_seq
;
1747 control
->txseq
= chan
->next_tx_seq
;
1749 __pack_control(chan
, control
, skb
);
1751 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1752 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1753 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1756 /* Clone after data has been modified. Data is assumed to be
1757 read-only (for locking purposes) on cloned sk_buffs.
1759 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1764 __set_retrans_timer(chan
);
1766 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1767 chan
->unacked_frames
++;
1768 chan
->frames_sent
++;
1771 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1772 chan
->tx_send_head
= NULL
;
1774 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1776 l2cap_do_send(chan
, tx_skb
);
1777 BT_DBG("Sent txseq %d", (int)control
->txseq
);
1780 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent
,
1781 (int) chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1786 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1788 struct l2cap_ctrl control
;
1789 struct sk_buff
*skb
;
1790 struct sk_buff
*tx_skb
;
1793 BT_DBG("chan %p", chan
);
1795 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1798 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1799 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1801 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1803 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1808 bt_cb(skb
)->control
.retries
++;
1809 control
= bt_cb(skb
)->control
;
1811 if (chan
->max_tx
!= 0 &&
1812 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1813 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1814 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
1815 l2cap_seq_list_clear(&chan
->retrans_list
);
1819 control
.reqseq
= chan
->buffer_seq
;
1820 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1825 if (skb_cloned(skb
)) {
1826 /* Cloned sk_buffs are read-only, so we need a
1829 tx_skb
= skb_copy(skb
, GFP_ATOMIC
);
1831 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1835 l2cap_seq_list_clear(&chan
->retrans_list
);
1839 /* Update skb contents */
1840 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1841 put_unaligned_le32(__pack_extended_control(&control
),
1842 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1844 put_unaligned_le16(__pack_enhanced_control(&control
),
1845 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1848 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1849 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
1850 put_unaligned_le16(fcs
, skb_put(tx_skb
,
1854 l2cap_do_send(chan
, tx_skb
);
1856 BT_DBG("Resent txseq %d", control
.txseq
);
1858 chan
->last_acked_seq
= chan
->buffer_seq
;
1862 static void l2cap_retransmit(struct l2cap_chan
*chan
,
1863 struct l2cap_ctrl
*control
)
1865 BT_DBG("chan %p, control %p", chan
, control
);
1867 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
1868 l2cap_ertm_resend(chan
);
1871 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
1872 struct l2cap_ctrl
*control
)
1874 struct sk_buff
*skb
;
1876 BT_DBG("chan %p, control %p", chan
, control
);
1879 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
1881 l2cap_seq_list_clear(&chan
->retrans_list
);
1883 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1886 if (chan
->unacked_frames
) {
1887 skb_queue_walk(&chan
->tx_q
, skb
) {
1888 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
1889 skb
== chan
->tx_send_head
)
1893 skb_queue_walk_from(&chan
->tx_q
, skb
) {
1894 if (skb
== chan
->tx_send_head
)
1897 l2cap_seq_list_append(&chan
->retrans_list
,
1898 bt_cb(skb
)->control
.txseq
);
1901 l2cap_ertm_resend(chan
);
1905 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1907 struct l2cap_ctrl control
;
1908 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
1909 chan
->last_acked_seq
);
1912 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1913 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
1915 memset(&control
, 0, sizeof(control
));
1918 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
1919 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
1920 __clear_ack_timer(chan
);
1921 control
.super
= L2CAP_SUPER_RNR
;
1922 control
.reqseq
= chan
->buffer_seq
;
1923 l2cap_send_sframe(chan
, &control
);
1925 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
1926 l2cap_ertm_send(chan
);
1927 /* If any i-frames were sent, they included an ack */
1928 if (chan
->buffer_seq
== chan
->last_acked_seq
)
1932 /* Ack now if the tx window is 3/4ths full.
1933 * Calculate without mul or div
1935 threshold
= chan
->tx_win
;
1936 threshold
+= threshold
<< 1;
1939 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack
,
1942 if (frames_to_ack
>= threshold
) {
1943 __clear_ack_timer(chan
);
1944 control
.super
= L2CAP_SUPER_RR
;
1945 control
.reqseq
= chan
->buffer_seq
;
1946 l2cap_send_sframe(chan
, &control
);
1951 __set_ack_timer(chan
);
1955 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
1956 struct msghdr
*msg
, int len
,
1957 int count
, struct sk_buff
*skb
)
1959 struct l2cap_conn
*conn
= chan
->conn
;
1960 struct sk_buff
**frag
;
1963 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1969 /* Continuation fragments (no L2CAP header) */
1970 frag
= &skb_shinfo(skb
)->frag_list
;
1972 struct sk_buff
*tmp
;
1974 count
= min_t(unsigned int, conn
->mtu
, len
);
1976 tmp
= chan
->ops
->alloc_skb(chan
, count
,
1977 msg
->msg_flags
& MSG_DONTWAIT
);
1979 return PTR_ERR(tmp
);
1983 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1986 (*frag
)->priority
= skb
->priority
;
1991 skb
->len
+= (*frag
)->len
;
1992 skb
->data_len
+= (*frag
)->len
;
1994 frag
= &(*frag
)->next
;
2000 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2001 struct msghdr
*msg
, size_t len
,
2004 struct l2cap_conn
*conn
= chan
->conn
;
2005 struct sk_buff
*skb
;
2006 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2007 struct l2cap_hdr
*lh
;
2009 BT_DBG("chan %p len %d priority %u", chan
, (int)len
, priority
);
2011 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2013 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2014 msg
->msg_flags
& MSG_DONTWAIT
);
2018 skb
->priority
= priority
;
2020 /* Create L2CAP header */
2021 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2022 lh
->cid
= cpu_to_le16(chan
->dcid
);
2023 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2024 put_unaligned(chan
->psm
, skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2026 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2027 if (unlikely(err
< 0)) {
2029 return ERR_PTR(err
);
2034 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2035 struct msghdr
*msg
, size_t len
,
2038 struct l2cap_conn
*conn
= chan
->conn
;
2039 struct sk_buff
*skb
;
2041 struct l2cap_hdr
*lh
;
2043 BT_DBG("chan %p len %d", chan
, (int)len
);
2045 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2047 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
2048 msg
->msg_flags
& MSG_DONTWAIT
);
2052 skb
->priority
= priority
;
2054 /* Create L2CAP header */
2055 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2056 lh
->cid
= cpu_to_le16(chan
->dcid
);
2057 lh
->len
= cpu_to_le16(len
);
2059 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2060 if (unlikely(err
< 0)) {
2062 return ERR_PTR(err
);
2067 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2068 struct msghdr
*msg
, size_t len
,
2071 struct l2cap_conn
*conn
= chan
->conn
;
2072 struct sk_buff
*skb
;
2073 int err
, count
, hlen
;
2074 struct l2cap_hdr
*lh
;
2076 BT_DBG("chan %p len %d", chan
, (int)len
);
2079 return ERR_PTR(-ENOTCONN
);
2081 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2082 hlen
= L2CAP_EXT_HDR_SIZE
;
2084 hlen
= L2CAP_ENH_HDR_SIZE
;
2087 hlen
+= L2CAP_SDULEN_SIZE
;
2089 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2090 hlen
+= L2CAP_FCS_SIZE
;
2092 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2094 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2095 msg
->msg_flags
& MSG_DONTWAIT
);
2099 /* Create L2CAP header */
2100 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2101 lh
->cid
= cpu_to_le16(chan
->dcid
);
2102 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2104 /* Control header is populated later */
2105 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2106 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2108 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2111 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2113 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2114 if (unlikely(err
< 0)) {
2116 return ERR_PTR(err
);
2119 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2120 bt_cb(skb
)->control
.retries
= 0;
2124 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2125 struct sk_buff_head
*seg_queue
,
2126 struct msghdr
*msg
, size_t len
)
2128 struct sk_buff
*skb
;
2134 BT_DBG("chan %p, msg %p, len %d", chan
, msg
, (int)len
);
2136 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2137 * so fragmented skbs are not used. The HCI layer's handling
2138 * of fragmented skbs is not compatible with ERTM's queueing.
2141 /* PDU size is derived from the HCI MTU */
2142 pdu_len
= chan
->conn
->mtu
;
2144 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2146 /* Adjust for largest possible L2CAP overhead. */
2147 pdu_len
-= L2CAP_EXT_HDR_SIZE
+ L2CAP_FCS_SIZE
;
2149 /* Remote device may have requested smaller PDUs */
2150 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2152 if (len
<= pdu_len
) {
2153 sar
= L2CAP_SAR_UNSEGMENTED
;
2157 sar
= L2CAP_SAR_START
;
2159 pdu_len
-= L2CAP_SDULEN_SIZE
;
2163 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2166 __skb_queue_purge(seg_queue
);
2167 return PTR_ERR(skb
);
2170 bt_cb(skb
)->control
.sar
= sar
;
2171 __skb_queue_tail(seg_queue
, skb
);
2176 pdu_len
+= L2CAP_SDULEN_SIZE
;
2179 if (len
<= pdu_len
) {
2180 sar
= L2CAP_SAR_END
;
2183 sar
= L2CAP_SAR_CONTINUE
;
2190 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2193 struct sk_buff
*skb
;
2195 struct sk_buff_head seg_queue
;
2197 /* Connectionless channel */
2198 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2199 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2201 return PTR_ERR(skb
);
2203 l2cap_do_send(chan
, skb
);
2207 switch (chan
->mode
) {
2208 case L2CAP_MODE_BASIC
:
2209 /* Check outgoing MTU */
2210 if (len
> chan
->omtu
)
2213 /* Create a basic PDU */
2214 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2216 return PTR_ERR(skb
);
2218 l2cap_do_send(chan
, skb
);
2222 case L2CAP_MODE_ERTM
:
2223 case L2CAP_MODE_STREAMING
:
2224 /* Check outgoing MTU */
2225 if (len
> chan
->omtu
) {
2230 __skb_queue_head_init(&seg_queue
);
2232 /* Do segmentation before calling in to the state machine,
2233 * since it's possible to block while waiting for memory
2236 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2238 /* The channel could have been closed while segmenting,
2239 * check that it is still connected.
2241 if (chan
->state
!= BT_CONNECTED
) {
2242 __skb_queue_purge(&seg_queue
);
2249 if (chan
->mode
== L2CAP_MODE_ERTM
)
2250 err
= l2cap_tx(chan
, 0, &seg_queue
,
2251 L2CAP_EV_DATA_REQUEST
);
2253 err
= l2cap_streaming_send(chan
, &seg_queue
);
2258 /* If the skbs were not queued for sending, they'll still be in
2259 * seg_queue and need to be purged.
2261 __skb_queue_purge(&seg_queue
);
2265 BT_DBG("bad state %1.1x", chan
->mode
);
2272 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2274 struct l2cap_ctrl control
;
2277 BT_DBG("chan %p, txseq %d", chan
, txseq
);
2279 memset(&control
, 0, sizeof(control
));
2281 control
.super
= L2CAP_SUPER_SREJ
;
2283 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2284 seq
= __next_seq(chan
, seq
)) {
2285 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2286 control
.reqseq
= seq
;
2287 l2cap_send_sframe(chan
, &control
);
2288 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2292 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2295 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2297 struct l2cap_ctrl control
;
2299 BT_DBG("chan %p", chan
);
2301 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2304 memset(&control
, 0, sizeof(control
));
2306 control
.super
= L2CAP_SUPER_SREJ
;
2307 control
.reqseq
= chan
->srej_list
.tail
;
2308 l2cap_send_sframe(chan
, &control
);
2311 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2313 struct l2cap_ctrl control
;
2317 BT_DBG("chan %p, txseq %d", chan
, txseq
);
2319 memset(&control
, 0, sizeof(control
));
2321 control
.super
= L2CAP_SUPER_SREJ
;
2323 /* Capture initial list head to allow only one pass through the list. */
2324 initial_head
= chan
->srej_list
.head
;
2327 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2328 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2331 control
.reqseq
= seq
;
2332 l2cap_send_sframe(chan
, &control
);
2333 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2334 } while (chan
->srej_list
.head
!= initial_head
);
2337 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2339 struct sk_buff
*acked_skb
;
2342 BT_DBG("chan %p, reqseq %d", chan
, reqseq
);
2344 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2347 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2348 chan
->expected_ack_seq
, chan
->unacked_frames
);
2350 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2351 ackseq
= __next_seq(chan
, ackseq
)) {
2353 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2355 skb_unlink(acked_skb
, &chan
->tx_q
);
2356 kfree_skb(acked_skb
);
2357 chan
->unacked_frames
--;
2361 chan
->expected_ack_seq
= reqseq
;
2363 if (chan
->unacked_frames
== 0)
2364 __clear_retrans_timer(chan
);
2366 BT_DBG("unacked_frames %d", (int) chan
->unacked_frames
);
2369 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2371 BT_DBG("chan %p", chan
);
2373 chan
->expected_tx_seq
= chan
->buffer_seq
;
2374 l2cap_seq_list_clear(&chan
->srej_list
);
2375 skb_queue_purge(&chan
->srej_q
);
2376 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2379 static int l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2380 struct l2cap_ctrl
*control
,
2381 struct sk_buff_head
*skbs
, u8 event
)
2385 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2389 case L2CAP_EV_DATA_REQUEST
:
2390 if (chan
->tx_send_head
== NULL
)
2391 chan
->tx_send_head
= skb_peek(skbs
);
2393 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2394 l2cap_ertm_send(chan
);
2396 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2397 BT_DBG("Enter LOCAL_BUSY");
2398 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2400 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2401 /* The SREJ_SENT state must be aborted if we are to
2402 * enter the LOCAL_BUSY state.
2404 l2cap_abort_rx_srej_sent(chan
);
2407 l2cap_send_ack(chan
);
2410 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2411 BT_DBG("Exit LOCAL_BUSY");
2412 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2414 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2415 struct l2cap_ctrl local_control
;
2417 memset(&local_control
, 0, sizeof(local_control
));
2418 local_control
.sframe
= 1;
2419 local_control
.super
= L2CAP_SUPER_RR
;
2420 local_control
.poll
= 1;
2421 local_control
.reqseq
= chan
->buffer_seq
;
2422 l2cap_send_sframe(chan
, &local_control
);
2424 chan
->retry_count
= 1;
2425 __set_monitor_timer(chan
);
2426 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2429 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2430 l2cap_process_reqseq(chan
, control
->reqseq
);
2432 case L2CAP_EV_EXPLICIT_POLL
:
2433 l2cap_send_rr_or_rnr(chan
, 1);
2434 chan
->retry_count
= 1;
2435 __set_monitor_timer(chan
);
2436 __clear_ack_timer(chan
);
2437 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2439 case L2CAP_EV_RETRANS_TO
:
2440 l2cap_send_rr_or_rnr(chan
, 1);
2441 chan
->retry_count
= 1;
2442 __set_monitor_timer(chan
);
2443 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2445 case L2CAP_EV_RECV_FBIT
:
2446 /* Nothing to process */
2455 static int l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2456 struct l2cap_ctrl
*control
,
2457 struct sk_buff_head
*skbs
, u8 event
)
2461 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2465 case L2CAP_EV_DATA_REQUEST
:
2466 if (chan
->tx_send_head
== NULL
)
2467 chan
->tx_send_head
= skb_peek(skbs
);
2468 /* Queue data, but don't send. */
2469 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2471 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2472 BT_DBG("Enter LOCAL_BUSY");
2473 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2475 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2476 /* The SREJ_SENT state must be aborted if we are to
2477 * enter the LOCAL_BUSY state.
2479 l2cap_abort_rx_srej_sent(chan
);
2482 l2cap_send_ack(chan
);
2485 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2486 BT_DBG("Exit LOCAL_BUSY");
2487 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2489 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2490 struct l2cap_ctrl local_control
;
2491 memset(&local_control
, 0, sizeof(local_control
));
2492 local_control
.sframe
= 1;
2493 local_control
.super
= L2CAP_SUPER_RR
;
2494 local_control
.poll
= 1;
2495 local_control
.reqseq
= chan
->buffer_seq
;
2496 l2cap_send_sframe(chan
, &local_control
);
2498 chan
->retry_count
= 1;
2499 __set_monitor_timer(chan
);
2500 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2503 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2504 l2cap_process_reqseq(chan
, control
->reqseq
);
2508 case L2CAP_EV_RECV_FBIT
:
2509 if (control
&& control
->final
) {
2510 __clear_monitor_timer(chan
);
2511 if (chan
->unacked_frames
> 0)
2512 __set_retrans_timer(chan
);
2513 chan
->retry_count
= 0;
2514 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2515 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2518 case L2CAP_EV_EXPLICIT_POLL
:
2521 case L2CAP_EV_MONITOR_TO
:
2522 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2523 l2cap_send_rr_or_rnr(chan
, 1);
2524 __set_monitor_timer(chan
);
2525 chan
->retry_count
++;
2527 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
2537 static int l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2538 struct sk_buff_head
*skbs
, u8 event
)
2542 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2543 chan
, control
, skbs
, event
, chan
->tx_state
);
2545 switch (chan
->tx_state
) {
2546 case L2CAP_TX_STATE_XMIT
:
2547 err
= l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2549 case L2CAP_TX_STATE_WAIT_F
:
2550 err
= l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2560 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2561 struct l2cap_ctrl
*control
)
2563 BT_DBG("chan %p, control %p", chan
, control
);
2564 l2cap_tx(chan
, control
, 0, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2567 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2568 struct l2cap_ctrl
*control
)
2570 BT_DBG("chan %p, control %p", chan
, control
);
2571 l2cap_tx(chan
, control
, 0, L2CAP_EV_RECV_FBIT
);
2574 /* Copy frame to all raw sockets on that connection */
2575 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2577 struct sk_buff
*nskb
;
2578 struct l2cap_chan
*chan
;
2580 BT_DBG("conn %p", conn
);
2582 mutex_lock(&conn
->chan_lock
);
2584 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2585 struct sock
*sk
= chan
->sk
;
2586 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2589 /* Don't send frame to the socket it came from */
2592 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2596 if (chan
->ops
->recv(chan
->data
, nskb
))
2600 mutex_unlock(&conn
->chan_lock
);
2603 /* ---- L2CAP signalling commands ---- */
2604 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2605 u8 code
, u8 ident
, u16 dlen
, void *data
)
2607 struct sk_buff
*skb
, **frag
;
2608 struct l2cap_cmd_hdr
*cmd
;
2609 struct l2cap_hdr
*lh
;
2612 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2613 conn
, code
, ident
, dlen
);
2615 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2616 count
= min_t(unsigned int, conn
->mtu
, len
);
2618 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2622 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2623 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2625 if (conn
->hcon
->type
== LE_LINK
)
2626 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2628 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2630 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2633 cmd
->len
= cpu_to_le16(dlen
);
2636 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2637 memcpy(skb_put(skb
, count
), data
, count
);
2643 /* Continuation fragments (no L2CAP header) */
2644 frag
= &skb_shinfo(skb
)->frag_list
;
2646 count
= min_t(unsigned int, conn
->mtu
, len
);
2648 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2652 memcpy(skb_put(*frag
, count
), data
, count
);
2657 frag
= &(*frag
)->next
;
2667 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2669 struct l2cap_conf_opt
*opt
= *ptr
;
2672 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2680 *val
= *((u8
*) opt
->val
);
2684 *val
= get_unaligned_le16(opt
->val
);
2688 *val
= get_unaligned_le32(opt
->val
);
2692 *val
= (unsigned long) opt
->val
;
2696 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2700 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2702 struct l2cap_conf_opt
*opt
= *ptr
;
2704 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2711 *((u8
*) opt
->val
) = val
;
2715 put_unaligned_le16(val
, opt
->val
);
2719 put_unaligned_le32(val
, opt
->val
);
2723 memcpy(opt
->val
, (void *) val
, len
);
2727 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2730 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2732 struct l2cap_conf_efs efs
;
2734 switch (chan
->mode
) {
2735 case L2CAP_MODE_ERTM
:
2736 efs
.id
= chan
->local_id
;
2737 efs
.stype
= chan
->local_stype
;
2738 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2739 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2740 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2741 efs
.flush_to
= cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO
);
2744 case L2CAP_MODE_STREAMING
:
2746 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2747 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2748 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2757 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
2758 (unsigned long) &efs
);
2761 static void l2cap_ack_timeout(struct work_struct
*work
)
2763 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2767 BT_DBG("chan %p", chan
);
2769 l2cap_chan_lock(chan
);
2771 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2772 chan
->last_acked_seq
);
2775 l2cap_send_rr_or_rnr(chan
, 0);
2777 l2cap_chan_unlock(chan
);
2778 l2cap_chan_put(chan
);
2781 static inline int l2cap_ertm_init(struct l2cap_chan
*chan
)
2785 chan
->next_tx_seq
= 0;
2786 chan
->expected_tx_seq
= 0;
2787 chan
->expected_ack_seq
= 0;
2788 chan
->unacked_frames
= 0;
2789 chan
->buffer_seq
= 0;
2790 chan
->frames_sent
= 0;
2791 chan
->last_acked_seq
= 0;
2793 chan
->sdu_last_frag
= NULL
;
2796 skb_queue_head_init(&chan
->tx_q
);
2798 if (chan
->mode
!= L2CAP_MODE_ERTM
)
2801 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2802 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2804 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
2805 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
2806 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2808 skb_queue_head_init(&chan
->srej_q
);
2810 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
2814 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
2816 l2cap_seq_list_free(&chan
->srej_list
);
2821 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2824 case L2CAP_MODE_STREAMING
:
2825 case L2CAP_MODE_ERTM
:
2826 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2830 return L2CAP_MODE_BASIC
;
2834 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
2836 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
2839 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
2841 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
2844 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
2846 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
2847 __l2cap_ews_supported(chan
)) {
2848 /* use extended control field */
2849 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2850 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2852 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
2853 L2CAP_DEFAULT_TX_WINDOW
);
2854 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2858 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2860 struct l2cap_conf_req
*req
= data
;
2861 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2862 void *ptr
= req
->data
;
2865 BT_DBG("chan %p", chan
);
2867 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2870 switch (chan
->mode
) {
2871 case L2CAP_MODE_STREAMING
:
2872 case L2CAP_MODE_ERTM
:
2873 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2876 if (__l2cap_efs_supported(chan
))
2877 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2881 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2886 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2887 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2889 switch (chan
->mode
) {
2890 case L2CAP_MODE_BASIC
:
2891 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2892 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2895 rfc
.mode
= L2CAP_MODE_BASIC
;
2897 rfc
.max_transmit
= 0;
2898 rfc
.retrans_timeout
= 0;
2899 rfc
.monitor_timeout
= 0;
2900 rfc
.max_pdu_size
= 0;
2902 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2903 (unsigned long) &rfc
);
2906 case L2CAP_MODE_ERTM
:
2907 rfc
.mode
= L2CAP_MODE_ERTM
;
2908 rfc
.max_transmit
= chan
->max_tx
;
2909 rfc
.retrans_timeout
= 0;
2910 rfc
.monitor_timeout
= 0;
2912 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2913 L2CAP_EXT_HDR_SIZE
-
2916 rfc
.max_pdu_size
= cpu_to_le16(size
);
2918 l2cap_txwin_setup(chan
);
2920 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2921 L2CAP_DEFAULT_TX_WINDOW
);
2923 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2924 (unsigned long) &rfc
);
2926 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2927 l2cap_add_opt_efs(&ptr
, chan
);
2929 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2932 if (chan
->fcs
== L2CAP_FCS_NONE
||
2933 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2934 chan
->fcs
= L2CAP_FCS_NONE
;
2935 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2938 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2939 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2943 case L2CAP_MODE_STREAMING
:
2944 rfc
.mode
= L2CAP_MODE_STREAMING
;
2946 rfc
.max_transmit
= 0;
2947 rfc
.retrans_timeout
= 0;
2948 rfc
.monitor_timeout
= 0;
2950 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2951 L2CAP_EXT_HDR_SIZE
-
2954 rfc
.max_pdu_size
= cpu_to_le16(size
);
2956 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2957 (unsigned long) &rfc
);
2959 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2960 l2cap_add_opt_efs(&ptr
, chan
);
2962 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2965 if (chan
->fcs
== L2CAP_FCS_NONE
||
2966 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2967 chan
->fcs
= L2CAP_FCS_NONE
;
2968 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2973 req
->dcid
= cpu_to_le16(chan
->dcid
);
2974 req
->flags
= cpu_to_le16(0);
2979 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
2981 struct l2cap_conf_rsp
*rsp
= data
;
2982 void *ptr
= rsp
->data
;
2983 void *req
= chan
->conf_req
;
2984 int len
= chan
->conf_len
;
2985 int type
, hint
, olen
;
2987 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2988 struct l2cap_conf_efs efs
;
2990 u16 mtu
= L2CAP_DEFAULT_MTU
;
2991 u16 result
= L2CAP_CONF_SUCCESS
;
2994 BT_DBG("chan %p", chan
);
2996 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2997 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2999 hint
= type
& L2CAP_CONF_HINT
;
3000 type
&= L2CAP_CONF_MASK
;
3003 case L2CAP_CONF_MTU
:
3007 case L2CAP_CONF_FLUSH_TO
:
3008 chan
->flush_to
= val
;
3011 case L2CAP_CONF_QOS
:
3014 case L2CAP_CONF_RFC
:
3015 if (olen
== sizeof(rfc
))
3016 memcpy(&rfc
, (void *) val
, olen
);
3019 case L2CAP_CONF_FCS
:
3020 if (val
== L2CAP_FCS_NONE
)
3021 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
3024 case L2CAP_CONF_EFS
:
3026 if (olen
== sizeof(efs
))
3027 memcpy(&efs
, (void *) val
, olen
);
3030 case L2CAP_CONF_EWS
:
3032 return -ECONNREFUSED
;
3034 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3035 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
3036 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3037 chan
->remote_tx_win
= val
;
3044 result
= L2CAP_CONF_UNKNOWN
;
3045 *((u8
*) ptr
++) = type
;
3050 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
3053 switch (chan
->mode
) {
3054 case L2CAP_MODE_STREAMING
:
3055 case L2CAP_MODE_ERTM
:
3056 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
3057 chan
->mode
= l2cap_select_mode(rfc
.mode
,
3058 chan
->conn
->feat_mask
);
3063 if (__l2cap_efs_supported(chan
))
3064 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3066 return -ECONNREFUSED
;
3069 if (chan
->mode
!= rfc
.mode
)
3070 return -ECONNREFUSED
;
3076 if (chan
->mode
!= rfc
.mode
) {
3077 result
= L2CAP_CONF_UNACCEPT
;
3078 rfc
.mode
= chan
->mode
;
3080 if (chan
->num_conf_rsp
== 1)
3081 return -ECONNREFUSED
;
3083 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3084 sizeof(rfc
), (unsigned long) &rfc
);
3087 if (result
== L2CAP_CONF_SUCCESS
) {
3088 /* Configure output options and let the other side know
3089 * which ones we don't like. */
3091 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
3092 result
= L2CAP_CONF_UNACCEPT
;
3095 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
3097 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
3100 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3101 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3102 efs
.stype
!= chan
->local_stype
) {
3104 result
= L2CAP_CONF_UNACCEPT
;
3106 if (chan
->num_conf_req
>= 1)
3107 return -ECONNREFUSED
;
3109 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3111 (unsigned long) &efs
);
3113 /* Send PENDING Conf Rsp */
3114 result
= L2CAP_CONF_PENDING
;
3115 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3120 case L2CAP_MODE_BASIC
:
3121 chan
->fcs
= L2CAP_FCS_NONE
;
3122 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3125 case L2CAP_MODE_ERTM
:
3126 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
3127 chan
->remote_tx_win
= rfc
.txwin_size
;
3129 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
3131 chan
->remote_max_tx
= rfc
.max_transmit
;
3133 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3135 L2CAP_EXT_HDR_SIZE
-
3138 rfc
.max_pdu_size
= cpu_to_le16(size
);
3139 chan
->remote_mps
= size
;
3141 rfc
.retrans_timeout
=
3142 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3143 rfc
.monitor_timeout
=
3144 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3146 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3148 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3149 sizeof(rfc
), (unsigned long) &rfc
);
3151 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3152 chan
->remote_id
= efs
.id
;
3153 chan
->remote_stype
= efs
.stype
;
3154 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3155 chan
->remote_flush_to
=
3156 le32_to_cpu(efs
.flush_to
);
3157 chan
->remote_acc_lat
=
3158 le32_to_cpu(efs
.acc_lat
);
3159 chan
->remote_sdu_itime
=
3160 le32_to_cpu(efs
.sdu_itime
);
3161 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3162 sizeof(efs
), (unsigned long) &efs
);
3166 case L2CAP_MODE_STREAMING
:
3167 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3169 L2CAP_EXT_HDR_SIZE
-
3172 rfc
.max_pdu_size
= cpu_to_le16(size
);
3173 chan
->remote_mps
= size
;
3175 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3177 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3178 sizeof(rfc
), (unsigned long) &rfc
);
3183 result
= L2CAP_CONF_UNACCEPT
;
3185 memset(&rfc
, 0, sizeof(rfc
));
3186 rfc
.mode
= chan
->mode
;
3189 if (result
== L2CAP_CONF_SUCCESS
)
3190 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3192 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3193 rsp
->result
= cpu_to_le16(result
);
3194 rsp
->flags
= cpu_to_le16(0x0000);
3199 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
3201 struct l2cap_conf_req
*req
= data
;
3202 void *ptr
= req
->data
;
3205 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3206 struct l2cap_conf_efs efs
;
3208 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3210 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3211 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3214 case L2CAP_CONF_MTU
:
3215 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3216 *result
= L2CAP_CONF_UNACCEPT
;
3217 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3220 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3223 case L2CAP_CONF_FLUSH_TO
:
3224 chan
->flush_to
= val
;
3225 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3229 case L2CAP_CONF_RFC
:
3230 if (olen
== sizeof(rfc
))
3231 memcpy(&rfc
, (void *)val
, olen
);
3233 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3234 rfc
.mode
!= chan
->mode
)
3235 return -ECONNREFUSED
;
3239 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3240 sizeof(rfc
), (unsigned long) &rfc
);
3243 case L2CAP_CONF_EWS
:
3244 chan
->tx_win
= min_t(u16
, val
,
3245 L2CAP_DEFAULT_EXT_WINDOW
);
3246 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3250 case L2CAP_CONF_EFS
:
3251 if (olen
== sizeof(efs
))
3252 memcpy(&efs
, (void *)val
, olen
);
3254 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3255 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3256 efs
.stype
!= chan
->local_stype
)
3257 return -ECONNREFUSED
;
3259 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3260 sizeof(efs
), (unsigned long) &efs
);
3265 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3266 return -ECONNREFUSED
;
3268 chan
->mode
= rfc
.mode
;
3270 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3272 case L2CAP_MODE_ERTM
:
3273 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3274 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3275 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3277 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3278 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3279 chan
->local_sdu_itime
=
3280 le32_to_cpu(efs
.sdu_itime
);
3281 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3282 chan
->local_flush_to
=
3283 le32_to_cpu(efs
.flush_to
);
3287 case L2CAP_MODE_STREAMING
:
3288 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3292 req
->dcid
= cpu_to_le16(chan
->dcid
);
3293 req
->flags
= cpu_to_le16(0x0000);
3298 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
3300 struct l2cap_conf_rsp
*rsp
= data
;
3301 void *ptr
= rsp
->data
;
3303 BT_DBG("chan %p", chan
);
3305 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3306 rsp
->result
= cpu_to_le16(result
);
3307 rsp
->flags
= cpu_to_le16(flags
);
3312 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3314 struct l2cap_conn_rsp rsp
;
3315 struct l2cap_conn
*conn
= chan
->conn
;
3318 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3319 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3320 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3321 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3322 l2cap_send_cmd(conn
, chan
->ident
,
3323 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3325 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3328 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3329 l2cap_build_conf_req(chan
, buf
), buf
);
3330 chan
->num_conf_req
++;
3333 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3337 struct l2cap_conf_rfc rfc
;
3339 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3341 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3344 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3345 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3348 case L2CAP_CONF_RFC
:
3349 if (olen
== sizeof(rfc
))
3350 memcpy(&rfc
, (void *)val
, olen
);
3355 /* Use sane default values in case a misbehaving remote device
3356 * did not send an RFC option.
3358 rfc
.mode
= chan
->mode
;
3359 rfc
.retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3360 rfc
.monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3361 rfc
.max_pdu_size
= cpu_to_le16(chan
->imtu
);
3363 BT_ERR("Expected RFC option was not found, using defaults");
3367 case L2CAP_MODE_ERTM
:
3368 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3369 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3370 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3372 case L2CAP_MODE_STREAMING
:
3373 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3377 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3379 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3381 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3384 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3385 cmd
->ident
== conn
->info_ident
) {
3386 cancel_delayed_work(&conn
->info_timer
);
3388 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3389 conn
->info_ident
= 0;
3391 l2cap_conn_start(conn
);
3397 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3399 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3400 struct l2cap_conn_rsp rsp
;
3401 struct l2cap_chan
*chan
= NULL
, *pchan
;
3402 struct sock
*parent
, *sk
= NULL
;
3403 int result
, status
= L2CAP_CS_NO_INFO
;
3405 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3406 __le16 psm
= req
->psm
;
3408 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3410 /* Check if we have socket listening on psm */
3411 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
3413 result
= L2CAP_CR_BAD_PSM
;
3419 mutex_lock(&conn
->chan_lock
);
3422 /* Check if the ACL is secure enough (if not SDP) */
3423 if (psm
!= cpu_to_le16(0x0001) &&
3424 !hci_conn_check_link_mode(conn
->hcon
)) {
3425 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3426 result
= L2CAP_CR_SEC_BLOCK
;
3430 result
= L2CAP_CR_NO_MEM
;
3432 /* Check for backlog size */
3433 if (sk_acceptq_is_full(parent
)) {
3434 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
3438 chan
= pchan
->ops
->new_connection(pchan
->data
);
3444 /* Check if we already have channel with that dcid */
3445 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
3446 sock_set_flag(sk
, SOCK_ZAPPED
);
3447 chan
->ops
->close(chan
->data
);
3451 hci_conn_hold(conn
->hcon
);
3453 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3454 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3458 bt_accept_enqueue(parent
, sk
);
3460 __l2cap_chan_add(conn
, chan
);
3464 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3466 chan
->ident
= cmd
->ident
;
3468 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3469 if (l2cap_chan_check_security(chan
)) {
3470 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
)) {
3471 __l2cap_state_change(chan
, BT_CONNECT2
);
3472 result
= L2CAP_CR_PEND
;
3473 status
= L2CAP_CS_AUTHOR_PEND
;
3474 parent
->sk_data_ready(parent
, 0);
3476 __l2cap_state_change(chan
, BT_CONFIG
);
3477 result
= L2CAP_CR_SUCCESS
;
3478 status
= L2CAP_CS_NO_INFO
;
3481 __l2cap_state_change(chan
, BT_CONNECT2
);
3482 result
= L2CAP_CR_PEND
;
3483 status
= L2CAP_CS_AUTHEN_PEND
;
3486 __l2cap_state_change(chan
, BT_CONNECT2
);
3487 result
= L2CAP_CR_PEND
;
3488 status
= L2CAP_CS_NO_INFO
;
3492 release_sock(parent
);
3493 mutex_unlock(&conn
->chan_lock
);
3496 rsp
.scid
= cpu_to_le16(scid
);
3497 rsp
.dcid
= cpu_to_le16(dcid
);
3498 rsp
.result
= cpu_to_le16(result
);
3499 rsp
.status
= cpu_to_le16(status
);
3500 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3502 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3503 struct l2cap_info_req info
;
3504 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3506 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3507 conn
->info_ident
= l2cap_get_ident(conn
);
3509 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3511 l2cap_send_cmd(conn
, conn
->info_ident
,
3512 L2CAP_INFO_REQ
, sizeof(info
), &info
);
3515 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3516 result
== L2CAP_CR_SUCCESS
) {
3518 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3519 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3520 l2cap_build_conf_req(chan
, buf
), buf
);
3521 chan
->num_conf_req
++;
3527 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3529 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3530 u16 scid
, dcid
, result
, status
;
3531 struct l2cap_chan
*chan
;
3535 scid
= __le16_to_cpu(rsp
->scid
);
3536 dcid
= __le16_to_cpu(rsp
->dcid
);
3537 result
= __le16_to_cpu(rsp
->result
);
3538 status
= __le16_to_cpu(rsp
->status
);
3540 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3541 dcid
, scid
, result
, status
);
3543 mutex_lock(&conn
->chan_lock
);
3546 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3552 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3561 l2cap_chan_lock(chan
);
3564 case L2CAP_CR_SUCCESS
:
3565 l2cap_state_change(chan
, BT_CONFIG
);
3568 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3570 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3573 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3574 l2cap_build_conf_req(chan
, req
), req
);
3575 chan
->num_conf_req
++;
3579 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3583 l2cap_chan_del(chan
, ECONNREFUSED
);
3587 l2cap_chan_unlock(chan
);
3590 mutex_unlock(&conn
->chan_lock
);
3595 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3597 /* FCS is enabled only in ERTM or streaming mode, if one or both
3600 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3601 chan
->fcs
= L2CAP_FCS_NONE
;
3602 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3603 chan
->fcs
= L2CAP_FCS_CRC16
;
3606 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3608 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3611 struct l2cap_chan
*chan
;
3614 dcid
= __le16_to_cpu(req
->dcid
);
3615 flags
= __le16_to_cpu(req
->flags
);
3617 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3619 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3623 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3624 struct l2cap_cmd_rej_cid rej
;
3626 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3627 rej
.scid
= cpu_to_le16(chan
->scid
);
3628 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3630 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3635 /* Reject if config buffer is too small. */
3636 len
= cmd_len
- sizeof(*req
);
3637 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3638 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3639 l2cap_build_conf_rsp(chan
, rsp
,
3640 L2CAP_CONF_REJECT
, flags
), rsp
);
3645 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3646 chan
->conf_len
+= len
;
3648 if (flags
& 0x0001) {
3649 /* Incomplete config. Send empty response. */
3650 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3651 l2cap_build_conf_rsp(chan
, rsp
,
3652 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
3656 /* Complete config. */
3657 len
= l2cap_parse_conf_req(chan
, rsp
);
3659 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3663 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3664 chan
->num_conf_rsp
++;
3666 /* Reset config buffer. */
3669 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3672 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3673 set_default_fcs(chan
);
3675 l2cap_state_change(chan
, BT_CONNECTED
);
3677 if (chan
->mode
== L2CAP_MODE_ERTM
||
3678 chan
->mode
== L2CAP_MODE_STREAMING
)
3679 err
= l2cap_ertm_init(chan
);
3682 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3684 l2cap_chan_ready(chan
);
3689 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3691 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3692 l2cap_build_conf_req(chan
, buf
), buf
);
3693 chan
->num_conf_req
++;
3696 /* Got Conf Rsp PENDING from remote side and asume we sent
3697 Conf Rsp PENDING in the code above */
3698 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3699 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3701 /* check compatibility */
3703 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3704 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3706 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3707 l2cap_build_conf_rsp(chan
, rsp
,
3708 L2CAP_CONF_SUCCESS
, 0x0000), rsp
);
3712 l2cap_chan_unlock(chan
);
3716 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3718 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3719 u16 scid
, flags
, result
;
3720 struct l2cap_chan
*chan
;
3721 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3724 scid
= __le16_to_cpu(rsp
->scid
);
3725 flags
= __le16_to_cpu(rsp
->flags
);
3726 result
= __le16_to_cpu(rsp
->result
);
3728 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3731 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3736 case L2CAP_CONF_SUCCESS
:
3737 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3738 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3741 case L2CAP_CONF_PENDING
:
3742 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3744 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3747 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3750 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3754 /* check compatibility */
3756 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3757 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3759 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3760 l2cap_build_conf_rsp(chan
, buf
,
3761 L2CAP_CONF_SUCCESS
, 0x0000), buf
);
3765 case L2CAP_CONF_UNACCEPT
:
3766 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3769 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3770 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3774 /* throw out any old stored conf requests */
3775 result
= L2CAP_CONF_SUCCESS
;
3776 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3779 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3783 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3784 L2CAP_CONF_REQ
, len
, req
);
3785 chan
->num_conf_req
++;
3786 if (result
!= L2CAP_CONF_SUCCESS
)
3792 l2cap_chan_set_err(chan
, ECONNRESET
);
3794 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
3795 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3802 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3804 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3805 set_default_fcs(chan
);
3807 l2cap_state_change(chan
, BT_CONNECTED
);
3808 if (chan
->mode
== L2CAP_MODE_ERTM
||
3809 chan
->mode
== L2CAP_MODE_STREAMING
)
3810 err
= l2cap_ertm_init(chan
);
3813 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3815 l2cap_chan_ready(chan
);
3819 l2cap_chan_unlock(chan
);
3823 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3825 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3826 struct l2cap_disconn_rsp rsp
;
3828 struct l2cap_chan
*chan
;
3831 scid
= __le16_to_cpu(req
->scid
);
3832 dcid
= __le16_to_cpu(req
->dcid
);
3834 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3836 mutex_lock(&conn
->chan_lock
);
3838 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
3840 mutex_unlock(&conn
->chan_lock
);
3844 l2cap_chan_lock(chan
);
3848 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3849 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3850 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3853 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3856 l2cap_chan_hold(chan
);
3857 l2cap_chan_del(chan
, ECONNRESET
);
3859 l2cap_chan_unlock(chan
);
3861 chan
->ops
->close(chan
->data
);
3862 l2cap_chan_put(chan
);
3864 mutex_unlock(&conn
->chan_lock
);
3869 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3871 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3873 struct l2cap_chan
*chan
;
3875 scid
= __le16_to_cpu(rsp
->scid
);
3876 dcid
= __le16_to_cpu(rsp
->dcid
);
3878 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3880 mutex_lock(&conn
->chan_lock
);
3882 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3884 mutex_unlock(&conn
->chan_lock
);
3888 l2cap_chan_lock(chan
);
3890 l2cap_chan_hold(chan
);
3891 l2cap_chan_del(chan
, 0);
3893 l2cap_chan_unlock(chan
);
3895 chan
->ops
->close(chan
->data
);
3896 l2cap_chan_put(chan
);
3898 mutex_unlock(&conn
->chan_lock
);
3903 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3905 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3908 type
= __le16_to_cpu(req
->type
);
3910 BT_DBG("type 0x%4.4x", type
);
3912 if (type
== L2CAP_IT_FEAT_MASK
) {
3914 u32 feat_mask
= l2cap_feat_mask
;
3915 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3916 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3917 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3919 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3922 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3923 | L2CAP_FEAT_EXT_WINDOW
;
3925 put_unaligned_le32(feat_mask
, rsp
->data
);
3926 l2cap_send_cmd(conn
, cmd
->ident
,
3927 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3928 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3930 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3933 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3935 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3937 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3938 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3939 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3940 l2cap_send_cmd(conn
, cmd
->ident
,
3941 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3943 struct l2cap_info_rsp rsp
;
3944 rsp
.type
= cpu_to_le16(type
);
3945 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3946 l2cap_send_cmd(conn
, cmd
->ident
,
3947 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3953 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3955 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3958 type
= __le16_to_cpu(rsp
->type
);
3959 result
= __le16_to_cpu(rsp
->result
);
3961 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3963 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3964 if (cmd
->ident
!= conn
->info_ident
||
3965 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3968 cancel_delayed_work(&conn
->info_timer
);
3970 if (result
!= L2CAP_IR_SUCCESS
) {
3971 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3972 conn
->info_ident
= 0;
3974 l2cap_conn_start(conn
);
3980 case L2CAP_IT_FEAT_MASK
:
3981 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3983 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3984 struct l2cap_info_req req
;
3985 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3987 conn
->info_ident
= l2cap_get_ident(conn
);
3989 l2cap_send_cmd(conn
, conn
->info_ident
,
3990 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3992 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3993 conn
->info_ident
= 0;
3995 l2cap_conn_start(conn
);
3999 case L2CAP_IT_FIXED_CHAN
:
4000 conn
->fixed_chan_mask
= rsp
->data
[0];
4001 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4002 conn
->info_ident
= 0;
4004 l2cap_conn_start(conn
);
4011 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4012 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4015 struct l2cap_create_chan_req
*req
= data
;
4016 struct l2cap_create_chan_rsp rsp
;
4019 if (cmd_len
!= sizeof(*req
))
4025 psm
= le16_to_cpu(req
->psm
);
4026 scid
= le16_to_cpu(req
->scid
);
4028 BT_DBG("psm %d, scid %d, amp_id %d", psm
, scid
, req
->amp_id
);
4030 /* Placeholder: Always reject */
4032 rsp
.scid
= cpu_to_le16(scid
);
4033 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
4034 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4036 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					   struct l2cap_cmd_hdr *cmd,
					   void *data)
{
	BT_DBG("conn %p", conn);

	/* A create-channel response carries the same payload layout as a
	 * plain connect response, so delegate to that handler.
	 */
	return l2cap_connect_rsp(conn, cmd, data);
}
4050 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
4051 u16 icid
, u16 result
)
4053 struct l2cap_move_chan_rsp rsp
;
4055 BT_DBG("icid %d, result %d", icid
, result
);
4057 rsp
.icid
= cpu_to_le16(icid
);
4058 rsp
.result
= cpu_to_le16(result
);
4060 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
4063 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
4064 struct l2cap_chan
*chan
, u16 icid
, u16 result
)
4066 struct l2cap_move_chan_cfm cfm
;
4069 BT_DBG("icid %d, result %d", icid
, result
);
4071 ident
= l2cap_get_ident(conn
);
4073 chan
->ident
= ident
;
4075 cfm
.icid
= cpu_to_le16(icid
);
4076 cfm
.result
= cpu_to_le16(result
);
4078 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
4081 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4084 struct l2cap_move_chan_cfm_rsp rsp
;
4086 BT_DBG("icid %d", icid
);
4088 rsp
.icid
= cpu_to_le16(icid
);
4089 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4092 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4093 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4095 struct l2cap_move_chan_req
*req
= data
;
4097 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4099 if (cmd_len
!= sizeof(*req
))
4102 icid
= le16_to_cpu(req
->icid
);
4104 BT_DBG("icid %d, dest_amp_id %d", icid
, req
->dest_amp_id
);
4109 /* Placeholder: Always refuse */
4110 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
4115 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
4116 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4118 struct l2cap_move_chan_rsp
*rsp
= data
;
4121 if (cmd_len
!= sizeof(*rsp
))
4124 icid
= le16_to_cpu(rsp
->icid
);
4125 result
= le16_to_cpu(rsp
->result
);
4127 BT_DBG("icid %d, result %d", icid
, result
);
4129 /* Placeholder: Always unconfirmed */
4130 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
4135 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
4136 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4138 struct l2cap_move_chan_cfm
*cfm
= data
;
4141 if (cmd_len
!= sizeof(*cfm
))
4144 icid
= le16_to_cpu(cfm
->icid
);
4145 result
= le16_to_cpu(cfm
->result
);
4147 BT_DBG("icid %d, result %d", icid
, result
);
4149 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
4154 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
4155 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4157 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
4160 if (cmd_len
!= sizeof(*rsp
))
4163 icid
= le16_to_cpu(rsp
->icid
);
4165 BT_DBG("icid %d", icid
);
4170 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
4175 if (min
> max
|| min
< 6 || max
> 3200)
4178 if (to_multiplier
< 10 || to_multiplier
> 3200)
4181 if (max
>= to_multiplier
* 8)
4184 max_latency
= (to_multiplier
* 8 / max
) - 1;
4185 if (latency
> 499 || latency
> max_latency
)
4191 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
4192 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4194 struct hci_conn
*hcon
= conn
->hcon
;
4195 struct l2cap_conn_param_update_req
*req
;
4196 struct l2cap_conn_param_update_rsp rsp
;
4197 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
4200 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
4203 cmd_len
= __le16_to_cpu(cmd
->len
);
4204 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
4207 req
= (struct l2cap_conn_param_update_req
*) data
;
4208 min
= __le16_to_cpu(req
->min
);
4209 max
= __le16_to_cpu(req
->max
);
4210 latency
= __le16_to_cpu(req
->latency
);
4211 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
4213 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4214 min
, max
, latency
, to_multiplier
);
4216 memset(&rsp
, 0, sizeof(rsp
));
4218 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
4220 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
4222 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
4224 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
4228 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
4233 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
4234 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
4238 switch (cmd
->code
) {
4239 case L2CAP_COMMAND_REJ
:
4240 l2cap_command_rej(conn
, cmd
, data
);
4243 case L2CAP_CONN_REQ
:
4244 err
= l2cap_connect_req(conn
, cmd
, data
);
4247 case L2CAP_CONN_RSP
:
4248 err
= l2cap_connect_rsp(conn
, cmd
, data
);
4251 case L2CAP_CONF_REQ
:
4252 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
4255 case L2CAP_CONF_RSP
:
4256 err
= l2cap_config_rsp(conn
, cmd
, data
);
4259 case L2CAP_DISCONN_REQ
:
4260 err
= l2cap_disconnect_req(conn
, cmd
, data
);
4263 case L2CAP_DISCONN_RSP
:
4264 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
4267 case L2CAP_ECHO_REQ
:
4268 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
4271 case L2CAP_ECHO_RSP
:
4274 case L2CAP_INFO_REQ
:
4275 err
= l2cap_information_req(conn
, cmd
, data
);
4278 case L2CAP_INFO_RSP
:
4279 err
= l2cap_information_rsp(conn
, cmd
, data
);
4282 case L2CAP_CREATE_CHAN_REQ
:
4283 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
4286 case L2CAP_CREATE_CHAN_RSP
:
4287 err
= l2cap_create_channel_rsp(conn
, cmd
, data
);
4290 case L2CAP_MOVE_CHAN_REQ
:
4291 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
4294 case L2CAP_MOVE_CHAN_RSP
:
4295 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
4298 case L2CAP_MOVE_CHAN_CFM
:
4299 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
4302 case L2CAP_MOVE_CHAN_CFM_RSP
:
4303 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
4307 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
4315 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
4316 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4318 switch (cmd
->code
) {
4319 case L2CAP_COMMAND_REJ
:
4322 case L2CAP_CONN_PARAM_UPDATE_REQ
:
4323 return l2cap_conn_param_update_req(conn
, cmd
, data
);
4325 case L2CAP_CONN_PARAM_UPDATE_RSP
:
4329 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
4334 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
4335 struct sk_buff
*skb
)
4337 u8
*data
= skb
->data
;
4339 struct l2cap_cmd_hdr cmd
;
4342 l2cap_raw_recv(conn
, skb
);
4344 while (len
>= L2CAP_CMD_HDR_SIZE
) {
4346 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
4347 data
+= L2CAP_CMD_HDR_SIZE
;
4348 len
-= L2CAP_CMD_HDR_SIZE
;
4350 cmd_len
= le16_to_cpu(cmd
.len
);
4352 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
4354 if (cmd_len
> len
|| !cmd
.ident
) {
4355 BT_DBG("corrupted command");
4359 if (conn
->hcon
->type
== LE_LINK
)
4360 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
4362 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
4365 struct l2cap_cmd_rej_unk rej
;
4367 BT_ERR("Wrong link type (%d)", err
);
4369 /* FIXME: Map err to a valid reason */
4370 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
4371 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4381 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4383 u16 our_fcs
, rcv_fcs
;
4386 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
4387 hdr_size
= L2CAP_EXT_HDR_SIZE
;
4389 hdr_size
= L2CAP_ENH_HDR_SIZE
;
4391 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
4392 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
4393 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
4394 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
4396 if (our_fcs
!= rcv_fcs
)
4402 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
4404 struct l2cap_ctrl control
;
4406 BT_DBG("chan %p", chan
);
4408 memset(&control
, 0, sizeof(control
));
4411 control
.reqseq
= chan
->buffer_seq
;
4412 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4414 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4415 control
.super
= L2CAP_SUPER_RNR
;
4416 l2cap_send_sframe(chan
, &control
);
4419 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4420 chan
->unacked_frames
> 0)
4421 __set_retrans_timer(chan
);
4423 /* Send pending iframes */
4424 l2cap_ertm_send(chan
);
4426 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
4427 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
4428 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4431 control
.super
= L2CAP_SUPER_RR
;
4432 l2cap_send_sframe(chan
, &control
);
4436 static void append_skb_frag(struct sk_buff
*skb
,
4437 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
4439 /* skb->len reflects data in skb as well as all fragments
4440 * skb->data_len reflects only data in fragments
4442 if (!skb_has_frag_list(skb
))
4443 skb_shinfo(skb
)->frag_list
= new_frag
;
4445 new_frag
->next
= NULL
;
4447 (*last_frag
)->next
= new_frag
;
4448 *last_frag
= new_frag
;
4450 skb
->len
+= new_frag
->len
;
4451 skb
->data_len
+= new_frag
->len
;
4452 skb
->truesize
+= new_frag
->truesize
;
4455 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
4456 struct l2cap_ctrl
*control
)
4460 switch (control
->sar
) {
4461 case L2CAP_SAR_UNSEGMENTED
:
4465 err
= chan
->ops
->recv(chan
->data
, skb
);
4468 case L2CAP_SAR_START
:
4472 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
4473 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
4475 if (chan
->sdu_len
> chan
->imtu
) {
4480 if (skb
->len
>= chan
->sdu_len
)
4484 chan
->sdu_last_frag
= skb
;
4490 case L2CAP_SAR_CONTINUE
:
4494 append_skb_frag(chan
->sdu
, skb
,
4495 &chan
->sdu_last_frag
);
4498 if (chan
->sdu
->len
>= chan
->sdu_len
)
4508 append_skb_frag(chan
->sdu
, skb
,
4509 &chan
->sdu_last_frag
);
4512 if (chan
->sdu
->len
!= chan
->sdu_len
)
4515 err
= chan
->ops
->recv(chan
->data
, chan
->sdu
);
4518 /* Reassembly complete */
4520 chan
->sdu_last_frag
= NULL
;
4528 kfree_skb(chan
->sdu
);
4530 chan
->sdu_last_frag
= NULL
;
4537 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
4541 if (chan
->mode
!= L2CAP_MODE_ERTM
)
4544 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
4545 l2cap_tx(chan
, 0, 0, event
);
4548 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
4551 /* Pass sequential frames to l2cap_reassemble_sdu()
4552 * until a gap is encountered.
4555 BT_DBG("chan %p", chan
);
4557 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4558 struct sk_buff
*skb
;
4559 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4560 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
4562 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
4567 skb_unlink(skb
, &chan
->srej_q
);
4568 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
4569 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
4574 if (skb_queue_empty(&chan
->srej_q
)) {
4575 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
4576 l2cap_send_ack(chan
);
4582 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
4583 struct l2cap_ctrl
*control
)
4585 struct sk_buff
*skb
;
4587 BT_DBG("chan %p, control %p", chan
, control
);
4589 if (control
->reqseq
== chan
->next_tx_seq
) {
4590 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4591 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4595 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4598 BT_DBG("Seq %d not available for retransmission",
4603 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4604 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4605 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4609 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4611 if (control
->poll
) {
4612 l2cap_pass_to_tx(chan
, control
);
4614 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4615 l2cap_retransmit(chan
, control
);
4616 l2cap_ertm_send(chan
);
4618 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4619 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4620 chan
->srej_save_reqseq
= control
->reqseq
;
4623 l2cap_pass_to_tx_fbit(chan
, control
);
4625 if (control
->final
) {
4626 if (chan
->srej_save_reqseq
!= control
->reqseq
||
4627 !test_and_clear_bit(CONN_SREJ_ACT
,
4629 l2cap_retransmit(chan
, control
);
4631 l2cap_retransmit(chan
, control
);
4632 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4633 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4634 chan
->srej_save_reqseq
= control
->reqseq
;
4640 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
4641 struct l2cap_ctrl
*control
)
4643 struct sk_buff
*skb
;
4645 BT_DBG("chan %p, control %p", chan
, control
);
4647 if (control
->reqseq
== chan
->next_tx_seq
) {
4648 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4649 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4653 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4655 if (chan
->max_tx
&& skb
&&
4656 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4657 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4658 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4662 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4664 l2cap_pass_to_tx(chan
, control
);
4666 if (control
->final
) {
4667 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4668 l2cap_retransmit_all(chan
, control
);
4670 l2cap_retransmit_all(chan
, control
);
4671 l2cap_ertm_send(chan
);
4672 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
4673 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4677 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
4679 BT_DBG("chan %p, txseq %d", chan
, txseq
);
4681 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
4682 chan
->expected_tx_seq
);
4684 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
4685 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4687 /* See notes below regarding "double poll" and
4690 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4691 BT_DBG("Invalid/Ignore - after SREJ");
4692 return L2CAP_TXSEQ_INVALID_IGNORE
;
4694 BT_DBG("Invalid - in window after SREJ sent");
4695 return L2CAP_TXSEQ_INVALID
;
4699 if (chan
->srej_list
.head
== txseq
) {
4700 BT_DBG("Expected SREJ");
4701 return L2CAP_TXSEQ_EXPECTED_SREJ
;
4704 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
4705 BT_DBG("Duplicate SREJ - txseq already stored");
4706 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
4709 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
4710 BT_DBG("Unexpected SREJ - not requested");
4711 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
4715 if (chan
->expected_tx_seq
== txseq
) {
4716 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4718 BT_DBG("Invalid - txseq outside tx window");
4719 return L2CAP_TXSEQ_INVALID
;
4722 return L2CAP_TXSEQ_EXPECTED
;
4726 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
4727 __seq_offset(chan
, chan
->expected_tx_seq
,
4728 chan
->last_acked_seq
)){
4729 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4730 return L2CAP_TXSEQ_DUPLICATE
;
4733 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
4734 /* A source of invalid packets is a "double poll" condition,
4735 * where delays cause us to send multiple poll packets. If
4736 * the remote stack receives and processes both polls,
4737 * sequence numbers can wrap around in such a way that a
4738 * resent frame has a sequence number that looks like new data
4739 * with a sequence gap. This would trigger an erroneous SREJ
4742 * Fortunately, this is impossible with a tx window that's
4743 * less than half of the maximum sequence number, which allows
4744 * invalid frames to be safely ignored.
4746 * With tx window sizes greater than half of the tx window
4747 * maximum, the frame is invalid and cannot be ignored. This
4748 * causes a disconnect.
4751 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4752 BT_DBG("Invalid/Ignore - txseq outside tx window");
4753 return L2CAP_TXSEQ_INVALID_IGNORE
;
4755 BT_DBG("Invalid - txseq outside tx window");
4756 return L2CAP_TXSEQ_INVALID
;
4759 BT_DBG("Unexpected - txseq indicates missing frames");
4760 return L2CAP_TXSEQ_UNEXPECTED
;
4764 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
4765 struct l2cap_ctrl
*control
,
4766 struct sk_buff
*skb
, u8 event
)
4769 bool skb_in_use
= 0;
4771 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4775 case L2CAP_EV_RECV_IFRAME
:
4776 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
4777 case L2CAP_TXSEQ_EXPECTED
:
4778 l2cap_pass_to_tx(chan
, control
);
4780 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4781 BT_DBG("Busy, discarding expected seq %d",
4786 chan
->expected_tx_seq
= __next_seq(chan
,
4789 chan
->buffer_seq
= chan
->expected_tx_seq
;
4792 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
4796 if (control
->final
) {
4797 if (!test_and_clear_bit(CONN_REJ_ACT
,
4798 &chan
->conn_state
)) {
4800 l2cap_retransmit_all(chan
, control
);
4801 l2cap_ertm_send(chan
);
4805 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
4806 l2cap_send_ack(chan
);
4808 case L2CAP_TXSEQ_UNEXPECTED
:
4809 l2cap_pass_to_tx(chan
, control
);
4811 /* Can't issue SREJ frames in the local busy state.
4812 * Drop this frame, it will be seen as missing
4813 * when local busy is exited.
4815 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4816 BT_DBG("Busy, discarding unexpected seq %d",
4821 /* There was a gap in the sequence, so an SREJ
4822 * must be sent for each missing frame. The
4823 * current frame is stored for later use.
4825 skb_queue_tail(&chan
->srej_q
, skb
);
4827 BT_DBG("Queued %p (queue len %d)", skb
,
4828 skb_queue_len(&chan
->srej_q
));
4830 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4831 l2cap_seq_list_clear(&chan
->srej_list
);
4832 l2cap_send_srej(chan
, control
->txseq
);
4834 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
4836 case L2CAP_TXSEQ_DUPLICATE
:
4837 l2cap_pass_to_tx(chan
, control
);
4839 case L2CAP_TXSEQ_INVALID_IGNORE
:
4841 case L2CAP_TXSEQ_INVALID
:
4843 l2cap_send_disconn_req(chan
->conn
, chan
,
4848 case L2CAP_EV_RECV_RR
:
4849 l2cap_pass_to_tx(chan
, control
);
4850 if (control
->final
) {
4851 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4853 if (!test_and_clear_bit(CONN_REJ_ACT
,
4854 &chan
->conn_state
)) {
4856 l2cap_retransmit_all(chan
, control
);
4859 l2cap_ertm_send(chan
);
4860 } else if (control
->poll
) {
4861 l2cap_send_i_or_rr_or_rnr(chan
);
4863 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4864 &chan
->conn_state
) &&
4865 chan
->unacked_frames
)
4866 __set_retrans_timer(chan
);
4868 l2cap_ertm_send(chan
);
4871 case L2CAP_EV_RECV_RNR
:
4872 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4873 l2cap_pass_to_tx(chan
, control
);
4874 if (control
&& control
->poll
) {
4875 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4876 l2cap_send_rr_or_rnr(chan
, 0);
4878 __clear_retrans_timer(chan
);
4879 l2cap_seq_list_clear(&chan
->retrans_list
);
4881 case L2CAP_EV_RECV_REJ
:
4882 l2cap_handle_rej(chan
, control
);
4884 case L2CAP_EV_RECV_SREJ
:
4885 l2cap_handle_srej(chan
, control
);
4891 if (skb
&& !skb_in_use
) {
4892 BT_DBG("Freeing %p", skb
);
4899 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
4900 struct l2cap_ctrl
*control
,
4901 struct sk_buff
*skb
, u8 event
)
4904 u16 txseq
= control
->txseq
;
4905 bool skb_in_use
= 0;
4907 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4911 case L2CAP_EV_RECV_IFRAME
:
4912 switch (l2cap_classify_txseq(chan
, txseq
)) {
4913 case L2CAP_TXSEQ_EXPECTED
:
4914 /* Keep frame for reassembly later */
4915 l2cap_pass_to_tx(chan
, control
);
4916 skb_queue_tail(&chan
->srej_q
, skb
);
4918 BT_DBG("Queued %p (queue len %d)", skb
,
4919 skb_queue_len(&chan
->srej_q
));
4921 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
4923 case L2CAP_TXSEQ_EXPECTED_SREJ
:
4924 l2cap_seq_list_pop(&chan
->srej_list
);
4926 l2cap_pass_to_tx(chan
, control
);
4927 skb_queue_tail(&chan
->srej_q
, skb
);
4929 BT_DBG("Queued %p (queue len %d)", skb
,
4930 skb_queue_len(&chan
->srej_q
));
4932 err
= l2cap_rx_queued_iframes(chan
);
4937 case L2CAP_TXSEQ_UNEXPECTED
:
4938 /* Got a frame that can't be reassembled yet.
4939 * Save it for later, and send SREJs to cover
4940 * the missing frames.
4942 skb_queue_tail(&chan
->srej_q
, skb
);
4944 BT_DBG("Queued %p (queue len %d)", skb
,
4945 skb_queue_len(&chan
->srej_q
));
4947 l2cap_pass_to_tx(chan
, control
);
4948 l2cap_send_srej(chan
, control
->txseq
);
4950 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
4951 /* This frame was requested with an SREJ, but
4952 * some expected retransmitted frames are
4953 * missing. Request retransmission of missing
4956 skb_queue_tail(&chan
->srej_q
, skb
);
4958 BT_DBG("Queued %p (queue len %d)", skb
,
4959 skb_queue_len(&chan
->srej_q
));
4961 l2cap_pass_to_tx(chan
, control
);
4962 l2cap_send_srej_list(chan
, control
->txseq
);
4964 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
4965 /* We've already queued this frame. Drop this copy. */
4966 l2cap_pass_to_tx(chan
, control
);
4968 case L2CAP_TXSEQ_DUPLICATE
:
4969 /* Expecting a later sequence number, so this frame
4970 * was already received. Ignore it completely.
4973 case L2CAP_TXSEQ_INVALID_IGNORE
:
4975 case L2CAP_TXSEQ_INVALID
:
4977 l2cap_send_disconn_req(chan
->conn
, chan
,
4982 case L2CAP_EV_RECV_RR
:
4983 l2cap_pass_to_tx(chan
, control
);
4984 if (control
->final
) {
4985 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4987 if (!test_and_clear_bit(CONN_REJ_ACT
,
4988 &chan
->conn_state
)) {
4990 l2cap_retransmit_all(chan
, control
);
4993 l2cap_ertm_send(chan
);
4994 } else if (control
->poll
) {
4995 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4996 &chan
->conn_state
) &&
4997 chan
->unacked_frames
) {
4998 __set_retrans_timer(chan
);
5001 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5002 l2cap_send_srej_tail(chan
);
5004 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5005 &chan
->conn_state
) &&
5006 chan
->unacked_frames
)
5007 __set_retrans_timer(chan
);
5009 l2cap_send_ack(chan
);
5012 case L2CAP_EV_RECV_RNR
:
5013 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5014 l2cap_pass_to_tx(chan
, control
);
5015 if (control
->poll
) {
5016 l2cap_send_srej_tail(chan
);
5018 struct l2cap_ctrl rr_control
;
5019 memset(&rr_control
, 0, sizeof(rr_control
));
5020 rr_control
.sframe
= 1;
5021 rr_control
.super
= L2CAP_SUPER_RR
;
5022 rr_control
.reqseq
= chan
->buffer_seq
;
5023 l2cap_send_sframe(chan
, &rr_control
);
5027 case L2CAP_EV_RECV_REJ
:
5028 l2cap_handle_rej(chan
, control
);
5030 case L2CAP_EV_RECV_SREJ
:
5031 l2cap_handle_srej(chan
, control
);
5035 if (skb
&& !skb_in_use
) {
5036 BT_DBG("Freeing %p", skb
);
5043 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
5045 /* Make sure reqseq is for a packet that has been sent but not acked */
5048 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
5049 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
5052 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5053 struct sk_buff
*skb
, u8 event
)
5057 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
5058 control
, skb
, event
, chan
->rx_state
);
5060 if (__valid_reqseq(chan
, control
->reqseq
)) {
5061 switch (chan
->rx_state
) {
5062 case L2CAP_RX_STATE_RECV
:
5063 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
5065 case L2CAP_RX_STATE_SREJ_SENT
:
5066 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
5074 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5075 control
->reqseq
, chan
->next_tx_seq
,
5076 chan
->expected_ack_seq
);
5077 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5083 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5084 struct sk_buff
*skb
)
5088 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
5091 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
5092 L2CAP_TXSEQ_EXPECTED
) {
5093 l2cap_pass_to_tx(chan
, control
);
5095 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
5096 __next_seq(chan
, chan
->buffer_seq
));
5098 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5100 l2cap_reassemble_sdu(chan
, skb
, control
);
5103 kfree_skb(chan
->sdu
);
5106 chan
->sdu_last_frag
= NULL
;
5110 BT_DBG("Freeing %p", skb
);
5115 chan
->last_acked_seq
= control
->txseq
;
5116 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
5121 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5123 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
5127 __unpack_control(chan
, skb
);
5132 * We can just drop the corrupted I-frame here.
5133 * Receiver will miss it and start proper recovery
5134 * procedures and ask for retransmission.
5136 if (l2cap_check_fcs(chan
, skb
))
5139 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
5140 len
-= L2CAP_SDULEN_SIZE
;
5142 if (chan
->fcs
== L2CAP_FCS_CRC16
)
5143 len
-= L2CAP_FCS_SIZE
;
5145 if (len
> chan
->mps
) {
5146 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5150 if (!control
->sframe
) {
5153 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5154 control
->sar
, control
->reqseq
, control
->final
,
5157 /* Validate F-bit - F=0 always valid, F=1 only
5158 * valid in TX WAIT_F
5160 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
5163 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
5164 event
= L2CAP_EV_RECV_IFRAME
;
5165 err
= l2cap_rx(chan
, control
, skb
, event
);
5167 err
= l2cap_stream_rx(chan
, control
, skb
);
5171 l2cap_send_disconn_req(chan
->conn
, chan
,
5174 const u8 rx_func_to_event
[4] = {
5175 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
5176 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
5179 /* Only I-frames are expected in streaming mode */
5180 if (chan
->mode
== L2CAP_MODE_STREAMING
)
5183 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5184 control
->reqseq
, control
->final
, control
->poll
,
5189 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5193 /* Validate F and P bits */
5194 if (control
->final
&& (control
->poll
||
5195 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
5198 event
= rx_func_to_event
[control
->super
];
5199 if (l2cap_rx(chan
, control
, skb
, event
))
5200 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5210 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
5212 struct l2cap_chan
*chan
;
5214 chan
= l2cap_get_chan_by_scid(conn
, cid
);
5216 BT_DBG("unknown cid 0x%4.4x", cid
);
5217 /* Drop packet and return */
5222 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5224 if (chan
->state
!= BT_CONNECTED
)
5227 switch (chan
->mode
) {
5228 case L2CAP_MODE_BASIC
:
5229 /* If socket recv buffers overflows we drop data here
5230 * which is *bad* because L2CAP has to be reliable.
5231 * But we don't have any other choice. L2CAP doesn't
5232 * provide flow control mechanism. */
5234 if (chan
->imtu
< skb
->len
)
5237 if (!chan
->ops
->recv(chan
->data
, skb
))
5241 case L2CAP_MODE_ERTM
:
5242 case L2CAP_MODE_STREAMING
:
5243 l2cap_data_rcv(chan
, skb
);
5247 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
5255 l2cap_chan_unlock(chan
);
5260 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
5262 struct l2cap_chan
*chan
;
5264 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
5268 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5270 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5273 if (chan
->imtu
< skb
->len
)
5276 if (!chan
->ops
->recv(chan
->data
, skb
))
5285 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
5286 struct sk_buff
*skb
)
5288 struct l2cap_chan
*chan
;
5290 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
5294 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5296 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5299 if (chan
->imtu
< skb
->len
)
5302 if (!chan
->ops
->recv(chan
->data
, skb
))
5311 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
5313 struct l2cap_hdr
*lh
= (void *) skb
->data
;
5317 skb_pull(skb
, L2CAP_HDR_SIZE
);
5318 cid
= __le16_to_cpu(lh
->cid
);
5319 len
= __le16_to_cpu(lh
->len
);
5321 if (len
!= skb
->len
) {
5326 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
5329 case L2CAP_CID_LE_SIGNALING
:
5330 case L2CAP_CID_SIGNALING
:
5331 l2cap_sig_channel(conn
, skb
);
5334 case L2CAP_CID_CONN_LESS
:
5335 psm
= get_unaligned((__le16
*) skb
->data
);
5337 l2cap_conless_channel(conn
, psm
, skb
);
5340 case L2CAP_CID_LE_DATA
:
5341 l2cap_att_channel(conn
, cid
, skb
);
5345 if (smp_sig_channel(conn
, skb
))
5346 l2cap_conn_del(conn
->hcon
, EACCES
);
5350 l2cap_data_channel(conn
, cid
, skb
);
5355 /* ---- L2CAP interface with lower layer (HCI) ---- */
5357 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
5359 int exact
= 0, lm1
= 0, lm2
= 0;
5360 struct l2cap_chan
*c
;
5362 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
5364 /* Find listening sockets and check their link_mode */
5365 read_lock(&chan_list_lock
);
5366 list_for_each_entry(c
, &chan_list
, global_l
) {
5367 struct sock
*sk
= c
->sk
;
5369 if (c
->state
!= BT_LISTEN
)
5372 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
5373 lm1
|= HCI_LM_ACCEPT
;
5374 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5375 lm1
|= HCI_LM_MASTER
;
5377 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
5378 lm2
|= HCI_LM_ACCEPT
;
5379 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5380 lm2
|= HCI_LM_MASTER
;
5383 read_unlock(&chan_list_lock
);
5385 return exact
? lm1
: lm2
;
5388 int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
5390 struct l2cap_conn
*conn
;
5392 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
5395 conn
= l2cap_conn_add(hcon
, status
);
5397 l2cap_conn_ready(conn
);
5399 l2cap_conn_del(hcon
, bt_to_errno(status
));
5404 int l2cap_disconn_ind(struct hci_conn
*hcon
)
5406 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5408 BT_DBG("hcon %p", hcon
);
5411 return HCI_ERROR_REMOTE_USER_TERM
;
5412 return conn
->disc_reason
;
5415 int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
5417 BT_DBG("hcon %p reason %d", hcon
, reason
);
5419 l2cap_conn_del(hcon
, bt_to_errno(reason
));
5423 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
5425 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
5428 if (encrypt
== 0x00) {
5429 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
5430 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
5431 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
5432 l2cap_chan_close(chan
, ECONNREFUSED
);
5434 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
5435 __clear_chan_timer(chan
);
5439 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
5441 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5442 struct l2cap_chan
*chan
;
5447 BT_DBG("conn %p", conn
);
5449 if (hcon
->type
== LE_LINK
) {
5450 if (!status
&& encrypt
)
5451 smp_distribute_keys(conn
, 0);
5452 cancel_delayed_work(&conn
->security_timer
);
5455 mutex_lock(&conn
->chan_lock
);
5457 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
5458 l2cap_chan_lock(chan
);
5460 BT_DBG("chan->scid %d", chan
->scid
);
5462 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
5463 if (!status
&& encrypt
) {
5464 chan
->sec_level
= hcon
->sec_level
;
5465 l2cap_chan_ready(chan
);
5468 l2cap_chan_unlock(chan
);
5472 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
5473 l2cap_chan_unlock(chan
);
5477 if (!status
&& (chan
->state
== BT_CONNECTED
||
5478 chan
->state
== BT_CONFIG
)) {
5479 struct sock
*sk
= chan
->sk
;
5481 clear_bit(BT_SK_SUSPEND
, &bt_sk(sk
)->flags
);
5482 sk
->sk_state_change(sk
);
5484 l2cap_check_encryption(chan
, encrypt
);
5485 l2cap_chan_unlock(chan
);
5489 if (chan
->state
== BT_CONNECT
) {
5491 l2cap_send_conn_req(chan
);
5493 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5495 } else if (chan
->state
== BT_CONNECT2
) {
5496 struct sock
*sk
= chan
->sk
;
5497 struct l2cap_conn_rsp rsp
;
5503 if (test_bit(BT_SK_DEFER_SETUP
,
5504 &bt_sk(sk
)->flags
)) {
5505 struct sock
*parent
= bt_sk(sk
)->parent
;
5506 res
= L2CAP_CR_PEND
;
5507 stat
= L2CAP_CS_AUTHOR_PEND
;
5509 parent
->sk_data_ready(parent
, 0);
5511 __l2cap_state_change(chan
, BT_CONFIG
);
5512 res
= L2CAP_CR_SUCCESS
;
5513 stat
= L2CAP_CS_NO_INFO
;
5516 __l2cap_state_change(chan
, BT_DISCONN
);
5517 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5518 res
= L2CAP_CR_SEC_BLOCK
;
5519 stat
= L2CAP_CS_NO_INFO
;
5524 rsp
.scid
= cpu_to_le16(chan
->dcid
);
5525 rsp
.dcid
= cpu_to_le16(chan
->scid
);
5526 rsp
.result
= cpu_to_le16(res
);
5527 rsp
.status
= cpu_to_le16(stat
);
5528 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
5532 l2cap_chan_unlock(chan
);
5535 mutex_unlock(&conn
->chan_lock
);
5540 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
5542 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5545 conn
= l2cap_conn_add(hcon
, 0);
5550 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
5552 if (!(flags
& ACL_CONT
)) {
5553 struct l2cap_hdr
*hdr
;
5557 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
5558 kfree_skb(conn
->rx_skb
);
5559 conn
->rx_skb
= NULL
;
5561 l2cap_conn_unreliable(conn
, ECOMM
);
5564 /* Start fragment always begin with Basic L2CAP header */
5565 if (skb
->len
< L2CAP_HDR_SIZE
) {
5566 BT_ERR("Frame is too short (len %d)", skb
->len
);
5567 l2cap_conn_unreliable(conn
, ECOMM
);
5571 hdr
= (struct l2cap_hdr
*) skb
->data
;
5572 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
5574 if (len
== skb
->len
) {
5575 /* Complete frame received */
5576 l2cap_recv_frame(conn
, skb
);
5580 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
5582 if (skb
->len
> len
) {
5583 BT_ERR("Frame is too long (len %d, expected len %d)",
5585 l2cap_conn_unreliable(conn
, ECOMM
);
5589 /* Allocate skb for the complete frame (with header) */
5590 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
5594 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5596 conn
->rx_len
= len
- skb
->len
;
5598 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
5600 if (!conn
->rx_len
) {
5601 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
5602 l2cap_conn_unreliable(conn
, ECOMM
);
5606 if (skb
->len
> conn
->rx_len
) {
5607 BT_ERR("Fragment is too long (len %d, expected %d)",
5608 skb
->len
, conn
->rx_len
);
5609 kfree_skb(conn
->rx_skb
);
5610 conn
->rx_skb
= NULL
;
5612 l2cap_conn_unreliable(conn
, ECOMM
);
5616 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5618 conn
->rx_len
-= skb
->len
;
5620 if (!conn
->rx_len
) {
5621 /* Complete frame received */
5622 l2cap_recv_frame(conn
, conn
->rx_skb
);
5623 conn
->rx_skb
= NULL
;
5632 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
5634 struct l2cap_chan
*c
;
5636 read_lock(&chan_list_lock
);
5638 list_for_each_entry(c
, &chan_list
, global_l
) {
5639 struct sock
*sk
= c
->sk
;
5641 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5642 batostr(&bt_sk(sk
)->src
),
5643 batostr(&bt_sk(sk
)->dst
),
5644 c
->state
, __le16_to_cpu(c
->psm
),
5645 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
5646 c
->sec_level
, c
->mode
);
5649 read_unlock(&chan_list_lock
);
5654 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
5656 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
5659 static const struct file_operations l2cap_debugfs_fops
= {
5660 .open
= l2cap_debugfs_open
,
5662 .llseek
= seq_lseek
,
5663 .release
= single_release
,
5666 static struct dentry
*l2cap_debugfs
;
5668 int __init
l2cap_init(void)
5672 err
= l2cap_init_sockets();
5677 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
5678 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
5680 BT_ERR("Failed to create L2CAP debug file");
5686 void l2cap_exit(void)
5688 debugfs_remove(l2cap_debugfs
);
5689 l2cap_cleanup_sockets();
5692 module_param(disable_ertm
, bool, 0644);
5693 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");