2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
63 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
65 static LIST_HEAD(chan_list
);
66 static DEFINE_RWLOCK(chan_list_lock
);
68 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
69 u8 code
, u8 ident
, u16 dlen
, void *data
);
70 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
72 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
73 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
74 struct l2cap_chan
*chan
, int err
);
76 static int l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
77 struct sk_buff_head
*skbs
, u8 event
);
79 /* ---- L2CAP channels ---- */
81 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
85 list_for_each_entry(c
, &conn
->chan_l
, list
) {
92 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
96 list_for_each_entry(c
, &conn
->chan_l
, list
) {
103 /* Find channel with given SCID.
104 * Returns locked channel. */
105 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
107 struct l2cap_chan
*c
;
109 mutex_lock(&conn
->chan_lock
);
110 c
= __l2cap_get_chan_by_scid(conn
, cid
);
113 mutex_unlock(&conn
->chan_lock
);
118 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
120 struct l2cap_chan
*c
;
122 list_for_each_entry(c
, &conn
->chan_l
, list
) {
123 if (c
->ident
== ident
)
129 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
131 struct l2cap_chan
*c
;
133 list_for_each_entry(c
, &chan_list
, global_l
) {
134 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
140 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
144 write_lock(&chan_list_lock
);
146 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
159 for (p
= 0x1001; p
< 0x1100; p
+= 2)
160 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
161 chan
->psm
= cpu_to_le16(p
);
162 chan
->sport
= cpu_to_le16(p
);
169 write_unlock(&chan_list_lock
);
173 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
175 write_lock(&chan_list_lock
);
179 write_unlock(&chan_list_lock
);
184 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
186 u16 cid
= L2CAP_CID_DYN_START
;
188 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
189 if (!__l2cap_get_chan_by_scid(conn
, cid
))
196 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
198 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
199 state_to_string(state
));
202 chan
->ops
->state_change(chan
->data
, state
);
205 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
207 struct sock
*sk
= chan
->sk
;
210 __l2cap_state_change(chan
, state
);
214 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
216 struct sock
*sk
= chan
->sk
;
221 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
223 struct sock
*sk
= chan
->sk
;
226 __l2cap_chan_set_err(chan
, err
);
230 static void __set_retrans_timer(struct l2cap_chan
*chan
)
232 if (!delayed_work_pending(&chan
->monitor_timer
) &&
233 chan
->retrans_timeout
) {
234 l2cap_set_timer(chan
, &chan
->retrans_timer
,
235 msecs_to_jiffies(chan
->retrans_timeout
));
239 static void __set_monitor_timer(struct l2cap_chan
*chan
)
241 __clear_retrans_timer(chan
);
242 if (chan
->monitor_timeout
) {
243 l2cap_set_timer(chan
, &chan
->monitor_timer
,
244 msecs_to_jiffies(chan
->monitor_timeout
));
248 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
253 skb_queue_walk(head
, skb
) {
254 if (bt_cb(skb
)->control
.txseq
== seq
)
261 /* ---- L2CAP sequence number lists ---- */
263 /* For ERTM, ordered lists of sequence numbers must be tracked for
264 * SREJ requests that are received and for frames that are to be
265 * retransmitted. These seq_list functions implement a singly-linked
266 * list in an array, where membership in the list can also be checked
267 * in constant time. Items can also be added to the tail of the list
268 * and removed from the head in constant time, without further memory
272 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
274 size_t alloc_size
, i
;
276 /* Allocated size is a power of 2 to map sequence numbers
277 * (which may be up to 14 bits) in to a smaller array that is
278 * sized for the negotiated ERTM transmit windows.
280 alloc_size
= roundup_pow_of_two(size
);
282 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
286 seq_list
->mask
= alloc_size
- 1;
287 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
288 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
289 for (i
= 0; i
< alloc_size
; i
++)
290 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
295 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
297 kfree(seq_list
->list
);
300 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
303 /* Constant-time check for list membership */
304 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
307 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
309 u16 mask
= seq_list
->mask
;
311 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
312 /* In case someone tries to pop the head of an empty list */
313 return L2CAP_SEQ_LIST_CLEAR
;
314 } else if (seq_list
->head
== seq
) {
315 /* Head can be removed in constant time */
316 seq_list
->head
= seq_list
->list
[seq
& mask
];
317 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
319 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
320 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
321 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
324 /* Walk the list to find the sequence number */
325 u16 prev
= seq_list
->head
;
326 while (seq_list
->list
[prev
& mask
] != seq
) {
327 prev
= seq_list
->list
[prev
& mask
];
328 if (prev
== L2CAP_SEQ_LIST_TAIL
)
329 return L2CAP_SEQ_LIST_CLEAR
;
332 /* Unlink the number from the list and clear it */
333 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
334 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
335 if (seq_list
->tail
== seq
)
336 seq_list
->tail
= prev
;
341 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
343 /* Remove the head in constant time */
344 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
347 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
351 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
354 for (i
= 0; i
<= seq_list
->mask
; i
++)
355 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
357 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
358 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
361 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
363 u16 mask
= seq_list
->mask
;
365 /* All appends happen in constant time */
367 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
370 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
371 seq_list
->head
= seq
;
373 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
375 seq_list
->tail
= seq
;
376 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
379 static void l2cap_chan_timeout(struct work_struct
*work
)
381 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
383 struct l2cap_conn
*conn
= chan
->conn
;
386 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
388 mutex_lock(&conn
->chan_lock
);
389 l2cap_chan_lock(chan
);
391 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
392 reason
= ECONNREFUSED
;
393 else if (chan
->state
== BT_CONNECT
&&
394 chan
->sec_level
!= BT_SECURITY_SDP
)
395 reason
= ECONNREFUSED
;
399 l2cap_chan_close(chan
, reason
);
401 l2cap_chan_unlock(chan
);
403 chan
->ops
->close(chan
->data
);
404 mutex_unlock(&conn
->chan_lock
);
406 l2cap_chan_put(chan
);
409 struct l2cap_chan
*l2cap_chan_create(void)
411 struct l2cap_chan
*chan
;
413 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
417 mutex_init(&chan
->lock
);
419 write_lock(&chan_list_lock
);
420 list_add(&chan
->global_l
, &chan_list
);
421 write_unlock(&chan_list_lock
);
423 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
425 chan
->state
= BT_OPEN
;
427 atomic_set(&chan
->refcnt
, 1);
429 /* This flag is cleared in l2cap_chan_ready() */
430 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
432 BT_DBG("chan %p", chan
);
437 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
439 write_lock(&chan_list_lock
);
440 list_del(&chan
->global_l
);
441 write_unlock(&chan_list_lock
);
443 l2cap_chan_put(chan
);
446 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
448 chan
->fcs
= L2CAP_FCS_CRC16
;
449 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
450 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
451 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
452 chan
->sec_level
= BT_SECURITY_LOW
;
454 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
457 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
459 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
460 __le16_to_cpu(chan
->psm
), chan
->dcid
);
462 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
466 switch (chan
->chan_type
) {
467 case L2CAP_CHAN_CONN_ORIENTED
:
468 if (conn
->hcon
->type
== LE_LINK
) {
470 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
471 chan
->scid
= L2CAP_CID_LE_DATA
;
472 chan
->dcid
= L2CAP_CID_LE_DATA
;
474 /* Alloc CID for connection-oriented socket */
475 chan
->scid
= l2cap_alloc_cid(conn
);
476 chan
->omtu
= L2CAP_DEFAULT_MTU
;
480 case L2CAP_CHAN_CONN_LESS
:
481 /* Connectionless socket */
482 chan
->scid
= L2CAP_CID_CONN_LESS
;
483 chan
->dcid
= L2CAP_CID_CONN_LESS
;
484 chan
->omtu
= L2CAP_DEFAULT_MTU
;
488 /* Raw socket can send/recv signalling messages only */
489 chan
->scid
= L2CAP_CID_SIGNALING
;
490 chan
->dcid
= L2CAP_CID_SIGNALING
;
491 chan
->omtu
= L2CAP_DEFAULT_MTU
;
494 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
495 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
496 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
497 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
498 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
499 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
501 l2cap_chan_hold(chan
);
503 list_add(&chan
->list
, &conn
->chan_l
);
506 static void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
508 mutex_lock(&conn
->chan_lock
);
509 __l2cap_chan_add(conn
, chan
);
510 mutex_unlock(&conn
->chan_lock
);
513 static void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
515 struct sock
*sk
= chan
->sk
;
516 struct l2cap_conn
*conn
= chan
->conn
;
517 struct sock
*parent
= bt_sk(sk
)->parent
;
519 __clear_chan_timer(chan
);
521 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
524 /* Delete from channel list */
525 list_del(&chan
->list
);
527 l2cap_chan_put(chan
);
530 hci_conn_put(conn
->hcon
);
535 __l2cap_state_change(chan
, BT_CLOSED
);
536 sock_set_flag(sk
, SOCK_ZAPPED
);
539 __l2cap_chan_set_err(chan
, err
);
542 bt_accept_unlink(sk
);
543 parent
->sk_data_ready(parent
, 0);
545 sk
->sk_state_change(sk
);
549 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
553 case L2CAP_MODE_BASIC
:
556 case L2CAP_MODE_ERTM
:
557 __clear_retrans_timer(chan
);
558 __clear_monitor_timer(chan
);
559 __clear_ack_timer(chan
);
561 skb_queue_purge(&chan
->srej_q
);
563 l2cap_seq_list_free(&chan
->srej_list
);
564 l2cap_seq_list_free(&chan
->retrans_list
);
568 case L2CAP_MODE_STREAMING
:
569 skb_queue_purge(&chan
->tx_q
);
576 static void l2cap_chan_cleanup_listen(struct sock
*parent
)
580 BT_DBG("parent %p", parent
);
582 /* Close not yet accepted channels */
583 while ((sk
= bt_accept_dequeue(parent
, NULL
))) {
584 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
586 l2cap_chan_lock(chan
);
587 __clear_chan_timer(chan
);
588 l2cap_chan_close(chan
, ECONNRESET
);
589 l2cap_chan_unlock(chan
);
591 chan
->ops
->close(chan
->data
);
595 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
597 struct l2cap_conn
*conn
= chan
->conn
;
598 struct sock
*sk
= chan
->sk
;
600 BT_DBG("chan %p state %s sk %p", chan
,
601 state_to_string(chan
->state
), sk
);
603 switch (chan
->state
) {
606 l2cap_chan_cleanup_listen(sk
);
608 __l2cap_state_change(chan
, BT_CLOSED
);
609 sock_set_flag(sk
, SOCK_ZAPPED
);
615 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
616 conn
->hcon
->type
== ACL_LINK
) {
617 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
618 l2cap_send_disconn_req(conn
, chan
, reason
);
620 l2cap_chan_del(chan
, reason
);
624 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
625 conn
->hcon
->type
== ACL_LINK
) {
626 struct l2cap_conn_rsp rsp
;
629 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
))
630 result
= L2CAP_CR_SEC_BLOCK
;
632 result
= L2CAP_CR_BAD_PSM
;
633 l2cap_state_change(chan
, BT_DISCONN
);
635 rsp
.scid
= cpu_to_le16(chan
->dcid
);
636 rsp
.dcid
= cpu_to_le16(chan
->scid
);
637 rsp
.result
= cpu_to_le16(result
);
638 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
639 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
643 l2cap_chan_del(chan
, reason
);
648 l2cap_chan_del(chan
, reason
);
653 sock_set_flag(sk
, SOCK_ZAPPED
);
659 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
661 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
662 switch (chan
->sec_level
) {
663 case BT_SECURITY_HIGH
:
664 return HCI_AT_DEDICATED_BONDING_MITM
;
665 case BT_SECURITY_MEDIUM
:
666 return HCI_AT_DEDICATED_BONDING
;
668 return HCI_AT_NO_BONDING
;
670 } else if (chan
->psm
== cpu_to_le16(0x0001)) {
671 if (chan
->sec_level
== BT_SECURITY_LOW
)
672 chan
->sec_level
= BT_SECURITY_SDP
;
674 if (chan
->sec_level
== BT_SECURITY_HIGH
)
675 return HCI_AT_NO_BONDING_MITM
;
677 return HCI_AT_NO_BONDING
;
679 switch (chan
->sec_level
) {
680 case BT_SECURITY_HIGH
:
681 return HCI_AT_GENERAL_BONDING_MITM
;
682 case BT_SECURITY_MEDIUM
:
683 return HCI_AT_GENERAL_BONDING
;
685 return HCI_AT_NO_BONDING
;
690 /* Service level security */
691 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
693 struct l2cap_conn
*conn
= chan
->conn
;
696 auth_type
= l2cap_get_auth_type(chan
);
698 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
701 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
705 /* Get next available identificator.
706 * 1 - 128 are used by kernel.
707 * 129 - 199 are reserved.
708 * 200 - 254 are used by utilities like l2ping, etc.
711 spin_lock(&conn
->lock
);
713 if (++conn
->tx_ident
> 128)
718 spin_unlock(&conn
->lock
);
723 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
725 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
728 BT_DBG("code 0x%2.2x", code
);
733 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
734 flags
= ACL_START_NO_FLUSH
;
738 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
739 skb
->priority
= HCI_PRIO_MAX
;
741 hci_send_acl(conn
->hchan
, skb
, flags
);
744 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
746 struct hci_conn
*hcon
= chan
->conn
->hcon
;
749 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
752 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
753 lmp_no_flush_capable(hcon
->hdev
))
754 flags
= ACL_START_NO_FLUSH
;
758 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
759 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
762 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
764 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
765 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
767 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
770 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
771 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
778 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
779 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
786 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
788 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
789 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
791 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
794 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
795 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
802 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
803 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
810 static inline void __unpack_control(struct l2cap_chan
*chan
,
813 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
814 __unpack_extended_control(get_unaligned_le32(skb
->data
),
815 &bt_cb(skb
)->control
);
816 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
818 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
819 &bt_cb(skb
)->control
);
820 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
824 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
828 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
829 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
831 if (control
->sframe
) {
832 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
833 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
834 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
836 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
837 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
843 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
847 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
848 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
850 if (control
->sframe
) {
851 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
852 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
853 packed
|= L2CAP_CTRL_FRAME_TYPE
;
855 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
856 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
862 static inline void __pack_control(struct l2cap_chan
*chan
,
863 struct l2cap_ctrl
*control
,
866 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
867 put_unaligned_le32(__pack_extended_control(control
),
868 skb
->data
+ L2CAP_HDR_SIZE
);
870 put_unaligned_le16(__pack_enhanced_control(control
),
871 skb
->data
+ L2CAP_HDR_SIZE
);
875 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
879 struct l2cap_hdr
*lh
;
882 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
883 hlen
= L2CAP_EXT_HDR_SIZE
;
885 hlen
= L2CAP_ENH_HDR_SIZE
;
887 if (chan
->fcs
== L2CAP_FCS_CRC16
)
888 hlen
+= L2CAP_FCS_SIZE
;
890 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
893 return ERR_PTR(-ENOMEM
);
895 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
896 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
897 lh
->cid
= cpu_to_le16(chan
->dcid
);
899 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
900 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
902 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
904 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
905 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
906 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
909 skb
->priority
= HCI_PRIO_MAX
;
913 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
914 struct l2cap_ctrl
*control
)
919 BT_DBG("chan %p, control %p", chan
, control
);
921 if (!control
->sframe
)
924 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
928 if (control
->super
== L2CAP_SUPER_RR
)
929 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
930 else if (control
->super
== L2CAP_SUPER_RNR
)
931 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
933 if (control
->super
!= L2CAP_SUPER_SREJ
) {
934 chan
->last_acked_seq
= control
->reqseq
;
935 __clear_ack_timer(chan
);
938 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
939 control
->final
, control
->poll
, control
->super
);
941 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
942 control_field
= __pack_extended_control(control
);
944 control_field
= __pack_enhanced_control(control
);
946 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
948 l2cap_do_send(chan
, skb
);
951 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
953 struct l2cap_ctrl control
;
955 BT_DBG("chan %p, poll %d", chan
, poll
);
957 memset(&control
, 0, sizeof(control
));
961 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
962 control
.super
= L2CAP_SUPER_RNR
;
964 control
.super
= L2CAP_SUPER_RR
;
966 control
.reqseq
= chan
->buffer_seq
;
967 l2cap_send_sframe(chan
, &control
);
970 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
972 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
975 static void l2cap_send_conn_req(struct l2cap_chan
*chan
)
977 struct l2cap_conn
*conn
= chan
->conn
;
978 struct l2cap_conn_req req
;
980 req
.scid
= cpu_to_le16(chan
->scid
);
983 chan
->ident
= l2cap_get_ident(conn
);
985 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
987 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
990 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
992 struct sock
*sk
= chan
->sk
;
997 parent
= bt_sk(sk
)->parent
;
999 BT_DBG("sk %p, parent %p", sk
, parent
);
1001 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1002 chan
->conf_state
= 0;
1003 __clear_chan_timer(chan
);
1005 __l2cap_state_change(chan
, BT_CONNECTED
);
1006 sk
->sk_state_change(sk
);
1009 parent
->sk_data_ready(parent
, 0);
1014 static void l2cap_do_start(struct l2cap_chan
*chan
)
1016 struct l2cap_conn
*conn
= chan
->conn
;
1018 if (conn
->hcon
->type
== LE_LINK
) {
1019 l2cap_chan_ready(chan
);
1023 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1024 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1027 if (l2cap_chan_check_security(chan
) &&
1028 __l2cap_no_conn_pending(chan
))
1029 l2cap_send_conn_req(chan
);
1031 struct l2cap_info_req req
;
1032 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1034 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1035 conn
->info_ident
= l2cap_get_ident(conn
);
1037 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1039 l2cap_send_cmd(conn
, conn
->info_ident
,
1040 L2CAP_INFO_REQ
, sizeof(req
), &req
);
1044 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1046 u32 local_feat_mask
= l2cap_feat_mask
;
1048 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1051 case L2CAP_MODE_ERTM
:
1052 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1053 case L2CAP_MODE_STREAMING
:
1054 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1060 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
1062 struct sock
*sk
= chan
->sk
;
1063 struct l2cap_disconn_req req
;
1068 if (chan
->mode
== L2CAP_MODE_ERTM
) {
1069 __clear_retrans_timer(chan
);
1070 __clear_monitor_timer(chan
);
1071 __clear_ack_timer(chan
);
1074 req
.dcid
= cpu_to_le16(chan
->dcid
);
1075 req
.scid
= cpu_to_le16(chan
->scid
);
1076 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1077 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
1080 __l2cap_state_change(chan
, BT_DISCONN
);
1081 __l2cap_chan_set_err(chan
, err
);
1085 /* ---- L2CAP connections ---- */
1086 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1088 struct l2cap_chan
*chan
, *tmp
;
1090 BT_DBG("conn %p", conn
);
1092 mutex_lock(&conn
->chan_lock
);
1094 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1095 struct sock
*sk
= chan
->sk
;
1097 l2cap_chan_lock(chan
);
1099 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1100 l2cap_chan_unlock(chan
);
1104 if (chan
->state
== BT_CONNECT
) {
1105 if (!l2cap_chan_check_security(chan
) ||
1106 !__l2cap_no_conn_pending(chan
)) {
1107 l2cap_chan_unlock(chan
);
1111 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1112 && test_bit(CONF_STATE2_DEVICE
,
1113 &chan
->conf_state
)) {
1114 l2cap_chan_close(chan
, ECONNRESET
);
1115 l2cap_chan_unlock(chan
);
1119 l2cap_send_conn_req(chan
);
1121 } else if (chan
->state
== BT_CONNECT2
) {
1122 struct l2cap_conn_rsp rsp
;
1124 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1125 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1127 if (l2cap_chan_check_security(chan
)) {
1129 if (test_bit(BT_SK_DEFER_SETUP
,
1130 &bt_sk(sk
)->flags
)) {
1131 struct sock
*parent
= bt_sk(sk
)->parent
;
1132 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1133 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1135 parent
->sk_data_ready(parent
, 0);
1138 __l2cap_state_change(chan
, BT_CONFIG
);
1139 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1140 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1144 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1145 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1148 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1151 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1152 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1153 l2cap_chan_unlock(chan
);
1157 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1158 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1159 l2cap_build_conf_req(chan
, buf
), buf
);
1160 chan
->num_conf_req
++;
1163 l2cap_chan_unlock(chan
);
1166 mutex_unlock(&conn
->chan_lock
);
1169 /* Find socket with cid and source/destination bdaddr.
1170 * Returns closest match, locked.
1172 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1176 struct l2cap_chan
*c
, *c1
= NULL
;
1178 read_lock(&chan_list_lock
);
1180 list_for_each_entry(c
, &chan_list
, global_l
) {
1181 struct sock
*sk
= c
->sk
;
1183 if (state
&& c
->state
!= state
)
1186 if (c
->scid
== cid
) {
1187 int src_match
, dst_match
;
1188 int src_any
, dst_any
;
1191 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1192 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1193 if (src_match
&& dst_match
) {
1194 read_unlock(&chan_list_lock
);
1199 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1200 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1201 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1202 (src_any
&& dst_any
))
1207 read_unlock(&chan_list_lock
);
1212 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1214 struct sock
*parent
, *sk
;
1215 struct l2cap_chan
*chan
, *pchan
;
1219 /* Check if we have socket listening on cid */
1220 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1221 conn
->src
, conn
->dst
);
1229 /* Check for backlog size */
1230 if (sk_acceptq_is_full(parent
)) {
1231 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
1235 chan
= pchan
->ops
->new_connection(pchan
->data
);
1241 hci_conn_hold(conn
->hcon
);
1243 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1244 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1246 bt_accept_enqueue(parent
, sk
);
1248 l2cap_chan_add(conn
, chan
);
1250 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1252 __l2cap_state_change(chan
, BT_CONNECTED
);
1253 parent
->sk_data_ready(parent
, 0);
1256 release_sock(parent
);
1259 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1261 struct l2cap_chan
*chan
;
1263 BT_DBG("conn %p", conn
);
1265 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1266 l2cap_le_conn_ready(conn
);
1268 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1269 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
1271 mutex_lock(&conn
->chan_lock
);
1273 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1275 l2cap_chan_lock(chan
);
1277 if (conn
->hcon
->type
== LE_LINK
) {
1278 if (smp_conn_security(conn
, chan
->sec_level
))
1279 l2cap_chan_ready(chan
);
1281 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1282 struct sock
*sk
= chan
->sk
;
1283 __clear_chan_timer(chan
);
1285 __l2cap_state_change(chan
, BT_CONNECTED
);
1286 sk
->sk_state_change(sk
);
1289 } else if (chan
->state
== BT_CONNECT
)
1290 l2cap_do_start(chan
);
1292 l2cap_chan_unlock(chan
);
1295 mutex_unlock(&conn
->chan_lock
);
1298 /* Notify sockets that we cannot guaranty reliability anymore */
1299 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1301 struct l2cap_chan
*chan
;
1303 BT_DBG("conn %p", conn
);
1305 mutex_lock(&conn
->chan_lock
);
1307 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1308 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1309 __l2cap_chan_set_err(chan
, err
);
1312 mutex_unlock(&conn
->chan_lock
);
1315 static void l2cap_info_timeout(struct work_struct
*work
)
1317 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1320 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1321 conn
->info_ident
= 0;
1323 l2cap_conn_start(conn
);
1326 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1328 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1329 struct l2cap_chan
*chan
, *l
;
1334 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1336 kfree_skb(conn
->rx_skb
);
1338 mutex_lock(&conn
->chan_lock
);
1341 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1342 l2cap_chan_hold(chan
);
1343 l2cap_chan_lock(chan
);
1345 l2cap_chan_del(chan
, err
);
1347 l2cap_chan_unlock(chan
);
1349 chan
->ops
->close(chan
->data
);
1350 l2cap_chan_put(chan
);
1353 mutex_unlock(&conn
->chan_lock
);
1355 hci_chan_del(conn
->hchan
);
1357 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1358 cancel_delayed_work_sync(&conn
->info_timer
);
1360 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1361 cancel_delayed_work_sync(&conn
->security_timer
);
1362 smp_chan_destroy(conn
);
1365 hcon
->l2cap_data
= NULL
;
1369 static void security_timeout(struct work_struct
*work
)
1371 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1372 security_timer
.work
);
1374 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1377 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1379 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1380 struct hci_chan
*hchan
;
1385 hchan
= hci_chan_create(hcon
);
1389 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
1391 hci_chan_del(hchan
);
1395 hcon
->l2cap_data
= conn
;
1397 conn
->hchan
= hchan
;
1399 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1401 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
1402 conn
->mtu
= hcon
->hdev
->le_mtu
;
1404 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1406 conn
->src
= &hcon
->hdev
->bdaddr
;
1407 conn
->dst
= &hcon
->dst
;
1409 conn
->feat_mask
= 0;
1411 spin_lock_init(&conn
->lock
);
1412 mutex_init(&conn
->chan_lock
);
1414 INIT_LIST_HEAD(&conn
->chan_l
);
1416 if (hcon
->type
== LE_LINK
)
1417 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1419 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1421 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1426 /* ---- Socket interface ---- */
1428 /* Find socket with psm and source / destination bdaddr.
1429 * Returns closest match.
1431 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1435 struct l2cap_chan
*c
, *c1
= NULL
;
1437 read_lock(&chan_list_lock
);
1439 list_for_each_entry(c
, &chan_list
, global_l
) {
1440 struct sock
*sk
= c
->sk
;
1442 if (state
&& c
->state
!= state
)
1445 if (c
->psm
== psm
) {
1446 int src_match
, dst_match
;
1447 int src_any
, dst_any
;
1450 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1451 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1452 if (src_match
&& dst_match
) {
1453 read_unlock(&chan_list_lock
);
1458 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1459 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1460 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1461 (src_any
&& dst_any
))
1466 read_unlock(&chan_list_lock
);
1471 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1472 bdaddr_t
*dst
, u8 dst_type
)
1474 struct sock
*sk
= chan
->sk
;
1475 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1476 struct l2cap_conn
*conn
;
1477 struct hci_conn
*hcon
;
1478 struct hci_dev
*hdev
;
1482 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src
), batostr(dst
),
1483 dst_type
, __le16_to_cpu(chan
->psm
));
1485 hdev
= hci_get_route(dst
, src
);
1487 return -EHOSTUNREACH
;
1491 l2cap_chan_lock(chan
);
1493 /* PSM must be odd and lsb of upper byte must be 0 */
1494 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1495 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1500 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1505 switch (chan
->mode
) {
1506 case L2CAP_MODE_BASIC
:
1508 case L2CAP_MODE_ERTM
:
1509 case L2CAP_MODE_STREAMING
:
1520 switch (sk
->sk_state
) {
1524 /* Already connecting */
1530 /* Already connected */
1546 /* Set destination address and psm */
1547 bacpy(&bt_sk(sk
)->dst
, dst
);
1554 auth_type
= l2cap_get_auth_type(chan
);
1556 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1557 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1558 chan
->sec_level
, auth_type
);
1560 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1561 chan
->sec_level
, auth_type
);
1564 err
= PTR_ERR(hcon
);
1568 conn
= l2cap_conn_add(hcon
, 0);
1575 if (hcon
->type
== LE_LINK
) {
1578 if (!list_empty(&conn
->chan_l
)) {
1587 /* Update source addr of the socket */
1588 bacpy(src
, conn
->src
);
1590 l2cap_chan_unlock(chan
);
1591 l2cap_chan_add(conn
, chan
);
1592 l2cap_chan_lock(chan
);
1594 l2cap_state_change(chan
, BT_CONNECT
);
1595 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1597 if (hcon
->state
== BT_CONNECTED
) {
1598 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1599 __clear_chan_timer(chan
);
1600 if (l2cap_chan_check_security(chan
))
1601 l2cap_state_change(chan
, BT_CONNECTED
);
1603 l2cap_do_start(chan
);
1609 l2cap_chan_unlock(chan
);
1610 hci_dev_unlock(hdev
);
1615 int __l2cap_wait_ack(struct sock
*sk
)
1617 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1618 DECLARE_WAITQUEUE(wait
, current
);
1622 add_wait_queue(sk_sleep(sk
), &wait
);
1623 set_current_state(TASK_INTERRUPTIBLE
);
1624 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1628 if (signal_pending(current
)) {
1629 err
= sock_intr_errno(timeo
);
1634 timeo
= schedule_timeout(timeo
);
1636 set_current_state(TASK_INTERRUPTIBLE
);
1638 err
= sock_error(sk
);
1642 set_current_state(TASK_RUNNING
);
1643 remove_wait_queue(sk_sleep(sk
), &wait
);
1647 static void l2cap_monitor_timeout(struct work_struct
*work
)
1649 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1650 monitor_timer
.work
);
1652 BT_DBG("chan %p", chan
);
1654 l2cap_chan_lock(chan
);
1657 l2cap_chan_unlock(chan
);
1658 l2cap_chan_put(chan
);
1662 l2cap_tx(chan
, 0, 0, L2CAP_EV_MONITOR_TO
);
1664 l2cap_chan_unlock(chan
);
1665 l2cap_chan_put(chan
);
1668 static void l2cap_retrans_timeout(struct work_struct
*work
)
1670 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1671 retrans_timer
.work
);
1673 BT_DBG("chan %p", chan
);
1675 l2cap_chan_lock(chan
);
1678 l2cap_chan_unlock(chan
);
1679 l2cap_chan_put(chan
);
1683 l2cap_tx(chan
, 0, 0, L2CAP_EV_RETRANS_TO
);
1684 l2cap_chan_unlock(chan
);
1685 l2cap_chan_put(chan
);
1688 static int l2cap_streaming_send(struct l2cap_chan
*chan
,
1689 struct sk_buff_head
*skbs
)
1691 struct sk_buff
*skb
;
1692 struct l2cap_ctrl
*control
;
1694 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1696 if (chan
->state
!= BT_CONNECTED
)
1699 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1701 while (!skb_queue_empty(&chan
->tx_q
)) {
1703 skb
= skb_dequeue(&chan
->tx_q
);
1705 bt_cb(skb
)->control
.retries
= 1;
1706 control
= &bt_cb(skb
)->control
;
1708 control
->reqseq
= 0;
1709 control
->txseq
= chan
->next_tx_seq
;
1711 __pack_control(chan
, control
, skb
);
1713 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1714 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1715 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1718 l2cap_do_send(chan
, skb
);
1720 BT_DBG("Sent txseq %d", (int)control
->txseq
);
1722 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1723 chan
->frames_sent
++;
1729 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1731 struct sk_buff
*skb
, *tx_skb
;
1732 struct l2cap_ctrl
*control
;
1735 BT_DBG("chan %p", chan
);
1737 if (chan
->state
!= BT_CONNECTED
)
1740 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1743 while (chan
->tx_send_head
&&
1744 chan
->unacked_frames
< chan
->remote_tx_win
&&
1745 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1747 skb
= chan
->tx_send_head
;
1749 bt_cb(skb
)->control
.retries
= 1;
1750 control
= &bt_cb(skb
)->control
;
1752 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1755 control
->reqseq
= chan
->buffer_seq
;
1756 chan
->last_acked_seq
= chan
->buffer_seq
;
1757 control
->txseq
= chan
->next_tx_seq
;
1759 __pack_control(chan
, control
, skb
);
1761 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1762 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1763 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1766 /* Clone after data has been modified. Data is assumed to be
1767 read-only (for locking purposes) on cloned sk_buffs.
1769 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1774 __set_retrans_timer(chan
);
1776 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1777 chan
->unacked_frames
++;
1778 chan
->frames_sent
++;
1781 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1782 chan
->tx_send_head
= NULL
;
1784 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1786 l2cap_do_send(chan
, tx_skb
);
1787 BT_DBG("Sent txseq %d", (int)control
->txseq
);
1790 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent
,
1791 (int) chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1796 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1798 struct l2cap_ctrl control
;
1799 struct sk_buff
*skb
;
1800 struct sk_buff
*tx_skb
;
1803 BT_DBG("chan %p", chan
);
1805 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1808 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1809 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1811 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1813 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1818 bt_cb(skb
)->control
.retries
++;
1819 control
= bt_cb(skb
)->control
;
1821 if (chan
->max_tx
!= 0 &&
1822 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1823 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1824 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
1825 l2cap_seq_list_clear(&chan
->retrans_list
);
1829 control
.reqseq
= chan
->buffer_seq
;
1830 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1835 if (skb_cloned(skb
)) {
1836 /* Cloned sk_buffs are read-only, so we need a
1839 tx_skb
= skb_copy(skb
, GFP_ATOMIC
);
1841 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1845 l2cap_seq_list_clear(&chan
->retrans_list
);
1849 /* Update skb contents */
1850 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1851 put_unaligned_le32(__pack_extended_control(&control
),
1852 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1854 put_unaligned_le16(__pack_enhanced_control(&control
),
1855 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1858 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1859 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
1860 put_unaligned_le16(fcs
, skb_put(tx_skb
,
1864 l2cap_do_send(chan
, tx_skb
);
1866 BT_DBG("Resent txseq %d", control
.txseq
);
1868 chan
->last_acked_seq
= chan
->buffer_seq
;
1872 static void l2cap_retransmit(struct l2cap_chan
*chan
,
1873 struct l2cap_ctrl
*control
)
1875 BT_DBG("chan %p, control %p", chan
, control
);
1877 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
1878 l2cap_ertm_resend(chan
);
1881 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
1882 struct l2cap_ctrl
*control
)
1884 struct sk_buff
*skb
;
1886 BT_DBG("chan %p, control %p", chan
, control
);
1889 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
1891 l2cap_seq_list_clear(&chan
->retrans_list
);
1893 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1896 if (chan
->unacked_frames
) {
1897 skb_queue_walk(&chan
->tx_q
, skb
) {
1898 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
1899 skb
== chan
->tx_send_head
)
1903 skb_queue_walk_from(&chan
->tx_q
, skb
) {
1904 if (skb
== chan
->tx_send_head
)
1907 l2cap_seq_list_append(&chan
->retrans_list
,
1908 bt_cb(skb
)->control
.txseq
);
1911 l2cap_ertm_resend(chan
);
1915 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1917 struct l2cap_ctrl control
;
1918 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
1919 chan
->last_acked_seq
);
1922 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1923 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
1925 memset(&control
, 0, sizeof(control
));
1928 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
1929 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
1930 __clear_ack_timer(chan
);
1931 control
.super
= L2CAP_SUPER_RNR
;
1932 control
.reqseq
= chan
->buffer_seq
;
1933 l2cap_send_sframe(chan
, &control
);
1935 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
1936 l2cap_ertm_send(chan
);
1937 /* If any i-frames were sent, they included an ack */
1938 if (chan
->buffer_seq
== chan
->last_acked_seq
)
1942 /* Ack now if the tx window is 3/4ths full.
1943 * Calculate without mul or div
1945 threshold
= chan
->tx_win
;
1946 threshold
+= threshold
<< 1;
1949 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack
,
1952 if (frames_to_ack
>= threshold
) {
1953 __clear_ack_timer(chan
);
1954 control
.super
= L2CAP_SUPER_RR
;
1955 control
.reqseq
= chan
->buffer_seq
;
1956 l2cap_send_sframe(chan
, &control
);
1961 __set_ack_timer(chan
);
1965 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
1966 struct msghdr
*msg
, int len
,
1967 int count
, struct sk_buff
*skb
)
1969 struct l2cap_conn
*conn
= chan
->conn
;
1970 struct sk_buff
**frag
;
1973 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1979 /* Continuation fragments (no L2CAP header) */
1980 frag
= &skb_shinfo(skb
)->frag_list
;
1982 struct sk_buff
*tmp
;
1984 count
= min_t(unsigned int, conn
->mtu
, len
);
1986 tmp
= chan
->ops
->alloc_skb(chan
, count
,
1987 msg
->msg_flags
& MSG_DONTWAIT
);
1989 return PTR_ERR(tmp
);
1993 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1996 (*frag
)->priority
= skb
->priority
;
2001 skb
->len
+= (*frag
)->len
;
2002 skb
->data_len
+= (*frag
)->len
;
2004 frag
= &(*frag
)->next
;
2010 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2011 struct msghdr
*msg
, size_t len
,
2014 struct l2cap_conn
*conn
= chan
->conn
;
2015 struct sk_buff
*skb
;
2016 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2017 struct l2cap_hdr
*lh
;
2019 BT_DBG("chan %p len %d priority %u", chan
, (int)len
, priority
);
2021 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2023 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2024 msg
->msg_flags
& MSG_DONTWAIT
);
2028 skb
->priority
= priority
;
2030 /* Create L2CAP header */
2031 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2032 lh
->cid
= cpu_to_le16(chan
->dcid
);
2033 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2034 put_unaligned(chan
->psm
, skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2036 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2037 if (unlikely(err
< 0)) {
2039 return ERR_PTR(err
);
2044 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2045 struct msghdr
*msg
, size_t len
,
2048 struct l2cap_conn
*conn
= chan
->conn
;
2049 struct sk_buff
*skb
;
2051 struct l2cap_hdr
*lh
;
2053 BT_DBG("chan %p len %d", chan
, (int)len
);
2055 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2057 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
2058 msg
->msg_flags
& MSG_DONTWAIT
);
2062 skb
->priority
= priority
;
2064 /* Create L2CAP header */
2065 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2066 lh
->cid
= cpu_to_le16(chan
->dcid
);
2067 lh
->len
= cpu_to_le16(len
);
2069 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2070 if (unlikely(err
< 0)) {
2072 return ERR_PTR(err
);
2077 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2078 struct msghdr
*msg
, size_t len
,
2081 struct l2cap_conn
*conn
= chan
->conn
;
2082 struct sk_buff
*skb
;
2083 int err
, count
, hlen
;
2084 struct l2cap_hdr
*lh
;
2086 BT_DBG("chan %p len %d", chan
, (int)len
);
2089 return ERR_PTR(-ENOTCONN
);
2091 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2092 hlen
= L2CAP_EXT_HDR_SIZE
;
2094 hlen
= L2CAP_ENH_HDR_SIZE
;
2097 hlen
+= L2CAP_SDULEN_SIZE
;
2099 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2100 hlen
+= L2CAP_FCS_SIZE
;
2102 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2104 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2105 msg
->msg_flags
& MSG_DONTWAIT
);
2109 /* Create L2CAP header */
2110 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2111 lh
->cid
= cpu_to_le16(chan
->dcid
);
2112 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2114 /* Control header is populated later */
2115 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2116 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2118 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2121 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2123 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2124 if (unlikely(err
< 0)) {
2126 return ERR_PTR(err
);
2129 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2130 bt_cb(skb
)->control
.retries
= 0;
2134 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2135 struct sk_buff_head
*seg_queue
,
2136 struct msghdr
*msg
, size_t len
)
2138 struct sk_buff
*skb
;
2144 BT_DBG("chan %p, msg %p, len %d", chan
, msg
, (int)len
);
2146 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2147 * so fragmented skbs are not used. The HCI layer's handling
2148 * of fragmented skbs is not compatible with ERTM's queueing.
2151 /* PDU size is derived from the HCI MTU */
2152 pdu_len
= chan
->conn
->mtu
;
2154 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2156 /* Adjust for largest possible L2CAP overhead. */
2157 pdu_len
-= L2CAP_EXT_HDR_SIZE
+ L2CAP_FCS_SIZE
;
2159 /* Remote device may have requested smaller PDUs */
2160 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2162 if (len
<= pdu_len
) {
2163 sar
= L2CAP_SAR_UNSEGMENTED
;
2167 sar
= L2CAP_SAR_START
;
2169 pdu_len
-= L2CAP_SDULEN_SIZE
;
2173 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2176 __skb_queue_purge(seg_queue
);
2177 return PTR_ERR(skb
);
2180 bt_cb(skb
)->control
.sar
= sar
;
2181 __skb_queue_tail(seg_queue
, skb
);
2186 pdu_len
+= L2CAP_SDULEN_SIZE
;
2189 if (len
<= pdu_len
) {
2190 sar
= L2CAP_SAR_END
;
2193 sar
= L2CAP_SAR_CONTINUE
;
2200 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2203 struct sk_buff
*skb
;
2205 struct sk_buff_head seg_queue
;
2207 /* Connectionless channel */
2208 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2209 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2211 return PTR_ERR(skb
);
2213 l2cap_do_send(chan
, skb
);
2217 switch (chan
->mode
) {
2218 case L2CAP_MODE_BASIC
:
2219 /* Check outgoing MTU */
2220 if (len
> chan
->omtu
)
2223 /* Create a basic PDU */
2224 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2226 return PTR_ERR(skb
);
2228 l2cap_do_send(chan
, skb
);
2232 case L2CAP_MODE_ERTM
:
2233 case L2CAP_MODE_STREAMING
:
2234 /* Check outgoing MTU */
2235 if (len
> chan
->omtu
) {
2240 __skb_queue_head_init(&seg_queue
);
2242 /* Do segmentation before calling in to the state machine,
2243 * since it's possible to block while waiting for memory
2246 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2248 /* The channel could have been closed while segmenting,
2249 * check that it is still connected.
2251 if (chan
->state
!= BT_CONNECTED
) {
2252 __skb_queue_purge(&seg_queue
);
2259 if (chan
->mode
== L2CAP_MODE_ERTM
)
2260 err
= l2cap_tx(chan
, 0, &seg_queue
,
2261 L2CAP_EV_DATA_REQUEST
);
2263 err
= l2cap_streaming_send(chan
, &seg_queue
);
2268 /* If the skbs were not queued for sending, they'll still be in
2269 * seg_queue and need to be purged.
2271 __skb_queue_purge(&seg_queue
);
2275 BT_DBG("bad state %1.1x", chan
->mode
);
2282 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2284 struct l2cap_ctrl control
;
2287 BT_DBG("chan %p, txseq %d", chan
, txseq
);
2289 memset(&control
, 0, sizeof(control
));
2291 control
.super
= L2CAP_SUPER_SREJ
;
2293 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2294 seq
= __next_seq(chan
, seq
)) {
2295 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2296 control
.reqseq
= seq
;
2297 l2cap_send_sframe(chan
, &control
);
2298 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2302 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2305 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2307 struct l2cap_ctrl control
;
2309 BT_DBG("chan %p", chan
);
2311 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2314 memset(&control
, 0, sizeof(control
));
2316 control
.super
= L2CAP_SUPER_SREJ
;
2317 control
.reqseq
= chan
->srej_list
.tail
;
2318 l2cap_send_sframe(chan
, &control
);
2321 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2323 struct l2cap_ctrl control
;
2327 BT_DBG("chan %p, txseq %d", chan
, txseq
);
2329 memset(&control
, 0, sizeof(control
));
2331 control
.super
= L2CAP_SUPER_SREJ
;
2333 /* Capture initial list head to allow only one pass through the list. */
2334 initial_head
= chan
->srej_list
.head
;
2337 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2338 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2341 control
.reqseq
= seq
;
2342 l2cap_send_sframe(chan
, &control
);
2343 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2344 } while (chan
->srej_list
.head
!= initial_head
);
2347 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2349 struct sk_buff
*acked_skb
;
2352 BT_DBG("chan %p, reqseq %d", chan
, reqseq
);
2354 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2357 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2358 chan
->expected_ack_seq
, chan
->unacked_frames
);
2360 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2361 ackseq
= __next_seq(chan
, ackseq
)) {
2363 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2365 skb_unlink(acked_skb
, &chan
->tx_q
);
2366 kfree_skb(acked_skb
);
2367 chan
->unacked_frames
--;
2371 chan
->expected_ack_seq
= reqseq
;
2373 if (chan
->unacked_frames
== 0)
2374 __clear_retrans_timer(chan
);
2376 BT_DBG("unacked_frames %d", (int) chan
->unacked_frames
);
2379 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2381 BT_DBG("chan %p", chan
);
2383 chan
->expected_tx_seq
= chan
->buffer_seq
;
2384 l2cap_seq_list_clear(&chan
->srej_list
);
2385 skb_queue_purge(&chan
->srej_q
);
2386 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2389 static int l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2390 struct l2cap_ctrl
*control
,
2391 struct sk_buff_head
*skbs
, u8 event
)
2395 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2399 case L2CAP_EV_DATA_REQUEST
:
2400 if (chan
->tx_send_head
== NULL
)
2401 chan
->tx_send_head
= skb_peek(skbs
);
2403 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2404 l2cap_ertm_send(chan
);
2406 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2407 BT_DBG("Enter LOCAL_BUSY");
2408 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2410 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2411 /* The SREJ_SENT state must be aborted if we are to
2412 * enter the LOCAL_BUSY state.
2414 l2cap_abort_rx_srej_sent(chan
);
2417 l2cap_send_ack(chan
);
2420 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2421 BT_DBG("Exit LOCAL_BUSY");
2422 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2424 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2425 struct l2cap_ctrl local_control
;
2427 memset(&local_control
, 0, sizeof(local_control
));
2428 local_control
.sframe
= 1;
2429 local_control
.super
= L2CAP_SUPER_RR
;
2430 local_control
.poll
= 1;
2431 local_control
.reqseq
= chan
->buffer_seq
;
2432 l2cap_send_sframe(chan
, &local_control
);
2434 chan
->retry_count
= 1;
2435 __set_monitor_timer(chan
);
2436 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2439 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2440 l2cap_process_reqseq(chan
, control
->reqseq
);
2442 case L2CAP_EV_EXPLICIT_POLL
:
2443 l2cap_send_rr_or_rnr(chan
, 1);
2444 chan
->retry_count
= 1;
2445 __set_monitor_timer(chan
);
2446 __clear_ack_timer(chan
);
2447 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2449 case L2CAP_EV_RETRANS_TO
:
2450 l2cap_send_rr_or_rnr(chan
, 1);
2451 chan
->retry_count
= 1;
2452 __set_monitor_timer(chan
);
2453 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2455 case L2CAP_EV_RECV_FBIT
:
2456 /* Nothing to process */
2465 static int l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2466 struct l2cap_ctrl
*control
,
2467 struct sk_buff_head
*skbs
, u8 event
)
2471 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2475 case L2CAP_EV_DATA_REQUEST
:
2476 if (chan
->tx_send_head
== NULL
)
2477 chan
->tx_send_head
= skb_peek(skbs
);
2478 /* Queue data, but don't send. */
2479 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2481 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2482 BT_DBG("Enter LOCAL_BUSY");
2483 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2485 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2486 /* The SREJ_SENT state must be aborted if we are to
2487 * enter the LOCAL_BUSY state.
2489 l2cap_abort_rx_srej_sent(chan
);
2492 l2cap_send_ack(chan
);
2495 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2496 BT_DBG("Exit LOCAL_BUSY");
2497 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2499 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2500 struct l2cap_ctrl local_control
;
2501 memset(&local_control
, 0, sizeof(local_control
));
2502 local_control
.sframe
= 1;
2503 local_control
.super
= L2CAP_SUPER_RR
;
2504 local_control
.poll
= 1;
2505 local_control
.reqseq
= chan
->buffer_seq
;
2506 l2cap_send_sframe(chan
, &local_control
);
2508 chan
->retry_count
= 1;
2509 __set_monitor_timer(chan
);
2510 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2513 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2514 l2cap_process_reqseq(chan
, control
->reqseq
);
2518 case L2CAP_EV_RECV_FBIT
:
2519 if (control
&& control
->final
) {
2520 __clear_monitor_timer(chan
);
2521 if (chan
->unacked_frames
> 0)
2522 __set_retrans_timer(chan
);
2523 chan
->retry_count
= 0;
2524 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2525 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2528 case L2CAP_EV_EXPLICIT_POLL
:
2531 case L2CAP_EV_MONITOR_TO
:
2532 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2533 l2cap_send_rr_or_rnr(chan
, 1);
2534 __set_monitor_timer(chan
);
2535 chan
->retry_count
++;
2537 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
2547 static int l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2548 struct sk_buff_head
*skbs
, u8 event
)
2552 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2553 chan
, control
, skbs
, event
, chan
->tx_state
);
2555 switch (chan
->tx_state
) {
2556 case L2CAP_TX_STATE_XMIT
:
2557 err
= l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2559 case L2CAP_TX_STATE_WAIT_F
:
2560 err
= l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2570 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2571 struct l2cap_ctrl
*control
)
2573 BT_DBG("chan %p, control %p", chan
, control
);
2574 l2cap_tx(chan
, control
, 0, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2577 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2578 struct l2cap_ctrl
*control
)
2580 BT_DBG("chan %p, control %p", chan
, control
);
2581 l2cap_tx(chan
, control
, 0, L2CAP_EV_RECV_FBIT
);
2584 /* Copy frame to all raw sockets on that connection */
2585 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2587 struct sk_buff
*nskb
;
2588 struct l2cap_chan
*chan
;
2590 BT_DBG("conn %p", conn
);
2592 mutex_lock(&conn
->chan_lock
);
2594 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2595 struct sock
*sk
= chan
->sk
;
2596 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2599 /* Don't send frame to the socket it came from */
2602 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2606 if (chan
->ops
->recv(chan
->data
, nskb
))
2610 mutex_unlock(&conn
->chan_lock
);
2613 /* ---- L2CAP signalling commands ---- */
2614 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2615 u8 code
, u8 ident
, u16 dlen
, void *data
)
2617 struct sk_buff
*skb
, **frag
;
2618 struct l2cap_cmd_hdr
*cmd
;
2619 struct l2cap_hdr
*lh
;
2622 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2623 conn
, code
, ident
, dlen
);
2625 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2626 count
= min_t(unsigned int, conn
->mtu
, len
);
2628 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2632 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2633 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2635 if (conn
->hcon
->type
== LE_LINK
)
2636 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2638 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2640 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2643 cmd
->len
= cpu_to_le16(dlen
);
2646 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2647 memcpy(skb_put(skb
, count
), data
, count
);
2653 /* Continuation fragments (no L2CAP header) */
2654 frag
= &skb_shinfo(skb
)->frag_list
;
2656 count
= min_t(unsigned int, conn
->mtu
, len
);
2658 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2662 memcpy(skb_put(*frag
, count
), data
, count
);
2667 frag
= &(*frag
)->next
;
2677 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2679 struct l2cap_conf_opt
*opt
= *ptr
;
2682 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2690 *val
= *((u8
*) opt
->val
);
2694 *val
= get_unaligned_le16(opt
->val
);
2698 *val
= get_unaligned_le32(opt
->val
);
2702 *val
= (unsigned long) opt
->val
;
2706 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2710 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2712 struct l2cap_conf_opt
*opt
= *ptr
;
2714 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2721 *((u8
*) opt
->val
) = val
;
2725 put_unaligned_le16(val
, opt
->val
);
2729 put_unaligned_le32(val
, opt
->val
);
2733 memcpy(opt
->val
, (void *) val
, len
);
2737 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2740 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2742 struct l2cap_conf_efs efs
;
2744 switch (chan
->mode
) {
2745 case L2CAP_MODE_ERTM
:
2746 efs
.id
= chan
->local_id
;
2747 efs
.stype
= chan
->local_stype
;
2748 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2749 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2750 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2751 efs
.flush_to
= cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO
);
2754 case L2CAP_MODE_STREAMING
:
2756 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2757 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2758 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2767 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
2768 (unsigned long) &efs
);
2771 static void l2cap_ack_timeout(struct work_struct
*work
)
2773 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2777 BT_DBG("chan %p", chan
);
2779 l2cap_chan_lock(chan
);
2781 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2782 chan
->last_acked_seq
);
2785 l2cap_send_rr_or_rnr(chan
, 0);
2787 l2cap_chan_unlock(chan
);
2788 l2cap_chan_put(chan
);
2791 static inline int l2cap_ertm_init(struct l2cap_chan
*chan
)
2795 chan
->next_tx_seq
= 0;
2796 chan
->expected_tx_seq
= 0;
2797 chan
->expected_ack_seq
= 0;
2798 chan
->unacked_frames
= 0;
2799 chan
->buffer_seq
= 0;
2800 chan
->frames_sent
= 0;
2801 chan
->last_acked_seq
= 0;
2803 chan
->sdu_last_frag
= NULL
;
2806 skb_queue_head_init(&chan
->tx_q
);
2808 if (chan
->mode
!= L2CAP_MODE_ERTM
)
2811 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2812 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2814 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
2815 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
2816 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2818 skb_queue_head_init(&chan
->srej_q
);
2820 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
2824 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
2826 l2cap_seq_list_free(&chan
->srej_list
);
2831 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2834 case L2CAP_MODE_STREAMING
:
2835 case L2CAP_MODE_ERTM
:
2836 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2840 return L2CAP_MODE_BASIC
;
2844 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
2846 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
2849 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
2851 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
2854 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
2856 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
2857 __l2cap_ews_supported(chan
)) {
2858 /* use extended control field */
2859 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2860 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2862 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
2863 L2CAP_DEFAULT_TX_WINDOW
);
2864 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2868 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2870 struct l2cap_conf_req
*req
= data
;
2871 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2872 void *ptr
= req
->data
;
2875 BT_DBG("chan %p", chan
);
2877 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2880 switch (chan
->mode
) {
2881 case L2CAP_MODE_STREAMING
:
2882 case L2CAP_MODE_ERTM
:
2883 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2886 if (__l2cap_efs_supported(chan
))
2887 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2891 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2896 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2897 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2899 switch (chan
->mode
) {
2900 case L2CAP_MODE_BASIC
:
2901 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2902 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2905 rfc
.mode
= L2CAP_MODE_BASIC
;
2907 rfc
.max_transmit
= 0;
2908 rfc
.retrans_timeout
= 0;
2909 rfc
.monitor_timeout
= 0;
2910 rfc
.max_pdu_size
= 0;
2912 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2913 (unsigned long) &rfc
);
2916 case L2CAP_MODE_ERTM
:
2917 rfc
.mode
= L2CAP_MODE_ERTM
;
2918 rfc
.max_transmit
= chan
->max_tx
;
2919 rfc
.retrans_timeout
= 0;
2920 rfc
.monitor_timeout
= 0;
2922 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2923 L2CAP_EXT_HDR_SIZE
-
2926 rfc
.max_pdu_size
= cpu_to_le16(size
);
2928 l2cap_txwin_setup(chan
);
2930 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2931 L2CAP_DEFAULT_TX_WINDOW
);
2933 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2934 (unsigned long) &rfc
);
2936 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2937 l2cap_add_opt_efs(&ptr
, chan
);
2939 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2942 if (chan
->fcs
== L2CAP_FCS_NONE
||
2943 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2944 chan
->fcs
= L2CAP_FCS_NONE
;
2945 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2948 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2949 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2953 case L2CAP_MODE_STREAMING
:
2954 l2cap_txwin_setup(chan
);
2955 rfc
.mode
= L2CAP_MODE_STREAMING
;
2957 rfc
.max_transmit
= 0;
2958 rfc
.retrans_timeout
= 0;
2959 rfc
.monitor_timeout
= 0;
2961 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2962 L2CAP_EXT_HDR_SIZE
-
2965 rfc
.max_pdu_size
= cpu_to_le16(size
);
2967 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2968 (unsigned long) &rfc
);
2970 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2971 l2cap_add_opt_efs(&ptr
, chan
);
2973 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2976 if (chan
->fcs
== L2CAP_FCS_NONE
||
2977 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2978 chan
->fcs
= L2CAP_FCS_NONE
;
2979 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2984 req
->dcid
= cpu_to_le16(chan
->dcid
);
2985 req
->flags
= cpu_to_le16(0);
2990 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
2992 struct l2cap_conf_rsp
*rsp
= data
;
2993 void *ptr
= rsp
->data
;
2994 void *req
= chan
->conf_req
;
2995 int len
= chan
->conf_len
;
2996 int type
, hint
, olen
;
2998 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2999 struct l2cap_conf_efs efs
;
3001 u16 mtu
= L2CAP_DEFAULT_MTU
;
3002 u16 result
= L2CAP_CONF_SUCCESS
;
3005 BT_DBG("chan %p", chan
);
3007 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3008 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
3010 hint
= type
& L2CAP_CONF_HINT
;
3011 type
&= L2CAP_CONF_MASK
;
3014 case L2CAP_CONF_MTU
:
3018 case L2CAP_CONF_FLUSH_TO
:
3019 chan
->flush_to
= val
;
3022 case L2CAP_CONF_QOS
:
3025 case L2CAP_CONF_RFC
:
3026 if (olen
== sizeof(rfc
))
3027 memcpy(&rfc
, (void *) val
, olen
);
3030 case L2CAP_CONF_FCS
:
3031 if (val
== L2CAP_FCS_NONE
)
3032 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
3035 case L2CAP_CONF_EFS
:
3037 if (olen
== sizeof(efs
))
3038 memcpy(&efs
, (void *) val
, olen
);
3041 case L2CAP_CONF_EWS
:
3043 return -ECONNREFUSED
;
3045 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3046 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
3047 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3048 chan
->remote_tx_win
= val
;
3055 result
= L2CAP_CONF_UNKNOWN
;
3056 *((u8
*) ptr
++) = type
;
3061 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
3064 switch (chan
->mode
) {
3065 case L2CAP_MODE_STREAMING
:
3066 case L2CAP_MODE_ERTM
:
3067 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
3068 chan
->mode
= l2cap_select_mode(rfc
.mode
,
3069 chan
->conn
->feat_mask
);
3074 if (__l2cap_efs_supported(chan
))
3075 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3077 return -ECONNREFUSED
;
3080 if (chan
->mode
!= rfc
.mode
)
3081 return -ECONNREFUSED
;
3087 if (chan
->mode
!= rfc
.mode
) {
3088 result
= L2CAP_CONF_UNACCEPT
;
3089 rfc
.mode
= chan
->mode
;
3091 if (chan
->num_conf_rsp
== 1)
3092 return -ECONNREFUSED
;
3094 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3095 sizeof(rfc
), (unsigned long) &rfc
);
3098 if (result
== L2CAP_CONF_SUCCESS
) {
3099 /* Configure output options and let the other side know
3100 * which ones we don't like. */
3102 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
3103 result
= L2CAP_CONF_UNACCEPT
;
3106 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
3108 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
3111 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3112 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3113 efs
.stype
!= chan
->local_stype
) {
3115 result
= L2CAP_CONF_UNACCEPT
;
3117 if (chan
->num_conf_req
>= 1)
3118 return -ECONNREFUSED
;
3120 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3122 (unsigned long) &efs
);
3124 /* Send PENDING Conf Rsp */
3125 result
= L2CAP_CONF_PENDING
;
3126 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3131 case L2CAP_MODE_BASIC
:
3132 chan
->fcs
= L2CAP_FCS_NONE
;
3133 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3136 case L2CAP_MODE_ERTM
:
3137 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
3138 chan
->remote_tx_win
= rfc
.txwin_size
;
3140 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
3142 chan
->remote_max_tx
= rfc
.max_transmit
;
3144 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3146 L2CAP_EXT_HDR_SIZE
-
3149 rfc
.max_pdu_size
= cpu_to_le16(size
);
3150 chan
->remote_mps
= size
;
3152 rfc
.retrans_timeout
=
3153 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3154 rfc
.monitor_timeout
=
3155 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3157 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3159 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3160 sizeof(rfc
), (unsigned long) &rfc
);
3162 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3163 chan
->remote_id
= efs
.id
;
3164 chan
->remote_stype
= efs
.stype
;
3165 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3166 chan
->remote_flush_to
=
3167 le32_to_cpu(efs
.flush_to
);
3168 chan
->remote_acc_lat
=
3169 le32_to_cpu(efs
.acc_lat
);
3170 chan
->remote_sdu_itime
=
3171 le32_to_cpu(efs
.sdu_itime
);
3172 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3173 sizeof(efs
), (unsigned long) &efs
);
3177 case L2CAP_MODE_STREAMING
:
3178 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3180 L2CAP_EXT_HDR_SIZE
-
3183 rfc
.max_pdu_size
= cpu_to_le16(size
);
3184 chan
->remote_mps
= size
;
3186 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3188 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3189 sizeof(rfc
), (unsigned long) &rfc
);
3194 result
= L2CAP_CONF_UNACCEPT
;
3196 memset(&rfc
, 0, sizeof(rfc
));
3197 rfc
.mode
= chan
->mode
;
3200 if (result
== L2CAP_CONF_SUCCESS
)
3201 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3203 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3204 rsp
->result
= cpu_to_le16(result
);
3205 rsp
->flags
= cpu_to_le16(0x0000);
3210 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
3212 struct l2cap_conf_req
*req
= data
;
3213 void *ptr
= req
->data
;
3216 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3217 struct l2cap_conf_efs efs
;
3219 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3221 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3222 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3225 case L2CAP_CONF_MTU
:
3226 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3227 *result
= L2CAP_CONF_UNACCEPT
;
3228 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3231 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3234 case L2CAP_CONF_FLUSH_TO
:
3235 chan
->flush_to
= val
;
3236 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3240 case L2CAP_CONF_RFC
:
3241 if (olen
== sizeof(rfc
))
3242 memcpy(&rfc
, (void *)val
, olen
);
3244 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3245 rfc
.mode
!= chan
->mode
)
3246 return -ECONNREFUSED
;
3250 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3251 sizeof(rfc
), (unsigned long) &rfc
);
3254 case L2CAP_CONF_EWS
:
3255 chan
->tx_win
= min_t(u16
, val
,
3256 L2CAP_DEFAULT_EXT_WINDOW
);
3257 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3261 case L2CAP_CONF_EFS
:
3262 if (olen
== sizeof(efs
))
3263 memcpy(&efs
, (void *)val
, olen
);
3265 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3266 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3267 efs
.stype
!= chan
->local_stype
)
3268 return -ECONNREFUSED
;
3270 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3271 sizeof(efs
), (unsigned long) &efs
);
3276 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3277 return -ECONNREFUSED
;
3279 chan
->mode
= rfc
.mode
;
3281 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3283 case L2CAP_MODE_ERTM
:
3284 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3285 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3286 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3288 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3289 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3290 chan
->local_sdu_itime
=
3291 le32_to_cpu(efs
.sdu_itime
);
3292 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3293 chan
->local_flush_to
=
3294 le32_to_cpu(efs
.flush_to
);
3298 case L2CAP_MODE_STREAMING
:
3299 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3303 req
->dcid
= cpu_to_le16(chan
->dcid
);
3304 req
->flags
= cpu_to_le16(0x0000);
3309 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
3311 struct l2cap_conf_rsp
*rsp
= data
;
3312 void *ptr
= rsp
->data
;
3314 BT_DBG("chan %p", chan
);
3316 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3317 rsp
->result
= cpu_to_le16(result
);
3318 rsp
->flags
= cpu_to_le16(flags
);
/*
 * Send the deferred Connect Response for @chan (success, no further
 * information) and, unless a Configure Request was already sent
 * (CONF_REQ_SENT test-and-set), follow up with our Configure Request.
 *
 * NOTE(review): this chunk is an elided extraction — several original
 * lines (braces, the local conf buffer declaration, an early return)
 * are missing between the fragments below; confirm against full source.
 */
3323 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3325 struct l2cap_conn_rsp rsp
;
3326 struct l2cap_conn
*conn
= chan
->conn
;
/* scid/dcid are swapped from our point of view in the response */
3329 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3330 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3331 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3332 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3333 l2cap_send_cmd(conn
, chan
->ident
,
3334 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
/* only send a Configure Request if one was not already sent */
3336 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3339 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3340 l2cap_build_conf_req(chan
, buf
), buf
);
3341 chan
->num_conf_req
++;
/*
 * Extract the RFC option from a Configure Response buffer and cache the
 * ERTM/streaming retransmission timeout, monitor timeout and max PDU
 * size on the channel.  If the (misbehaving) remote sent no RFC option,
 * sane defaults are filled in instead and an error is logged.
 *
 * NOTE(review): elided extraction — loop/switch scaffolding and braces
 * are missing between the fragments below.
 */
3344 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3348 struct l2cap_conf_rfc rfc
;
3350 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
/* RFC only matters for ERTM and streaming mode channels */
3352 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3355 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3356 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3359 case L2CAP_CONF_RFC
:
3360 if (olen
== sizeof(rfc
))
3361 memcpy(&rfc
, (void *)val
, olen
);
3366 /* Use sane default values in case a misbehaving remote device
3367 * did not send an RFC option.
3369 rfc
.mode
= chan
->mode
;
3370 rfc
.retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3371 rfc
.monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3372 rfc
.max_pdu_size
= cpu_to_le16(chan
->imtu
);
3374 BT_ERR("Expected RFC option was not found, using defaults");
/* apply the (received or defaulted) RFC values per mode */
3378 case L2CAP_MODE_ERTM
:
3379 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3380 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3381 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3383 case L2CAP_MODE_STREAMING
:
3384 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
/*
 * Handle an incoming Command Reject.  Only "command not understood"
 * rejects that match our outstanding Information Request ident are
 * acted on: the info timer is cancelled, the feature-mask exchange is
 * marked done, and connection setup proceeds.
 *
 * NOTE(review): elided extraction — braces and return statements are
 * missing between the fragments below.
 */
3388 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3390 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3392 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
/* only react when this reject answers our pending info request */
3395 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3396 cmd
->ident
== conn
->info_ident
) {
3397 cancel_delayed_work(&conn
->info_timer
);
3399 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3400 conn
->info_ident
= 0;
3402 l2cap_conn_start(conn
);
3408 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3410 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3411 struct l2cap_conn_rsp rsp
;
3412 struct l2cap_chan
*chan
= NULL
, *pchan
;
3413 struct sock
*parent
, *sk
= NULL
;
3414 int result
, status
= L2CAP_CS_NO_INFO
;
3416 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3417 __le16 psm
= req
->psm
;
3419 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3421 /* Check if we have socket listening on psm */
3422 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
3424 result
= L2CAP_CR_BAD_PSM
;
3430 mutex_lock(&conn
->chan_lock
);
3433 /* Check if the ACL is secure enough (if not SDP) */
3434 if (psm
!= cpu_to_le16(0x0001) &&
3435 !hci_conn_check_link_mode(conn
->hcon
)) {
3436 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3437 result
= L2CAP_CR_SEC_BLOCK
;
3441 result
= L2CAP_CR_NO_MEM
;
3443 /* Check for backlog size */
3444 if (sk_acceptq_is_full(parent
)) {
3445 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
3449 chan
= pchan
->ops
->new_connection(pchan
->data
);
3455 /* Check if we already have channel with that dcid */
3456 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
3457 sock_set_flag(sk
, SOCK_ZAPPED
);
3458 chan
->ops
->close(chan
->data
);
3462 hci_conn_hold(conn
->hcon
);
3464 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3465 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3469 bt_accept_enqueue(parent
, sk
);
3471 __l2cap_chan_add(conn
, chan
);
3475 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3477 chan
->ident
= cmd
->ident
;
3479 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3480 if (l2cap_chan_check_security(chan
)) {
3481 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
)) {
3482 __l2cap_state_change(chan
, BT_CONNECT2
);
3483 result
= L2CAP_CR_PEND
;
3484 status
= L2CAP_CS_AUTHOR_PEND
;
3485 parent
->sk_data_ready(parent
, 0);
3487 __l2cap_state_change(chan
, BT_CONFIG
);
3488 result
= L2CAP_CR_SUCCESS
;
3489 status
= L2CAP_CS_NO_INFO
;
3492 __l2cap_state_change(chan
, BT_CONNECT2
);
3493 result
= L2CAP_CR_PEND
;
3494 status
= L2CAP_CS_AUTHEN_PEND
;
3497 __l2cap_state_change(chan
, BT_CONNECT2
);
3498 result
= L2CAP_CR_PEND
;
3499 status
= L2CAP_CS_NO_INFO
;
3503 release_sock(parent
);
3504 mutex_unlock(&conn
->chan_lock
);
3507 rsp
.scid
= cpu_to_le16(scid
);
3508 rsp
.dcid
= cpu_to_le16(dcid
);
3509 rsp
.result
= cpu_to_le16(result
);
3510 rsp
.status
= cpu_to_le16(status
);
3511 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3513 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3514 struct l2cap_info_req info
;
3515 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3517 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3518 conn
->info_ident
= l2cap_get_ident(conn
);
3520 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3522 l2cap_send_cmd(conn
, conn
->info_ident
,
3523 L2CAP_INFO_REQ
, sizeof(info
), &info
);
3526 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3527 result
== L2CAP_CR_SUCCESS
) {
3529 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3530 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3531 l2cap_build_conf_req(chan
, buf
), buf
);
3532 chan
->num_conf_req
++;
/*
 * Handle a Connect Response.  The channel is looked up by our scid (or,
 * failing that, by the command ident), then on L2CAP_CR_SUCCESS the
 * channel moves to BT_CONFIG and a Configure Request is sent (unless
 * one is already outstanding); other results either leave the channel
 * pending or tear it down with ECONNREFUSED.  All channel lookups and
 * teardown happen under conn->chan_lock.
 *
 * NOTE(review): elided extraction — the switch scaffolding, error
 * paths, and result-case labels between fragments are missing.
 */
3538 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3540 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3541 u16 scid
, dcid
, result
, status
;
3542 struct l2cap_chan
*chan
;
3546 scid
= __le16_to_cpu(rsp
->scid
);
3547 dcid
= __le16_to_cpu(rsp
->dcid
);
3548 result
= __le16_to_cpu(rsp
->result
);
3549 status
= __le16_to_cpu(rsp
->status
);
3551 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3552 dcid
, scid
, result
, status
);
3554 mutex_lock(&conn
->chan_lock
);
/* prefer lookup by scid; fall back to the command ident */
3557 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3563 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3572 l2cap_chan_lock(chan
);
3575 case L2CAP_CR_SUCCESS
:
3576 l2cap_state_change(chan
, BT_CONFIG
);
3579 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3581 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3584 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3585 l2cap_build_conf_req(chan
, req
), req
);
3586 chan
->num_conf_req
++;
3590 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3594 l2cap_chan_del(chan
, ECONNREFUSED
);
3598 l2cap_chan_unlock(chan
);
3601 mutex_unlock(&conn
->chan_lock
);
3606 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3608 /* FCS is enabled only in ERTM or streaming mode, if one or both
3611 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3612 chan
->fcs
= L2CAP_FCS_NONE
;
3613 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3614 chan
->fcs
= L2CAP_FCS_CRC16
;
3617 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3619 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3622 struct l2cap_chan
*chan
;
3625 dcid
= __le16_to_cpu(req
->dcid
);
3626 flags
= __le16_to_cpu(req
->flags
);
3628 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3630 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3634 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3635 struct l2cap_cmd_rej_cid rej
;
3637 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3638 rej
.scid
= cpu_to_le16(chan
->scid
);
3639 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3641 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3646 /* Reject if config buffer is too small. */
3647 len
= cmd_len
- sizeof(*req
);
3648 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3649 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3650 l2cap_build_conf_rsp(chan
, rsp
,
3651 L2CAP_CONF_REJECT
, flags
), rsp
);
3656 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3657 chan
->conf_len
+= len
;
3659 if (flags
& 0x0001) {
3660 /* Incomplete config. Send empty response. */
3661 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3662 l2cap_build_conf_rsp(chan
, rsp
,
3663 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
3667 /* Complete config. */
3668 len
= l2cap_parse_conf_req(chan
, rsp
);
3670 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3674 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3675 chan
->num_conf_rsp
++;
3677 /* Reset config buffer. */
3680 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3683 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3684 set_default_fcs(chan
);
3686 l2cap_state_change(chan
, BT_CONNECTED
);
3688 if (chan
->mode
== L2CAP_MODE_ERTM
||
3689 chan
->mode
== L2CAP_MODE_STREAMING
)
3690 err
= l2cap_ertm_init(chan
);
3693 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3695 l2cap_chan_ready(chan
);
3700 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3702 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3703 l2cap_build_conf_req(chan
, buf
), buf
);
3704 chan
->num_conf_req
++;
3707 /* Got Conf Rsp PENDING from remote side and asume we sent
3708 Conf Rsp PENDING in the code above */
3709 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3710 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3712 /* check compatibility */
3714 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3715 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3717 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3718 l2cap_build_conf_rsp(chan
, rsp
,
3719 L2CAP_CONF_SUCCESS
, 0x0000), rsp
);
3723 l2cap_chan_unlock(chan
);
3727 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3729 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3730 u16 scid
, flags
, result
;
3731 struct l2cap_chan
*chan
;
3732 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3735 scid
= __le16_to_cpu(rsp
->scid
);
3736 flags
= __le16_to_cpu(rsp
->flags
);
3737 result
= __le16_to_cpu(rsp
->result
);
3739 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3742 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3747 case L2CAP_CONF_SUCCESS
:
3748 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3749 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3752 case L2CAP_CONF_PENDING
:
3753 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3755 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3758 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3761 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3765 /* check compatibility */
3767 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3768 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3770 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3771 l2cap_build_conf_rsp(chan
, buf
,
3772 L2CAP_CONF_SUCCESS
, 0x0000), buf
);
3776 case L2CAP_CONF_UNACCEPT
:
3777 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3780 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3781 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3785 /* throw out any old stored conf requests */
3786 result
= L2CAP_CONF_SUCCESS
;
3787 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3790 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3794 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3795 L2CAP_CONF_REQ
, len
, req
);
3796 chan
->num_conf_req
++;
3797 if (result
!= L2CAP_CONF_SUCCESS
)
3803 l2cap_chan_set_err(chan
, ECONNRESET
);
3805 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
3806 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3813 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3815 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3816 set_default_fcs(chan
);
3818 l2cap_state_change(chan
, BT_CONNECTED
);
3819 if (chan
->mode
== L2CAP_MODE_ERTM
||
3820 chan
->mode
== L2CAP_MODE_STREAMING
)
3821 err
= l2cap_ertm_init(chan
);
3824 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3826 l2cap_chan_ready(chan
);
3830 l2cap_chan_unlock(chan
);
/*
 * Handle a Disconnect Request: look up the channel via the peer's dcid
 * (our scid), echo a Disconnect Response, shut down the socket and tear
 * the channel down with ECONNRESET.  A hold/put pair keeps the channel
 * alive across l2cap_chan_del() until ops->close() has run; the whole
 * sequence is under conn->chan_lock.
 *
 * NOTE(review): elided extraction — braces, local declarations (scid,
 * dcid, sk) and socket lock calls are missing between the fragments.
 */
3834 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3836 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3837 struct l2cap_disconn_rsp rsp
;
3839 struct l2cap_chan
*chan
;
3842 scid
= __le16_to_cpu(req
->scid
);
3843 dcid
= __le16_to_cpu(req
->dcid
);
3845 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3847 mutex_lock(&conn
->chan_lock
);
/* the peer's dcid is our scid */
3849 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
3851 mutex_unlock(&conn
->chan_lock
);
3855 l2cap_chan_lock(chan
);
3859 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3860 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3861 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3864 sk
->sk_shutdown
= SHUTDOWN_MASK
;
/* hold across chan_del so ops->close can still use the channel */
3867 l2cap_chan_hold(chan
);
3868 l2cap_chan_del(chan
, ECONNRESET
);
3870 l2cap_chan_unlock(chan
);
3872 chan
->ops
->close(chan
->data
);
3873 l2cap_chan_put(chan
);
3875 mutex_unlock(&conn
->chan_lock
);
/*
 * Handle a Disconnect Response: look up the channel by our scid and
 * finish the teardown we initiated (error 0, since this disconnect was
 * requested locally).  Hold/put keeps the channel alive until
 * ops->close() has run; everything is under conn->chan_lock.
 *
 * NOTE(review): elided extraction — braces and the scid/dcid local
 * declarations are missing between the fragments below.
 */
3880 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3882 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3884 struct l2cap_chan
*chan
;
3886 scid
= __le16_to_cpu(rsp
->scid
);
3887 dcid
= __le16_to_cpu(rsp
->dcid
);
3889 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3891 mutex_lock(&conn
->chan_lock
);
3893 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3895 mutex_unlock(&conn
->chan_lock
);
3899 l2cap_chan_lock(chan
);
/* local disconnect completed: tear down with no error */
3901 l2cap_chan_hold(chan
);
3902 l2cap_chan_del(chan
, 0);
3904 l2cap_chan_unlock(chan
);
3906 chan
->ops
->close(chan
->data
);
3907 l2cap_chan_put(chan
);
3909 mutex_unlock(&conn
->chan_lock
);
3914 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3916 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3919 type
= __le16_to_cpu(req
->type
);
3921 BT_DBG("type 0x%4.4x", type
);
3923 if (type
== L2CAP_IT_FEAT_MASK
) {
3925 u32 feat_mask
= l2cap_feat_mask
;
3926 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3927 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3928 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3930 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3933 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3934 | L2CAP_FEAT_EXT_WINDOW
;
3936 put_unaligned_le32(feat_mask
, rsp
->data
);
3937 l2cap_send_cmd(conn
, cmd
->ident
,
3938 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3939 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3941 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3944 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3946 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3948 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3949 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3950 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3951 l2cap_send_cmd(conn
, cmd
->ident
,
3952 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3954 struct l2cap_info_rsp rsp
;
3955 rsp
.type
= cpu_to_le16(type
);
3956 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3957 l2cap_send_cmd(conn
, cmd
->ident
,
3958 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3964 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3966 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3969 type
= __le16_to_cpu(rsp
->type
);
3970 result
= __le16_to_cpu(rsp
->result
);
3972 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3974 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3975 if (cmd
->ident
!= conn
->info_ident
||
3976 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3979 cancel_delayed_work(&conn
->info_timer
);
3981 if (result
!= L2CAP_IR_SUCCESS
) {
3982 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3983 conn
->info_ident
= 0;
3985 l2cap_conn_start(conn
);
3991 case L2CAP_IT_FEAT_MASK
:
3992 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3994 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3995 struct l2cap_info_req req
;
3996 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3998 conn
->info_ident
= l2cap_get_ident(conn
);
4000 l2cap_send_cmd(conn
, conn
->info_ident
,
4001 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4003 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4004 conn
->info_ident
= 0;
4006 l2cap_conn_start(conn
);
4010 case L2CAP_IT_FIXED_CHAN
:
4011 conn
->fixed_chan_mask
= rsp
->data
[0];
4012 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4013 conn
->info_ident
= 0;
4015 l2cap_conn_start(conn
);
/*
 * Handle an AMP Create Channel Request.  This is a placeholder
 * implementation: after validating the command length and parsing
 * psm/scid, it always rejects the request with L2CAP_CR_NO_MEM.
 *
 * NOTE(review): elided extraction — the error return for a bad
 * cmd_len, the psm/scid declarations and braces are missing between
 * the fragments below.
 */
4022 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4023 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4026 struct l2cap_create_chan_req
*req
= data
;
4027 struct l2cap_create_chan_rsp rsp
;
4030 if (cmd_len
!= sizeof(*req
))
4036 psm
= le16_to_cpu(req
->psm
);
4037 scid
= le16_to_cpu(req
->scid
);
4039 BT_DBG("psm %d, scid %d, amp_id %d", psm
, scid
, req
->amp_id
);
4041 /* Placeholder: Always reject */
4043 rsp
.scid
= cpu_to_le16(scid
);
4044 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
4045 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4047 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
/* An AMP Create Channel Response carries the same payload as a normal
 * Connect Response, so simply delegate to the connect response handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4061 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
4062 u16 icid
, u16 result
)
4064 struct l2cap_move_chan_rsp rsp
;
4066 BT_DBG("icid %d, result %d", icid
, result
);
4068 rsp
.icid
= cpu_to_le16(icid
);
4069 rsp
.result
= cpu_to_le16(result
);
4071 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
/*
 * Build and send a Move Channel Confirm for @icid with @result, using a
 * freshly allocated signalling ident.  The ident is stored on @chan so
 * the eventual Confirm Response can be matched to the channel.
 *
 * NOTE(review): elided extraction — the 'u8 ident;' declaration, braces
 * and (presumably) an 'if (chan)' guard before 'chan->ident = ident;'
 * are missing; callers such as l2cap_move_channel_rsp() pass chan ==
 * NULL, so confirm that guard against the full source.
 */
4074 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
4075 struct l2cap_chan
*chan
, u16 icid
, u16 result
)
4077 struct l2cap_move_chan_cfm cfm
;
4080 BT_DBG("icid %d, result %d", icid
, result
);
4082 ident
= l2cap_get_ident(conn
);
4084 chan
->ident
= ident
;
4086 cfm
.icid
= cpu_to_le16(icid
);
4087 cfm
.result
= cpu_to_le16(result
);
4089 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
4092 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4095 struct l2cap_move_chan_cfm_rsp rsp
;
4097 BT_DBG("icid %d", icid
);
4099 rsp
.icid
= cpu_to_le16(icid
);
4100 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
/*
 * Handle a Move Channel Request.  Placeholder implementation: after
 * validating the command length and parsing the icid, it always replies
 * with L2CAP_MR_NOT_ALLOWED.
 *
 * NOTE(review): elided extraction — the bad-length error return, the
 * icid declaration and braces are missing between the fragments below.
 */
4103 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4104 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4106 struct l2cap_move_chan_req
*req
= data
;
4108 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4110 if (cmd_len
!= sizeof(*req
))
4113 icid
= le16_to_cpu(req
->icid
);
4115 BT_DBG("icid %d, dest_amp_id %d", icid
, req
->dest_amp_id
);
4120 /* Placeholder: Always refuse */
4121 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
/*
 * Handle a Move Channel Response.  Placeholder implementation: parses
 * icid/result and always answers with an UNCONFIRMED Move Channel
 * Confirm (chan == NULL, since no move is actually tracked).
 *
 * NOTE(review): elided extraction — the bad-length error return, local
 * declarations and braces are missing between the fragments below.
 */
4126 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
4127 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4129 struct l2cap_move_chan_rsp
*rsp
= data
;
4132 if (cmd_len
!= sizeof(*rsp
))
4135 icid
= le16_to_cpu(rsp
->icid
);
4136 result
= le16_to_cpu(rsp
->result
);
4138 BT_DBG("icid %d, result %d", icid
, result
);
4140 /* Placeholder: Always unconfirmed */
4141 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
/*
 * Handle a Move Channel Confirm: validate the length, parse icid and
 * result, and acknowledge with a Confirm Response.  No actual channel
 * move is performed here.
 *
 * NOTE(review): elided extraction — the bad-length error return, local
 * declarations and braces are missing between the fragments below.
 */
4146 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
4147 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4149 struct l2cap_move_chan_cfm
*cfm
= data
;
4152 if (cmd_len
!= sizeof(*cfm
))
4155 icid
= le16_to_cpu(cfm
->icid
);
4156 result
= le16_to_cpu(cfm
->result
);
4158 BT_DBG("icid %d, result %d", icid
, result
);
4160 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
/*
 * Handle a Move Channel Confirm Response: validate the length and log
 * the icid.  Nothing further is done in this placeholder.
 *
 * NOTE(review): elided extraction — the bad-length error return, the
 * icid declaration, braces and the final return are missing.
 */
4165 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
4166 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4168 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
4171 if (cmd_len
!= sizeof(*rsp
))
4174 icid
= le16_to_cpu(rsp
->icid
);
4176 BT_DBG("icid %d", icid
);
4181 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
4186 if (min
> max
|| min
< 6 || max
> 3200)
4189 if (to_multiplier
< 10 || to_multiplier
> 3200)
4192 if (max
>= to_multiplier
* 8)
4195 max_latency
= (to_multiplier
* 8 / max
) - 1;
4196 if (latency
> 499 || latency
> max_latency
)
/*
 * Handle an LE Connection Parameter Update Request.  Only processed
 * when we are master of the link; the command length is validated, the
 * four parameters are parsed and checked via l2cap_check_conn_param(),
 * an accepted/rejected response is sent, and on acceptance the HCI LE
 * connection update is issued.
 *
 * NOTE(review): elided extraction — braces, error returns, the 'err'
 * declaration and the if/else around the result assignment are missing
 * between the fragments below.
 */
4202 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
4203 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4205 struct hci_conn
*hcon
= conn
->hcon
;
4206 struct l2cap_conn_param_update_req
*req
;
4207 struct l2cap_conn_param_update_rsp rsp
;
4208 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
/* only the master may apply connection parameter updates */
4211 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
4214 cmd_len
= __le16_to_cpu(cmd
->len
);
4215 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
4218 req
= (struct l2cap_conn_param_update_req
*) data
;
4219 min
= __le16_to_cpu(req
->min
);
4220 max
= __le16_to_cpu(req
->max
);
4221 latency
= __le16_to_cpu(req
->latency
);
4222 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
4224 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4225 min
, max
, latency
, to_multiplier
);
4227 memset(&rsp
, 0, sizeof(rsp
));
4229 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
4231 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
4233 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
4235 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
/* parameters accepted: push the update down to the controller */
4239 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
4244 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
4245 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
4249 switch (cmd
->code
) {
4250 case L2CAP_COMMAND_REJ
:
4251 l2cap_command_rej(conn
, cmd
, data
);
4254 case L2CAP_CONN_REQ
:
4255 err
= l2cap_connect_req(conn
, cmd
, data
);
4258 case L2CAP_CONN_RSP
:
4259 err
= l2cap_connect_rsp(conn
, cmd
, data
);
4262 case L2CAP_CONF_REQ
:
4263 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
4266 case L2CAP_CONF_RSP
:
4267 err
= l2cap_config_rsp(conn
, cmd
, data
);
4270 case L2CAP_DISCONN_REQ
:
4271 err
= l2cap_disconnect_req(conn
, cmd
, data
);
4274 case L2CAP_DISCONN_RSP
:
4275 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
4278 case L2CAP_ECHO_REQ
:
4279 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
4282 case L2CAP_ECHO_RSP
:
4285 case L2CAP_INFO_REQ
:
4286 err
= l2cap_information_req(conn
, cmd
, data
);
4289 case L2CAP_INFO_RSP
:
4290 err
= l2cap_information_rsp(conn
, cmd
, data
);
4293 case L2CAP_CREATE_CHAN_REQ
:
4294 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
4297 case L2CAP_CREATE_CHAN_RSP
:
4298 err
= l2cap_create_channel_rsp(conn
, cmd
, data
);
4301 case L2CAP_MOVE_CHAN_REQ
:
4302 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
4305 case L2CAP_MOVE_CHAN_RSP
:
4306 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
4309 case L2CAP_MOVE_CHAN_CFM
:
4310 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
4313 case L2CAP_MOVE_CHAN_CFM_RSP
:
4314 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
4318 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
4326 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
4327 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4329 switch (cmd
->code
) {
4330 case L2CAP_COMMAND_REJ
:
4333 case L2CAP_CONN_PARAM_UPDATE_REQ
:
4334 return l2cap_conn_param_update_req(conn
, cmd
, data
);
4336 case L2CAP_CONN_PARAM_UPDATE_RSP
:
4340 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
4345 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
4346 struct sk_buff
*skb
)
4348 u8
*data
= skb
->data
;
4350 struct l2cap_cmd_hdr cmd
;
4353 l2cap_raw_recv(conn
, skb
);
4355 while (len
>= L2CAP_CMD_HDR_SIZE
) {
4357 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
4358 data
+= L2CAP_CMD_HDR_SIZE
;
4359 len
-= L2CAP_CMD_HDR_SIZE
;
4361 cmd_len
= le16_to_cpu(cmd
.len
);
4363 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
4365 if (cmd_len
> len
|| !cmd
.ident
) {
4366 BT_DBG("corrupted command");
4370 if (conn
->hcon
->type
== LE_LINK
)
4371 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
4373 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
4376 struct l2cap_cmd_rej_unk rej
;
4378 BT_ERR("Wrong link type (%d)", err
);
4380 /* FIXME: Map err to a valid reason */
4381 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
4382 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4392 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4394 u16 our_fcs
, rcv_fcs
;
4397 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
4398 hdr_size
= L2CAP_EXT_HDR_SIZE
;
4400 hdr_size
= L2CAP_ENH_HDR_SIZE
;
4402 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
4403 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
4404 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
4405 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
4407 if (our_fcs
!= rcv_fcs
)
4413 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
4415 struct l2cap_ctrl control
;
4417 BT_DBG("chan %p", chan
);
4419 memset(&control
, 0, sizeof(control
));
4422 control
.reqseq
= chan
->buffer_seq
;
4423 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4425 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4426 control
.super
= L2CAP_SUPER_RNR
;
4427 l2cap_send_sframe(chan
, &control
);
4430 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4431 chan
->unacked_frames
> 0)
4432 __set_retrans_timer(chan
);
4434 /* Send pending iframes */
4435 l2cap_ertm_send(chan
);
4437 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
4438 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
4439 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4442 control
.super
= L2CAP_SUPER_RR
;
4443 l2cap_send_sframe(chan
, &control
);
4447 static void append_skb_frag(struct sk_buff
*skb
,
4448 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
4450 /* skb->len reflects data in skb as well as all fragments
4451 * skb->data_len reflects only data in fragments
4453 if (!skb_has_frag_list(skb
))
4454 skb_shinfo(skb
)->frag_list
= new_frag
;
4456 new_frag
->next
= NULL
;
4458 (*last_frag
)->next
= new_frag
;
4459 *last_frag
= new_frag
;
4461 skb
->len
+= new_frag
->len
;
4462 skb
->data_len
+= new_frag
->len
;
4463 skb
->truesize
+= new_frag
->truesize
;
4466 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
4467 struct l2cap_ctrl
*control
)
4471 switch (control
->sar
) {
4472 case L2CAP_SAR_UNSEGMENTED
:
4476 err
= chan
->ops
->recv(chan
->data
, skb
);
4479 case L2CAP_SAR_START
:
4483 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
4484 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
4486 if (chan
->sdu_len
> chan
->imtu
) {
4491 if (skb
->len
>= chan
->sdu_len
)
4495 chan
->sdu_last_frag
= skb
;
4501 case L2CAP_SAR_CONTINUE
:
4505 append_skb_frag(chan
->sdu
, skb
,
4506 &chan
->sdu_last_frag
);
4509 if (chan
->sdu
->len
>= chan
->sdu_len
)
4519 append_skb_frag(chan
->sdu
, skb
,
4520 &chan
->sdu_last_frag
);
4523 if (chan
->sdu
->len
!= chan
->sdu_len
)
4526 err
= chan
->ops
->recv(chan
->data
, chan
->sdu
);
4529 /* Reassembly complete */
4531 chan
->sdu_last_frag
= NULL
;
4539 kfree_skb(chan
->sdu
);
4541 chan
->sdu_last_frag
= NULL
;
4548 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
4552 if (chan
->mode
!= L2CAP_MODE_ERTM
)
4555 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
4556 l2cap_tx(chan
, 0, 0, event
);
4559 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
4562 /* Pass sequential frames to l2cap_reassemble_sdu()
4563 * until a gap is encountered.
4566 BT_DBG("chan %p", chan
);
4568 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4569 struct sk_buff
*skb
;
4570 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4571 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
4573 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
4578 skb_unlink(skb
, &chan
->srej_q
);
4579 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
4580 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
4585 if (skb_queue_empty(&chan
->srej_q
)) {
4586 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
4587 l2cap_send_ack(chan
);
4593 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
4594 struct l2cap_ctrl
*control
)
4596 struct sk_buff
*skb
;
4598 BT_DBG("chan %p, control %p", chan
, control
);
4600 if (control
->reqseq
== chan
->next_tx_seq
) {
4601 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4602 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4606 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4609 BT_DBG("Seq %d not available for retransmission",
4614 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4615 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4616 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4620 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4622 if (control
->poll
) {
4623 l2cap_pass_to_tx(chan
, control
);
4625 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4626 l2cap_retransmit(chan
, control
);
4627 l2cap_ertm_send(chan
);
4629 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4630 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4631 chan
->srej_save_reqseq
= control
->reqseq
;
4634 l2cap_pass_to_tx_fbit(chan
, control
);
4636 if (control
->final
) {
4637 if (chan
->srej_save_reqseq
!= control
->reqseq
||
4638 !test_and_clear_bit(CONN_SREJ_ACT
,
4640 l2cap_retransmit(chan
, control
);
4642 l2cap_retransmit(chan
, control
);
4643 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4644 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4645 chan
->srej_save_reqseq
= control
->reqseq
;
4651 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
4652 struct l2cap_ctrl
*control
)
4654 struct sk_buff
*skb
;
4656 BT_DBG("chan %p, control %p", chan
, control
);
4658 if (control
->reqseq
== chan
->next_tx_seq
) {
4659 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4660 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4664 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4666 if (chan
->max_tx
&& skb
&&
4667 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4668 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4669 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4673 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4675 l2cap_pass_to_tx(chan
, control
);
4677 if (control
->final
) {
4678 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4679 l2cap_retransmit_all(chan
, control
);
4681 l2cap_retransmit_all(chan
, control
);
4682 l2cap_ertm_send(chan
);
4683 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
4684 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4688 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
4690 BT_DBG("chan %p, txseq %d", chan
, txseq
);
4692 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
4693 chan
->expected_tx_seq
);
4695 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
4696 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4698 /* See notes below regarding "double poll" and
4701 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4702 BT_DBG("Invalid/Ignore - after SREJ");
4703 return L2CAP_TXSEQ_INVALID_IGNORE
;
4705 BT_DBG("Invalid - in window after SREJ sent");
4706 return L2CAP_TXSEQ_INVALID
;
4710 if (chan
->srej_list
.head
== txseq
) {
4711 BT_DBG("Expected SREJ");
4712 return L2CAP_TXSEQ_EXPECTED_SREJ
;
4715 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
4716 BT_DBG("Duplicate SREJ - txseq already stored");
4717 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
4720 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
4721 BT_DBG("Unexpected SREJ - not requested");
4722 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
4726 if (chan
->expected_tx_seq
== txseq
) {
4727 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4729 BT_DBG("Invalid - txseq outside tx window");
4730 return L2CAP_TXSEQ_INVALID
;
4733 return L2CAP_TXSEQ_EXPECTED
;
4737 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
4738 __seq_offset(chan
, chan
->expected_tx_seq
,
4739 chan
->last_acked_seq
)){
4740 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4741 return L2CAP_TXSEQ_DUPLICATE
;
4744 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
4745 /* A source of invalid packets is a "double poll" condition,
4746 * where delays cause us to send multiple poll packets. If
4747 * the remote stack receives and processes both polls,
4748 * sequence numbers can wrap around in such a way that a
4749 * resent frame has a sequence number that looks like new data
4750 * with a sequence gap. This would trigger an erroneous SREJ
4753 * Fortunately, this is impossible with a tx window that's
4754 * less than half of the maximum sequence number, which allows
4755 * invalid frames to be safely ignored.
4757 * With tx window sizes greater than half of the tx window
4758 * maximum, the frame is invalid and cannot be ignored. This
4759 * causes a disconnect.
4762 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4763 BT_DBG("Invalid/Ignore - txseq outside tx window");
4764 return L2CAP_TXSEQ_INVALID_IGNORE
;
4766 BT_DBG("Invalid - txseq outside tx window");
4767 return L2CAP_TXSEQ_INVALID
;
4770 BT_DBG("Unexpected - txseq indicates missing frames");
4771 return L2CAP_TXSEQ_UNEXPECTED
;
4775 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
4776 struct l2cap_ctrl
*control
,
4777 struct sk_buff
*skb
, u8 event
)
4780 bool skb_in_use
= 0;
4782 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4786 case L2CAP_EV_RECV_IFRAME
:
4787 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
4788 case L2CAP_TXSEQ_EXPECTED
:
4789 l2cap_pass_to_tx(chan
, control
);
4791 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4792 BT_DBG("Busy, discarding expected seq %d",
4797 chan
->expected_tx_seq
= __next_seq(chan
,
4800 chan
->buffer_seq
= chan
->expected_tx_seq
;
4803 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
4807 if (control
->final
) {
4808 if (!test_and_clear_bit(CONN_REJ_ACT
,
4809 &chan
->conn_state
)) {
4811 l2cap_retransmit_all(chan
, control
);
4812 l2cap_ertm_send(chan
);
4816 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
4817 l2cap_send_ack(chan
);
4819 case L2CAP_TXSEQ_UNEXPECTED
:
4820 l2cap_pass_to_tx(chan
, control
);
4822 /* Can't issue SREJ frames in the local busy state.
4823 * Drop this frame, it will be seen as missing
4824 * when local busy is exited.
4826 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4827 BT_DBG("Busy, discarding unexpected seq %d",
4832 /* There was a gap in the sequence, so an SREJ
4833 * must be sent for each missing frame. The
4834 * current frame is stored for later use.
4836 skb_queue_tail(&chan
->srej_q
, skb
);
4838 BT_DBG("Queued %p (queue len %d)", skb
,
4839 skb_queue_len(&chan
->srej_q
));
4841 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4842 l2cap_seq_list_clear(&chan
->srej_list
);
4843 l2cap_send_srej(chan
, control
->txseq
);
4845 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
4847 case L2CAP_TXSEQ_DUPLICATE
:
4848 l2cap_pass_to_tx(chan
, control
);
4850 case L2CAP_TXSEQ_INVALID_IGNORE
:
4852 case L2CAP_TXSEQ_INVALID
:
4854 l2cap_send_disconn_req(chan
->conn
, chan
,
4859 case L2CAP_EV_RECV_RR
:
4860 l2cap_pass_to_tx(chan
, control
);
4861 if (control
->final
) {
4862 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4864 if (!test_and_clear_bit(CONN_REJ_ACT
,
4865 &chan
->conn_state
)) {
4867 l2cap_retransmit_all(chan
, control
);
4870 l2cap_ertm_send(chan
);
4871 } else if (control
->poll
) {
4872 l2cap_send_i_or_rr_or_rnr(chan
);
4874 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4875 &chan
->conn_state
) &&
4876 chan
->unacked_frames
)
4877 __set_retrans_timer(chan
);
4879 l2cap_ertm_send(chan
);
4882 case L2CAP_EV_RECV_RNR
:
4883 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4884 l2cap_pass_to_tx(chan
, control
);
4885 if (control
&& control
->poll
) {
4886 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4887 l2cap_send_rr_or_rnr(chan
, 0);
4889 __clear_retrans_timer(chan
);
4890 l2cap_seq_list_clear(&chan
->retrans_list
);
4892 case L2CAP_EV_RECV_REJ
:
4893 l2cap_handle_rej(chan
, control
);
4895 case L2CAP_EV_RECV_SREJ
:
4896 l2cap_handle_srej(chan
, control
);
4902 if (skb
&& !skb_in_use
) {
4903 BT_DBG("Freeing %p", skb
);
4910 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
4911 struct l2cap_ctrl
*control
,
4912 struct sk_buff
*skb
, u8 event
)
4915 u16 txseq
= control
->txseq
;
4916 bool skb_in_use
= 0;
4918 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4922 case L2CAP_EV_RECV_IFRAME
:
4923 switch (l2cap_classify_txseq(chan
, txseq
)) {
4924 case L2CAP_TXSEQ_EXPECTED
:
4925 /* Keep frame for reassembly later */
4926 l2cap_pass_to_tx(chan
, control
);
4927 skb_queue_tail(&chan
->srej_q
, skb
);
4929 BT_DBG("Queued %p (queue len %d)", skb
,
4930 skb_queue_len(&chan
->srej_q
));
4932 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
4934 case L2CAP_TXSEQ_EXPECTED_SREJ
:
4935 l2cap_seq_list_pop(&chan
->srej_list
);
4937 l2cap_pass_to_tx(chan
, control
);
4938 skb_queue_tail(&chan
->srej_q
, skb
);
4940 BT_DBG("Queued %p (queue len %d)", skb
,
4941 skb_queue_len(&chan
->srej_q
));
4943 err
= l2cap_rx_queued_iframes(chan
);
4948 case L2CAP_TXSEQ_UNEXPECTED
:
4949 /* Got a frame that can't be reassembled yet.
4950 * Save it for later, and send SREJs to cover
4951 * the missing frames.
4953 skb_queue_tail(&chan
->srej_q
, skb
);
4955 BT_DBG("Queued %p (queue len %d)", skb
,
4956 skb_queue_len(&chan
->srej_q
));
4958 l2cap_pass_to_tx(chan
, control
);
4959 l2cap_send_srej(chan
, control
->txseq
);
4961 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
4962 /* This frame was requested with an SREJ, but
4963 * some expected retransmitted frames are
4964 * missing. Request retransmission of missing
4967 skb_queue_tail(&chan
->srej_q
, skb
);
4969 BT_DBG("Queued %p (queue len %d)", skb
,
4970 skb_queue_len(&chan
->srej_q
));
4972 l2cap_pass_to_tx(chan
, control
);
4973 l2cap_send_srej_list(chan
, control
->txseq
);
4975 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
4976 /* We've already queued this frame. Drop this copy. */
4977 l2cap_pass_to_tx(chan
, control
);
4979 case L2CAP_TXSEQ_DUPLICATE
:
4980 /* Expecting a later sequence number, so this frame
4981 * was already received. Ignore it completely.
4984 case L2CAP_TXSEQ_INVALID_IGNORE
:
4986 case L2CAP_TXSEQ_INVALID
:
4988 l2cap_send_disconn_req(chan
->conn
, chan
,
4993 case L2CAP_EV_RECV_RR
:
4994 l2cap_pass_to_tx(chan
, control
);
4995 if (control
->final
) {
4996 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4998 if (!test_and_clear_bit(CONN_REJ_ACT
,
4999 &chan
->conn_state
)) {
5001 l2cap_retransmit_all(chan
, control
);
5004 l2cap_ertm_send(chan
);
5005 } else if (control
->poll
) {
5006 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5007 &chan
->conn_state
) &&
5008 chan
->unacked_frames
) {
5009 __set_retrans_timer(chan
);
5012 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5013 l2cap_send_srej_tail(chan
);
5015 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5016 &chan
->conn_state
) &&
5017 chan
->unacked_frames
)
5018 __set_retrans_timer(chan
);
5020 l2cap_send_ack(chan
);
5023 case L2CAP_EV_RECV_RNR
:
5024 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5025 l2cap_pass_to_tx(chan
, control
);
5026 if (control
->poll
) {
5027 l2cap_send_srej_tail(chan
);
5029 struct l2cap_ctrl rr_control
;
5030 memset(&rr_control
, 0, sizeof(rr_control
));
5031 rr_control
.sframe
= 1;
5032 rr_control
.super
= L2CAP_SUPER_RR
;
5033 rr_control
.reqseq
= chan
->buffer_seq
;
5034 l2cap_send_sframe(chan
, &rr_control
);
5038 case L2CAP_EV_RECV_REJ
:
5039 l2cap_handle_rej(chan
, control
);
5041 case L2CAP_EV_RECV_SREJ
:
5042 l2cap_handle_srej(chan
, control
);
5046 if (skb
&& !skb_in_use
) {
5047 BT_DBG("Freeing %p", skb
);
5054 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
5056 /* Make sure reqseq is for a packet that has been sent but not acked */
5059 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
5060 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
5063 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5064 struct sk_buff
*skb
, u8 event
)
5068 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
5069 control
, skb
, event
, chan
->rx_state
);
5071 if (__valid_reqseq(chan
, control
->reqseq
)) {
5072 switch (chan
->rx_state
) {
5073 case L2CAP_RX_STATE_RECV
:
5074 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
5076 case L2CAP_RX_STATE_SREJ_SENT
:
5077 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
5085 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5086 control
->reqseq
, chan
->next_tx_seq
,
5087 chan
->expected_ack_seq
);
5088 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5094 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5095 struct sk_buff
*skb
)
5099 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
5102 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
5103 L2CAP_TXSEQ_EXPECTED
) {
5104 l2cap_pass_to_tx(chan
, control
);
5106 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
5107 __next_seq(chan
, chan
->buffer_seq
));
5109 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5111 l2cap_reassemble_sdu(chan
, skb
, control
);
5114 kfree_skb(chan
->sdu
);
5117 chan
->sdu_last_frag
= NULL
;
5121 BT_DBG("Freeing %p", skb
);
5126 chan
->last_acked_seq
= control
->txseq
;
5127 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
5132 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5134 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
5138 __unpack_control(chan
, skb
);
5143 * We can just drop the corrupted I-frame here.
5144 * Receiver will miss it and start proper recovery
5145 * procedures and ask for retransmission.
5147 if (l2cap_check_fcs(chan
, skb
))
5150 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
5151 len
-= L2CAP_SDULEN_SIZE
;
5153 if (chan
->fcs
== L2CAP_FCS_CRC16
)
5154 len
-= L2CAP_FCS_SIZE
;
5156 if (len
> chan
->mps
) {
5157 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5161 if (!control
->sframe
) {
5164 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5165 control
->sar
, control
->reqseq
, control
->final
,
5168 /* Validate F-bit - F=0 always valid, F=1 only
5169 * valid in TX WAIT_F
5171 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
5174 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
5175 event
= L2CAP_EV_RECV_IFRAME
;
5176 err
= l2cap_rx(chan
, control
, skb
, event
);
5178 err
= l2cap_stream_rx(chan
, control
, skb
);
5182 l2cap_send_disconn_req(chan
->conn
, chan
,
5185 const u8 rx_func_to_event
[4] = {
5186 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
5187 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
5190 /* Only I-frames are expected in streaming mode */
5191 if (chan
->mode
== L2CAP_MODE_STREAMING
)
5194 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5195 control
->reqseq
, control
->final
, control
->poll
,
5200 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5204 /* Validate F and P bits */
5205 if (control
->final
&& (control
->poll
||
5206 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
5209 event
= rx_func_to_event
[control
->super
];
5210 if (l2cap_rx(chan
, control
, skb
, event
))
5211 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5221 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
5223 struct l2cap_chan
*chan
;
5225 chan
= l2cap_get_chan_by_scid(conn
, cid
);
5227 BT_DBG("unknown cid 0x%4.4x", cid
);
5228 /* Drop packet and return */
5233 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5235 if (chan
->state
!= BT_CONNECTED
)
5238 switch (chan
->mode
) {
5239 case L2CAP_MODE_BASIC
:
5240 /* If socket recv buffers overflows we drop data here
5241 * which is *bad* because L2CAP has to be reliable.
5242 * But we don't have any other choice. L2CAP doesn't
5243 * provide flow control mechanism. */
5245 if (chan
->imtu
< skb
->len
)
5248 if (!chan
->ops
->recv(chan
->data
, skb
))
5252 case L2CAP_MODE_ERTM
:
5253 case L2CAP_MODE_STREAMING
:
5254 l2cap_data_rcv(chan
, skb
);
5258 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
5266 l2cap_chan_unlock(chan
);
5271 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
5273 struct l2cap_chan
*chan
;
5275 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
5279 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5281 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5284 if (chan
->imtu
< skb
->len
)
5287 if (!chan
->ops
->recv(chan
->data
, skb
))
5296 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
5297 struct sk_buff
*skb
)
5299 struct l2cap_chan
*chan
;
5301 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
5305 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5307 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5310 if (chan
->imtu
< skb
->len
)
5313 if (!chan
->ops
->recv(chan
->data
, skb
))
5322 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
5324 struct l2cap_hdr
*lh
= (void *) skb
->data
;
5328 skb_pull(skb
, L2CAP_HDR_SIZE
);
5329 cid
= __le16_to_cpu(lh
->cid
);
5330 len
= __le16_to_cpu(lh
->len
);
5332 if (len
!= skb
->len
) {
5337 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
5340 case L2CAP_CID_LE_SIGNALING
:
5341 case L2CAP_CID_SIGNALING
:
5342 l2cap_sig_channel(conn
, skb
);
5345 case L2CAP_CID_CONN_LESS
:
5346 psm
= get_unaligned((__le16
*) skb
->data
);
5348 l2cap_conless_channel(conn
, psm
, skb
);
5351 case L2CAP_CID_LE_DATA
:
5352 l2cap_att_channel(conn
, cid
, skb
);
5356 if (smp_sig_channel(conn
, skb
))
5357 l2cap_conn_del(conn
->hcon
, EACCES
);
5361 l2cap_data_channel(conn
, cid
, skb
);
5366 /* ---- L2CAP interface with lower layer (HCI) ---- */
5368 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
5370 int exact
= 0, lm1
= 0, lm2
= 0;
5371 struct l2cap_chan
*c
;
5373 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
5375 /* Find listening sockets and check their link_mode */
5376 read_lock(&chan_list_lock
);
5377 list_for_each_entry(c
, &chan_list
, global_l
) {
5378 struct sock
*sk
= c
->sk
;
5380 if (c
->state
!= BT_LISTEN
)
5383 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
5384 lm1
|= HCI_LM_ACCEPT
;
5385 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5386 lm1
|= HCI_LM_MASTER
;
5388 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
5389 lm2
|= HCI_LM_ACCEPT
;
5390 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5391 lm2
|= HCI_LM_MASTER
;
5394 read_unlock(&chan_list_lock
);
5396 return exact
? lm1
: lm2
;
5399 int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
5401 struct l2cap_conn
*conn
;
5403 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
5406 conn
= l2cap_conn_add(hcon
, status
);
5408 l2cap_conn_ready(conn
);
5410 l2cap_conn_del(hcon
, bt_to_errno(status
));
5415 int l2cap_disconn_ind(struct hci_conn
*hcon
)
5417 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5419 BT_DBG("hcon %p", hcon
);
5422 return HCI_ERROR_REMOTE_USER_TERM
;
5423 return conn
->disc_reason
;
5426 int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
5428 BT_DBG("hcon %p reason %d", hcon
, reason
);
5430 l2cap_conn_del(hcon
, bt_to_errno(reason
));
5434 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
5436 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
5439 if (encrypt
== 0x00) {
5440 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
5441 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
5442 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
5443 l2cap_chan_close(chan
, ECONNREFUSED
);
5445 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
5446 __clear_chan_timer(chan
);
5450 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
5452 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5453 struct l2cap_chan
*chan
;
5458 BT_DBG("conn %p", conn
);
5460 if (hcon
->type
== LE_LINK
) {
5461 if (!status
&& encrypt
)
5462 smp_distribute_keys(conn
, 0);
5463 cancel_delayed_work(&conn
->security_timer
);
5466 mutex_lock(&conn
->chan_lock
);
5468 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
5469 l2cap_chan_lock(chan
);
5471 BT_DBG("chan->scid %d", chan
->scid
);
5473 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
5474 if (!status
&& encrypt
) {
5475 chan
->sec_level
= hcon
->sec_level
;
5476 l2cap_chan_ready(chan
);
5479 l2cap_chan_unlock(chan
);
5483 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
5484 l2cap_chan_unlock(chan
);
5488 if (!status
&& (chan
->state
== BT_CONNECTED
||
5489 chan
->state
== BT_CONFIG
)) {
5490 struct sock
*sk
= chan
->sk
;
5492 clear_bit(BT_SK_SUSPEND
, &bt_sk(sk
)->flags
);
5493 sk
->sk_state_change(sk
);
5495 l2cap_check_encryption(chan
, encrypt
);
5496 l2cap_chan_unlock(chan
);
5500 if (chan
->state
== BT_CONNECT
) {
5502 l2cap_send_conn_req(chan
);
5504 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5506 } else if (chan
->state
== BT_CONNECT2
) {
5507 struct sock
*sk
= chan
->sk
;
5508 struct l2cap_conn_rsp rsp
;
5514 if (test_bit(BT_SK_DEFER_SETUP
,
5515 &bt_sk(sk
)->flags
)) {
5516 struct sock
*parent
= bt_sk(sk
)->parent
;
5517 res
= L2CAP_CR_PEND
;
5518 stat
= L2CAP_CS_AUTHOR_PEND
;
5520 parent
->sk_data_ready(parent
, 0);
5522 __l2cap_state_change(chan
, BT_CONFIG
);
5523 res
= L2CAP_CR_SUCCESS
;
5524 stat
= L2CAP_CS_NO_INFO
;
5527 __l2cap_state_change(chan
, BT_DISCONN
);
5528 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5529 res
= L2CAP_CR_SEC_BLOCK
;
5530 stat
= L2CAP_CS_NO_INFO
;
5535 rsp
.scid
= cpu_to_le16(chan
->dcid
);
5536 rsp
.dcid
= cpu_to_le16(chan
->scid
);
5537 rsp
.result
= cpu_to_le16(res
);
5538 rsp
.status
= cpu_to_le16(stat
);
5539 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
5543 l2cap_chan_unlock(chan
);
5546 mutex_unlock(&conn
->chan_lock
);
5551 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
5553 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5556 conn
= l2cap_conn_add(hcon
, 0);
5561 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
5563 if (!(flags
& ACL_CONT
)) {
5564 struct l2cap_hdr
*hdr
;
5568 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
5569 kfree_skb(conn
->rx_skb
);
5570 conn
->rx_skb
= NULL
;
5572 l2cap_conn_unreliable(conn
, ECOMM
);
5575 /* Start fragment always begin with Basic L2CAP header */
5576 if (skb
->len
< L2CAP_HDR_SIZE
) {
5577 BT_ERR("Frame is too short (len %d)", skb
->len
);
5578 l2cap_conn_unreliable(conn
, ECOMM
);
5582 hdr
= (struct l2cap_hdr
*) skb
->data
;
5583 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
5585 if (len
== skb
->len
) {
5586 /* Complete frame received */
5587 l2cap_recv_frame(conn
, skb
);
5591 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
5593 if (skb
->len
> len
) {
5594 BT_ERR("Frame is too long (len %d, expected len %d)",
5596 l2cap_conn_unreliable(conn
, ECOMM
);
5600 /* Allocate skb for the complete frame (with header) */
5601 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
5605 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5607 conn
->rx_len
= len
- skb
->len
;
5609 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
5611 if (!conn
->rx_len
) {
5612 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
5613 l2cap_conn_unreliable(conn
, ECOMM
);
5617 if (skb
->len
> conn
->rx_len
) {
5618 BT_ERR("Fragment is too long (len %d, expected %d)",
5619 skb
->len
, conn
->rx_len
);
5620 kfree_skb(conn
->rx_skb
);
5621 conn
->rx_skb
= NULL
;
5623 l2cap_conn_unreliable(conn
, ECOMM
);
5627 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5629 conn
->rx_len
-= skb
->len
;
5631 if (!conn
->rx_len
) {
5632 /* Complete frame received */
5633 l2cap_recv_frame(conn
, conn
->rx_skb
);
5634 conn
->rx_skb
= NULL
;
5643 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
5645 struct l2cap_chan
*c
;
5647 read_lock(&chan_list_lock
);
5649 list_for_each_entry(c
, &chan_list
, global_l
) {
5650 struct sock
*sk
= c
->sk
;
5652 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5653 batostr(&bt_sk(sk
)->src
),
5654 batostr(&bt_sk(sk
)->dst
),
5655 c
->state
, __le16_to_cpu(c
->psm
),
5656 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
5657 c
->sec_level
, c
->mode
);
5660 read_unlock(&chan_list_lock
);
5665 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
5667 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
5670 static const struct file_operations l2cap_debugfs_fops
= {
5671 .open
= l2cap_debugfs_open
,
5673 .llseek
= seq_lseek
,
5674 .release
= single_release
,
5677 static struct dentry
*l2cap_debugfs
;
5679 int __init
l2cap_init(void)
5683 err
= l2cap_init_sockets();
5688 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
5689 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
5691 BT_ERR("Failed to create L2CAP debug file");
5697 void l2cap_exit(void)
5699 debugfs_remove(l2cap_debugfs
);
5700 l2cap_cleanup_sockets();
5703 module_param(disable_ertm
, bool, 0644);
5704 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");