2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
63 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
65 static LIST_HEAD(chan_list
);
66 static DEFINE_RWLOCK(chan_list_lock
);
68 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
69 u8 code
, u8 ident
, u16 dlen
, void *data
);
70 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
72 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
73 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
74 struct l2cap_chan
*chan
, int err
);
76 static int l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
77 struct sk_buff_head
*skbs
, u8 event
);
79 /* ---- L2CAP channels ---- */
81 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
85 list_for_each_entry(c
, &conn
->chan_l
, list
) {
92 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
96 list_for_each_entry(c
, &conn
->chan_l
, list
) {
103 /* Find channel with given SCID.
104 * Returns locked channel. */
105 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
107 struct l2cap_chan
*c
;
109 mutex_lock(&conn
->chan_lock
);
110 c
= __l2cap_get_chan_by_scid(conn
, cid
);
113 mutex_unlock(&conn
->chan_lock
);
118 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
120 struct l2cap_chan
*c
;
122 list_for_each_entry(c
, &conn
->chan_l
, list
) {
123 if (c
->ident
== ident
)
129 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
131 struct l2cap_chan
*c
;
133 list_for_each_entry(c
, &chan_list
, global_l
) {
134 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
140 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
144 write_lock(&chan_list_lock
);
146 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
159 for (p
= 0x1001; p
< 0x1100; p
+= 2)
160 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
161 chan
->psm
= cpu_to_le16(p
);
162 chan
->sport
= cpu_to_le16(p
);
169 write_unlock(&chan_list_lock
);
173 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
175 write_lock(&chan_list_lock
);
179 write_unlock(&chan_list_lock
);
184 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
186 u16 cid
= L2CAP_CID_DYN_START
;
188 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
189 if (!__l2cap_get_chan_by_scid(conn
, cid
))
196 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
198 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
199 state_to_string(state
));
202 chan
->ops
->state_change(chan
->data
, state
);
205 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
207 struct sock
*sk
= chan
->sk
;
210 __l2cap_state_change(chan
, state
);
214 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
216 struct sock
*sk
= chan
->sk
;
221 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
223 struct sock
*sk
= chan
->sk
;
226 __l2cap_chan_set_err(chan
, err
);
230 static void __set_retrans_timer(struct l2cap_chan
*chan
)
232 if (!delayed_work_pending(&chan
->monitor_timer
) &&
233 chan
->retrans_timeout
) {
234 l2cap_set_timer(chan
, &chan
->retrans_timer
,
235 msecs_to_jiffies(chan
->retrans_timeout
));
239 static void __set_monitor_timer(struct l2cap_chan
*chan
)
241 __clear_retrans_timer(chan
);
242 if (chan
->monitor_timeout
) {
243 l2cap_set_timer(chan
, &chan
->monitor_timer
,
244 msecs_to_jiffies(chan
->monitor_timeout
));
248 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
253 skb_queue_walk(head
, skb
) {
254 if (bt_cb(skb
)->control
.txseq
== seq
)
261 /* ---- L2CAP sequence number lists ---- */
263 /* For ERTM, ordered lists of sequence numbers must be tracked for
264 * SREJ requests that are received and for frames that are to be
265 * retransmitted. These seq_list functions implement a singly-linked
266 * list in an array, where membership in the list can also be checked
267 * in constant time. Items can also be added to the tail of the list
268 and removed from the head in constant time, without further memory allocation.
272 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
274 size_t alloc_size
, i
;
276 /* Allocated size is a power of 2 to map sequence numbers
277 * (which may be up to 14 bits) in to a smaller array that is
278 * sized for the negotiated ERTM transmit windows.
280 alloc_size
= roundup_pow_of_two(size
);
282 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
286 seq_list
->mask
= alloc_size
- 1;
287 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
288 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
289 for (i
= 0; i
< alloc_size
; i
++)
290 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
295 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
297 kfree(seq_list
->list
);
300 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
303 /* Constant-time check for list membership */
304 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
307 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
309 u16 mask
= seq_list
->mask
;
311 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
312 /* In case someone tries to pop the head of an empty list */
313 return L2CAP_SEQ_LIST_CLEAR
;
314 } else if (seq_list
->head
== seq
) {
315 /* Head can be removed in constant time */
316 seq_list
->head
= seq_list
->list
[seq
& mask
];
317 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
319 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
320 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
321 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
324 /* Walk the list to find the sequence number */
325 u16 prev
= seq_list
->head
;
326 while (seq_list
->list
[prev
& mask
] != seq
) {
327 prev
= seq_list
->list
[prev
& mask
];
328 if (prev
== L2CAP_SEQ_LIST_TAIL
)
329 return L2CAP_SEQ_LIST_CLEAR
;
332 /* Unlink the number from the list and clear it */
333 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
334 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
335 if (seq_list
->tail
== seq
)
336 seq_list
->tail
= prev
;
341 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
343 /* Remove the head in constant time */
344 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
347 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
351 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
354 for (i
= 0; i
<= seq_list
->mask
; i
++)
355 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
357 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
358 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
361 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
363 u16 mask
= seq_list
->mask
;
365 /* All appends happen in constant time */
367 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
370 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
371 seq_list
->head
= seq
;
373 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
375 seq_list
->tail
= seq
;
376 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
379 static void l2cap_chan_timeout(struct work_struct
*work
)
381 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
383 struct l2cap_conn
*conn
= chan
->conn
;
386 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
388 mutex_lock(&conn
->chan_lock
);
389 l2cap_chan_lock(chan
);
391 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
392 reason
= ECONNREFUSED
;
393 else if (chan
->state
== BT_CONNECT
&&
394 chan
->sec_level
!= BT_SECURITY_SDP
)
395 reason
= ECONNREFUSED
;
399 l2cap_chan_close(chan
, reason
);
401 l2cap_chan_unlock(chan
);
403 chan
->ops
->close(chan
->data
);
404 mutex_unlock(&conn
->chan_lock
);
406 l2cap_chan_put(chan
);
409 struct l2cap_chan
*l2cap_chan_create(void)
411 struct l2cap_chan
*chan
;
413 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
417 mutex_init(&chan
->lock
);
419 write_lock(&chan_list_lock
);
420 list_add(&chan
->global_l
, &chan_list
);
421 write_unlock(&chan_list_lock
);
423 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
425 chan
->state
= BT_OPEN
;
427 atomic_set(&chan
->refcnt
, 1);
429 /* This flag is cleared in l2cap_chan_ready() */
430 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
432 BT_DBG("chan %p", chan
);
437 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
439 write_lock(&chan_list_lock
);
440 list_del(&chan
->global_l
);
441 write_unlock(&chan_list_lock
);
443 l2cap_chan_put(chan
);
446 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
448 chan
->fcs
= L2CAP_FCS_CRC16
;
449 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
450 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
451 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
452 chan
->sec_level
= BT_SECURITY_LOW
;
454 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
457 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
459 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
460 __le16_to_cpu(chan
->psm
), chan
->dcid
);
462 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
466 switch (chan
->chan_type
) {
467 case L2CAP_CHAN_CONN_ORIENTED
:
468 if (conn
->hcon
->type
== LE_LINK
) {
470 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
471 chan
->scid
= L2CAP_CID_LE_DATA
;
472 chan
->dcid
= L2CAP_CID_LE_DATA
;
474 /* Alloc CID for connection-oriented socket */
475 chan
->scid
= l2cap_alloc_cid(conn
);
476 chan
->omtu
= L2CAP_DEFAULT_MTU
;
480 case L2CAP_CHAN_CONN_LESS
:
481 /* Connectionless socket */
482 chan
->scid
= L2CAP_CID_CONN_LESS
;
483 chan
->dcid
= L2CAP_CID_CONN_LESS
;
484 chan
->omtu
= L2CAP_DEFAULT_MTU
;
488 /* Raw socket can send/recv signalling messages only */
489 chan
->scid
= L2CAP_CID_SIGNALING
;
490 chan
->dcid
= L2CAP_CID_SIGNALING
;
491 chan
->omtu
= L2CAP_DEFAULT_MTU
;
494 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
495 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
496 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
497 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
498 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
499 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
501 l2cap_chan_hold(chan
);
503 list_add(&chan
->list
, &conn
->chan_l
);
506 static void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
508 mutex_lock(&conn
->chan_lock
);
509 __l2cap_chan_add(conn
, chan
);
510 mutex_unlock(&conn
->chan_lock
);
513 static void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
515 struct sock
*sk
= chan
->sk
;
516 struct l2cap_conn
*conn
= chan
->conn
;
517 struct sock
*parent
= bt_sk(sk
)->parent
;
519 __clear_chan_timer(chan
);
521 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
524 /* Delete from channel list */
525 list_del(&chan
->list
);
527 l2cap_chan_put(chan
);
530 hci_conn_put(conn
->hcon
);
535 __l2cap_state_change(chan
, BT_CLOSED
);
536 sock_set_flag(sk
, SOCK_ZAPPED
);
539 __l2cap_chan_set_err(chan
, err
);
542 bt_accept_unlink(sk
);
543 parent
->sk_data_ready(parent
, 0);
545 sk
->sk_state_change(sk
);
549 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
553 case L2CAP_MODE_BASIC
:
556 case L2CAP_MODE_ERTM
:
557 __clear_retrans_timer(chan
);
558 __clear_monitor_timer(chan
);
559 __clear_ack_timer(chan
);
561 skb_queue_purge(&chan
->srej_q
);
563 l2cap_seq_list_free(&chan
->srej_list
);
564 l2cap_seq_list_free(&chan
->retrans_list
);
568 case L2CAP_MODE_STREAMING
:
569 skb_queue_purge(&chan
->tx_q
);
576 static void l2cap_chan_cleanup_listen(struct sock
*parent
)
580 BT_DBG("parent %p", parent
);
582 /* Close not yet accepted channels */
583 while ((sk
= bt_accept_dequeue(parent
, NULL
))) {
584 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
586 l2cap_chan_lock(chan
);
587 __clear_chan_timer(chan
);
588 l2cap_chan_close(chan
, ECONNRESET
);
589 l2cap_chan_unlock(chan
);
591 chan
->ops
->close(chan
->data
);
595 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
597 struct l2cap_conn
*conn
= chan
->conn
;
598 struct sock
*sk
= chan
->sk
;
600 BT_DBG("chan %p state %s sk %p", chan
,
601 state_to_string(chan
->state
), sk
);
603 switch (chan
->state
) {
606 l2cap_chan_cleanup_listen(sk
);
608 __l2cap_state_change(chan
, BT_CLOSED
);
609 sock_set_flag(sk
, SOCK_ZAPPED
);
615 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
616 conn
->hcon
->type
== ACL_LINK
) {
617 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
618 l2cap_send_disconn_req(conn
, chan
, reason
);
620 l2cap_chan_del(chan
, reason
);
624 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
625 conn
->hcon
->type
== ACL_LINK
) {
626 struct l2cap_conn_rsp rsp
;
629 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
))
630 result
= L2CAP_CR_SEC_BLOCK
;
632 result
= L2CAP_CR_BAD_PSM
;
633 l2cap_state_change(chan
, BT_DISCONN
);
635 rsp
.scid
= cpu_to_le16(chan
->dcid
);
636 rsp
.dcid
= cpu_to_le16(chan
->scid
);
637 rsp
.result
= cpu_to_le16(result
);
638 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
639 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
643 l2cap_chan_del(chan
, reason
);
648 l2cap_chan_del(chan
, reason
);
653 sock_set_flag(sk
, SOCK_ZAPPED
);
659 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
661 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
662 switch (chan
->sec_level
) {
663 case BT_SECURITY_HIGH
:
664 return HCI_AT_DEDICATED_BONDING_MITM
;
665 case BT_SECURITY_MEDIUM
:
666 return HCI_AT_DEDICATED_BONDING
;
668 return HCI_AT_NO_BONDING
;
670 } else if (chan
->psm
== cpu_to_le16(0x0001)) {
671 if (chan
->sec_level
== BT_SECURITY_LOW
)
672 chan
->sec_level
= BT_SECURITY_SDP
;
674 if (chan
->sec_level
== BT_SECURITY_HIGH
)
675 return HCI_AT_NO_BONDING_MITM
;
677 return HCI_AT_NO_BONDING
;
679 switch (chan
->sec_level
) {
680 case BT_SECURITY_HIGH
:
681 return HCI_AT_GENERAL_BONDING_MITM
;
682 case BT_SECURITY_MEDIUM
:
683 return HCI_AT_GENERAL_BONDING
;
685 return HCI_AT_NO_BONDING
;
690 /* Service level security */
691 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
693 struct l2cap_conn
*conn
= chan
->conn
;
696 auth_type
= l2cap_get_auth_type(chan
);
698 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
701 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
705 /* Get next available identificator.
706 * 1 - 128 are used by kernel.
707 * 129 - 199 are reserved.
708 * 200 - 254 are used by utilities like l2ping, etc.
711 spin_lock(&conn
->lock
);
713 if (++conn
->tx_ident
> 128)
718 spin_unlock(&conn
->lock
);
723 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
725 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
728 BT_DBG("code 0x%2.2x", code
);
733 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
734 flags
= ACL_START_NO_FLUSH
;
738 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
739 skb
->priority
= HCI_PRIO_MAX
;
741 hci_send_acl(conn
->hchan
, skb
, flags
);
744 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
746 struct hci_conn
*hcon
= chan
->conn
->hcon
;
749 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
752 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
753 lmp_no_flush_capable(hcon
->hdev
))
754 flags
= ACL_START_NO_FLUSH
;
758 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
759 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
762 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
764 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
765 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
767 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
770 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
771 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
778 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
779 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
786 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
788 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
789 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
791 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
794 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
795 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
802 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
803 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
810 static inline void __unpack_control(struct l2cap_chan
*chan
,
813 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
814 __unpack_extended_control(get_unaligned_le32(skb
->data
),
815 &bt_cb(skb
)->control
);
816 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
818 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
819 &bt_cb(skb
)->control
);
820 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
824 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
828 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
829 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
831 if (control
->sframe
) {
832 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
833 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
834 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
836 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
837 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
843 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
847 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
848 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
850 if (control
->sframe
) {
851 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
852 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
853 packed
|= L2CAP_CTRL_FRAME_TYPE
;
855 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
856 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
862 static inline void __pack_control(struct l2cap_chan
*chan
,
863 struct l2cap_ctrl
*control
,
866 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
867 put_unaligned_le32(__pack_extended_control(control
),
868 skb
->data
+ L2CAP_HDR_SIZE
);
870 put_unaligned_le16(__pack_enhanced_control(control
),
871 skb
->data
+ L2CAP_HDR_SIZE
);
875 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
879 struct l2cap_hdr
*lh
;
882 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
883 hlen
= L2CAP_EXT_HDR_SIZE
;
885 hlen
= L2CAP_ENH_HDR_SIZE
;
887 if (chan
->fcs
== L2CAP_FCS_CRC16
)
888 hlen
+= L2CAP_FCS_SIZE
;
890 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
893 return ERR_PTR(-ENOMEM
);
895 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
896 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
897 lh
->cid
= cpu_to_le16(chan
->dcid
);
899 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
900 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
902 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
904 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
905 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
906 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
909 skb
->priority
= HCI_PRIO_MAX
;
913 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
914 struct l2cap_ctrl
*control
)
919 BT_DBG("chan %p, control %p", chan
, control
);
921 if (!control
->sframe
)
924 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
928 if (control
->super
== L2CAP_SUPER_RR
)
929 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
930 else if (control
->super
== L2CAP_SUPER_RNR
)
931 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
933 if (control
->super
!= L2CAP_SUPER_SREJ
) {
934 chan
->last_acked_seq
= control
->reqseq
;
935 __clear_ack_timer(chan
);
938 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
939 control
->final
, control
->poll
, control
->super
);
941 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
942 control_field
= __pack_extended_control(control
);
944 control_field
= __pack_enhanced_control(control
);
946 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
948 l2cap_do_send(chan
, skb
);
951 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
953 struct l2cap_ctrl control
;
955 BT_DBG("chan %p, poll %d", chan
, poll
);
957 memset(&control
, 0, sizeof(control
));
961 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
962 control
.super
= L2CAP_SUPER_RNR
;
964 control
.super
= L2CAP_SUPER_RR
;
966 control
.reqseq
= chan
->buffer_seq
;
967 l2cap_send_sframe(chan
, &control
);
970 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
972 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
975 static void l2cap_send_conn_req(struct l2cap_chan
*chan
)
977 struct l2cap_conn
*conn
= chan
->conn
;
978 struct l2cap_conn_req req
;
980 req
.scid
= cpu_to_le16(chan
->scid
);
983 chan
->ident
= l2cap_get_ident(conn
);
985 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
987 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
990 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
992 struct sock
*sk
= chan
->sk
;
997 parent
= bt_sk(sk
)->parent
;
999 BT_DBG("sk %p, parent %p", sk
, parent
);
1001 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1002 chan
->conf_state
= 0;
1003 __clear_chan_timer(chan
);
1005 __l2cap_state_change(chan
, BT_CONNECTED
);
1006 sk
->sk_state_change(sk
);
1009 parent
->sk_data_ready(parent
, 0);
1014 static void l2cap_do_start(struct l2cap_chan
*chan
)
1016 struct l2cap_conn
*conn
= chan
->conn
;
1018 if (conn
->hcon
->type
== LE_LINK
) {
1019 l2cap_chan_ready(chan
);
1023 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1024 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1027 if (l2cap_chan_check_security(chan
) &&
1028 __l2cap_no_conn_pending(chan
))
1029 l2cap_send_conn_req(chan
);
1031 struct l2cap_info_req req
;
1032 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1034 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1035 conn
->info_ident
= l2cap_get_ident(conn
);
1037 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1039 l2cap_send_cmd(conn
, conn
->info_ident
,
1040 L2CAP_INFO_REQ
, sizeof(req
), &req
);
1044 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1046 u32 local_feat_mask
= l2cap_feat_mask
;
1048 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1051 case L2CAP_MODE_ERTM
:
1052 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1053 case L2CAP_MODE_STREAMING
:
1054 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1060 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
1062 struct sock
*sk
= chan
->sk
;
1063 struct l2cap_disconn_req req
;
1068 if (chan
->mode
== L2CAP_MODE_ERTM
) {
1069 __clear_retrans_timer(chan
);
1070 __clear_monitor_timer(chan
);
1071 __clear_ack_timer(chan
);
1074 req
.dcid
= cpu_to_le16(chan
->dcid
);
1075 req
.scid
= cpu_to_le16(chan
->scid
);
1076 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1077 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
1080 __l2cap_state_change(chan
, BT_DISCONN
);
1081 __l2cap_chan_set_err(chan
, err
);
1085 /* ---- L2CAP connections ---- */
1086 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1088 struct l2cap_chan
*chan
, *tmp
;
1090 BT_DBG("conn %p", conn
);
1092 mutex_lock(&conn
->chan_lock
);
1094 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1095 struct sock
*sk
= chan
->sk
;
1097 l2cap_chan_lock(chan
);
1099 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1100 l2cap_chan_unlock(chan
);
1104 if (chan
->state
== BT_CONNECT
) {
1105 if (!l2cap_chan_check_security(chan
) ||
1106 !__l2cap_no_conn_pending(chan
)) {
1107 l2cap_chan_unlock(chan
);
1111 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1112 && test_bit(CONF_STATE2_DEVICE
,
1113 &chan
->conf_state
)) {
1114 l2cap_chan_close(chan
, ECONNRESET
);
1115 l2cap_chan_unlock(chan
);
1119 l2cap_send_conn_req(chan
);
1121 } else if (chan
->state
== BT_CONNECT2
) {
1122 struct l2cap_conn_rsp rsp
;
1124 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1125 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1127 if (l2cap_chan_check_security(chan
)) {
1129 if (test_bit(BT_SK_DEFER_SETUP
,
1130 &bt_sk(sk
)->flags
)) {
1131 struct sock
*parent
= bt_sk(sk
)->parent
;
1132 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1133 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1135 parent
->sk_data_ready(parent
, 0);
1138 __l2cap_state_change(chan
, BT_CONFIG
);
1139 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1140 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1144 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1145 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1148 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1151 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1152 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1153 l2cap_chan_unlock(chan
);
1157 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1158 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1159 l2cap_build_conf_req(chan
, buf
), buf
);
1160 chan
->num_conf_req
++;
1163 l2cap_chan_unlock(chan
);
1166 mutex_unlock(&conn
->chan_lock
);
1169 /* Find socket with cid and source/destination bdaddr.
1170 * Returns closest match, locked.
1172 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1176 struct l2cap_chan
*c
, *c1
= NULL
;
1178 read_lock(&chan_list_lock
);
1180 list_for_each_entry(c
, &chan_list
, global_l
) {
1181 struct sock
*sk
= c
->sk
;
1183 if (state
&& c
->state
!= state
)
1186 if (c
->scid
== cid
) {
1187 int src_match
, dst_match
;
1188 int src_any
, dst_any
;
1191 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1192 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1193 if (src_match
&& dst_match
) {
1194 read_unlock(&chan_list_lock
);
1199 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1200 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1201 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1202 (src_any
&& dst_any
))
1207 read_unlock(&chan_list_lock
);
1212 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1214 struct sock
*parent
, *sk
;
1215 struct l2cap_chan
*chan
, *pchan
;
1219 /* Check if we have socket listening on cid */
1220 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1221 conn
->src
, conn
->dst
);
1229 /* Check for backlog size */
1230 if (sk_acceptq_is_full(parent
)) {
1231 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
1235 chan
= pchan
->ops
->new_connection(pchan
->data
);
1241 hci_conn_hold(conn
->hcon
);
1243 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1244 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1246 bt_accept_enqueue(parent
, sk
);
1248 l2cap_chan_add(conn
, chan
);
1250 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1252 __l2cap_state_change(chan
, BT_CONNECTED
);
1253 parent
->sk_data_ready(parent
, 0);
1256 release_sock(parent
);
1259 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1261 struct l2cap_chan
*chan
;
1263 BT_DBG("conn %p", conn
);
1265 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1266 l2cap_le_conn_ready(conn
);
1268 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1269 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
1271 mutex_lock(&conn
->chan_lock
);
1273 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1275 l2cap_chan_lock(chan
);
1277 if (conn
->hcon
->type
== LE_LINK
) {
1278 if (smp_conn_security(conn
, chan
->sec_level
))
1279 l2cap_chan_ready(chan
);
1281 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1282 struct sock
*sk
= chan
->sk
;
1283 __clear_chan_timer(chan
);
1285 __l2cap_state_change(chan
, BT_CONNECTED
);
1286 sk
->sk_state_change(sk
);
1289 } else if (chan
->state
== BT_CONNECT
)
1290 l2cap_do_start(chan
);
1292 l2cap_chan_unlock(chan
);
1295 mutex_unlock(&conn
->chan_lock
);
1298 /* Notify sockets that we cannot guaranty reliability anymore */
1299 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1301 struct l2cap_chan
*chan
;
1303 BT_DBG("conn %p", conn
);
1305 mutex_lock(&conn
->chan_lock
);
1307 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1308 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1309 __l2cap_chan_set_err(chan
, err
);
1312 mutex_unlock(&conn
->chan_lock
);
1315 static void l2cap_info_timeout(struct work_struct
*work
)
1317 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1320 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1321 conn
->info_ident
= 0;
1323 l2cap_conn_start(conn
);
1326 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1328 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1329 struct l2cap_chan
*chan
, *l
;
1334 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1336 kfree_skb(conn
->rx_skb
);
1338 mutex_lock(&conn
->chan_lock
);
1341 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1342 l2cap_chan_hold(chan
);
1343 l2cap_chan_lock(chan
);
1345 l2cap_chan_del(chan
, err
);
1347 l2cap_chan_unlock(chan
);
1349 chan
->ops
->close(chan
->data
);
1350 l2cap_chan_put(chan
);
1353 mutex_unlock(&conn
->chan_lock
);
1355 hci_chan_del(conn
->hchan
);
1357 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1358 cancel_delayed_work_sync(&conn
->info_timer
);
1360 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1361 cancel_delayed_work_sync(&conn
->security_timer
);
1362 smp_chan_destroy(conn
);
1365 hcon
->l2cap_data
= NULL
;
1369 static void security_timeout(struct work_struct
*work
)
1371 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1372 security_timer
.work
);
1374 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1377 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1379 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1380 struct hci_chan
*hchan
;
1385 hchan
= hci_chan_create(hcon
);
1389 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
1391 hci_chan_del(hchan
);
1395 hcon
->l2cap_data
= conn
;
1397 conn
->hchan
= hchan
;
1399 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1401 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
1402 conn
->mtu
= hcon
->hdev
->le_mtu
;
1404 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1406 conn
->src
= &hcon
->hdev
->bdaddr
;
1407 conn
->dst
= &hcon
->dst
;
1409 conn
->feat_mask
= 0;
1411 spin_lock_init(&conn
->lock
);
1412 mutex_init(&conn
->chan_lock
);
1414 INIT_LIST_HEAD(&conn
->chan_l
);
1416 if (hcon
->type
== LE_LINK
)
1417 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1419 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1421 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1426 /* ---- Socket interface ---- */
1428 /* Find socket with psm and source / destination bdaddr.
1429 * Returns closest match.
1431 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1435 struct l2cap_chan
*c
, *c1
= NULL
;
1437 read_lock(&chan_list_lock
);
1439 list_for_each_entry(c
, &chan_list
, global_l
) {
1440 struct sock
*sk
= c
->sk
;
1442 if (state
&& c
->state
!= state
)
1445 if (c
->psm
== psm
) {
1446 int src_match
, dst_match
;
1447 int src_any
, dst_any
;
1450 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1451 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1452 if (src_match
&& dst_match
) {
1453 read_unlock(&chan_list_lock
);
1458 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1459 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1460 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1461 (src_any
&& dst_any
))
1466 read_unlock(&chan_list_lock
);
1471 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1472 bdaddr_t
*dst
, u8 dst_type
)
1474 struct sock
*sk
= chan
->sk
;
1475 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1476 struct l2cap_conn
*conn
;
1477 struct hci_conn
*hcon
;
1478 struct hci_dev
*hdev
;
1482 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src
), batostr(dst
),
1483 dst_type
, __le16_to_cpu(chan
->psm
));
1485 hdev
= hci_get_route(dst
, src
);
1487 return -EHOSTUNREACH
;
1491 l2cap_chan_lock(chan
);
1493 /* PSM must be odd and lsb of upper byte must be 0 */
1494 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1495 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1500 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1505 switch (chan
->mode
) {
1506 case L2CAP_MODE_BASIC
:
1508 case L2CAP_MODE_ERTM
:
1509 case L2CAP_MODE_STREAMING
:
1520 switch (sk
->sk_state
) {
1524 /* Already connecting */
1530 /* Already connected */
1546 /* Set destination address and psm */
1547 bacpy(&bt_sk(sk
)->dst
, dst
);
1554 auth_type
= l2cap_get_auth_type(chan
);
1556 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1557 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1558 chan
->sec_level
, auth_type
);
1560 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1561 chan
->sec_level
, auth_type
);
1564 err
= PTR_ERR(hcon
);
1568 conn
= l2cap_conn_add(hcon
, 0);
1575 if (hcon
->type
== LE_LINK
) {
1578 if (!list_empty(&conn
->chan_l
)) {
1587 /* Update source addr of the socket */
1588 bacpy(src
, conn
->src
);
1590 l2cap_chan_unlock(chan
);
1591 l2cap_chan_add(conn
, chan
);
1592 l2cap_chan_lock(chan
);
1594 l2cap_state_change(chan
, BT_CONNECT
);
1595 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1597 if (hcon
->state
== BT_CONNECTED
) {
1598 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1599 __clear_chan_timer(chan
);
1600 if (l2cap_chan_check_security(chan
))
1601 l2cap_state_change(chan
, BT_CONNECTED
);
1603 l2cap_do_start(chan
);
1609 l2cap_chan_unlock(chan
);
1610 hci_dev_unlock(hdev
);
1615 int __l2cap_wait_ack(struct sock
*sk
)
1617 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1618 DECLARE_WAITQUEUE(wait
, current
);
1622 add_wait_queue(sk_sleep(sk
), &wait
);
1623 set_current_state(TASK_INTERRUPTIBLE
);
1624 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1628 if (signal_pending(current
)) {
1629 err
= sock_intr_errno(timeo
);
1634 timeo
= schedule_timeout(timeo
);
1636 set_current_state(TASK_INTERRUPTIBLE
);
1638 err
= sock_error(sk
);
1642 set_current_state(TASK_RUNNING
);
1643 remove_wait_queue(sk_sleep(sk
), &wait
);
1647 static void l2cap_monitor_timeout(struct work_struct
*work
)
1649 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1650 monitor_timer
.work
);
1652 BT_DBG("chan %p", chan
);
1654 l2cap_chan_lock(chan
);
1657 l2cap_chan_unlock(chan
);
1658 l2cap_chan_put(chan
);
1662 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1664 l2cap_chan_unlock(chan
);
1665 l2cap_chan_put(chan
);
1668 static void l2cap_retrans_timeout(struct work_struct
*work
)
1670 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1671 retrans_timer
.work
);
1673 BT_DBG("chan %p", chan
);
1675 l2cap_chan_lock(chan
);
1678 l2cap_chan_unlock(chan
);
1679 l2cap_chan_put(chan
);
1683 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1684 l2cap_chan_unlock(chan
);
1685 l2cap_chan_put(chan
);
1688 static int l2cap_streaming_send(struct l2cap_chan
*chan
,
1689 struct sk_buff_head
*skbs
)
1691 struct sk_buff
*skb
;
1692 struct l2cap_ctrl
*control
;
1694 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1696 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1698 while (!skb_queue_empty(&chan
->tx_q
)) {
1700 skb
= skb_dequeue(&chan
->tx_q
);
1702 bt_cb(skb
)->control
.retries
= 1;
1703 control
= &bt_cb(skb
)->control
;
1705 control
->reqseq
= 0;
1706 control
->txseq
= chan
->next_tx_seq
;
1708 __pack_control(chan
, control
, skb
);
1710 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1711 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1712 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1715 l2cap_do_send(chan
, skb
);
1717 BT_DBG("Sent txseq %d", (int)control
->txseq
);
1719 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1720 chan
->frames_sent
++;
1726 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1728 struct sk_buff
*skb
, *tx_skb
;
1729 struct l2cap_ctrl
*control
;
1732 BT_DBG("chan %p", chan
);
1734 if (chan
->state
!= BT_CONNECTED
)
1737 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1740 while (chan
->tx_send_head
&&
1741 chan
->unacked_frames
< chan
->remote_tx_win
&&
1742 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1744 skb
= chan
->tx_send_head
;
1746 bt_cb(skb
)->control
.retries
= 1;
1747 control
= &bt_cb(skb
)->control
;
1749 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1752 control
->reqseq
= chan
->buffer_seq
;
1753 chan
->last_acked_seq
= chan
->buffer_seq
;
1754 control
->txseq
= chan
->next_tx_seq
;
1756 __pack_control(chan
, control
, skb
);
1758 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1759 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1760 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1763 /* Clone after data has been modified. Data is assumed to be
1764 read-only (for locking purposes) on cloned sk_buffs.
1766 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1771 __set_retrans_timer(chan
);
1773 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1774 chan
->unacked_frames
++;
1775 chan
->frames_sent
++;
1778 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1779 chan
->tx_send_head
= NULL
;
1781 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1783 l2cap_do_send(chan
, tx_skb
);
1784 BT_DBG("Sent txseq %d", (int)control
->txseq
);
1787 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent
,
1788 (int) chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1793 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1795 struct l2cap_ctrl control
;
1796 struct sk_buff
*skb
;
1797 struct sk_buff
*tx_skb
;
1800 BT_DBG("chan %p", chan
);
1802 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1805 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1806 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1808 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1810 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1815 bt_cb(skb
)->control
.retries
++;
1816 control
= bt_cb(skb
)->control
;
1818 if (chan
->max_tx
!= 0 &&
1819 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1820 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1821 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
1822 l2cap_seq_list_clear(&chan
->retrans_list
);
1826 control
.reqseq
= chan
->buffer_seq
;
1827 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1832 if (skb_cloned(skb
)) {
1833 /* Cloned sk_buffs are read-only, so we need a
1836 tx_skb
= skb_copy(skb
, GFP_ATOMIC
);
1838 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1842 l2cap_seq_list_clear(&chan
->retrans_list
);
1846 /* Update skb contents */
1847 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1848 put_unaligned_le32(__pack_extended_control(&control
),
1849 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1851 put_unaligned_le16(__pack_enhanced_control(&control
),
1852 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1855 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1856 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
1857 put_unaligned_le16(fcs
, skb_put(tx_skb
,
1861 l2cap_do_send(chan
, tx_skb
);
1863 BT_DBG("Resent txseq %d", control
.txseq
);
1865 chan
->last_acked_seq
= chan
->buffer_seq
;
1869 static void l2cap_retransmit(struct l2cap_chan
*chan
,
1870 struct l2cap_ctrl
*control
)
1872 BT_DBG("chan %p, control %p", chan
, control
);
1874 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
1875 l2cap_ertm_resend(chan
);
1878 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
1879 struct l2cap_ctrl
*control
)
1881 struct sk_buff
*skb
;
1883 BT_DBG("chan %p, control %p", chan
, control
);
1886 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
1888 l2cap_seq_list_clear(&chan
->retrans_list
);
1890 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1893 if (chan
->unacked_frames
) {
1894 skb_queue_walk(&chan
->tx_q
, skb
) {
1895 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
1896 skb
== chan
->tx_send_head
)
1900 skb_queue_walk_from(&chan
->tx_q
, skb
) {
1901 if (skb
== chan
->tx_send_head
)
1904 l2cap_seq_list_append(&chan
->retrans_list
,
1905 bt_cb(skb
)->control
.txseq
);
1908 l2cap_ertm_resend(chan
);
1912 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1914 struct l2cap_ctrl control
;
1915 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
1916 chan
->last_acked_seq
);
1919 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1920 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
1922 memset(&control
, 0, sizeof(control
));
1925 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
1926 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
1927 __clear_ack_timer(chan
);
1928 control
.super
= L2CAP_SUPER_RNR
;
1929 control
.reqseq
= chan
->buffer_seq
;
1930 l2cap_send_sframe(chan
, &control
);
1932 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
1933 l2cap_ertm_send(chan
);
1934 /* If any i-frames were sent, they included an ack */
1935 if (chan
->buffer_seq
== chan
->last_acked_seq
)
1939 /* Ack now if the tx window is 3/4ths full.
1940 * Calculate without mul or div
1942 threshold
= chan
->tx_win
;
1943 threshold
+= threshold
<< 1;
1946 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack
,
1949 if (frames_to_ack
>= threshold
) {
1950 __clear_ack_timer(chan
);
1951 control
.super
= L2CAP_SUPER_RR
;
1952 control
.reqseq
= chan
->buffer_seq
;
1953 l2cap_send_sframe(chan
, &control
);
1958 __set_ack_timer(chan
);
1962 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
1963 struct msghdr
*msg
, int len
,
1964 int count
, struct sk_buff
*skb
)
1966 struct l2cap_conn
*conn
= chan
->conn
;
1967 struct sk_buff
**frag
;
1970 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1976 /* Continuation fragments (no L2CAP header) */
1977 frag
= &skb_shinfo(skb
)->frag_list
;
1979 struct sk_buff
*tmp
;
1981 count
= min_t(unsigned int, conn
->mtu
, len
);
1983 tmp
= chan
->ops
->alloc_skb(chan
, count
,
1984 msg
->msg_flags
& MSG_DONTWAIT
);
1986 return PTR_ERR(tmp
);
1990 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1993 (*frag
)->priority
= skb
->priority
;
1998 skb
->len
+= (*frag
)->len
;
1999 skb
->data_len
+= (*frag
)->len
;
2001 frag
= &(*frag
)->next
;
2007 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2008 struct msghdr
*msg
, size_t len
,
2011 struct l2cap_conn
*conn
= chan
->conn
;
2012 struct sk_buff
*skb
;
2013 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2014 struct l2cap_hdr
*lh
;
2016 BT_DBG("chan %p len %d priority %u", chan
, (int)len
, priority
);
2018 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2020 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2021 msg
->msg_flags
& MSG_DONTWAIT
);
2025 skb
->priority
= priority
;
2027 /* Create L2CAP header */
2028 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2029 lh
->cid
= cpu_to_le16(chan
->dcid
);
2030 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2031 put_unaligned(chan
->psm
, skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2033 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2034 if (unlikely(err
< 0)) {
2036 return ERR_PTR(err
);
2041 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2042 struct msghdr
*msg
, size_t len
,
2045 struct l2cap_conn
*conn
= chan
->conn
;
2046 struct sk_buff
*skb
;
2048 struct l2cap_hdr
*lh
;
2050 BT_DBG("chan %p len %d", chan
, (int)len
);
2052 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2054 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
2055 msg
->msg_flags
& MSG_DONTWAIT
);
2059 skb
->priority
= priority
;
2061 /* Create L2CAP header */
2062 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2063 lh
->cid
= cpu_to_le16(chan
->dcid
);
2064 lh
->len
= cpu_to_le16(len
);
2066 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2067 if (unlikely(err
< 0)) {
2069 return ERR_PTR(err
);
2074 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2075 struct msghdr
*msg
, size_t len
,
2078 struct l2cap_conn
*conn
= chan
->conn
;
2079 struct sk_buff
*skb
;
2080 int err
, count
, hlen
;
2081 struct l2cap_hdr
*lh
;
2083 BT_DBG("chan %p len %d", chan
, (int)len
);
2086 return ERR_PTR(-ENOTCONN
);
2088 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2089 hlen
= L2CAP_EXT_HDR_SIZE
;
2091 hlen
= L2CAP_ENH_HDR_SIZE
;
2094 hlen
+= L2CAP_SDULEN_SIZE
;
2096 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2097 hlen
+= L2CAP_FCS_SIZE
;
2099 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2101 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2102 msg
->msg_flags
& MSG_DONTWAIT
);
2106 /* Create L2CAP header */
2107 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2108 lh
->cid
= cpu_to_le16(chan
->dcid
);
2109 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2111 /* Control header is populated later */
2112 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2113 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2115 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2118 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2120 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2121 if (unlikely(err
< 0)) {
2123 return ERR_PTR(err
);
2126 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2127 bt_cb(skb
)->control
.retries
= 0;
2131 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2132 struct sk_buff_head
*seg_queue
,
2133 struct msghdr
*msg
, size_t len
)
2135 struct sk_buff
*skb
;
2141 BT_DBG("chan %p, msg %p, len %d", chan
, msg
, (int)len
);
2143 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2144 * so fragmented skbs are not used. The HCI layer's handling
2145 * of fragmented skbs is not compatible with ERTM's queueing.
2148 /* PDU size is derived from the HCI MTU */
2149 pdu_len
= chan
->conn
->mtu
;
2151 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2153 /* Adjust for largest possible L2CAP overhead. */
2154 pdu_len
-= L2CAP_EXT_HDR_SIZE
+ L2CAP_FCS_SIZE
;
2156 /* Remote device may have requested smaller PDUs */
2157 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2159 if (len
<= pdu_len
) {
2160 sar
= L2CAP_SAR_UNSEGMENTED
;
2164 sar
= L2CAP_SAR_START
;
2166 pdu_len
-= L2CAP_SDULEN_SIZE
;
2170 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2173 __skb_queue_purge(seg_queue
);
2174 return PTR_ERR(skb
);
2177 bt_cb(skb
)->control
.sar
= sar
;
2178 __skb_queue_tail(seg_queue
, skb
);
2183 pdu_len
+= L2CAP_SDULEN_SIZE
;
2186 if (len
<= pdu_len
) {
2187 sar
= L2CAP_SAR_END
;
2190 sar
= L2CAP_SAR_CONTINUE
;
2197 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2200 struct sk_buff
*skb
;
2202 struct sk_buff_head seg_queue
;
2204 /* Connectionless channel */
2205 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2206 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2208 return PTR_ERR(skb
);
2210 l2cap_do_send(chan
, skb
);
2214 switch (chan
->mode
) {
2215 case L2CAP_MODE_BASIC
:
2216 /* Check outgoing MTU */
2217 if (len
> chan
->omtu
)
2220 /* Create a basic PDU */
2221 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2223 return PTR_ERR(skb
);
2225 l2cap_do_send(chan
, skb
);
2229 case L2CAP_MODE_ERTM
:
2230 case L2CAP_MODE_STREAMING
:
2231 /* Check outgoing MTU */
2232 if (len
> chan
->omtu
) {
2237 __skb_queue_head_init(&seg_queue
);
2239 /* Do segmentation before calling in to the state machine,
2240 * since it's possible to block while waiting for memory
2243 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2245 /* The channel could have been closed while segmenting,
2246 * check that it is still connected.
2248 if (chan
->state
!= BT_CONNECTED
) {
2249 __skb_queue_purge(&seg_queue
);
2256 if (chan
->mode
== L2CAP_MODE_ERTM
)
2257 err
= l2cap_tx(chan
, NULL
, &seg_queue
,
2258 L2CAP_EV_DATA_REQUEST
);
2260 err
= l2cap_streaming_send(chan
, &seg_queue
);
2265 /* If the skbs were not queued for sending, they'll still be in
2266 * seg_queue and need to be purged.
2268 __skb_queue_purge(&seg_queue
);
2272 BT_DBG("bad state %1.1x", chan
->mode
);
2279 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2281 struct l2cap_ctrl control
;
2284 BT_DBG("chan %p, txseq %d", chan
, txseq
);
2286 memset(&control
, 0, sizeof(control
));
2288 control
.super
= L2CAP_SUPER_SREJ
;
2290 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2291 seq
= __next_seq(chan
, seq
)) {
2292 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2293 control
.reqseq
= seq
;
2294 l2cap_send_sframe(chan
, &control
);
2295 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2299 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2302 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2304 struct l2cap_ctrl control
;
2306 BT_DBG("chan %p", chan
);
2308 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2311 memset(&control
, 0, sizeof(control
));
2313 control
.super
= L2CAP_SUPER_SREJ
;
2314 control
.reqseq
= chan
->srej_list
.tail
;
2315 l2cap_send_sframe(chan
, &control
);
2318 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2320 struct l2cap_ctrl control
;
2324 BT_DBG("chan %p, txseq %d", chan
, txseq
);
2326 memset(&control
, 0, sizeof(control
));
2328 control
.super
= L2CAP_SUPER_SREJ
;
2330 /* Capture initial list head to allow only one pass through the list. */
2331 initial_head
= chan
->srej_list
.head
;
2334 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2335 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2338 control
.reqseq
= seq
;
2339 l2cap_send_sframe(chan
, &control
);
2340 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2341 } while (chan
->srej_list
.head
!= initial_head
);
2344 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2346 struct sk_buff
*acked_skb
;
2349 BT_DBG("chan %p, reqseq %d", chan
, reqseq
);
2351 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2354 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2355 chan
->expected_ack_seq
, chan
->unacked_frames
);
2357 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2358 ackseq
= __next_seq(chan
, ackseq
)) {
2360 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2362 skb_unlink(acked_skb
, &chan
->tx_q
);
2363 kfree_skb(acked_skb
);
2364 chan
->unacked_frames
--;
2368 chan
->expected_ack_seq
= reqseq
;
2370 if (chan
->unacked_frames
== 0)
2371 __clear_retrans_timer(chan
);
2373 BT_DBG("unacked_frames %d", (int) chan
->unacked_frames
);
2376 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2378 BT_DBG("chan %p", chan
);
2380 chan
->expected_tx_seq
= chan
->buffer_seq
;
2381 l2cap_seq_list_clear(&chan
->srej_list
);
2382 skb_queue_purge(&chan
->srej_q
);
2383 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2386 static int l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2387 struct l2cap_ctrl
*control
,
2388 struct sk_buff_head
*skbs
, u8 event
)
2392 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2396 case L2CAP_EV_DATA_REQUEST
:
2397 if (chan
->tx_send_head
== NULL
)
2398 chan
->tx_send_head
= skb_peek(skbs
);
2400 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2401 l2cap_ertm_send(chan
);
2403 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2404 BT_DBG("Enter LOCAL_BUSY");
2405 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2407 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2408 /* The SREJ_SENT state must be aborted if we are to
2409 * enter the LOCAL_BUSY state.
2411 l2cap_abort_rx_srej_sent(chan
);
2414 l2cap_send_ack(chan
);
2417 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2418 BT_DBG("Exit LOCAL_BUSY");
2419 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2421 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2422 struct l2cap_ctrl local_control
;
2424 memset(&local_control
, 0, sizeof(local_control
));
2425 local_control
.sframe
= 1;
2426 local_control
.super
= L2CAP_SUPER_RR
;
2427 local_control
.poll
= 1;
2428 local_control
.reqseq
= chan
->buffer_seq
;
2429 l2cap_send_sframe(chan
, &local_control
);
2431 chan
->retry_count
= 1;
2432 __set_monitor_timer(chan
);
2433 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2436 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2437 l2cap_process_reqseq(chan
, control
->reqseq
);
2439 case L2CAP_EV_EXPLICIT_POLL
:
2440 l2cap_send_rr_or_rnr(chan
, 1);
2441 chan
->retry_count
= 1;
2442 __set_monitor_timer(chan
);
2443 __clear_ack_timer(chan
);
2444 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2446 case L2CAP_EV_RETRANS_TO
:
2447 l2cap_send_rr_or_rnr(chan
, 1);
2448 chan
->retry_count
= 1;
2449 __set_monitor_timer(chan
);
2450 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2452 case L2CAP_EV_RECV_FBIT
:
2453 /* Nothing to process */
2462 static int l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2463 struct l2cap_ctrl
*control
,
2464 struct sk_buff_head
*skbs
, u8 event
)
2468 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2472 case L2CAP_EV_DATA_REQUEST
:
2473 if (chan
->tx_send_head
== NULL
)
2474 chan
->tx_send_head
= skb_peek(skbs
);
2475 /* Queue data, but don't send. */
2476 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2478 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2479 BT_DBG("Enter LOCAL_BUSY");
2480 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2482 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2483 /* The SREJ_SENT state must be aborted if we are to
2484 * enter the LOCAL_BUSY state.
2486 l2cap_abort_rx_srej_sent(chan
);
2489 l2cap_send_ack(chan
);
2492 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2493 BT_DBG("Exit LOCAL_BUSY");
2494 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2496 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2497 struct l2cap_ctrl local_control
;
2498 memset(&local_control
, 0, sizeof(local_control
));
2499 local_control
.sframe
= 1;
2500 local_control
.super
= L2CAP_SUPER_RR
;
2501 local_control
.poll
= 1;
2502 local_control
.reqseq
= chan
->buffer_seq
;
2503 l2cap_send_sframe(chan
, &local_control
);
2505 chan
->retry_count
= 1;
2506 __set_monitor_timer(chan
);
2507 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2510 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2511 l2cap_process_reqseq(chan
, control
->reqseq
);
2515 case L2CAP_EV_RECV_FBIT
:
2516 if (control
&& control
->final
) {
2517 __clear_monitor_timer(chan
);
2518 if (chan
->unacked_frames
> 0)
2519 __set_retrans_timer(chan
);
2520 chan
->retry_count
= 0;
2521 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2522 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2525 case L2CAP_EV_EXPLICIT_POLL
:
2528 case L2CAP_EV_MONITOR_TO
:
2529 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2530 l2cap_send_rr_or_rnr(chan
, 1);
2531 __set_monitor_timer(chan
);
2532 chan
->retry_count
++;
2534 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
2544 static int l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2545 struct sk_buff_head
*skbs
, u8 event
)
2549 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2550 chan
, control
, skbs
, event
, chan
->tx_state
);
2552 switch (chan
->tx_state
) {
2553 case L2CAP_TX_STATE_XMIT
:
2554 err
= l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2556 case L2CAP_TX_STATE_WAIT_F
:
2557 err
= l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2567 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2568 struct l2cap_ctrl
*control
)
2570 BT_DBG("chan %p, control %p", chan
, control
);
2571 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2574 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2575 struct l2cap_ctrl
*control
)
2577 BT_DBG("chan %p, control %p", chan
, control
);
2578 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2581 /* Copy frame to all raw sockets on that connection */
2582 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2584 struct sk_buff
*nskb
;
2585 struct l2cap_chan
*chan
;
2587 BT_DBG("conn %p", conn
);
2589 mutex_lock(&conn
->chan_lock
);
2591 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2592 struct sock
*sk
= chan
->sk
;
2593 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2596 /* Don't send frame to the socket it came from */
2599 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2603 if (chan
->ops
->recv(chan
->data
, nskb
))
2607 mutex_unlock(&conn
->chan_lock
);
2610 /* ---- L2CAP signalling commands ---- */
2611 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2612 u8 code
, u8 ident
, u16 dlen
, void *data
)
2614 struct sk_buff
*skb
, **frag
;
2615 struct l2cap_cmd_hdr
*cmd
;
2616 struct l2cap_hdr
*lh
;
2619 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2620 conn
, code
, ident
, dlen
);
2622 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2623 count
= min_t(unsigned int, conn
->mtu
, len
);
2625 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2629 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2630 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2632 if (conn
->hcon
->type
== LE_LINK
)
2633 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2635 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2637 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2640 cmd
->len
= cpu_to_le16(dlen
);
2643 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2644 memcpy(skb_put(skb
, count
), data
, count
);
2650 /* Continuation fragments (no L2CAP header) */
2651 frag
= &skb_shinfo(skb
)->frag_list
;
2653 count
= min_t(unsigned int, conn
->mtu
, len
);
2655 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2659 memcpy(skb_put(*frag
, count
), data
, count
);
2664 frag
= &(*frag
)->next
;
2674 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2676 struct l2cap_conf_opt
*opt
= *ptr
;
2679 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2687 *val
= *((u8
*) opt
->val
);
2691 *val
= get_unaligned_le16(opt
->val
);
2695 *val
= get_unaligned_le32(opt
->val
);
2699 *val
= (unsigned long) opt
->val
;
2703 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2707 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2709 struct l2cap_conf_opt
*opt
= *ptr
;
2711 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2718 *((u8
*) opt
->val
) = val
;
2722 put_unaligned_le16(val
, opt
->val
);
2726 put_unaligned_le32(val
, opt
->val
);
2730 memcpy(opt
->val
, (void *) val
, len
);
2734 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2737 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2739 struct l2cap_conf_efs efs
;
2741 switch (chan
->mode
) {
2742 case L2CAP_MODE_ERTM
:
2743 efs
.id
= chan
->local_id
;
2744 efs
.stype
= chan
->local_stype
;
2745 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2746 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2747 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2748 efs
.flush_to
= cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO
);
2751 case L2CAP_MODE_STREAMING
:
2753 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2754 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2755 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2764 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
2765 (unsigned long) &efs
);
2768 static void l2cap_ack_timeout(struct work_struct
*work
)
2770 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2774 BT_DBG("chan %p", chan
);
2776 l2cap_chan_lock(chan
);
2778 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2779 chan
->last_acked_seq
);
2782 l2cap_send_rr_or_rnr(chan
, 0);
2784 l2cap_chan_unlock(chan
);
2785 l2cap_chan_put(chan
);
2788 static inline int l2cap_ertm_init(struct l2cap_chan
*chan
)
2792 chan
->next_tx_seq
= 0;
2793 chan
->expected_tx_seq
= 0;
2794 chan
->expected_ack_seq
= 0;
2795 chan
->unacked_frames
= 0;
2796 chan
->buffer_seq
= 0;
2797 chan
->frames_sent
= 0;
2798 chan
->last_acked_seq
= 0;
2800 chan
->sdu_last_frag
= NULL
;
2803 skb_queue_head_init(&chan
->tx_q
);
2805 if (chan
->mode
!= L2CAP_MODE_ERTM
)
2808 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2809 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2811 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
2812 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
2813 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2815 skb_queue_head_init(&chan
->srej_q
);
2817 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
2821 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
2823 l2cap_seq_list_free(&chan
->srej_list
);
2828 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2831 case L2CAP_MODE_STREAMING
:
2832 case L2CAP_MODE_ERTM
:
2833 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2837 return L2CAP_MODE_BASIC
;
2841 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
2843 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
2846 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
2848 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
2851 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
2853 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
2854 __l2cap_ews_supported(chan
)) {
2855 /* use extended control field */
2856 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2857 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2859 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
2860 L2CAP_DEFAULT_TX_WINDOW
);
2861 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2865 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2867 struct l2cap_conf_req
*req
= data
;
2868 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2869 void *ptr
= req
->data
;
2872 BT_DBG("chan %p", chan
);
2874 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2877 switch (chan
->mode
) {
2878 case L2CAP_MODE_STREAMING
:
2879 case L2CAP_MODE_ERTM
:
2880 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2883 if (__l2cap_efs_supported(chan
))
2884 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2888 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2893 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2894 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2896 switch (chan
->mode
) {
2897 case L2CAP_MODE_BASIC
:
2898 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2899 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2902 rfc
.mode
= L2CAP_MODE_BASIC
;
2904 rfc
.max_transmit
= 0;
2905 rfc
.retrans_timeout
= 0;
2906 rfc
.monitor_timeout
= 0;
2907 rfc
.max_pdu_size
= 0;
2909 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2910 (unsigned long) &rfc
);
2913 case L2CAP_MODE_ERTM
:
2914 rfc
.mode
= L2CAP_MODE_ERTM
;
2915 rfc
.max_transmit
= chan
->max_tx
;
2916 rfc
.retrans_timeout
= 0;
2917 rfc
.monitor_timeout
= 0;
2919 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2920 L2CAP_EXT_HDR_SIZE
-
2923 rfc
.max_pdu_size
= cpu_to_le16(size
);
2925 l2cap_txwin_setup(chan
);
2927 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2928 L2CAP_DEFAULT_TX_WINDOW
);
2930 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2931 (unsigned long) &rfc
);
2933 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2934 l2cap_add_opt_efs(&ptr
, chan
);
2936 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2939 if (chan
->fcs
== L2CAP_FCS_NONE
||
2940 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2941 chan
->fcs
= L2CAP_FCS_NONE
;
2942 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2945 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2946 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2950 case L2CAP_MODE_STREAMING
:
2951 l2cap_txwin_setup(chan
);
2952 rfc
.mode
= L2CAP_MODE_STREAMING
;
2954 rfc
.max_transmit
= 0;
2955 rfc
.retrans_timeout
= 0;
2956 rfc
.monitor_timeout
= 0;
2958 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2959 L2CAP_EXT_HDR_SIZE
-
2962 rfc
.max_pdu_size
= cpu_to_le16(size
);
2964 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2965 (unsigned long) &rfc
);
2967 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2968 l2cap_add_opt_efs(&ptr
, chan
);
2970 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2973 if (chan
->fcs
== L2CAP_FCS_NONE
||
2974 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2975 chan
->fcs
= L2CAP_FCS_NONE
;
2976 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2981 req
->dcid
= cpu_to_le16(chan
->dcid
);
2982 req
->flags
= cpu_to_le16(0);
2987 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
2989 struct l2cap_conf_rsp
*rsp
= data
;
2990 void *ptr
= rsp
->data
;
2991 void *req
= chan
->conf_req
;
2992 int len
= chan
->conf_len
;
2993 int type
, hint
, olen
;
2995 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2996 struct l2cap_conf_efs efs
;
2998 u16 mtu
= L2CAP_DEFAULT_MTU
;
2999 u16 result
= L2CAP_CONF_SUCCESS
;
3002 BT_DBG("chan %p", chan
);
3004 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3005 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
3007 hint
= type
& L2CAP_CONF_HINT
;
3008 type
&= L2CAP_CONF_MASK
;
3011 case L2CAP_CONF_MTU
:
3015 case L2CAP_CONF_FLUSH_TO
:
3016 chan
->flush_to
= val
;
3019 case L2CAP_CONF_QOS
:
3022 case L2CAP_CONF_RFC
:
3023 if (olen
== sizeof(rfc
))
3024 memcpy(&rfc
, (void *) val
, olen
);
3027 case L2CAP_CONF_FCS
:
3028 if (val
== L2CAP_FCS_NONE
)
3029 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
3032 case L2CAP_CONF_EFS
:
3034 if (olen
== sizeof(efs
))
3035 memcpy(&efs
, (void *) val
, olen
);
3038 case L2CAP_CONF_EWS
:
3040 return -ECONNREFUSED
;
3042 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3043 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
3044 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3045 chan
->remote_tx_win
= val
;
3052 result
= L2CAP_CONF_UNKNOWN
;
3053 *((u8
*) ptr
++) = type
;
3058 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
3061 switch (chan
->mode
) {
3062 case L2CAP_MODE_STREAMING
:
3063 case L2CAP_MODE_ERTM
:
3064 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
3065 chan
->mode
= l2cap_select_mode(rfc
.mode
,
3066 chan
->conn
->feat_mask
);
3071 if (__l2cap_efs_supported(chan
))
3072 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3074 return -ECONNREFUSED
;
3077 if (chan
->mode
!= rfc
.mode
)
3078 return -ECONNREFUSED
;
3084 if (chan
->mode
!= rfc
.mode
) {
3085 result
= L2CAP_CONF_UNACCEPT
;
3086 rfc
.mode
= chan
->mode
;
3088 if (chan
->num_conf_rsp
== 1)
3089 return -ECONNREFUSED
;
3091 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3092 sizeof(rfc
), (unsigned long) &rfc
);
3095 if (result
== L2CAP_CONF_SUCCESS
) {
3096 /* Configure output options and let the other side know
3097 * which ones we don't like. */
3099 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
3100 result
= L2CAP_CONF_UNACCEPT
;
3103 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
3105 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
3108 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3109 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3110 efs
.stype
!= chan
->local_stype
) {
3112 result
= L2CAP_CONF_UNACCEPT
;
3114 if (chan
->num_conf_req
>= 1)
3115 return -ECONNREFUSED
;
3117 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3119 (unsigned long) &efs
);
3121 /* Send PENDING Conf Rsp */
3122 result
= L2CAP_CONF_PENDING
;
3123 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3128 case L2CAP_MODE_BASIC
:
3129 chan
->fcs
= L2CAP_FCS_NONE
;
3130 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3133 case L2CAP_MODE_ERTM
:
3134 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
3135 chan
->remote_tx_win
= rfc
.txwin_size
;
3137 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
3139 chan
->remote_max_tx
= rfc
.max_transmit
;
3141 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3143 L2CAP_EXT_HDR_SIZE
-
3146 rfc
.max_pdu_size
= cpu_to_le16(size
);
3147 chan
->remote_mps
= size
;
3149 rfc
.retrans_timeout
=
3150 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3151 rfc
.monitor_timeout
=
3152 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3154 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3156 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3157 sizeof(rfc
), (unsigned long) &rfc
);
3159 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3160 chan
->remote_id
= efs
.id
;
3161 chan
->remote_stype
= efs
.stype
;
3162 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3163 chan
->remote_flush_to
=
3164 le32_to_cpu(efs
.flush_to
);
3165 chan
->remote_acc_lat
=
3166 le32_to_cpu(efs
.acc_lat
);
3167 chan
->remote_sdu_itime
=
3168 le32_to_cpu(efs
.sdu_itime
);
3169 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3170 sizeof(efs
), (unsigned long) &efs
);
3174 case L2CAP_MODE_STREAMING
:
3175 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3177 L2CAP_EXT_HDR_SIZE
-
3180 rfc
.max_pdu_size
= cpu_to_le16(size
);
3181 chan
->remote_mps
= size
;
3183 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3185 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3186 sizeof(rfc
), (unsigned long) &rfc
);
3191 result
= L2CAP_CONF_UNACCEPT
;
3193 memset(&rfc
, 0, sizeof(rfc
));
3194 rfc
.mode
= chan
->mode
;
3197 if (result
== L2CAP_CONF_SUCCESS
)
3198 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3200 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3201 rsp
->result
= cpu_to_le16(result
);
3202 rsp
->flags
= cpu_to_le16(0x0000);
3207 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
3209 struct l2cap_conf_req
*req
= data
;
3210 void *ptr
= req
->data
;
3213 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3214 struct l2cap_conf_efs efs
;
3216 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3218 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3219 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3222 case L2CAP_CONF_MTU
:
3223 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3224 *result
= L2CAP_CONF_UNACCEPT
;
3225 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3228 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3231 case L2CAP_CONF_FLUSH_TO
:
3232 chan
->flush_to
= val
;
3233 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3237 case L2CAP_CONF_RFC
:
3238 if (olen
== sizeof(rfc
))
3239 memcpy(&rfc
, (void *)val
, olen
);
3241 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3242 rfc
.mode
!= chan
->mode
)
3243 return -ECONNREFUSED
;
3247 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3248 sizeof(rfc
), (unsigned long) &rfc
);
3251 case L2CAP_CONF_EWS
:
3252 chan
->tx_win
= min_t(u16
, val
,
3253 L2CAP_DEFAULT_EXT_WINDOW
);
3254 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3258 case L2CAP_CONF_EFS
:
3259 if (olen
== sizeof(efs
))
3260 memcpy(&efs
, (void *)val
, olen
);
3262 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3263 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3264 efs
.stype
!= chan
->local_stype
)
3265 return -ECONNREFUSED
;
3267 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3268 sizeof(efs
), (unsigned long) &efs
);
3273 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3274 return -ECONNREFUSED
;
3276 chan
->mode
= rfc
.mode
;
3278 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3280 case L2CAP_MODE_ERTM
:
3281 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3282 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3283 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3285 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3286 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3287 chan
->local_sdu_itime
=
3288 le32_to_cpu(efs
.sdu_itime
);
3289 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3290 chan
->local_flush_to
=
3291 le32_to_cpu(efs
.flush_to
);
3295 case L2CAP_MODE_STREAMING
:
3296 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3300 req
->dcid
= cpu_to_le16(chan
->dcid
);
3301 req
->flags
= cpu_to_le16(0x0000);
3306 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
3308 struct l2cap_conf_rsp
*rsp
= data
;
3309 void *ptr
= rsp
->data
;
3311 BT_DBG("chan %p", chan
);
3313 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3314 rsp
->result
= cpu_to_le16(result
);
3315 rsp
->flags
= cpu_to_le16(flags
);
3320 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3322 struct l2cap_conn_rsp rsp
;
3323 struct l2cap_conn
*conn
= chan
->conn
;
3326 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3327 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3328 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
3329 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
3330 l2cap_send_cmd(conn
, chan
->ident
,
3331 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3333 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3336 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3337 l2cap_build_conf_req(chan
, buf
), buf
);
3338 chan
->num_conf_req
++;
3341 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3345 struct l2cap_conf_rfc rfc
;
3347 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3349 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3352 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3353 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3356 case L2CAP_CONF_RFC
:
3357 if (olen
== sizeof(rfc
))
3358 memcpy(&rfc
, (void *)val
, olen
);
3363 /* Use sane default values in case a misbehaving remote device
3364 * did not send an RFC option.
3366 rfc
.mode
= chan
->mode
;
3367 rfc
.retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3368 rfc
.monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3369 rfc
.max_pdu_size
= cpu_to_le16(chan
->imtu
);
3371 BT_ERR("Expected RFC option was not found, using defaults");
3375 case L2CAP_MODE_ERTM
:
3376 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3377 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3378 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3380 case L2CAP_MODE_STREAMING
:
3381 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3385 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3387 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3389 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3392 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3393 cmd
->ident
== conn
->info_ident
) {
3394 cancel_delayed_work(&conn
->info_timer
);
3396 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3397 conn
->info_ident
= 0;
3399 l2cap_conn_start(conn
);
3405 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3407 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3408 struct l2cap_conn_rsp rsp
;
3409 struct l2cap_chan
*chan
= NULL
, *pchan
;
3410 struct sock
*parent
, *sk
= NULL
;
3411 int result
, status
= L2CAP_CS_NO_INFO
;
3413 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3414 __le16 psm
= req
->psm
;
3416 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3418 /* Check if we have socket listening on psm */
3419 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
3421 result
= L2CAP_CR_BAD_PSM
;
3427 mutex_lock(&conn
->chan_lock
);
3430 /* Check if the ACL is secure enough (if not SDP) */
3431 if (psm
!= cpu_to_le16(0x0001) &&
3432 !hci_conn_check_link_mode(conn
->hcon
)) {
3433 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3434 result
= L2CAP_CR_SEC_BLOCK
;
3438 result
= L2CAP_CR_NO_MEM
;
3440 /* Check for backlog size */
3441 if (sk_acceptq_is_full(parent
)) {
3442 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
3446 chan
= pchan
->ops
->new_connection(pchan
->data
);
3452 /* Check if we already have channel with that dcid */
3453 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
3454 sock_set_flag(sk
, SOCK_ZAPPED
);
3455 chan
->ops
->close(chan
->data
);
3459 hci_conn_hold(conn
->hcon
);
3461 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3462 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3466 bt_accept_enqueue(parent
, sk
);
3468 __l2cap_chan_add(conn
, chan
);
3472 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3474 chan
->ident
= cmd
->ident
;
3476 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3477 if (l2cap_chan_check_security(chan
)) {
3478 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
)) {
3479 __l2cap_state_change(chan
, BT_CONNECT2
);
3480 result
= L2CAP_CR_PEND
;
3481 status
= L2CAP_CS_AUTHOR_PEND
;
3482 parent
->sk_data_ready(parent
, 0);
3484 __l2cap_state_change(chan
, BT_CONFIG
);
3485 result
= L2CAP_CR_SUCCESS
;
3486 status
= L2CAP_CS_NO_INFO
;
3489 __l2cap_state_change(chan
, BT_CONNECT2
);
3490 result
= L2CAP_CR_PEND
;
3491 status
= L2CAP_CS_AUTHEN_PEND
;
3494 __l2cap_state_change(chan
, BT_CONNECT2
);
3495 result
= L2CAP_CR_PEND
;
3496 status
= L2CAP_CS_NO_INFO
;
3500 release_sock(parent
);
3501 mutex_unlock(&conn
->chan_lock
);
3504 rsp
.scid
= cpu_to_le16(scid
);
3505 rsp
.dcid
= cpu_to_le16(dcid
);
3506 rsp
.result
= cpu_to_le16(result
);
3507 rsp
.status
= cpu_to_le16(status
);
3508 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3510 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3511 struct l2cap_info_req info
;
3512 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3514 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3515 conn
->info_ident
= l2cap_get_ident(conn
);
3517 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3519 l2cap_send_cmd(conn
, conn
->info_ident
,
3520 L2CAP_INFO_REQ
, sizeof(info
), &info
);
3523 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3524 result
== L2CAP_CR_SUCCESS
) {
3526 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3527 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3528 l2cap_build_conf_req(chan
, buf
), buf
);
3529 chan
->num_conf_req
++;
3535 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3537 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3538 u16 scid
, dcid
, result
, status
;
3539 struct l2cap_chan
*chan
;
3543 scid
= __le16_to_cpu(rsp
->scid
);
3544 dcid
= __le16_to_cpu(rsp
->dcid
);
3545 result
= __le16_to_cpu(rsp
->result
);
3546 status
= __le16_to_cpu(rsp
->status
);
3548 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3549 dcid
, scid
, result
, status
);
3551 mutex_lock(&conn
->chan_lock
);
3554 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3560 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3569 l2cap_chan_lock(chan
);
3572 case L2CAP_CR_SUCCESS
:
3573 l2cap_state_change(chan
, BT_CONFIG
);
3576 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3578 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3581 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3582 l2cap_build_conf_req(chan
, req
), req
);
3583 chan
->num_conf_req
++;
3587 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3591 l2cap_chan_del(chan
, ECONNREFUSED
);
3595 l2cap_chan_unlock(chan
);
3598 mutex_unlock(&conn
->chan_lock
);
3603 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3605 /* FCS is enabled only in ERTM or streaming mode, if one or both
3608 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3609 chan
->fcs
= L2CAP_FCS_NONE
;
3610 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3611 chan
->fcs
= L2CAP_FCS_CRC16
;
3614 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3616 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3619 struct l2cap_chan
*chan
;
3622 dcid
= __le16_to_cpu(req
->dcid
);
3623 flags
= __le16_to_cpu(req
->flags
);
3625 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3627 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3631 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3632 struct l2cap_cmd_rej_cid rej
;
3634 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3635 rej
.scid
= cpu_to_le16(chan
->scid
);
3636 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3638 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3643 /* Reject if config buffer is too small. */
3644 len
= cmd_len
- sizeof(*req
);
3645 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3646 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3647 l2cap_build_conf_rsp(chan
, rsp
,
3648 L2CAP_CONF_REJECT
, flags
), rsp
);
3653 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3654 chan
->conf_len
+= len
;
3656 if (flags
& 0x0001) {
3657 /* Incomplete config. Send empty response. */
3658 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3659 l2cap_build_conf_rsp(chan
, rsp
,
3660 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
3664 /* Complete config. */
3665 len
= l2cap_parse_conf_req(chan
, rsp
);
3667 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3671 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3672 chan
->num_conf_rsp
++;
3674 /* Reset config buffer. */
3677 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3680 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3681 set_default_fcs(chan
);
3683 l2cap_state_change(chan
, BT_CONNECTED
);
3685 if (chan
->mode
== L2CAP_MODE_ERTM
||
3686 chan
->mode
== L2CAP_MODE_STREAMING
)
3687 err
= l2cap_ertm_init(chan
);
3690 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3692 l2cap_chan_ready(chan
);
3697 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3699 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3700 l2cap_build_conf_req(chan
, buf
), buf
);
3701 chan
->num_conf_req
++;
3704 /* Got Conf Rsp PENDING from remote side and asume we sent
3705 Conf Rsp PENDING in the code above */
3706 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3707 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3709 /* check compatibility */
3711 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3712 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3714 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3715 l2cap_build_conf_rsp(chan
, rsp
,
3716 L2CAP_CONF_SUCCESS
, 0x0000), rsp
);
3720 l2cap_chan_unlock(chan
);
3724 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3726 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3727 u16 scid
, flags
, result
;
3728 struct l2cap_chan
*chan
;
3729 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3732 scid
= __le16_to_cpu(rsp
->scid
);
3733 flags
= __le16_to_cpu(rsp
->flags
);
3734 result
= __le16_to_cpu(rsp
->result
);
3736 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3739 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3744 case L2CAP_CONF_SUCCESS
:
3745 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3746 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3749 case L2CAP_CONF_PENDING
:
3750 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3752 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3755 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3758 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3762 /* check compatibility */
3764 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3765 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3767 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3768 l2cap_build_conf_rsp(chan
, buf
,
3769 L2CAP_CONF_SUCCESS
, 0x0000), buf
);
3773 case L2CAP_CONF_UNACCEPT
:
3774 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3777 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3778 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3782 /* throw out any old stored conf requests */
3783 result
= L2CAP_CONF_SUCCESS
;
3784 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3787 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3791 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3792 L2CAP_CONF_REQ
, len
, req
);
3793 chan
->num_conf_req
++;
3794 if (result
!= L2CAP_CONF_SUCCESS
)
3800 l2cap_chan_set_err(chan
, ECONNRESET
);
3802 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
3803 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3810 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3812 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3813 set_default_fcs(chan
);
3815 l2cap_state_change(chan
, BT_CONNECTED
);
3816 if (chan
->mode
== L2CAP_MODE_ERTM
||
3817 chan
->mode
== L2CAP_MODE_STREAMING
)
3818 err
= l2cap_ertm_init(chan
);
3821 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3823 l2cap_chan_ready(chan
);
3827 l2cap_chan_unlock(chan
);
3831 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3833 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3834 struct l2cap_disconn_rsp rsp
;
3836 struct l2cap_chan
*chan
;
3839 scid
= __le16_to_cpu(req
->scid
);
3840 dcid
= __le16_to_cpu(req
->dcid
);
3842 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3844 mutex_lock(&conn
->chan_lock
);
3846 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
3848 mutex_unlock(&conn
->chan_lock
);
3852 l2cap_chan_lock(chan
);
3856 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3857 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3858 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3861 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3864 l2cap_chan_hold(chan
);
3865 l2cap_chan_del(chan
, ECONNRESET
);
3867 l2cap_chan_unlock(chan
);
3869 chan
->ops
->close(chan
->data
);
3870 l2cap_chan_put(chan
);
3872 mutex_unlock(&conn
->chan_lock
);
3877 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3879 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3881 struct l2cap_chan
*chan
;
3883 scid
= __le16_to_cpu(rsp
->scid
);
3884 dcid
= __le16_to_cpu(rsp
->dcid
);
3886 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3888 mutex_lock(&conn
->chan_lock
);
3890 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3892 mutex_unlock(&conn
->chan_lock
);
3896 l2cap_chan_lock(chan
);
3898 l2cap_chan_hold(chan
);
3899 l2cap_chan_del(chan
, 0);
3901 l2cap_chan_unlock(chan
);
3903 chan
->ops
->close(chan
->data
);
3904 l2cap_chan_put(chan
);
3906 mutex_unlock(&conn
->chan_lock
);
3911 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3913 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3916 type
= __le16_to_cpu(req
->type
);
3918 BT_DBG("type 0x%4.4x", type
);
3920 if (type
== L2CAP_IT_FEAT_MASK
) {
3922 u32 feat_mask
= l2cap_feat_mask
;
3923 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3924 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3925 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3927 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3930 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3931 | L2CAP_FEAT_EXT_WINDOW
;
3933 put_unaligned_le32(feat_mask
, rsp
->data
);
3934 l2cap_send_cmd(conn
, cmd
->ident
,
3935 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3936 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3938 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3941 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3943 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3945 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3946 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3947 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3948 l2cap_send_cmd(conn
, cmd
->ident
,
3949 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3951 struct l2cap_info_rsp rsp
;
3952 rsp
.type
= cpu_to_le16(type
);
3953 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3954 l2cap_send_cmd(conn
, cmd
->ident
,
3955 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3961 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3963 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3966 type
= __le16_to_cpu(rsp
->type
);
3967 result
= __le16_to_cpu(rsp
->result
);
3969 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3971 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3972 if (cmd
->ident
!= conn
->info_ident
||
3973 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3976 cancel_delayed_work(&conn
->info_timer
);
3978 if (result
!= L2CAP_IR_SUCCESS
) {
3979 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3980 conn
->info_ident
= 0;
3982 l2cap_conn_start(conn
);
3988 case L2CAP_IT_FEAT_MASK
:
3989 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3991 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3992 struct l2cap_info_req req
;
3993 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3995 conn
->info_ident
= l2cap_get_ident(conn
);
3997 l2cap_send_cmd(conn
, conn
->info_ident
,
3998 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4000 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4001 conn
->info_ident
= 0;
4003 l2cap_conn_start(conn
);
4007 case L2CAP_IT_FIXED_CHAN
:
4008 conn
->fixed_chan_mask
= rsp
->data
[0];
4009 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4010 conn
->info_ident
= 0;
4012 l2cap_conn_start(conn
);
4019 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4020 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4023 struct l2cap_create_chan_req
*req
= data
;
4024 struct l2cap_create_chan_rsp rsp
;
4027 if (cmd_len
!= sizeof(*req
))
4033 psm
= le16_to_cpu(req
->psm
);
4034 scid
= le16_to_cpu(req
->scid
);
4036 BT_DBG("psm %d, scid %d, amp_id %d", psm
, scid
, req
->amp_id
);
4038 /* Placeholder: Always reject */
4040 rsp
.scid
= cpu_to_le16(scid
);
4041 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
4042 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4044 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
/* A Create Channel Response has the same payload layout as a Connect
 * Response, so delegate to the existing handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					   struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4058 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
4059 u16 icid
, u16 result
)
4061 struct l2cap_move_chan_rsp rsp
;
4063 BT_DBG("icid %d, result %d", icid
, result
);
4065 rsp
.icid
= cpu_to_le16(icid
);
4066 rsp
.result
= cpu_to_le16(result
);
4068 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
4071 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
4072 struct l2cap_chan
*chan
, u16 icid
, u16 result
)
4074 struct l2cap_move_chan_cfm cfm
;
4077 BT_DBG("icid %d, result %d", icid
, result
);
4079 ident
= l2cap_get_ident(conn
);
4081 chan
->ident
= ident
;
4083 cfm
.icid
= cpu_to_le16(icid
);
4084 cfm
.result
= cpu_to_le16(result
);
4086 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
4089 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4092 struct l2cap_move_chan_cfm_rsp rsp
;
4094 BT_DBG("icid %d", icid
);
4096 rsp
.icid
= cpu_to_le16(icid
);
4097 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4100 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4101 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4103 struct l2cap_move_chan_req
*req
= data
;
4105 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4107 if (cmd_len
!= sizeof(*req
))
4110 icid
= le16_to_cpu(req
->icid
);
4112 BT_DBG("icid %d, dest_amp_id %d", icid
, req
->dest_amp_id
);
4117 /* Placeholder: Always refuse */
4118 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
4123 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
4124 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4126 struct l2cap_move_chan_rsp
*rsp
= data
;
4129 if (cmd_len
!= sizeof(*rsp
))
4132 icid
= le16_to_cpu(rsp
->icid
);
4133 result
= le16_to_cpu(rsp
->result
);
4135 BT_DBG("icid %d, result %d", icid
, result
);
4137 /* Placeholder: Always unconfirmed */
4138 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
4143 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
4144 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4146 struct l2cap_move_chan_cfm
*cfm
= data
;
4149 if (cmd_len
!= sizeof(*cfm
))
4152 icid
= le16_to_cpu(cfm
->icid
);
4153 result
= le16_to_cpu(cfm
->result
);
4155 BT_DBG("icid %d, result %d", icid
, result
);
4157 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
4162 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
4163 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4165 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
4168 if (cmd_len
!= sizeof(*rsp
))
4171 icid
= le16_to_cpu(rsp
->icid
);
4173 BT_DBG("icid %d", icid
);
4178 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
4183 if (min
> max
|| min
< 6 || max
> 3200)
4186 if (to_multiplier
< 10 || to_multiplier
> 3200)
4189 if (max
>= to_multiplier
* 8)
4192 max_latency
= (to_multiplier
* 8 / max
) - 1;
4193 if (latency
> 499 || latency
> max_latency
)
4199 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
4200 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4202 struct hci_conn
*hcon
= conn
->hcon
;
4203 struct l2cap_conn_param_update_req
*req
;
4204 struct l2cap_conn_param_update_rsp rsp
;
4205 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
4208 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
4211 cmd_len
= __le16_to_cpu(cmd
->len
);
4212 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
4215 req
= (struct l2cap_conn_param_update_req
*) data
;
4216 min
= __le16_to_cpu(req
->min
);
4217 max
= __le16_to_cpu(req
->max
);
4218 latency
= __le16_to_cpu(req
->latency
);
4219 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
4221 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4222 min
, max
, latency
, to_multiplier
);
4224 memset(&rsp
, 0, sizeof(rsp
));
4226 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
4228 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
4230 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
4232 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
4236 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
4241 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
4242 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
4246 switch (cmd
->code
) {
4247 case L2CAP_COMMAND_REJ
:
4248 l2cap_command_rej(conn
, cmd
, data
);
4251 case L2CAP_CONN_REQ
:
4252 err
= l2cap_connect_req(conn
, cmd
, data
);
4255 case L2CAP_CONN_RSP
:
4256 err
= l2cap_connect_rsp(conn
, cmd
, data
);
4259 case L2CAP_CONF_REQ
:
4260 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
4263 case L2CAP_CONF_RSP
:
4264 err
= l2cap_config_rsp(conn
, cmd
, data
);
4267 case L2CAP_DISCONN_REQ
:
4268 err
= l2cap_disconnect_req(conn
, cmd
, data
);
4271 case L2CAP_DISCONN_RSP
:
4272 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
4275 case L2CAP_ECHO_REQ
:
4276 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
4279 case L2CAP_ECHO_RSP
:
4282 case L2CAP_INFO_REQ
:
4283 err
= l2cap_information_req(conn
, cmd
, data
);
4286 case L2CAP_INFO_RSP
:
4287 err
= l2cap_information_rsp(conn
, cmd
, data
);
4290 case L2CAP_CREATE_CHAN_REQ
:
4291 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
4294 case L2CAP_CREATE_CHAN_RSP
:
4295 err
= l2cap_create_channel_rsp(conn
, cmd
, data
);
4298 case L2CAP_MOVE_CHAN_REQ
:
4299 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
4302 case L2CAP_MOVE_CHAN_RSP
:
4303 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
4306 case L2CAP_MOVE_CHAN_CFM
:
4307 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
4310 case L2CAP_MOVE_CHAN_CFM_RSP
:
4311 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
4315 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
4323 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
4324 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4326 switch (cmd
->code
) {
4327 case L2CAP_COMMAND_REJ
:
4330 case L2CAP_CONN_PARAM_UPDATE_REQ
:
4331 return l2cap_conn_param_update_req(conn
, cmd
, data
);
4333 case L2CAP_CONN_PARAM_UPDATE_RSP
:
4337 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
4342 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
4343 struct sk_buff
*skb
)
4345 u8
*data
= skb
->data
;
4347 struct l2cap_cmd_hdr cmd
;
4350 l2cap_raw_recv(conn
, skb
);
4352 while (len
>= L2CAP_CMD_HDR_SIZE
) {
4354 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
4355 data
+= L2CAP_CMD_HDR_SIZE
;
4356 len
-= L2CAP_CMD_HDR_SIZE
;
4358 cmd_len
= le16_to_cpu(cmd
.len
);
4360 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
4362 if (cmd_len
> len
|| !cmd
.ident
) {
4363 BT_DBG("corrupted command");
4367 if (conn
->hcon
->type
== LE_LINK
)
4368 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
4370 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
4373 struct l2cap_cmd_rej_unk rej
;
4375 BT_ERR("Wrong link type (%d)", err
);
4377 /* FIXME: Map err to a valid reason */
4378 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
4379 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4389 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4391 u16 our_fcs
, rcv_fcs
;
4394 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
4395 hdr_size
= L2CAP_EXT_HDR_SIZE
;
4397 hdr_size
= L2CAP_ENH_HDR_SIZE
;
4399 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
4400 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
4401 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
4402 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
4404 if (our_fcs
!= rcv_fcs
)
4410 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
4412 struct l2cap_ctrl control
;
4414 BT_DBG("chan %p", chan
);
4416 memset(&control
, 0, sizeof(control
));
4419 control
.reqseq
= chan
->buffer_seq
;
4420 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4422 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4423 control
.super
= L2CAP_SUPER_RNR
;
4424 l2cap_send_sframe(chan
, &control
);
4427 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4428 chan
->unacked_frames
> 0)
4429 __set_retrans_timer(chan
);
4431 /* Send pending iframes */
4432 l2cap_ertm_send(chan
);
4434 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
4435 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
4436 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4439 control
.super
= L2CAP_SUPER_RR
;
4440 l2cap_send_sframe(chan
, &control
);
4444 static void append_skb_frag(struct sk_buff
*skb
,
4445 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
4447 /* skb->len reflects data in skb as well as all fragments
4448 * skb->data_len reflects only data in fragments
4450 if (!skb_has_frag_list(skb
))
4451 skb_shinfo(skb
)->frag_list
= new_frag
;
4453 new_frag
->next
= NULL
;
4455 (*last_frag
)->next
= new_frag
;
4456 *last_frag
= new_frag
;
4458 skb
->len
+= new_frag
->len
;
4459 skb
->data_len
+= new_frag
->len
;
4460 skb
->truesize
+= new_frag
->truesize
;
4463 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
4464 struct l2cap_ctrl
*control
)
4468 switch (control
->sar
) {
4469 case L2CAP_SAR_UNSEGMENTED
:
4473 err
= chan
->ops
->recv(chan
->data
, skb
);
4476 case L2CAP_SAR_START
:
4480 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
4481 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
4483 if (chan
->sdu_len
> chan
->imtu
) {
4488 if (skb
->len
>= chan
->sdu_len
)
4492 chan
->sdu_last_frag
= skb
;
4498 case L2CAP_SAR_CONTINUE
:
4502 append_skb_frag(chan
->sdu
, skb
,
4503 &chan
->sdu_last_frag
);
4506 if (chan
->sdu
->len
>= chan
->sdu_len
)
4516 append_skb_frag(chan
->sdu
, skb
,
4517 &chan
->sdu_last_frag
);
4520 if (chan
->sdu
->len
!= chan
->sdu_len
)
4523 err
= chan
->ops
->recv(chan
->data
, chan
->sdu
);
4526 /* Reassembly complete */
4528 chan
->sdu_last_frag
= NULL
;
4536 kfree_skb(chan
->sdu
);
4538 chan
->sdu_last_frag
= NULL
;
4545 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
4549 if (chan
->mode
!= L2CAP_MODE_ERTM
)
4552 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
4553 l2cap_tx(chan
, NULL
, NULL
, event
);
4556 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
4559 /* Pass sequential frames to l2cap_reassemble_sdu()
4560 * until a gap is encountered.
4563 BT_DBG("chan %p", chan
);
4565 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4566 struct sk_buff
*skb
;
4567 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4568 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
4570 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
4575 skb_unlink(skb
, &chan
->srej_q
);
4576 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
4577 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
4582 if (skb_queue_empty(&chan
->srej_q
)) {
4583 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
4584 l2cap_send_ack(chan
);
4590 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
4591 struct l2cap_ctrl
*control
)
4593 struct sk_buff
*skb
;
4595 BT_DBG("chan %p, control %p", chan
, control
);
4597 if (control
->reqseq
== chan
->next_tx_seq
) {
4598 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4599 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4603 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4606 BT_DBG("Seq %d not available for retransmission",
4611 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4612 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4613 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4617 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4619 if (control
->poll
) {
4620 l2cap_pass_to_tx(chan
, control
);
4622 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4623 l2cap_retransmit(chan
, control
);
4624 l2cap_ertm_send(chan
);
4626 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4627 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4628 chan
->srej_save_reqseq
= control
->reqseq
;
4631 l2cap_pass_to_tx_fbit(chan
, control
);
4633 if (control
->final
) {
4634 if (chan
->srej_save_reqseq
!= control
->reqseq
||
4635 !test_and_clear_bit(CONN_SREJ_ACT
,
4637 l2cap_retransmit(chan
, control
);
4639 l2cap_retransmit(chan
, control
);
4640 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4641 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4642 chan
->srej_save_reqseq
= control
->reqseq
;
4648 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
4649 struct l2cap_ctrl
*control
)
4651 struct sk_buff
*skb
;
4653 BT_DBG("chan %p, control %p", chan
, control
);
4655 if (control
->reqseq
== chan
->next_tx_seq
) {
4656 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4657 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4661 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4663 if (chan
->max_tx
&& skb
&&
4664 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4665 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4666 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4670 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4672 l2cap_pass_to_tx(chan
, control
);
4674 if (control
->final
) {
4675 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4676 l2cap_retransmit_all(chan
, control
);
4678 l2cap_retransmit_all(chan
, control
);
4679 l2cap_ertm_send(chan
);
4680 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
4681 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4685 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
4687 BT_DBG("chan %p, txseq %d", chan
, txseq
);
4689 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
4690 chan
->expected_tx_seq
);
4692 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
4693 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4695 /* See notes below regarding "double poll" and
4698 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4699 BT_DBG("Invalid/Ignore - after SREJ");
4700 return L2CAP_TXSEQ_INVALID_IGNORE
;
4702 BT_DBG("Invalid - in window after SREJ sent");
4703 return L2CAP_TXSEQ_INVALID
;
4707 if (chan
->srej_list
.head
== txseq
) {
4708 BT_DBG("Expected SREJ");
4709 return L2CAP_TXSEQ_EXPECTED_SREJ
;
4712 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
4713 BT_DBG("Duplicate SREJ - txseq already stored");
4714 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
4717 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
4718 BT_DBG("Unexpected SREJ - not requested");
4719 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
4723 if (chan
->expected_tx_seq
== txseq
) {
4724 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4726 BT_DBG("Invalid - txseq outside tx window");
4727 return L2CAP_TXSEQ_INVALID
;
4730 return L2CAP_TXSEQ_EXPECTED
;
4734 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
4735 __seq_offset(chan
, chan
->expected_tx_seq
,
4736 chan
->last_acked_seq
)){
4737 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4738 return L2CAP_TXSEQ_DUPLICATE
;
4741 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
4742 /* A source of invalid packets is a "double poll" condition,
4743 * where delays cause us to send multiple poll packets. If
4744 * the remote stack receives and processes both polls,
4745 * sequence numbers can wrap around in such a way that a
4746 * resent frame has a sequence number that looks like new data
4747 * with a sequence gap. This would trigger an erroneous SREJ
4750 * Fortunately, this is impossible with a tx window that's
4751 * less than half of the maximum sequence number, which allows
4752 * invalid frames to be safely ignored.
4754 * With tx window sizes greater than half of the tx window
4755 * maximum, the frame is invalid and cannot be ignored. This
4756 * causes a disconnect.
4759 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4760 BT_DBG("Invalid/Ignore - txseq outside tx window");
4761 return L2CAP_TXSEQ_INVALID_IGNORE
;
4763 BT_DBG("Invalid - txseq outside tx window");
4764 return L2CAP_TXSEQ_INVALID
;
4767 BT_DBG("Unexpected - txseq indicates missing frames");
4768 return L2CAP_TXSEQ_UNEXPECTED
;
4772 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
4773 struct l2cap_ctrl
*control
,
4774 struct sk_buff
*skb
, u8 event
)
4777 bool skb_in_use
= 0;
4779 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4783 case L2CAP_EV_RECV_IFRAME
:
4784 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
4785 case L2CAP_TXSEQ_EXPECTED
:
4786 l2cap_pass_to_tx(chan
, control
);
4788 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4789 BT_DBG("Busy, discarding expected seq %d",
4794 chan
->expected_tx_seq
= __next_seq(chan
,
4797 chan
->buffer_seq
= chan
->expected_tx_seq
;
4800 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
4804 if (control
->final
) {
4805 if (!test_and_clear_bit(CONN_REJ_ACT
,
4806 &chan
->conn_state
)) {
4808 l2cap_retransmit_all(chan
, control
);
4809 l2cap_ertm_send(chan
);
4813 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
4814 l2cap_send_ack(chan
);
4816 case L2CAP_TXSEQ_UNEXPECTED
:
4817 l2cap_pass_to_tx(chan
, control
);
4819 /* Can't issue SREJ frames in the local busy state.
4820 * Drop this frame, it will be seen as missing
4821 * when local busy is exited.
4823 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4824 BT_DBG("Busy, discarding unexpected seq %d",
4829 /* There was a gap in the sequence, so an SREJ
4830 * must be sent for each missing frame. The
4831 * current frame is stored for later use.
4833 skb_queue_tail(&chan
->srej_q
, skb
);
4835 BT_DBG("Queued %p (queue len %d)", skb
,
4836 skb_queue_len(&chan
->srej_q
));
4838 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4839 l2cap_seq_list_clear(&chan
->srej_list
);
4840 l2cap_send_srej(chan
, control
->txseq
);
4842 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
4844 case L2CAP_TXSEQ_DUPLICATE
:
4845 l2cap_pass_to_tx(chan
, control
);
4847 case L2CAP_TXSEQ_INVALID_IGNORE
:
4849 case L2CAP_TXSEQ_INVALID
:
4851 l2cap_send_disconn_req(chan
->conn
, chan
,
4856 case L2CAP_EV_RECV_RR
:
4857 l2cap_pass_to_tx(chan
, control
);
4858 if (control
->final
) {
4859 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4861 if (!test_and_clear_bit(CONN_REJ_ACT
,
4862 &chan
->conn_state
)) {
4864 l2cap_retransmit_all(chan
, control
);
4867 l2cap_ertm_send(chan
);
4868 } else if (control
->poll
) {
4869 l2cap_send_i_or_rr_or_rnr(chan
);
4871 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4872 &chan
->conn_state
) &&
4873 chan
->unacked_frames
)
4874 __set_retrans_timer(chan
);
4876 l2cap_ertm_send(chan
);
4879 case L2CAP_EV_RECV_RNR
:
4880 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4881 l2cap_pass_to_tx(chan
, control
);
4882 if (control
&& control
->poll
) {
4883 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4884 l2cap_send_rr_or_rnr(chan
, 0);
4886 __clear_retrans_timer(chan
);
4887 l2cap_seq_list_clear(&chan
->retrans_list
);
4889 case L2CAP_EV_RECV_REJ
:
4890 l2cap_handle_rej(chan
, control
);
4892 case L2CAP_EV_RECV_SREJ
:
4893 l2cap_handle_srej(chan
, control
);
4899 if (skb
&& !skb_in_use
) {
4900 BT_DBG("Freeing %p", skb
);
4907 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
4908 struct l2cap_ctrl
*control
,
4909 struct sk_buff
*skb
, u8 event
)
4912 u16 txseq
= control
->txseq
;
4913 bool skb_in_use
= 0;
4915 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4919 case L2CAP_EV_RECV_IFRAME
:
4920 switch (l2cap_classify_txseq(chan
, txseq
)) {
4921 case L2CAP_TXSEQ_EXPECTED
:
4922 /* Keep frame for reassembly later */
4923 l2cap_pass_to_tx(chan
, control
);
4924 skb_queue_tail(&chan
->srej_q
, skb
);
4926 BT_DBG("Queued %p (queue len %d)", skb
,
4927 skb_queue_len(&chan
->srej_q
));
4929 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
4931 case L2CAP_TXSEQ_EXPECTED_SREJ
:
4932 l2cap_seq_list_pop(&chan
->srej_list
);
4934 l2cap_pass_to_tx(chan
, control
);
4935 skb_queue_tail(&chan
->srej_q
, skb
);
4937 BT_DBG("Queued %p (queue len %d)", skb
,
4938 skb_queue_len(&chan
->srej_q
));
4940 err
= l2cap_rx_queued_iframes(chan
);
4945 case L2CAP_TXSEQ_UNEXPECTED
:
4946 /* Got a frame that can't be reassembled yet.
4947 * Save it for later, and send SREJs to cover
4948 * the missing frames.
4950 skb_queue_tail(&chan
->srej_q
, skb
);
4952 BT_DBG("Queued %p (queue len %d)", skb
,
4953 skb_queue_len(&chan
->srej_q
));
4955 l2cap_pass_to_tx(chan
, control
);
4956 l2cap_send_srej(chan
, control
->txseq
);
4958 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
4959 /* This frame was requested with an SREJ, but
4960 * some expected retransmitted frames are
4961 * missing. Request retransmission of missing
4964 skb_queue_tail(&chan
->srej_q
, skb
);
4966 BT_DBG("Queued %p (queue len %d)", skb
,
4967 skb_queue_len(&chan
->srej_q
));
4969 l2cap_pass_to_tx(chan
, control
);
4970 l2cap_send_srej_list(chan
, control
->txseq
);
4972 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
4973 /* We've already queued this frame. Drop this copy. */
4974 l2cap_pass_to_tx(chan
, control
);
4976 case L2CAP_TXSEQ_DUPLICATE
:
4977 /* Expecting a later sequence number, so this frame
4978 * was already received. Ignore it completely.
4981 case L2CAP_TXSEQ_INVALID_IGNORE
:
4983 case L2CAP_TXSEQ_INVALID
:
4985 l2cap_send_disconn_req(chan
->conn
, chan
,
4990 case L2CAP_EV_RECV_RR
:
4991 l2cap_pass_to_tx(chan
, control
);
4992 if (control
->final
) {
4993 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4995 if (!test_and_clear_bit(CONN_REJ_ACT
,
4996 &chan
->conn_state
)) {
4998 l2cap_retransmit_all(chan
, control
);
5001 l2cap_ertm_send(chan
);
5002 } else if (control
->poll
) {
5003 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5004 &chan
->conn_state
) &&
5005 chan
->unacked_frames
) {
5006 __set_retrans_timer(chan
);
5009 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5010 l2cap_send_srej_tail(chan
);
5012 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5013 &chan
->conn_state
) &&
5014 chan
->unacked_frames
)
5015 __set_retrans_timer(chan
);
5017 l2cap_send_ack(chan
);
5020 case L2CAP_EV_RECV_RNR
:
5021 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5022 l2cap_pass_to_tx(chan
, control
);
5023 if (control
->poll
) {
5024 l2cap_send_srej_tail(chan
);
5026 struct l2cap_ctrl rr_control
;
5027 memset(&rr_control
, 0, sizeof(rr_control
));
5028 rr_control
.sframe
= 1;
5029 rr_control
.super
= L2CAP_SUPER_RR
;
5030 rr_control
.reqseq
= chan
->buffer_seq
;
5031 l2cap_send_sframe(chan
, &rr_control
);
5035 case L2CAP_EV_RECV_REJ
:
5036 l2cap_handle_rej(chan
, control
);
5038 case L2CAP_EV_RECV_SREJ
:
5039 l2cap_handle_srej(chan
, control
);
5043 if (skb
&& !skb_in_use
) {
5044 BT_DBG("Freeing %p", skb
);
5051 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
5053 /* Make sure reqseq is for a packet that has been sent but not acked */
5056 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
5057 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
5060 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5061 struct sk_buff
*skb
, u8 event
)
5065 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
5066 control
, skb
, event
, chan
->rx_state
);
5068 if (__valid_reqseq(chan
, control
->reqseq
)) {
5069 switch (chan
->rx_state
) {
5070 case L2CAP_RX_STATE_RECV
:
5071 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
5073 case L2CAP_RX_STATE_SREJ_SENT
:
5074 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
5082 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5083 control
->reqseq
, chan
->next_tx_seq
,
5084 chan
->expected_ack_seq
);
5085 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5091 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5092 struct sk_buff
*skb
)
5096 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
5099 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
5100 L2CAP_TXSEQ_EXPECTED
) {
5101 l2cap_pass_to_tx(chan
, control
);
5103 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
5104 __next_seq(chan
, chan
->buffer_seq
));
5106 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5108 l2cap_reassemble_sdu(chan
, skb
, control
);
5111 kfree_skb(chan
->sdu
);
5114 chan
->sdu_last_frag
= NULL
;
5118 BT_DBG("Freeing %p", skb
);
5123 chan
->last_acked_seq
= control
->txseq
;
5124 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
5129 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5131 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
5135 __unpack_control(chan
, skb
);
5140 * We can just drop the corrupted I-frame here.
5141 * Receiver will miss it and start proper recovery
5142 * procedures and ask for retransmission.
5144 if (l2cap_check_fcs(chan
, skb
))
5147 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
5148 len
-= L2CAP_SDULEN_SIZE
;
5150 if (chan
->fcs
== L2CAP_FCS_CRC16
)
5151 len
-= L2CAP_FCS_SIZE
;
5153 if (len
> chan
->mps
) {
5154 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5158 if (!control
->sframe
) {
5161 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5162 control
->sar
, control
->reqseq
, control
->final
,
5165 /* Validate F-bit - F=0 always valid, F=1 only
5166 * valid in TX WAIT_F
5168 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
5171 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
5172 event
= L2CAP_EV_RECV_IFRAME
;
5173 err
= l2cap_rx(chan
, control
, skb
, event
);
5175 err
= l2cap_stream_rx(chan
, control
, skb
);
5179 l2cap_send_disconn_req(chan
->conn
, chan
,
5182 const u8 rx_func_to_event
[4] = {
5183 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
5184 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
5187 /* Only I-frames are expected in streaming mode */
5188 if (chan
->mode
== L2CAP_MODE_STREAMING
)
5191 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5192 control
->reqseq
, control
->final
, control
->poll
,
5197 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5201 /* Validate F and P bits */
5202 if (control
->final
&& (control
->poll
||
5203 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
5206 event
= rx_func_to_event
[control
->super
];
5207 if (l2cap_rx(chan
, control
, skb
, event
))
5208 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5218 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
5220 struct l2cap_chan
*chan
;
5222 chan
= l2cap_get_chan_by_scid(conn
, cid
);
5224 BT_DBG("unknown cid 0x%4.4x", cid
);
5225 /* Drop packet and return */
5230 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5232 if (chan
->state
!= BT_CONNECTED
)
5235 switch (chan
->mode
) {
5236 case L2CAP_MODE_BASIC
:
5237 /* If socket recv buffers overflows we drop data here
5238 * which is *bad* because L2CAP has to be reliable.
5239 * But we don't have any other choice. L2CAP doesn't
5240 * provide flow control mechanism. */
5242 if (chan
->imtu
< skb
->len
)
5245 if (!chan
->ops
->recv(chan
->data
, skb
))
5249 case L2CAP_MODE_ERTM
:
5250 case L2CAP_MODE_STREAMING
:
5251 l2cap_data_rcv(chan
, skb
);
5255 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
5263 l2cap_chan_unlock(chan
);
5268 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
5270 struct l2cap_chan
*chan
;
5272 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
5276 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5278 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5281 if (chan
->imtu
< skb
->len
)
5284 if (!chan
->ops
->recv(chan
->data
, skb
))
5293 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
5294 struct sk_buff
*skb
)
5296 struct l2cap_chan
*chan
;
5298 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
5302 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5304 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5307 if (chan
->imtu
< skb
->len
)
5310 if (!chan
->ops
->recv(chan
->data
, skb
))
5319 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
5321 struct l2cap_hdr
*lh
= (void *) skb
->data
;
5325 skb_pull(skb
, L2CAP_HDR_SIZE
);
5326 cid
= __le16_to_cpu(lh
->cid
);
5327 len
= __le16_to_cpu(lh
->len
);
5329 if (len
!= skb
->len
) {
5334 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
5337 case L2CAP_CID_LE_SIGNALING
:
5338 case L2CAP_CID_SIGNALING
:
5339 l2cap_sig_channel(conn
, skb
);
5342 case L2CAP_CID_CONN_LESS
:
5343 psm
= get_unaligned((__le16
*) skb
->data
);
5345 l2cap_conless_channel(conn
, psm
, skb
);
5348 case L2CAP_CID_LE_DATA
:
5349 l2cap_att_channel(conn
, cid
, skb
);
5353 if (smp_sig_channel(conn
, skb
))
5354 l2cap_conn_del(conn
->hcon
, EACCES
);
5358 l2cap_data_channel(conn
, cid
, skb
);
5363 /* ---- L2CAP interface with lower layer (HCI) ---- */
5365 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
5367 int exact
= 0, lm1
= 0, lm2
= 0;
5368 struct l2cap_chan
*c
;
5370 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
5372 /* Find listening sockets and check their link_mode */
5373 read_lock(&chan_list_lock
);
5374 list_for_each_entry(c
, &chan_list
, global_l
) {
5375 struct sock
*sk
= c
->sk
;
5377 if (c
->state
!= BT_LISTEN
)
5380 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
5381 lm1
|= HCI_LM_ACCEPT
;
5382 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5383 lm1
|= HCI_LM_MASTER
;
5385 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
5386 lm2
|= HCI_LM_ACCEPT
;
5387 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5388 lm2
|= HCI_LM_MASTER
;
5391 read_unlock(&chan_list_lock
);
5393 return exact
? lm1
: lm2
;
5396 int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
5398 struct l2cap_conn
*conn
;
5400 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
5403 conn
= l2cap_conn_add(hcon
, status
);
5405 l2cap_conn_ready(conn
);
5407 l2cap_conn_del(hcon
, bt_to_errno(status
));
5412 int l2cap_disconn_ind(struct hci_conn
*hcon
)
5414 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5416 BT_DBG("hcon %p", hcon
);
5419 return HCI_ERROR_REMOTE_USER_TERM
;
5420 return conn
->disc_reason
;
5423 int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
5425 BT_DBG("hcon %p reason %d", hcon
, reason
);
5427 l2cap_conn_del(hcon
, bt_to_errno(reason
));
5431 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
5433 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
5436 if (encrypt
== 0x00) {
5437 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
5438 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
5439 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
5440 l2cap_chan_close(chan
, ECONNREFUSED
);
5442 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
5443 __clear_chan_timer(chan
);
5447 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
5449 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5450 struct l2cap_chan
*chan
;
5455 BT_DBG("conn %p", conn
);
5457 if (hcon
->type
== LE_LINK
) {
5458 if (!status
&& encrypt
)
5459 smp_distribute_keys(conn
, 0);
5460 cancel_delayed_work(&conn
->security_timer
);
5463 mutex_lock(&conn
->chan_lock
);
5465 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
5466 l2cap_chan_lock(chan
);
5468 BT_DBG("chan->scid %d", chan
->scid
);
5470 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
5471 if (!status
&& encrypt
) {
5472 chan
->sec_level
= hcon
->sec_level
;
5473 l2cap_chan_ready(chan
);
5476 l2cap_chan_unlock(chan
);
5480 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
5481 l2cap_chan_unlock(chan
);
5485 if (!status
&& (chan
->state
== BT_CONNECTED
||
5486 chan
->state
== BT_CONFIG
)) {
5487 struct sock
*sk
= chan
->sk
;
5489 clear_bit(BT_SK_SUSPEND
, &bt_sk(sk
)->flags
);
5490 sk
->sk_state_change(sk
);
5492 l2cap_check_encryption(chan
, encrypt
);
5493 l2cap_chan_unlock(chan
);
5497 if (chan
->state
== BT_CONNECT
) {
5499 l2cap_send_conn_req(chan
);
5501 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5503 } else if (chan
->state
== BT_CONNECT2
) {
5504 struct sock
*sk
= chan
->sk
;
5505 struct l2cap_conn_rsp rsp
;
5511 if (test_bit(BT_SK_DEFER_SETUP
,
5512 &bt_sk(sk
)->flags
)) {
5513 struct sock
*parent
= bt_sk(sk
)->parent
;
5514 res
= L2CAP_CR_PEND
;
5515 stat
= L2CAP_CS_AUTHOR_PEND
;
5517 parent
->sk_data_ready(parent
, 0);
5519 __l2cap_state_change(chan
, BT_CONFIG
);
5520 res
= L2CAP_CR_SUCCESS
;
5521 stat
= L2CAP_CS_NO_INFO
;
5524 __l2cap_state_change(chan
, BT_DISCONN
);
5525 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5526 res
= L2CAP_CR_SEC_BLOCK
;
5527 stat
= L2CAP_CS_NO_INFO
;
5532 rsp
.scid
= cpu_to_le16(chan
->dcid
);
5533 rsp
.dcid
= cpu_to_le16(chan
->scid
);
5534 rsp
.result
= cpu_to_le16(res
);
5535 rsp
.status
= cpu_to_le16(stat
);
5536 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
5540 l2cap_chan_unlock(chan
);
5543 mutex_unlock(&conn
->chan_lock
);
5548 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
5550 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5553 conn
= l2cap_conn_add(hcon
, 0);
5558 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
5560 if (!(flags
& ACL_CONT
)) {
5561 struct l2cap_hdr
*hdr
;
5565 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
5566 kfree_skb(conn
->rx_skb
);
5567 conn
->rx_skb
= NULL
;
5569 l2cap_conn_unreliable(conn
, ECOMM
);
5572 /* Start fragment always begin with Basic L2CAP header */
5573 if (skb
->len
< L2CAP_HDR_SIZE
) {
5574 BT_ERR("Frame is too short (len %d)", skb
->len
);
5575 l2cap_conn_unreliable(conn
, ECOMM
);
5579 hdr
= (struct l2cap_hdr
*) skb
->data
;
5580 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
5582 if (len
== skb
->len
) {
5583 /* Complete frame received */
5584 l2cap_recv_frame(conn
, skb
);
5588 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
5590 if (skb
->len
> len
) {
5591 BT_ERR("Frame is too long (len %d, expected len %d)",
5593 l2cap_conn_unreliable(conn
, ECOMM
);
5597 /* Allocate skb for the complete frame (with header) */
5598 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
5602 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5604 conn
->rx_len
= len
- skb
->len
;
5606 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
5608 if (!conn
->rx_len
) {
5609 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
5610 l2cap_conn_unreliable(conn
, ECOMM
);
5614 if (skb
->len
> conn
->rx_len
) {
5615 BT_ERR("Fragment is too long (len %d, expected %d)",
5616 skb
->len
, conn
->rx_len
);
5617 kfree_skb(conn
->rx_skb
);
5618 conn
->rx_skb
= NULL
;
5620 l2cap_conn_unreliable(conn
, ECOMM
);
5624 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5626 conn
->rx_len
-= skb
->len
;
5628 if (!conn
->rx_len
) {
5629 /* Complete frame received */
5630 l2cap_recv_frame(conn
, conn
->rx_skb
);
5631 conn
->rx_skb
= NULL
;
5640 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
5642 struct l2cap_chan
*c
;
5644 read_lock(&chan_list_lock
);
5646 list_for_each_entry(c
, &chan_list
, global_l
) {
5647 struct sock
*sk
= c
->sk
;
5649 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5650 batostr(&bt_sk(sk
)->src
),
5651 batostr(&bt_sk(sk
)->dst
),
5652 c
->state
, __le16_to_cpu(c
->psm
),
5653 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
5654 c
->sec_level
, c
->mode
);
5657 read_unlock(&chan_list_lock
);
5662 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
5664 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
5667 static const struct file_operations l2cap_debugfs_fops
= {
5668 .open
= l2cap_debugfs_open
,
5670 .llseek
= seq_lseek
,
5671 .release
= single_release
,
5674 static struct dentry
*l2cap_debugfs
;
5676 int __init
l2cap_init(void)
5680 err
= l2cap_init_sockets();
5685 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
5686 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
5688 BT_ERR("Failed to create L2CAP debug file");
5694 void l2cap_exit(void)
5696 debugfs_remove(l2cap_debugfs
);
5697 l2cap_cleanup_sockets();
5700 module_param(disable_ertm
, bool, 0644);
5701 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");