2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
63 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
65 static LIST_HEAD(chan_list
);
66 static DEFINE_RWLOCK(chan_list_lock
);
68 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
69 u8 code
, u8 ident
, u16 dlen
, void *data
);
70 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
72 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
73 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
74 struct l2cap_chan
*chan
, int err
);
76 /* ---- L2CAP channels ---- */
78 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
82 list_for_each_entry(c
, &conn
->chan_l
, list
) {
89 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
93 list_for_each_entry(c
, &conn
->chan_l
, list
) {
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
104 struct l2cap_chan
*c
;
106 mutex_lock(&conn
->chan_lock
);
107 c
= __l2cap_get_chan_by_scid(conn
, cid
);
110 mutex_unlock(&conn
->chan_lock
);
115 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
117 struct l2cap_chan
*c
;
119 list_for_each_entry(c
, &conn
->chan_l
, list
) {
120 if (c
->ident
== ident
)
126 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
128 struct l2cap_chan
*c
;
130 list_for_each_entry(c
, &chan_list
, global_l
) {
131 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
137 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
141 write_lock(&chan_list_lock
);
143 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
156 for (p
= 0x1001; p
< 0x1100; p
+= 2)
157 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
158 chan
->psm
= cpu_to_le16(p
);
159 chan
->sport
= cpu_to_le16(p
);
166 write_unlock(&chan_list_lock
);
170 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
172 write_lock(&chan_list_lock
);
176 write_unlock(&chan_list_lock
);
181 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
183 u16 cid
= L2CAP_CID_DYN_START
;
185 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
186 if (!__l2cap_get_chan_by_scid(conn
, cid
))
193 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
195 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
196 state_to_string(state
));
199 chan
->ops
->state_change(chan
->data
, state
);
202 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
204 struct sock
*sk
= chan
->sk
;
207 __l2cap_state_change(chan
, state
);
211 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
213 struct sock
*sk
= chan
->sk
;
218 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
220 struct sock
*sk
= chan
->sk
;
223 __l2cap_chan_set_err(chan
, err
);
227 /* ---- L2CAP sequence number lists ---- */
229 /* For ERTM, ordered lists of sequence numbers must be tracked for
230 * SREJ requests that are received and for frames that are to be
231 * retransmitted. These seq_list functions implement a singly-linked
232 * list in an array, where membership in the list can also be checked
233 * in constant time. Items can also be added to the tail of the list
234 * and removed from the head in constant time, without further memory
238 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
240 size_t alloc_size
, i
;
242 /* Allocated size is a power of 2 to map sequence numbers
243 * (which may be up to 14 bits) in to a smaller array that is
244 * sized for the negotiated ERTM transmit windows.
246 alloc_size
= roundup_pow_of_two(size
);
248 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
252 seq_list
->mask
= alloc_size
- 1;
253 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
254 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
255 for (i
= 0; i
< alloc_size
; i
++)
256 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
261 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
263 kfree(seq_list
->list
);
266 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
269 /* Constant-time check for list membership */
270 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
273 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
275 u16 mask
= seq_list
->mask
;
277 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
278 /* In case someone tries to pop the head of an empty list */
279 return L2CAP_SEQ_LIST_CLEAR
;
280 } else if (seq_list
->head
== seq
) {
281 /* Head can be removed in constant time */
282 seq_list
->head
= seq_list
->list
[seq
& mask
];
283 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
285 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
286 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
287 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
290 /* Walk the list to find the sequence number */
291 u16 prev
= seq_list
->head
;
292 while (seq_list
->list
[prev
& mask
] != seq
) {
293 prev
= seq_list
->list
[prev
& mask
];
294 if (prev
== L2CAP_SEQ_LIST_TAIL
)
295 return L2CAP_SEQ_LIST_CLEAR
;
298 /* Unlink the number from the list and clear it */
299 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
300 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
301 if (seq_list
->tail
== seq
)
302 seq_list
->tail
= prev
;
307 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
309 /* Remove the head in constant time */
310 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
313 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
317 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
320 for (i
= 0; i
<= seq_list
->mask
; i
++)
321 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
323 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
324 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
327 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
329 u16 mask
= seq_list
->mask
;
331 /* All appends happen in constant time */
333 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
336 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
337 seq_list
->head
= seq
;
339 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
341 seq_list
->tail
= seq
;
342 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
345 static void l2cap_chan_timeout(struct work_struct
*work
)
347 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
349 struct l2cap_conn
*conn
= chan
->conn
;
352 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
354 mutex_lock(&conn
->chan_lock
);
355 l2cap_chan_lock(chan
);
357 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
358 reason
= ECONNREFUSED
;
359 else if (chan
->state
== BT_CONNECT
&&
360 chan
->sec_level
!= BT_SECURITY_SDP
)
361 reason
= ECONNREFUSED
;
365 l2cap_chan_close(chan
, reason
);
367 l2cap_chan_unlock(chan
);
369 chan
->ops
->close(chan
->data
);
370 mutex_unlock(&conn
->chan_lock
);
372 l2cap_chan_put(chan
);
375 struct l2cap_chan
*l2cap_chan_create(void)
377 struct l2cap_chan
*chan
;
379 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
383 mutex_init(&chan
->lock
);
385 write_lock(&chan_list_lock
);
386 list_add(&chan
->global_l
, &chan_list
);
387 write_unlock(&chan_list_lock
);
389 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
391 chan
->state
= BT_OPEN
;
393 atomic_set(&chan
->refcnt
, 1);
395 /* This flag is cleared in l2cap_chan_ready() */
396 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
398 BT_DBG("chan %p", chan
);
403 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
405 write_lock(&chan_list_lock
);
406 list_del(&chan
->global_l
);
407 write_unlock(&chan_list_lock
);
409 l2cap_chan_put(chan
);
412 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
414 chan
->fcs
= L2CAP_FCS_CRC16
;
415 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
416 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
417 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
418 chan
->sec_level
= BT_SECURITY_LOW
;
420 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
423 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
425 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
426 __le16_to_cpu(chan
->psm
), chan
->dcid
);
428 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
432 switch (chan
->chan_type
) {
433 case L2CAP_CHAN_CONN_ORIENTED
:
434 if (conn
->hcon
->type
== LE_LINK
) {
436 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
437 chan
->scid
= L2CAP_CID_LE_DATA
;
438 chan
->dcid
= L2CAP_CID_LE_DATA
;
440 /* Alloc CID for connection-oriented socket */
441 chan
->scid
= l2cap_alloc_cid(conn
);
442 chan
->omtu
= L2CAP_DEFAULT_MTU
;
446 case L2CAP_CHAN_CONN_LESS
:
447 /* Connectionless socket */
448 chan
->scid
= L2CAP_CID_CONN_LESS
;
449 chan
->dcid
= L2CAP_CID_CONN_LESS
;
450 chan
->omtu
= L2CAP_DEFAULT_MTU
;
454 /* Raw socket can send/recv signalling messages only */
455 chan
->scid
= L2CAP_CID_SIGNALING
;
456 chan
->dcid
= L2CAP_CID_SIGNALING
;
457 chan
->omtu
= L2CAP_DEFAULT_MTU
;
460 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
461 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
462 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
463 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
464 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
465 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
467 l2cap_chan_hold(chan
);
469 list_add(&chan
->list
, &conn
->chan_l
);
472 static void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
474 mutex_lock(&conn
->chan_lock
);
475 __l2cap_chan_add(conn
, chan
);
476 mutex_unlock(&conn
->chan_lock
);
479 static void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
481 struct sock
*sk
= chan
->sk
;
482 struct l2cap_conn
*conn
= chan
->conn
;
483 struct sock
*parent
= bt_sk(sk
)->parent
;
485 __clear_chan_timer(chan
);
487 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
490 /* Delete from channel list */
491 list_del(&chan
->list
);
493 l2cap_chan_put(chan
);
496 hci_conn_put(conn
->hcon
);
501 __l2cap_state_change(chan
, BT_CLOSED
);
502 sock_set_flag(sk
, SOCK_ZAPPED
);
505 __l2cap_chan_set_err(chan
, err
);
508 bt_accept_unlink(sk
);
509 parent
->sk_data_ready(parent
, 0);
511 sk
->sk_state_change(sk
);
515 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
518 skb_queue_purge(&chan
->tx_q
);
520 if (chan
->mode
== L2CAP_MODE_ERTM
) {
521 struct srej_list
*l
, *tmp
;
523 __clear_retrans_timer(chan
);
524 __clear_monitor_timer(chan
);
525 __clear_ack_timer(chan
);
527 skb_queue_purge(&chan
->srej_q
);
529 l2cap_seq_list_free(&chan
->srej_list
);
530 l2cap_seq_list_free(&chan
->retrans_list
);
531 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
538 static void l2cap_chan_cleanup_listen(struct sock
*parent
)
542 BT_DBG("parent %p", parent
);
544 /* Close not yet accepted channels */
545 while ((sk
= bt_accept_dequeue(parent
, NULL
))) {
546 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
548 l2cap_chan_lock(chan
);
549 __clear_chan_timer(chan
);
550 l2cap_chan_close(chan
, ECONNRESET
);
551 l2cap_chan_unlock(chan
);
553 chan
->ops
->close(chan
->data
);
557 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
559 struct l2cap_conn
*conn
= chan
->conn
;
560 struct sock
*sk
= chan
->sk
;
562 BT_DBG("chan %p state %s sk %p", chan
,
563 state_to_string(chan
->state
), sk
);
565 switch (chan
->state
) {
568 l2cap_chan_cleanup_listen(sk
);
570 __l2cap_state_change(chan
, BT_CLOSED
);
571 sock_set_flag(sk
, SOCK_ZAPPED
);
577 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
578 conn
->hcon
->type
== ACL_LINK
) {
579 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
580 l2cap_send_disconn_req(conn
, chan
, reason
);
582 l2cap_chan_del(chan
, reason
);
586 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
587 conn
->hcon
->type
== ACL_LINK
) {
588 struct l2cap_conn_rsp rsp
;
591 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
))
592 result
= L2CAP_CR_SEC_BLOCK
;
594 result
= L2CAP_CR_BAD_PSM
;
595 l2cap_state_change(chan
, BT_DISCONN
);
597 rsp
.scid
= cpu_to_le16(chan
->dcid
);
598 rsp
.dcid
= cpu_to_le16(chan
->scid
);
599 rsp
.result
= cpu_to_le16(result
);
600 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
601 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
605 l2cap_chan_del(chan
, reason
);
610 l2cap_chan_del(chan
, reason
);
615 sock_set_flag(sk
, SOCK_ZAPPED
);
621 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
623 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
624 switch (chan
->sec_level
) {
625 case BT_SECURITY_HIGH
:
626 return HCI_AT_DEDICATED_BONDING_MITM
;
627 case BT_SECURITY_MEDIUM
:
628 return HCI_AT_DEDICATED_BONDING
;
630 return HCI_AT_NO_BONDING
;
632 } else if (chan
->psm
== cpu_to_le16(0x0001)) {
633 if (chan
->sec_level
== BT_SECURITY_LOW
)
634 chan
->sec_level
= BT_SECURITY_SDP
;
636 if (chan
->sec_level
== BT_SECURITY_HIGH
)
637 return HCI_AT_NO_BONDING_MITM
;
639 return HCI_AT_NO_BONDING
;
641 switch (chan
->sec_level
) {
642 case BT_SECURITY_HIGH
:
643 return HCI_AT_GENERAL_BONDING_MITM
;
644 case BT_SECURITY_MEDIUM
:
645 return HCI_AT_GENERAL_BONDING
;
647 return HCI_AT_NO_BONDING
;
652 /* Service level security */
653 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
655 struct l2cap_conn
*conn
= chan
->conn
;
658 auth_type
= l2cap_get_auth_type(chan
);
660 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
663 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
667 /* Get next available identificator.
668 * 1 - 128 are used by kernel.
669 * 129 - 199 are reserved.
670 * 200 - 254 are used by utilities like l2ping, etc.
673 spin_lock(&conn
->lock
);
675 if (++conn
->tx_ident
> 128)
680 spin_unlock(&conn
->lock
);
685 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
687 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
690 BT_DBG("code 0x%2.2x", code
);
695 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
696 flags
= ACL_START_NO_FLUSH
;
700 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
701 skb
->priority
= HCI_PRIO_MAX
;
703 hci_send_acl(conn
->hchan
, skb
, flags
);
706 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
708 struct hci_conn
*hcon
= chan
->conn
->hcon
;
711 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
714 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
715 lmp_no_flush_capable(hcon
->hdev
))
716 flags
= ACL_START_NO_FLUSH
;
720 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
721 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
724 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
726 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
727 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
729 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
732 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
733 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
740 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
741 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
748 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
750 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
751 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
753 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
756 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
757 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
764 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
765 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
772 static inline void __unpack_control(struct l2cap_chan
*chan
,
775 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
776 __unpack_extended_control(get_unaligned_le32(skb
->data
),
777 &bt_cb(skb
)->control
);
779 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
780 &bt_cb(skb
)->control
);
784 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
788 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
789 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
791 if (control
->sframe
) {
792 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
793 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
794 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
796 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
797 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
803 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
807 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
808 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
810 if (control
->sframe
) {
811 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
812 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
813 packed
|= L2CAP_CTRL_FRAME_TYPE
;
815 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
816 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
822 static inline void __pack_control(struct l2cap_chan
*chan
,
823 struct l2cap_ctrl
*control
,
826 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
827 put_unaligned_le32(__pack_extended_control(control
),
828 skb
->data
+ L2CAP_HDR_SIZE
);
830 put_unaligned_le16(__pack_enhanced_control(control
),
831 skb
->data
+ L2CAP_HDR_SIZE
);
835 static inline void l2cap_send_sframe(struct l2cap_chan
*chan
, u32 control
)
838 struct l2cap_hdr
*lh
;
839 struct l2cap_conn
*conn
= chan
->conn
;
842 if (chan
->state
!= BT_CONNECTED
)
845 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
846 hlen
= L2CAP_EXT_HDR_SIZE
;
848 hlen
= L2CAP_ENH_HDR_SIZE
;
850 if (chan
->fcs
== L2CAP_FCS_CRC16
)
851 hlen
+= L2CAP_FCS_SIZE
;
853 BT_DBG("chan %p, control 0x%8.8x", chan
, control
);
855 count
= min_t(unsigned int, conn
->mtu
, hlen
);
857 control
|= __set_sframe(chan
);
859 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
860 control
|= __set_ctrl_final(chan
);
862 if (test_and_clear_bit(CONN_SEND_PBIT
, &chan
->conn_state
))
863 control
|= __set_ctrl_poll(chan
);
865 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
869 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
870 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
871 lh
->cid
= cpu_to_le16(chan
->dcid
);
873 __put_control(chan
, control
, skb_put(skb
, __ctrl_size(chan
)));
875 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
876 u16 fcs
= crc16(0, (u8
*)lh
, count
- L2CAP_FCS_SIZE
);
877 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
880 skb
->priority
= HCI_PRIO_MAX
;
881 l2cap_do_send(chan
, skb
);
884 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, u32 control
)
886 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
887 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
888 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
890 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
892 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
894 l2cap_send_sframe(chan
, control
);
897 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
899 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
902 static void l2cap_send_conn_req(struct l2cap_chan
*chan
)
904 struct l2cap_conn
*conn
= chan
->conn
;
905 struct l2cap_conn_req req
;
907 req
.scid
= cpu_to_le16(chan
->scid
);
910 chan
->ident
= l2cap_get_ident(conn
);
912 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
914 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
917 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
919 struct sock
*sk
= chan
->sk
;
924 parent
= bt_sk(sk
)->parent
;
926 BT_DBG("sk %p, parent %p", sk
, parent
);
928 /* This clears all conf flags, including CONF_NOT_COMPLETE */
929 chan
->conf_state
= 0;
930 __clear_chan_timer(chan
);
932 __l2cap_state_change(chan
, BT_CONNECTED
);
933 sk
->sk_state_change(sk
);
936 parent
->sk_data_ready(parent
, 0);
941 static void l2cap_do_start(struct l2cap_chan
*chan
)
943 struct l2cap_conn
*conn
= chan
->conn
;
945 if (conn
->hcon
->type
== LE_LINK
) {
946 l2cap_chan_ready(chan
);
950 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
951 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
954 if (l2cap_chan_check_security(chan
) &&
955 __l2cap_no_conn_pending(chan
))
956 l2cap_send_conn_req(chan
);
958 struct l2cap_info_req req
;
959 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
961 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
962 conn
->info_ident
= l2cap_get_ident(conn
);
964 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
966 l2cap_send_cmd(conn
, conn
->info_ident
,
967 L2CAP_INFO_REQ
, sizeof(req
), &req
);
971 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
973 u32 local_feat_mask
= l2cap_feat_mask
;
975 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
978 case L2CAP_MODE_ERTM
:
979 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
980 case L2CAP_MODE_STREAMING
:
981 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
987 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
989 struct sock
*sk
= chan
->sk
;
990 struct l2cap_disconn_req req
;
995 if (chan
->mode
== L2CAP_MODE_ERTM
) {
996 __clear_retrans_timer(chan
);
997 __clear_monitor_timer(chan
);
998 __clear_ack_timer(chan
);
1001 req
.dcid
= cpu_to_le16(chan
->dcid
);
1002 req
.scid
= cpu_to_le16(chan
->scid
);
1003 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1004 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
1007 __l2cap_state_change(chan
, BT_DISCONN
);
1008 __l2cap_chan_set_err(chan
, err
);
1012 /* ---- L2CAP connections ---- */
1013 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1015 struct l2cap_chan
*chan
, *tmp
;
1017 BT_DBG("conn %p", conn
);
1019 mutex_lock(&conn
->chan_lock
);
1021 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1022 struct sock
*sk
= chan
->sk
;
1024 l2cap_chan_lock(chan
);
1026 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1027 l2cap_chan_unlock(chan
);
1031 if (chan
->state
== BT_CONNECT
) {
1032 if (!l2cap_chan_check_security(chan
) ||
1033 !__l2cap_no_conn_pending(chan
)) {
1034 l2cap_chan_unlock(chan
);
1038 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1039 && test_bit(CONF_STATE2_DEVICE
,
1040 &chan
->conf_state
)) {
1041 l2cap_chan_close(chan
, ECONNRESET
);
1042 l2cap_chan_unlock(chan
);
1046 l2cap_send_conn_req(chan
);
1048 } else if (chan
->state
== BT_CONNECT2
) {
1049 struct l2cap_conn_rsp rsp
;
1051 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1052 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1054 if (l2cap_chan_check_security(chan
)) {
1056 if (test_bit(BT_SK_DEFER_SETUP
,
1057 &bt_sk(sk
)->flags
)) {
1058 struct sock
*parent
= bt_sk(sk
)->parent
;
1059 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1060 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1062 parent
->sk_data_ready(parent
, 0);
1065 __l2cap_state_change(chan
, BT_CONFIG
);
1066 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1067 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1071 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1072 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1075 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1078 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1079 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1080 l2cap_chan_unlock(chan
);
1084 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1085 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1086 l2cap_build_conf_req(chan
, buf
), buf
);
1087 chan
->num_conf_req
++;
1090 l2cap_chan_unlock(chan
);
1093 mutex_unlock(&conn
->chan_lock
);
1096 /* Find socket with cid and source/destination bdaddr.
1097 * Returns closest match, locked.
1099 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1103 struct l2cap_chan
*c
, *c1
= NULL
;
1105 read_lock(&chan_list_lock
);
1107 list_for_each_entry(c
, &chan_list
, global_l
) {
1108 struct sock
*sk
= c
->sk
;
1110 if (state
&& c
->state
!= state
)
1113 if (c
->scid
== cid
) {
1114 int src_match
, dst_match
;
1115 int src_any
, dst_any
;
1118 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1119 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1120 if (src_match
&& dst_match
) {
1121 read_unlock(&chan_list_lock
);
1126 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1127 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1128 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1129 (src_any
&& dst_any
))
1134 read_unlock(&chan_list_lock
);
1139 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1141 struct sock
*parent
, *sk
;
1142 struct l2cap_chan
*chan
, *pchan
;
1146 /* Check if we have socket listening on cid */
1147 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1148 conn
->src
, conn
->dst
);
1156 /* Check for backlog size */
1157 if (sk_acceptq_is_full(parent
)) {
1158 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
1162 chan
= pchan
->ops
->new_connection(pchan
->data
);
1168 hci_conn_hold(conn
->hcon
);
1170 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1171 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1173 bt_accept_enqueue(parent
, sk
);
1175 l2cap_chan_add(conn
, chan
);
1177 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1179 __l2cap_state_change(chan
, BT_CONNECTED
);
1180 parent
->sk_data_ready(parent
, 0);
1183 release_sock(parent
);
1186 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1188 struct l2cap_chan
*chan
;
1190 BT_DBG("conn %p", conn
);
1192 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1193 l2cap_le_conn_ready(conn
);
1195 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1196 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
1198 mutex_lock(&conn
->chan_lock
);
1200 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1202 l2cap_chan_lock(chan
);
1204 if (conn
->hcon
->type
== LE_LINK
) {
1205 if (smp_conn_security(conn
, chan
->sec_level
))
1206 l2cap_chan_ready(chan
);
1208 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1209 struct sock
*sk
= chan
->sk
;
1210 __clear_chan_timer(chan
);
1212 __l2cap_state_change(chan
, BT_CONNECTED
);
1213 sk
->sk_state_change(sk
);
1216 } else if (chan
->state
== BT_CONNECT
)
1217 l2cap_do_start(chan
);
1219 l2cap_chan_unlock(chan
);
1222 mutex_unlock(&conn
->chan_lock
);
1225 /* Notify sockets that we cannot guaranty reliability anymore */
1226 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1228 struct l2cap_chan
*chan
;
1230 BT_DBG("conn %p", conn
);
1232 mutex_lock(&conn
->chan_lock
);
1234 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1235 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1236 __l2cap_chan_set_err(chan
, err
);
1239 mutex_unlock(&conn
->chan_lock
);
1242 static void l2cap_info_timeout(struct work_struct
*work
)
1244 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1247 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1248 conn
->info_ident
= 0;
1250 l2cap_conn_start(conn
);
1253 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1255 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1256 struct l2cap_chan
*chan
, *l
;
1261 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1263 kfree_skb(conn
->rx_skb
);
1265 mutex_lock(&conn
->chan_lock
);
1268 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1269 l2cap_chan_hold(chan
);
1270 l2cap_chan_lock(chan
);
1272 l2cap_chan_del(chan
, err
);
1274 l2cap_chan_unlock(chan
);
1276 chan
->ops
->close(chan
->data
);
1277 l2cap_chan_put(chan
);
1280 mutex_unlock(&conn
->chan_lock
);
1282 hci_chan_del(conn
->hchan
);
1284 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1285 cancel_delayed_work_sync(&conn
->info_timer
);
1287 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1288 cancel_delayed_work_sync(&conn
->security_timer
);
1289 smp_chan_destroy(conn
);
1292 hcon
->l2cap_data
= NULL
;
1296 static void security_timeout(struct work_struct
*work
)
1298 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1299 security_timer
.work
);
1301 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1304 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1306 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1307 struct hci_chan
*hchan
;
1312 hchan
= hci_chan_create(hcon
);
1316 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
1318 hci_chan_del(hchan
);
1322 hcon
->l2cap_data
= conn
;
1324 conn
->hchan
= hchan
;
1326 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1328 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
1329 conn
->mtu
= hcon
->hdev
->le_mtu
;
1331 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1333 conn
->src
= &hcon
->hdev
->bdaddr
;
1334 conn
->dst
= &hcon
->dst
;
1336 conn
->feat_mask
= 0;
1338 spin_lock_init(&conn
->lock
);
1339 mutex_init(&conn
->chan_lock
);
1341 INIT_LIST_HEAD(&conn
->chan_l
);
1343 if (hcon
->type
== LE_LINK
)
1344 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1346 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1348 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1353 /* ---- Socket interface ---- */
1355 /* Find socket with psm and source / destination bdaddr.
1356 * Returns closest match.
1358 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1362 struct l2cap_chan
*c
, *c1
= NULL
;
1364 read_lock(&chan_list_lock
);
1366 list_for_each_entry(c
, &chan_list
, global_l
) {
1367 struct sock
*sk
= c
->sk
;
1369 if (state
&& c
->state
!= state
)
1372 if (c
->psm
== psm
) {
1373 int src_match
, dst_match
;
1374 int src_any
, dst_any
;
1377 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1378 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1379 if (src_match
&& dst_match
) {
1380 read_unlock(&chan_list_lock
);
1385 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1386 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1387 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1388 (src_any
&& dst_any
))
1393 read_unlock(&chan_list_lock
);
1398 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1399 bdaddr_t
*dst
, u8 dst_type
)
1401 struct sock
*sk
= chan
->sk
;
1402 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1403 struct l2cap_conn
*conn
;
1404 struct hci_conn
*hcon
;
1405 struct hci_dev
*hdev
;
1409 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src
), batostr(dst
),
1410 dst_type
, __le16_to_cpu(chan
->psm
));
1412 hdev
= hci_get_route(dst
, src
);
1414 return -EHOSTUNREACH
;
1418 l2cap_chan_lock(chan
);
1420 /* PSM must be odd and lsb of upper byte must be 0 */
1421 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1422 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1427 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1432 switch (chan
->mode
) {
1433 case L2CAP_MODE_BASIC
:
1435 case L2CAP_MODE_ERTM
:
1436 case L2CAP_MODE_STREAMING
:
1447 switch (sk
->sk_state
) {
1451 /* Already connecting */
1457 /* Already connected */
1473 /* Set destination address and psm */
1474 bacpy(&bt_sk(sk
)->dst
, dst
);
1481 auth_type
= l2cap_get_auth_type(chan
);
1483 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1484 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1485 chan
->sec_level
, auth_type
);
1487 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1488 chan
->sec_level
, auth_type
);
1491 err
= PTR_ERR(hcon
);
1495 conn
= l2cap_conn_add(hcon
, 0);
1502 if (hcon
->type
== LE_LINK
) {
1505 if (!list_empty(&conn
->chan_l
)) {
1514 /* Update source addr of the socket */
1515 bacpy(src
, conn
->src
);
1517 l2cap_chan_unlock(chan
);
1518 l2cap_chan_add(conn
, chan
);
1519 l2cap_chan_lock(chan
);
1521 l2cap_state_change(chan
, BT_CONNECT
);
1522 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1524 if (hcon
->state
== BT_CONNECTED
) {
1525 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1526 __clear_chan_timer(chan
);
1527 if (l2cap_chan_check_security(chan
))
1528 l2cap_state_change(chan
, BT_CONNECTED
);
1530 l2cap_do_start(chan
);
1536 l2cap_chan_unlock(chan
);
1537 hci_dev_unlock(hdev
);
1542 int __l2cap_wait_ack(struct sock
*sk
)
1544 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1545 DECLARE_WAITQUEUE(wait
, current
);
1549 add_wait_queue(sk_sleep(sk
), &wait
);
1550 set_current_state(TASK_INTERRUPTIBLE
);
1551 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1555 if (signal_pending(current
)) {
1556 err
= sock_intr_errno(timeo
);
1561 timeo
= schedule_timeout(timeo
);
1563 set_current_state(TASK_INTERRUPTIBLE
);
1565 err
= sock_error(sk
);
1569 set_current_state(TASK_RUNNING
);
1570 remove_wait_queue(sk_sleep(sk
), &wait
);
1574 static void l2cap_monitor_timeout(struct work_struct
*work
)
1576 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1577 monitor_timer
.work
);
1579 BT_DBG("chan %p", chan
);
1581 l2cap_chan_lock(chan
);
1583 if (chan
->retry_count
>= chan
->remote_max_tx
) {
1584 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1585 l2cap_chan_unlock(chan
);
1586 l2cap_chan_put(chan
);
1590 chan
->retry_count
++;
1591 __set_monitor_timer(chan
);
1593 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1594 l2cap_chan_unlock(chan
);
1595 l2cap_chan_put(chan
);
1598 static void l2cap_retrans_timeout(struct work_struct
*work
)
1600 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1601 retrans_timer
.work
);
1603 BT_DBG("chan %p", chan
);
1605 l2cap_chan_lock(chan
);
1607 chan
->retry_count
= 1;
1608 __set_monitor_timer(chan
);
1610 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
1612 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1614 l2cap_chan_unlock(chan
);
1615 l2cap_chan_put(chan
);
1618 static void l2cap_drop_acked_frames(struct l2cap_chan
*chan
)
1620 struct sk_buff
*skb
;
1622 while ((skb
= skb_peek(&chan
->tx_q
)) &&
1623 chan
->unacked_frames
) {
1624 if (bt_cb(skb
)->control
.txseq
== chan
->expected_ack_seq
)
1627 skb
= skb_dequeue(&chan
->tx_q
);
1630 chan
->unacked_frames
--;
1633 if (!chan
->unacked_frames
)
1634 __clear_retrans_timer(chan
);
1637 static void l2cap_streaming_send(struct l2cap_chan
*chan
)
1639 struct sk_buff
*skb
;
1643 while ((skb
= skb_dequeue(&chan
->tx_q
))) {
1644 control
= __get_control(chan
, skb
->data
+ L2CAP_HDR_SIZE
);
1645 control
|= __set_txseq(chan
, chan
->next_tx_seq
);
1646 control
|= __set_ctrl_sar(chan
, bt_cb(skb
)->control
.sar
);
1647 __put_control(chan
, control
, skb
->data
+ L2CAP_HDR_SIZE
);
1649 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1650 fcs
= crc16(0, (u8
*)skb
->data
,
1651 skb
->len
- L2CAP_FCS_SIZE
);
1652 put_unaligned_le16(fcs
,
1653 skb
->data
+ skb
->len
- L2CAP_FCS_SIZE
);
1656 l2cap_do_send(chan
, skb
);
1658 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1662 static void l2cap_retransmit_one_frame(struct l2cap_chan
*chan
, u16 tx_seq
)
1664 struct sk_buff
*skb
, *tx_skb
;
1668 skb
= skb_peek(&chan
->tx_q
);
1672 while (bt_cb(skb
)->control
.txseq
!= tx_seq
) {
1673 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1676 skb
= skb_queue_next(&chan
->tx_q
, skb
);
1679 if (bt_cb(skb
)->control
.retries
== chan
->remote_max_tx
&&
1680 chan
->remote_max_tx
) {
1681 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1685 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1686 bt_cb(skb
)->control
.retries
++;
1688 control
= __get_control(chan
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1689 control
&= __get_sar_mask(chan
);
1691 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1692 control
|= __set_ctrl_final(chan
);
1694 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1695 control
|= __set_txseq(chan
, tx_seq
);
1697 __put_control(chan
, control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1699 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1700 fcs
= crc16(0, (u8
*)tx_skb
->data
,
1701 tx_skb
->len
- L2CAP_FCS_SIZE
);
1702 put_unaligned_le16(fcs
,
1703 tx_skb
->data
+ tx_skb
->len
- L2CAP_FCS_SIZE
);
1706 l2cap_do_send(chan
, tx_skb
);
1709 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1711 struct sk_buff
*skb
, *tx_skb
;
1716 if (chan
->state
!= BT_CONNECTED
)
1719 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1722 while ((skb
= chan
->tx_send_head
) && (!l2cap_tx_window_full(chan
))) {
1724 if (bt_cb(skb
)->control
.retries
== chan
->remote_max_tx
&&
1725 chan
->remote_max_tx
) {
1726 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1730 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1732 bt_cb(skb
)->control
.retries
++;
1734 control
= __get_control(chan
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1735 control
&= __get_sar_mask(chan
);
1737 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1738 control
|= __set_ctrl_final(chan
);
1740 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1741 control
|= __set_txseq(chan
, chan
->next_tx_seq
);
1742 control
|= __set_ctrl_sar(chan
, bt_cb(skb
)->control
.sar
);
1744 __put_control(chan
, control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1746 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1747 fcs
= crc16(0, (u8
*)skb
->data
,
1748 tx_skb
->len
- L2CAP_FCS_SIZE
);
1749 put_unaligned_le16(fcs
, skb
->data
+
1750 tx_skb
->len
- L2CAP_FCS_SIZE
);
1753 l2cap_do_send(chan
, tx_skb
);
1755 __set_retrans_timer(chan
);
1757 bt_cb(skb
)->control
.txseq
= chan
->next_tx_seq
;
1759 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1761 if (bt_cb(skb
)->control
.retries
== 1) {
1762 chan
->unacked_frames
++;
1765 __clear_ack_timer(chan
);
1768 chan
->frames_sent
++;
1770 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1771 chan
->tx_send_head
= NULL
;
1773 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1779 static int l2cap_retransmit_frames(struct l2cap_chan
*chan
)
1783 if (!skb_queue_empty(&chan
->tx_q
))
1784 chan
->tx_send_head
= chan
->tx_q
.next
;
1786 chan
->next_tx_seq
= chan
->expected_ack_seq
;
1787 ret
= l2cap_ertm_send(chan
);
1791 static void __l2cap_send_ack(struct l2cap_chan
*chan
)
1795 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1797 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
1798 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
1799 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1800 l2cap_send_sframe(chan
, control
);
1804 if (l2cap_ertm_send(chan
) > 0)
1807 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
1808 l2cap_send_sframe(chan
, control
);
/* Send an acknowledgement now and cancel the pending delayed-ack
 * timer, since the ack it would have sent is no longer needed.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	__clear_ack_timer(chan);
	__l2cap_send_ack(chan);
}
1817 static void l2cap_send_srejtail(struct l2cap_chan
*chan
)
1819 struct srej_list
*tail
;
1822 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
1823 control
|= __set_ctrl_final(chan
);
1825 tail
= list_entry((&chan
->srej_l
)->prev
, struct srej_list
, list
);
1826 control
|= __set_reqseq(chan
, tail
->tx_seq
);
1828 l2cap_send_sframe(chan
, control
);
1831 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
1832 struct msghdr
*msg
, int len
,
1833 int count
, struct sk_buff
*skb
)
1835 struct l2cap_conn
*conn
= chan
->conn
;
1836 struct sk_buff
**frag
;
1839 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1845 /* Continuation fragments (no L2CAP header) */
1846 frag
= &skb_shinfo(skb
)->frag_list
;
1848 struct sk_buff
*tmp
;
1850 count
= min_t(unsigned int, conn
->mtu
, len
);
1852 tmp
= chan
->ops
->alloc_skb(chan
, count
,
1853 msg
->msg_flags
& MSG_DONTWAIT
);
1855 return PTR_ERR(tmp
);
1859 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1862 (*frag
)->priority
= skb
->priority
;
1867 skb
->len
+= (*frag
)->len
;
1868 skb
->data_len
+= (*frag
)->len
;
1870 frag
= &(*frag
)->next
;
1876 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
1877 struct msghdr
*msg
, size_t len
,
1880 struct l2cap_conn
*conn
= chan
->conn
;
1881 struct sk_buff
*skb
;
1882 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
1883 struct l2cap_hdr
*lh
;
1885 BT_DBG("chan %p len %d priority %u", chan
, (int)len
, priority
);
1887 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1889 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
1890 msg
->msg_flags
& MSG_DONTWAIT
);
1894 skb
->priority
= priority
;
1896 /* Create L2CAP header */
1897 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1898 lh
->cid
= cpu_to_le16(chan
->dcid
);
1899 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
1900 put_unaligned(chan
->psm
, skb_put(skb
, L2CAP_PSMLEN_SIZE
));
1902 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1903 if (unlikely(err
< 0)) {
1905 return ERR_PTR(err
);
1910 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
1911 struct msghdr
*msg
, size_t len
,
1914 struct l2cap_conn
*conn
= chan
->conn
;
1915 struct sk_buff
*skb
;
1917 struct l2cap_hdr
*lh
;
1919 BT_DBG("chan %p len %d", chan
, (int)len
);
1921 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
1923 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
1924 msg
->msg_flags
& MSG_DONTWAIT
);
1928 skb
->priority
= priority
;
1930 /* Create L2CAP header */
1931 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1932 lh
->cid
= cpu_to_le16(chan
->dcid
);
1933 lh
->len
= cpu_to_le16(len
);
1935 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1936 if (unlikely(err
< 0)) {
1938 return ERR_PTR(err
);
1943 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
1944 struct msghdr
*msg
, size_t len
,
1947 struct l2cap_conn
*conn
= chan
->conn
;
1948 struct sk_buff
*skb
;
1949 int err
, count
, hlen
;
1950 struct l2cap_hdr
*lh
;
1952 BT_DBG("chan %p len %d", chan
, (int)len
);
1955 return ERR_PTR(-ENOTCONN
);
1957 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1958 hlen
= L2CAP_EXT_HDR_SIZE
;
1960 hlen
= L2CAP_ENH_HDR_SIZE
;
1963 hlen
+= L2CAP_SDULEN_SIZE
;
1965 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1966 hlen
+= L2CAP_FCS_SIZE
;
1968 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1970 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
1971 msg
->msg_flags
& MSG_DONTWAIT
);
1975 /* Create L2CAP header */
1976 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1977 lh
->cid
= cpu_to_le16(chan
->dcid
);
1978 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1980 __put_control(chan
, 0, skb_put(skb
, __ctrl_size(chan
)));
1983 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
1985 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1986 if (unlikely(err
< 0)) {
1988 return ERR_PTR(err
);
1991 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1992 put_unaligned_le16(0, skb_put(skb
, L2CAP_FCS_SIZE
));
1994 bt_cb(skb
)->control
.retries
= 0;
1998 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
1999 struct sk_buff_head
*seg_queue
,
2000 struct msghdr
*msg
, size_t len
)
2002 struct sk_buff
*skb
;
2008 BT_DBG("chan %p, msg %p, len %d", chan
, msg
, (int)len
);
2010 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2011 * so fragmented skbs are not used. The HCI layer's handling
2012 * of fragmented skbs is not compatible with ERTM's queueing.
2015 /* PDU size is derived from the HCI MTU */
2016 pdu_len
= chan
->conn
->mtu
;
2018 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2020 /* Adjust for largest possible L2CAP overhead. */
2021 pdu_len
-= L2CAP_EXT_HDR_SIZE
+ L2CAP_FCS_SIZE
;
2023 /* Remote device may have requested smaller PDUs */
2024 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2026 if (len
<= pdu_len
) {
2027 sar
= L2CAP_SAR_UNSEGMENTED
;
2031 sar
= L2CAP_SAR_START
;
2033 pdu_len
-= L2CAP_SDULEN_SIZE
;
2037 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2040 __skb_queue_purge(seg_queue
);
2041 return PTR_ERR(skb
);
2044 bt_cb(skb
)->control
.sar
= sar
;
2045 __skb_queue_tail(seg_queue
, skb
);
2050 pdu_len
+= L2CAP_SDULEN_SIZE
;
2053 if (len
<= pdu_len
) {
2054 sar
= L2CAP_SAR_END
;
2057 sar
= L2CAP_SAR_CONTINUE
;
2064 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2067 struct sk_buff
*skb
;
2069 struct sk_buff_head seg_queue
;
2071 /* Connectionless channel */
2072 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2073 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2075 return PTR_ERR(skb
);
2077 l2cap_do_send(chan
, skb
);
2081 switch (chan
->mode
) {
2082 case L2CAP_MODE_BASIC
:
2083 /* Check outgoing MTU */
2084 if (len
> chan
->omtu
)
2087 /* Create a basic PDU */
2088 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2090 return PTR_ERR(skb
);
2092 l2cap_do_send(chan
, skb
);
2096 case L2CAP_MODE_ERTM
:
2097 case L2CAP_MODE_STREAMING
:
2098 /* Check outgoing MTU */
2099 if (len
> chan
->omtu
) {
2104 __skb_queue_head_init(&seg_queue
);
2106 /* Do segmentation before calling in to the state machine,
2107 * since it's possible to block while waiting for memory
2110 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2112 /* The channel could have been closed while segmenting,
2113 * check that it is still connected.
2115 if (chan
->state
!= BT_CONNECTED
) {
2116 __skb_queue_purge(&seg_queue
);
2123 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->tx_send_head
== NULL
)
2124 chan
->tx_send_head
= seg_queue
.next
;
2125 skb_queue_splice_tail_init(&seg_queue
, &chan
->tx_q
);
2127 if (chan
->mode
== L2CAP_MODE_ERTM
)
2128 err
= l2cap_ertm_send(chan
);
2130 l2cap_streaming_send(chan
);
2135 /* If the skbs were not queued for sending, they'll still be in
2136 * seg_queue and need to be purged.
2138 __skb_queue_purge(&seg_queue
);
2142 BT_DBG("bad state %1.1x", chan
->mode
);
2149 /* Copy frame to all raw sockets on that connection */
2150 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2152 struct sk_buff
*nskb
;
2153 struct l2cap_chan
*chan
;
2155 BT_DBG("conn %p", conn
);
2157 mutex_lock(&conn
->chan_lock
);
2159 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2160 struct sock
*sk
= chan
->sk
;
2161 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2164 /* Don't send frame to the socket it came from */
2167 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2171 if (chan
->ops
->recv(chan
->data
, nskb
))
2175 mutex_unlock(&conn
->chan_lock
);
2178 /* ---- L2CAP signalling commands ---- */
2179 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2180 u8 code
, u8 ident
, u16 dlen
, void *data
)
2182 struct sk_buff
*skb
, **frag
;
2183 struct l2cap_cmd_hdr
*cmd
;
2184 struct l2cap_hdr
*lh
;
2187 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2188 conn
, code
, ident
, dlen
);
2190 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2191 count
= min_t(unsigned int, conn
->mtu
, len
);
2193 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2197 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2198 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2200 if (conn
->hcon
->type
== LE_LINK
)
2201 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2203 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2205 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2208 cmd
->len
= cpu_to_le16(dlen
);
2211 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2212 memcpy(skb_put(skb
, count
), data
, count
);
2218 /* Continuation fragments (no L2CAP header) */
2219 frag
= &skb_shinfo(skb
)->frag_list
;
2221 count
= min_t(unsigned int, conn
->mtu
, len
);
2223 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2227 memcpy(skb_put(*frag
, count
), data
, count
);
2232 frag
= &(*frag
)->next
;
2242 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2244 struct l2cap_conf_opt
*opt
= *ptr
;
2247 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2255 *val
= *((u8
*) opt
->val
);
2259 *val
= get_unaligned_le16(opt
->val
);
2263 *val
= get_unaligned_le32(opt
->val
);
2267 *val
= (unsigned long) opt
->val
;
2271 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2275 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2277 struct l2cap_conf_opt
*opt
= *ptr
;
2279 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2286 *((u8
*) opt
->val
) = val
;
2290 put_unaligned_le16(val
, opt
->val
);
2294 put_unaligned_le32(val
, opt
->val
);
2298 memcpy(opt
->val
, (void *) val
, len
);
2302 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2305 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2307 struct l2cap_conf_efs efs
;
2309 switch (chan
->mode
) {
2310 case L2CAP_MODE_ERTM
:
2311 efs
.id
= chan
->local_id
;
2312 efs
.stype
= chan
->local_stype
;
2313 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2314 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2315 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2316 efs
.flush_to
= cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO
);
2319 case L2CAP_MODE_STREAMING
:
2321 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2322 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2323 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2332 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
2333 (unsigned long) &efs
);
2336 static void l2cap_ack_timeout(struct work_struct
*work
)
2338 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2341 BT_DBG("chan %p", chan
);
2343 l2cap_chan_lock(chan
);
2345 __l2cap_send_ack(chan
);
2347 l2cap_chan_unlock(chan
);
2349 l2cap_chan_put(chan
);
2352 static inline int l2cap_ertm_init(struct l2cap_chan
*chan
)
2356 chan
->next_tx_seq
= 0;
2357 chan
->expected_tx_seq
= 0;
2358 chan
->expected_ack_seq
= 0;
2359 chan
->unacked_frames
= 0;
2360 chan
->buffer_seq
= 0;
2361 chan
->num_acked
= 0;
2362 chan
->frames_sent
= 0;
2363 chan
->last_acked_seq
= 0;
2365 chan
->sdu_last_frag
= NULL
;
2368 skb_queue_head_init(&chan
->tx_q
);
2370 if (chan
->mode
!= L2CAP_MODE_ERTM
)
2373 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2374 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2376 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
2377 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
2378 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2380 skb_queue_head_init(&chan
->srej_q
);
2382 INIT_LIST_HEAD(&chan
->srej_l
);
2383 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
2387 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
2389 l2cap_seq_list_free(&chan
->srej_list
);
2394 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2397 case L2CAP_MODE_STREAMING
:
2398 case L2CAP_MODE_ERTM
:
2399 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2403 return L2CAP_MODE_BASIC
;
2407 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
2409 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
2412 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
2414 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
2417 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
2419 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
2420 __l2cap_ews_supported(chan
)) {
2421 /* use extended control field */
2422 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2423 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2425 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
2426 L2CAP_DEFAULT_TX_WINDOW
);
2427 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2431 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2433 struct l2cap_conf_req
*req
= data
;
2434 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2435 void *ptr
= req
->data
;
2438 BT_DBG("chan %p", chan
);
2440 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2443 switch (chan
->mode
) {
2444 case L2CAP_MODE_STREAMING
:
2445 case L2CAP_MODE_ERTM
:
2446 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2449 if (__l2cap_efs_supported(chan
))
2450 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2454 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2459 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2460 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2462 switch (chan
->mode
) {
2463 case L2CAP_MODE_BASIC
:
2464 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2465 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2468 rfc
.mode
= L2CAP_MODE_BASIC
;
2470 rfc
.max_transmit
= 0;
2471 rfc
.retrans_timeout
= 0;
2472 rfc
.monitor_timeout
= 0;
2473 rfc
.max_pdu_size
= 0;
2475 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2476 (unsigned long) &rfc
);
2479 case L2CAP_MODE_ERTM
:
2480 rfc
.mode
= L2CAP_MODE_ERTM
;
2481 rfc
.max_transmit
= chan
->max_tx
;
2482 rfc
.retrans_timeout
= 0;
2483 rfc
.monitor_timeout
= 0;
2485 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2486 L2CAP_EXT_HDR_SIZE
-
2489 rfc
.max_pdu_size
= cpu_to_le16(size
);
2491 l2cap_txwin_setup(chan
);
2493 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2494 L2CAP_DEFAULT_TX_WINDOW
);
2496 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2497 (unsigned long) &rfc
);
2499 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2500 l2cap_add_opt_efs(&ptr
, chan
);
2502 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2505 if (chan
->fcs
== L2CAP_FCS_NONE
||
2506 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2507 chan
->fcs
= L2CAP_FCS_NONE
;
2508 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2511 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2512 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2516 case L2CAP_MODE_STREAMING
:
2517 rfc
.mode
= L2CAP_MODE_STREAMING
;
2519 rfc
.max_transmit
= 0;
2520 rfc
.retrans_timeout
= 0;
2521 rfc
.monitor_timeout
= 0;
2523 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2524 L2CAP_EXT_HDR_SIZE
-
2527 rfc
.max_pdu_size
= cpu_to_le16(size
);
2529 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2530 (unsigned long) &rfc
);
2532 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2533 l2cap_add_opt_efs(&ptr
, chan
);
2535 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2538 if (chan
->fcs
== L2CAP_FCS_NONE
||
2539 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2540 chan
->fcs
= L2CAP_FCS_NONE
;
2541 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2546 req
->dcid
= cpu_to_le16(chan
->dcid
);
2547 req
->flags
= cpu_to_le16(0);
2552 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
2554 struct l2cap_conf_rsp
*rsp
= data
;
2555 void *ptr
= rsp
->data
;
2556 void *req
= chan
->conf_req
;
2557 int len
= chan
->conf_len
;
2558 int type
, hint
, olen
;
2560 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2561 struct l2cap_conf_efs efs
;
2563 u16 mtu
= L2CAP_DEFAULT_MTU
;
2564 u16 result
= L2CAP_CONF_SUCCESS
;
2567 BT_DBG("chan %p", chan
);
2569 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2570 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2572 hint
= type
& L2CAP_CONF_HINT
;
2573 type
&= L2CAP_CONF_MASK
;
2576 case L2CAP_CONF_MTU
:
2580 case L2CAP_CONF_FLUSH_TO
:
2581 chan
->flush_to
= val
;
2584 case L2CAP_CONF_QOS
:
2587 case L2CAP_CONF_RFC
:
2588 if (olen
== sizeof(rfc
))
2589 memcpy(&rfc
, (void *) val
, olen
);
2592 case L2CAP_CONF_FCS
:
2593 if (val
== L2CAP_FCS_NONE
)
2594 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
2597 case L2CAP_CONF_EFS
:
2599 if (olen
== sizeof(efs
))
2600 memcpy(&efs
, (void *) val
, olen
);
2603 case L2CAP_CONF_EWS
:
2605 return -ECONNREFUSED
;
2607 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2608 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
2609 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2610 chan
->remote_tx_win
= val
;
2617 result
= L2CAP_CONF_UNKNOWN
;
2618 *((u8
*) ptr
++) = type
;
2623 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
2626 switch (chan
->mode
) {
2627 case L2CAP_MODE_STREAMING
:
2628 case L2CAP_MODE_ERTM
:
2629 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
2630 chan
->mode
= l2cap_select_mode(rfc
.mode
,
2631 chan
->conn
->feat_mask
);
2636 if (__l2cap_efs_supported(chan
))
2637 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2639 return -ECONNREFUSED
;
2642 if (chan
->mode
!= rfc
.mode
)
2643 return -ECONNREFUSED
;
2649 if (chan
->mode
!= rfc
.mode
) {
2650 result
= L2CAP_CONF_UNACCEPT
;
2651 rfc
.mode
= chan
->mode
;
2653 if (chan
->num_conf_rsp
== 1)
2654 return -ECONNREFUSED
;
2656 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2657 sizeof(rfc
), (unsigned long) &rfc
);
2660 if (result
== L2CAP_CONF_SUCCESS
) {
2661 /* Configure output options and let the other side know
2662 * which ones we don't like. */
2664 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2665 result
= L2CAP_CONF_UNACCEPT
;
2668 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
2670 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
2673 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
2674 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
2675 efs
.stype
!= chan
->local_stype
) {
2677 result
= L2CAP_CONF_UNACCEPT
;
2679 if (chan
->num_conf_req
>= 1)
2680 return -ECONNREFUSED
;
2682 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2684 (unsigned long) &efs
);
2686 /* Send PENDING Conf Rsp */
2687 result
= L2CAP_CONF_PENDING
;
2688 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
2693 case L2CAP_MODE_BASIC
:
2694 chan
->fcs
= L2CAP_FCS_NONE
;
2695 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2698 case L2CAP_MODE_ERTM
:
2699 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
2700 chan
->remote_tx_win
= rfc
.txwin_size
;
2702 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
2704 chan
->remote_max_tx
= rfc
.max_transmit
;
2706 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
2708 L2CAP_EXT_HDR_SIZE
-
2711 rfc
.max_pdu_size
= cpu_to_le16(size
);
2712 chan
->remote_mps
= size
;
2714 rfc
.retrans_timeout
=
2715 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
2716 rfc
.monitor_timeout
=
2717 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
2719 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2721 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2722 sizeof(rfc
), (unsigned long) &rfc
);
2724 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
2725 chan
->remote_id
= efs
.id
;
2726 chan
->remote_stype
= efs
.stype
;
2727 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
2728 chan
->remote_flush_to
=
2729 le32_to_cpu(efs
.flush_to
);
2730 chan
->remote_acc_lat
=
2731 le32_to_cpu(efs
.acc_lat
);
2732 chan
->remote_sdu_itime
=
2733 le32_to_cpu(efs
.sdu_itime
);
2734 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2735 sizeof(efs
), (unsigned long) &efs
);
2739 case L2CAP_MODE_STREAMING
:
2740 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
2742 L2CAP_EXT_HDR_SIZE
-
2745 rfc
.max_pdu_size
= cpu_to_le16(size
);
2746 chan
->remote_mps
= size
;
2748 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
2750 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2751 sizeof(rfc
), (unsigned long) &rfc
);
2756 result
= L2CAP_CONF_UNACCEPT
;
2758 memset(&rfc
, 0, sizeof(rfc
));
2759 rfc
.mode
= chan
->mode
;
2762 if (result
== L2CAP_CONF_SUCCESS
)
2763 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
2765 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2766 rsp
->result
= cpu_to_le16(result
);
2767 rsp
->flags
= cpu_to_le16(0x0000);
2772 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
2774 struct l2cap_conf_req
*req
= data
;
2775 void *ptr
= req
->data
;
2778 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2779 struct l2cap_conf_efs efs
;
2781 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
2783 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2784 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2787 case L2CAP_CONF_MTU
:
2788 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2789 *result
= L2CAP_CONF_UNACCEPT
;
2790 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
2793 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2796 case L2CAP_CONF_FLUSH_TO
:
2797 chan
->flush_to
= val
;
2798 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2802 case L2CAP_CONF_RFC
:
2803 if (olen
== sizeof(rfc
))
2804 memcpy(&rfc
, (void *)val
, olen
);
2806 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
2807 rfc
.mode
!= chan
->mode
)
2808 return -ECONNREFUSED
;
2812 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2813 sizeof(rfc
), (unsigned long) &rfc
);
2816 case L2CAP_CONF_EWS
:
2817 chan
->tx_win
= min_t(u16
, val
,
2818 L2CAP_DEFAULT_EXT_WINDOW
);
2819 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2823 case L2CAP_CONF_EFS
:
2824 if (olen
== sizeof(efs
))
2825 memcpy(&efs
, (void *)val
, olen
);
2827 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
2828 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
2829 efs
.stype
!= chan
->local_stype
)
2830 return -ECONNREFUSED
;
2832 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2833 sizeof(efs
), (unsigned long) &efs
);
2838 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
2839 return -ECONNREFUSED
;
2841 chan
->mode
= rfc
.mode
;
2843 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
2845 case L2CAP_MODE_ERTM
:
2846 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2847 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2848 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2850 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
2851 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
2852 chan
->local_sdu_itime
=
2853 le32_to_cpu(efs
.sdu_itime
);
2854 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
2855 chan
->local_flush_to
=
2856 le32_to_cpu(efs
.flush_to
);
2860 case L2CAP_MODE_STREAMING
:
2861 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2865 req
->dcid
= cpu_to_le16(chan
->dcid
);
2866 req
->flags
= cpu_to_le16(0x0000);
2871 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
2873 struct l2cap_conf_rsp
*rsp
= data
;
2874 void *ptr
= rsp
->data
;
2876 BT_DBG("chan %p", chan
);
2878 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2879 rsp
->result
= cpu_to_le16(result
);
2880 rsp
->flags
= cpu_to_le16(flags
);
2885 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
2887 struct l2cap_conn_rsp rsp
;
2888 struct l2cap_conn
*conn
= chan
->conn
;
2891 rsp
.scid
= cpu_to_le16(chan
->dcid
);
2892 rsp
.dcid
= cpu_to_le16(chan
->scid
);
2893 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
2894 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
2895 l2cap_send_cmd(conn
, chan
->ident
,
2896 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2898 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
2901 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2902 l2cap_build_conf_req(chan
, buf
), buf
);
2903 chan
->num_conf_req
++;
2906 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
2910 struct l2cap_conf_rfc rfc
;
2912 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
2914 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
2917 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2918 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2921 case L2CAP_CONF_RFC
:
2922 if (olen
== sizeof(rfc
))
2923 memcpy(&rfc
, (void *)val
, olen
);
2928 /* Use sane default values in case a misbehaving remote device
2929 * did not send an RFC option.
2931 rfc
.mode
= chan
->mode
;
2932 rfc
.retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
2933 rfc
.monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
2934 rfc
.max_pdu_size
= cpu_to_le16(chan
->imtu
);
2936 BT_ERR("Expected RFC option was not found, using defaults");
2940 case L2CAP_MODE_ERTM
:
2941 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2942 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2943 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2945 case L2CAP_MODE_STREAMING
:
2946 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2950 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2952 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
2954 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
2957 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2958 cmd
->ident
== conn
->info_ident
) {
2959 cancel_delayed_work(&conn
->info_timer
);
2961 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2962 conn
->info_ident
= 0;
2964 l2cap_conn_start(conn
);
2970 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2972 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2973 struct l2cap_conn_rsp rsp
;
2974 struct l2cap_chan
*chan
= NULL
, *pchan
;
2975 struct sock
*parent
, *sk
= NULL
;
2976 int result
, status
= L2CAP_CS_NO_INFO
;
2978 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2979 __le16 psm
= req
->psm
;
2981 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
2983 /* Check if we have socket listening on psm */
2984 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
2986 result
= L2CAP_CR_BAD_PSM
;
2992 mutex_lock(&conn
->chan_lock
);
2995 /* Check if the ACL is secure enough (if not SDP) */
2996 if (psm
!= cpu_to_le16(0x0001) &&
2997 !hci_conn_check_link_mode(conn
->hcon
)) {
2998 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
2999 result
= L2CAP_CR_SEC_BLOCK
;
3003 result
= L2CAP_CR_NO_MEM
;
3005 /* Check for backlog size */
3006 if (sk_acceptq_is_full(parent
)) {
3007 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
3011 chan
= pchan
->ops
->new_connection(pchan
->data
);
3017 /* Check if we already have channel with that dcid */
3018 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
3019 sock_set_flag(sk
, SOCK_ZAPPED
);
3020 chan
->ops
->close(chan
->data
);
3024 hci_conn_hold(conn
->hcon
);
3026 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3027 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3031 bt_accept_enqueue(parent
, sk
);
3033 __l2cap_chan_add(conn
, chan
);
3037 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3039 chan
->ident
= cmd
->ident
;
3041 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3042 if (l2cap_chan_check_security(chan
)) {
3043 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
)) {
3044 __l2cap_state_change(chan
, BT_CONNECT2
);
3045 result
= L2CAP_CR_PEND
;
3046 status
= L2CAP_CS_AUTHOR_PEND
;
3047 parent
->sk_data_ready(parent
, 0);
3049 __l2cap_state_change(chan
, BT_CONFIG
);
3050 result
= L2CAP_CR_SUCCESS
;
3051 status
= L2CAP_CS_NO_INFO
;
3054 __l2cap_state_change(chan
, BT_CONNECT2
);
3055 result
= L2CAP_CR_PEND
;
3056 status
= L2CAP_CS_AUTHEN_PEND
;
3059 __l2cap_state_change(chan
, BT_CONNECT2
);
3060 result
= L2CAP_CR_PEND
;
3061 status
= L2CAP_CS_NO_INFO
;
3065 release_sock(parent
);
3066 mutex_unlock(&conn
->chan_lock
);
3069 rsp
.scid
= cpu_to_le16(scid
);
3070 rsp
.dcid
= cpu_to_le16(dcid
);
3071 rsp
.result
= cpu_to_le16(result
);
3072 rsp
.status
= cpu_to_le16(status
);
3073 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3075 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3076 struct l2cap_info_req info
;
3077 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3079 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3080 conn
->info_ident
= l2cap_get_ident(conn
);
3082 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3084 l2cap_send_cmd(conn
, conn
->info_ident
,
3085 L2CAP_INFO_REQ
, sizeof(info
), &info
);
3088 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3089 result
== L2CAP_CR_SUCCESS
) {
3091 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3092 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3093 l2cap_build_conf_req(chan
, buf
), buf
);
3094 chan
->num_conf_req
++;
3100 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3102 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3103 u16 scid
, dcid
, result
, status
;
3104 struct l2cap_chan
*chan
;
3108 scid
= __le16_to_cpu(rsp
->scid
);
3109 dcid
= __le16_to_cpu(rsp
->dcid
);
3110 result
= __le16_to_cpu(rsp
->result
);
3111 status
= __le16_to_cpu(rsp
->status
);
3113 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3114 dcid
, scid
, result
, status
);
3116 mutex_lock(&conn
->chan_lock
);
3119 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3125 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3134 l2cap_chan_lock(chan
);
3137 case L2CAP_CR_SUCCESS
:
3138 l2cap_state_change(chan
, BT_CONFIG
);
3141 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3143 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3146 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3147 l2cap_build_conf_req(chan
, req
), req
);
3148 chan
->num_conf_req
++;
3152 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3156 l2cap_chan_del(chan
, ECONNREFUSED
);
3160 l2cap_chan_unlock(chan
);
3163 mutex_unlock(&conn
->chan_lock
);
3168 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3170 /* FCS is enabled only in ERTM or streaming mode, if one or both
3173 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3174 chan
->fcs
= L2CAP_FCS_NONE
;
3175 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3176 chan
->fcs
= L2CAP_FCS_CRC16
;
3179 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3181 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3184 struct l2cap_chan
*chan
;
3187 dcid
= __le16_to_cpu(req
->dcid
);
3188 flags
= __le16_to_cpu(req
->flags
);
3190 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3192 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3196 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3197 struct l2cap_cmd_rej_cid rej
;
3199 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3200 rej
.scid
= cpu_to_le16(chan
->scid
);
3201 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3203 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3208 /* Reject if config buffer is too small. */
3209 len
= cmd_len
- sizeof(*req
);
3210 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3211 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3212 l2cap_build_conf_rsp(chan
, rsp
,
3213 L2CAP_CONF_REJECT
, flags
), rsp
);
3218 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3219 chan
->conf_len
+= len
;
3221 if (flags
& 0x0001) {
3222 /* Incomplete config. Send empty response. */
3223 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3224 l2cap_build_conf_rsp(chan
, rsp
,
3225 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
3229 /* Complete config. */
3230 len
= l2cap_parse_conf_req(chan
, rsp
);
3232 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3236 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3237 chan
->num_conf_rsp
++;
3239 /* Reset config buffer. */
3242 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3245 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3246 set_default_fcs(chan
);
3248 l2cap_state_change(chan
, BT_CONNECTED
);
3250 if (chan
->mode
== L2CAP_MODE_ERTM
||
3251 chan
->mode
== L2CAP_MODE_STREAMING
)
3252 err
= l2cap_ertm_init(chan
);
3255 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3257 l2cap_chan_ready(chan
);
3262 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3264 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3265 l2cap_build_conf_req(chan
, buf
), buf
);
3266 chan
->num_conf_req
++;
3269 /* Got Conf Rsp PENDING from remote side and asume we sent
3270 Conf Rsp PENDING in the code above */
3271 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3272 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3274 /* check compatibility */
3276 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3277 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3279 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3280 l2cap_build_conf_rsp(chan
, rsp
,
3281 L2CAP_CONF_SUCCESS
, 0x0000), rsp
);
3285 l2cap_chan_unlock(chan
);
3289 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3291 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3292 u16 scid
, flags
, result
;
3293 struct l2cap_chan
*chan
;
3294 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3297 scid
= __le16_to_cpu(rsp
->scid
);
3298 flags
= __le16_to_cpu(rsp
->flags
);
3299 result
= __le16_to_cpu(rsp
->result
);
3301 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3304 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3309 case L2CAP_CONF_SUCCESS
:
3310 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3311 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3314 case L2CAP_CONF_PENDING
:
3315 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3317 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3320 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3323 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3327 /* check compatibility */
3329 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3330 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3332 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3333 l2cap_build_conf_rsp(chan
, buf
,
3334 L2CAP_CONF_SUCCESS
, 0x0000), buf
);
3338 case L2CAP_CONF_UNACCEPT
:
3339 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3342 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3343 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3347 /* throw out any old stored conf requests */
3348 result
= L2CAP_CONF_SUCCESS
;
3349 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3352 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3356 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3357 L2CAP_CONF_REQ
, len
, req
);
3358 chan
->num_conf_req
++;
3359 if (result
!= L2CAP_CONF_SUCCESS
)
3365 l2cap_chan_set_err(chan
, ECONNRESET
);
3367 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
3368 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3375 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3377 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3378 set_default_fcs(chan
);
3380 l2cap_state_change(chan
, BT_CONNECTED
);
3381 if (chan
->mode
== L2CAP_MODE_ERTM
||
3382 chan
->mode
== L2CAP_MODE_STREAMING
)
3383 err
= l2cap_ertm_init(chan
);
3386 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3388 l2cap_chan_ready(chan
);
3392 l2cap_chan_unlock(chan
);
3396 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3398 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3399 struct l2cap_disconn_rsp rsp
;
3401 struct l2cap_chan
*chan
;
3404 scid
= __le16_to_cpu(req
->scid
);
3405 dcid
= __le16_to_cpu(req
->dcid
);
3407 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3409 mutex_lock(&conn
->chan_lock
);
3411 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
3413 mutex_unlock(&conn
->chan_lock
);
3417 l2cap_chan_lock(chan
);
3421 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3422 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3423 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3426 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3429 l2cap_chan_hold(chan
);
3430 l2cap_chan_del(chan
, ECONNRESET
);
3432 l2cap_chan_unlock(chan
);
3434 chan
->ops
->close(chan
->data
);
3435 l2cap_chan_put(chan
);
3437 mutex_unlock(&conn
->chan_lock
);
3442 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3444 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3446 struct l2cap_chan
*chan
;
3448 scid
= __le16_to_cpu(rsp
->scid
);
3449 dcid
= __le16_to_cpu(rsp
->dcid
);
3451 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3453 mutex_lock(&conn
->chan_lock
);
3455 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3457 mutex_unlock(&conn
->chan_lock
);
3461 l2cap_chan_lock(chan
);
3463 l2cap_chan_hold(chan
);
3464 l2cap_chan_del(chan
, 0);
3466 l2cap_chan_unlock(chan
);
3468 chan
->ops
->close(chan
->data
);
3469 l2cap_chan_put(chan
);
3471 mutex_unlock(&conn
->chan_lock
);
3476 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3478 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3481 type
= __le16_to_cpu(req
->type
);
3483 BT_DBG("type 0x%4.4x", type
);
3485 if (type
== L2CAP_IT_FEAT_MASK
) {
3487 u32 feat_mask
= l2cap_feat_mask
;
3488 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3489 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3490 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3492 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3495 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3496 | L2CAP_FEAT_EXT_WINDOW
;
3498 put_unaligned_le32(feat_mask
, rsp
->data
);
3499 l2cap_send_cmd(conn
, cmd
->ident
,
3500 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3501 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3503 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3506 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3508 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3510 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3511 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3512 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3513 l2cap_send_cmd(conn
, cmd
->ident
,
3514 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3516 struct l2cap_info_rsp rsp
;
3517 rsp
.type
= cpu_to_le16(type
);
3518 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3519 l2cap_send_cmd(conn
, cmd
->ident
,
3520 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3526 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3528 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3531 type
= __le16_to_cpu(rsp
->type
);
3532 result
= __le16_to_cpu(rsp
->result
);
3534 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3536 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3537 if (cmd
->ident
!= conn
->info_ident
||
3538 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3541 cancel_delayed_work(&conn
->info_timer
);
3543 if (result
!= L2CAP_IR_SUCCESS
) {
3544 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3545 conn
->info_ident
= 0;
3547 l2cap_conn_start(conn
);
3553 case L2CAP_IT_FEAT_MASK
:
3554 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3556 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3557 struct l2cap_info_req req
;
3558 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3560 conn
->info_ident
= l2cap_get_ident(conn
);
3562 l2cap_send_cmd(conn
, conn
->info_ident
,
3563 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3565 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3566 conn
->info_ident
= 0;
3568 l2cap_conn_start(conn
);
3572 case L2CAP_IT_FIXED_CHAN
:
3573 conn
->fixed_chan_mask
= rsp
->data
[0];
3574 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3575 conn
->info_ident
= 0;
3577 l2cap_conn_start(conn
);
3584 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
3585 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3588 struct l2cap_create_chan_req
*req
= data
;
3589 struct l2cap_create_chan_rsp rsp
;
3592 if (cmd_len
!= sizeof(*req
))
3598 psm
= le16_to_cpu(req
->psm
);
3599 scid
= le16_to_cpu(req
->scid
);
3601 BT_DBG("psm %d, scid %d, amp_id %d", psm
, scid
, req
->amp_id
);
3603 /* Placeholder: Always reject */
3605 rsp
.scid
= cpu_to_le16(scid
);
3606 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
3607 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3609 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
/* A Create Channel Response is processed exactly like a Connection
 * Response on a BR/EDR link.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					   struct l2cap_cmd_hdr *cmd,
					   void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3623 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
3624 u16 icid
, u16 result
)
3626 struct l2cap_move_chan_rsp rsp
;
3628 BT_DBG("icid %d, result %d", icid
, result
);
3630 rsp
.icid
= cpu_to_le16(icid
);
3631 rsp
.result
= cpu_to_le16(result
);
3633 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
3636 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
3637 struct l2cap_chan
*chan
, u16 icid
, u16 result
)
3639 struct l2cap_move_chan_cfm cfm
;
3642 BT_DBG("icid %d, result %d", icid
, result
);
3644 ident
= l2cap_get_ident(conn
);
3646 chan
->ident
= ident
;
3648 cfm
.icid
= cpu_to_le16(icid
);
3649 cfm
.result
= cpu_to_le16(result
);
3651 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
3654 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
3657 struct l2cap_move_chan_cfm_rsp rsp
;
3659 BT_DBG("icid %d", icid
);
3661 rsp
.icid
= cpu_to_le16(icid
);
3662 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
3665 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
3666 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3668 struct l2cap_move_chan_req
*req
= data
;
3670 u16 result
= L2CAP_MR_NOT_ALLOWED
;
3672 if (cmd_len
!= sizeof(*req
))
3675 icid
= le16_to_cpu(req
->icid
);
3677 BT_DBG("icid %d, dest_amp_id %d", icid
, req
->dest_amp_id
);
3682 /* Placeholder: Always refuse */
3683 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
3688 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
3689 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3691 struct l2cap_move_chan_rsp
*rsp
= data
;
3694 if (cmd_len
!= sizeof(*rsp
))
3697 icid
= le16_to_cpu(rsp
->icid
);
3698 result
= le16_to_cpu(rsp
->result
);
3700 BT_DBG("icid %d, result %d", icid
, result
);
3702 /* Placeholder: Always unconfirmed */
3703 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
3708 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
3709 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3711 struct l2cap_move_chan_cfm
*cfm
= data
;
3714 if (cmd_len
!= sizeof(*cfm
))
3717 icid
= le16_to_cpu(cfm
->icid
);
3718 result
= le16_to_cpu(cfm
->result
);
3720 BT_DBG("icid %d, result %d", icid
, result
);
3722 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
3727 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
3728 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3730 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
3733 if (cmd_len
!= sizeof(*rsp
))
3736 icid
= le16_to_cpu(rsp
->icid
);
3738 BT_DBG("icid %d", icid
);
3743 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
3748 if (min
> max
|| min
< 6 || max
> 3200)
3751 if (to_multiplier
< 10 || to_multiplier
> 3200)
3754 if (max
>= to_multiplier
* 8)
3757 max_latency
= (to_multiplier
* 8 / max
) - 1;
3758 if (latency
> 499 || latency
> max_latency
)
3764 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
3765 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3767 struct hci_conn
*hcon
= conn
->hcon
;
3768 struct l2cap_conn_param_update_req
*req
;
3769 struct l2cap_conn_param_update_rsp rsp
;
3770 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
3773 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
3776 cmd_len
= __le16_to_cpu(cmd
->len
);
3777 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
3780 req
= (struct l2cap_conn_param_update_req
*) data
;
3781 min
= __le16_to_cpu(req
->min
);
3782 max
= __le16_to_cpu(req
->max
);
3783 latency
= __le16_to_cpu(req
->latency
);
3784 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
3786 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3787 min
, max
, latency
, to_multiplier
);
3789 memset(&rsp
, 0, sizeof(rsp
));
3791 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
3793 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
3795 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
3797 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
3801 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
3806 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
3807 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3811 switch (cmd
->code
) {
3812 case L2CAP_COMMAND_REJ
:
3813 l2cap_command_rej(conn
, cmd
, data
);
3816 case L2CAP_CONN_REQ
:
3817 err
= l2cap_connect_req(conn
, cmd
, data
);
3820 case L2CAP_CONN_RSP
:
3821 err
= l2cap_connect_rsp(conn
, cmd
, data
);
3824 case L2CAP_CONF_REQ
:
3825 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
3828 case L2CAP_CONF_RSP
:
3829 err
= l2cap_config_rsp(conn
, cmd
, data
);
3832 case L2CAP_DISCONN_REQ
:
3833 err
= l2cap_disconnect_req(conn
, cmd
, data
);
3836 case L2CAP_DISCONN_RSP
:
3837 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
3840 case L2CAP_ECHO_REQ
:
3841 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3844 case L2CAP_ECHO_RSP
:
3847 case L2CAP_INFO_REQ
:
3848 err
= l2cap_information_req(conn
, cmd
, data
);
3851 case L2CAP_INFO_RSP
:
3852 err
= l2cap_information_rsp(conn
, cmd
, data
);
3855 case L2CAP_CREATE_CHAN_REQ
:
3856 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
3859 case L2CAP_CREATE_CHAN_RSP
:
3860 err
= l2cap_create_channel_rsp(conn
, cmd
, data
);
3863 case L2CAP_MOVE_CHAN_REQ
:
3864 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
3867 case L2CAP_MOVE_CHAN_RSP
:
3868 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
3871 case L2CAP_MOVE_CHAN_CFM
:
3872 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
3875 case L2CAP_MOVE_CHAN_CFM_RSP
:
3876 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
3880 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
3888 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
3889 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3891 switch (cmd
->code
) {
3892 case L2CAP_COMMAND_REJ
:
3895 case L2CAP_CONN_PARAM_UPDATE_REQ
:
3896 return l2cap_conn_param_update_req(conn
, cmd
, data
);
3898 case L2CAP_CONN_PARAM_UPDATE_RSP
:
3902 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
3907 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
3908 struct sk_buff
*skb
)
3910 u8
*data
= skb
->data
;
3912 struct l2cap_cmd_hdr cmd
;
3915 l2cap_raw_recv(conn
, skb
);
3917 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3919 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3920 data
+= L2CAP_CMD_HDR_SIZE
;
3921 len
-= L2CAP_CMD_HDR_SIZE
;
3923 cmd_len
= le16_to_cpu(cmd
.len
);
3925 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3927 if (cmd_len
> len
|| !cmd
.ident
) {
3928 BT_DBG("corrupted command");
3932 if (conn
->hcon
->type
== LE_LINK
)
3933 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
3935 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
3938 struct l2cap_cmd_rej_unk rej
;
3940 BT_ERR("Wrong link type (%d)", err
);
3942 /* FIXME: Map err to a valid reason */
3943 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
3944 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3954 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
3956 u16 our_fcs
, rcv_fcs
;
3959 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3960 hdr_size
= L2CAP_EXT_HDR_SIZE
;
3962 hdr_size
= L2CAP_ENH_HDR_SIZE
;
3964 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
3965 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
3966 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3967 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3969 if (our_fcs
!= rcv_fcs
)
3975 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
3979 chan
->frames_sent
= 0;
3981 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
3983 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
3984 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
3985 l2cap_send_sframe(chan
, control
);
3986 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3989 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
3990 l2cap_retransmit_frames(chan
);
3992 l2cap_ertm_send(chan
);
3994 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
3995 chan
->frames_sent
== 0) {
3996 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
3997 l2cap_send_sframe(chan
, control
);
4001 static int l2cap_add_to_srej_queue(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 tx_seq
, u8 sar
)
4003 struct sk_buff
*next_skb
;
4004 int tx_seq_offset
, next_tx_seq_offset
;
4006 bt_cb(skb
)->control
.txseq
= tx_seq
;
4007 bt_cb(skb
)->control
.sar
= sar
;
4009 next_skb
= skb_peek(&chan
->srej_q
);
4011 tx_seq_offset
= __seq_offset(chan
, tx_seq
, chan
->buffer_seq
);
4014 if (bt_cb(next_skb
)->control
.txseq
== tx_seq
)
4017 next_tx_seq_offset
= __seq_offset(chan
,
4018 bt_cb(next_skb
)->control
.txseq
, chan
->buffer_seq
);
4020 if (next_tx_seq_offset
> tx_seq_offset
) {
4021 __skb_queue_before(&chan
->srej_q
, next_skb
, skb
);
4025 if (skb_queue_is_last(&chan
->srej_q
, next_skb
))
4028 next_skb
= skb_queue_next(&chan
->srej_q
, next_skb
);
4031 __skb_queue_tail(&chan
->srej_q
, skb
);
4036 static void append_skb_frag(struct sk_buff
*skb
,
4037 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
4039 /* skb->len reflects data in skb as well as all fragments
4040 * skb->data_len reflects only data in fragments
4042 if (!skb_has_frag_list(skb
))
4043 skb_shinfo(skb
)->frag_list
= new_frag
;
4045 new_frag
->next
= NULL
;
4047 (*last_frag
)->next
= new_frag
;
4048 *last_frag
= new_frag
;
4050 skb
->len
+= new_frag
->len
;
4051 skb
->data_len
+= new_frag
->len
;
4052 skb
->truesize
+= new_frag
->truesize
;
4055 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u32 control
)
4059 switch (__get_ctrl_sar(chan
, control
)) {
4060 case L2CAP_SAR_UNSEGMENTED
:
4064 err
= chan
->ops
->recv(chan
->data
, skb
);
4067 case L2CAP_SAR_START
:
4071 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
4072 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
4074 if (chan
->sdu_len
> chan
->imtu
) {
4079 if (skb
->len
>= chan
->sdu_len
)
4083 chan
->sdu_last_frag
= skb
;
4089 case L2CAP_SAR_CONTINUE
:
4093 append_skb_frag(chan
->sdu
, skb
,
4094 &chan
->sdu_last_frag
);
4097 if (chan
->sdu
->len
>= chan
->sdu_len
)
4107 append_skb_frag(chan
->sdu
, skb
,
4108 &chan
->sdu_last_frag
);
4111 if (chan
->sdu
->len
!= chan
->sdu_len
)
4114 err
= chan
->ops
->recv(chan
->data
, chan
->sdu
);
4117 /* Reassembly complete */
4119 chan
->sdu_last_frag
= NULL
;
4127 kfree_skb(chan
->sdu
);
4129 chan
->sdu_last_frag
= NULL
;
4136 static void l2cap_ertm_enter_local_busy(struct l2cap_chan
*chan
)
4138 BT_DBG("chan %p, Enter local busy", chan
);
4140 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
4141 l2cap_seq_list_clear(&chan
->srej_list
);
4143 __set_ack_timer(chan
);
4146 static void l2cap_ertm_exit_local_busy(struct l2cap_chan
*chan
)
4150 if (!test_bit(CONN_RNR_SENT
, &chan
->conn_state
))
4153 control
= __set_reqseq(chan
, chan
->buffer_seq
);
4154 control
|= __set_ctrl_poll(chan
);
4155 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
4156 l2cap_send_sframe(chan
, control
);
4157 chan
->retry_count
= 1;
4159 __clear_retrans_timer(chan
);
4160 __set_monitor_timer(chan
);
4162 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
4165 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
4166 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
4168 BT_DBG("chan %p, Exit local busy", chan
);
4171 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
4173 if (chan
->mode
== L2CAP_MODE_ERTM
) {
4175 l2cap_ertm_enter_local_busy(chan
);
4177 l2cap_ertm_exit_local_busy(chan
);
4181 static void l2cap_check_srej_gap(struct l2cap_chan
*chan
, u16 tx_seq
)
4183 struct sk_buff
*skb
;
4186 while ((skb
= skb_peek(&chan
->srej_q
)) &&
4187 !test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4190 if (bt_cb(skb
)->control
.txseq
!= tx_seq
)
4193 skb
= skb_dequeue(&chan
->srej_q
);
4194 control
= __set_ctrl_sar(chan
, bt_cb(skb
)->control
.sar
);
4195 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
4198 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4202 chan
->buffer_seq_srej
= __next_seq(chan
, chan
->buffer_seq_srej
);
4203 tx_seq
= __next_seq(chan
, tx_seq
);
4207 static void l2cap_resend_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
4209 struct srej_list
*l
, *tmp
;
4212 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
4213 if (l
->tx_seq
== tx_seq
) {
4218 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
4219 control
|= __set_reqseq(chan
, l
->tx_seq
);
4220 l2cap_send_sframe(chan
, control
);
4222 list_add_tail(&l
->list
, &chan
->srej_l
);
4226 static int l2cap_send_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
4228 struct srej_list
*new;
4231 while (tx_seq
!= chan
->expected_tx_seq
) {
4232 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
4233 control
|= __set_reqseq(chan
, chan
->expected_tx_seq
);
4234 l2cap_seq_list_append(&chan
->srej_list
, chan
->expected_tx_seq
);
4235 l2cap_send_sframe(chan
, control
);
4237 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
4241 new->tx_seq
= chan
->expected_tx_seq
;
4243 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
4245 list_add_tail(&new->list
, &chan
->srej_l
);
4248 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
4253 static inline int l2cap_data_channel_iframe(struct l2cap_chan
*chan
, u32 rx_control
, struct sk_buff
*skb
)
4255 u16 tx_seq
= __get_txseq(chan
, rx_control
);
4256 u16 req_seq
= __get_reqseq(chan
, rx_control
);
4257 u8 sar
= __get_ctrl_sar(chan
, rx_control
);
4258 int tx_seq_offset
, expected_tx_seq_offset
;
4259 int num_to_ack
= (chan
->tx_win
/6) + 1;
4262 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan
, skb
->len
,
4263 tx_seq
, rx_control
);
4265 if (__is_ctrl_final(chan
, rx_control
) &&
4266 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4267 __clear_monitor_timer(chan
);
4268 if (chan
->unacked_frames
> 0)
4269 __set_retrans_timer(chan
);
4270 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
4273 chan
->expected_ack_seq
= req_seq
;
4274 l2cap_drop_acked_frames(chan
);
4276 tx_seq_offset
= __seq_offset(chan
, tx_seq
, chan
->buffer_seq
);
4278 /* invalid tx_seq */
4279 if (tx_seq_offset
>= chan
->tx_win
) {
4280 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4284 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4285 if (!test_bit(CONN_RNR_SENT
, &chan
->conn_state
))
4286 l2cap_send_ack(chan
);
4290 if (tx_seq
== chan
->expected_tx_seq
)
4293 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4294 struct srej_list
*first
;
4296 first
= list_first_entry(&chan
->srej_l
,
4297 struct srej_list
, list
);
4298 if (tx_seq
== first
->tx_seq
) {
4299 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
4300 l2cap_check_srej_gap(chan
, tx_seq
);
4302 list_del(&first
->list
);
4305 if (list_empty(&chan
->srej_l
)) {
4306 chan
->buffer_seq
= chan
->buffer_seq_srej
;
4307 clear_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
4308 l2cap_send_ack(chan
);
4309 BT_DBG("chan %p, Exit SREJ_SENT", chan
);
4312 struct srej_list
*l
;
4314 /* duplicated tx_seq */
4315 if (l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
) < 0)
4318 list_for_each_entry(l
, &chan
->srej_l
, list
) {
4319 if (l
->tx_seq
== tx_seq
) {
4320 l2cap_resend_srejframe(chan
, tx_seq
);
4325 err
= l2cap_send_srejframe(chan
, tx_seq
);
4327 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
4332 expected_tx_seq_offset
= __seq_offset(chan
,
4333 chan
->expected_tx_seq
, chan
->buffer_seq
);
4335 /* duplicated tx_seq */
4336 if (tx_seq_offset
< expected_tx_seq_offset
)
4339 set_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
4341 BT_DBG("chan %p, Enter SREJ", chan
);
4343 INIT_LIST_HEAD(&chan
->srej_l
);
4344 chan
->buffer_seq_srej
= chan
->buffer_seq
;
4346 __skb_queue_head_init(&chan
->srej_q
);
4347 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
4349 /* Set P-bit only if there are some I-frames to ack. */
4350 if (__clear_ack_timer(chan
))
4351 set_bit(CONN_SEND_PBIT
, &chan
->conn_state
);
4353 err
= l2cap_send_srejframe(chan
, tx_seq
);
4355 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
4362 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
4364 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4365 bt_cb(skb
)->control
.txseq
= tx_seq
;
4366 bt_cb(skb
)->control
.sar
= sar
;
4367 __skb_queue_tail(&chan
->srej_q
, skb
);
4371 err
= l2cap_reassemble_sdu(chan
, skb
, rx_control
);
4372 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
4375 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4379 if (__is_ctrl_final(chan
, rx_control
)) {
4380 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4381 l2cap_retransmit_frames(chan
);
4385 chan
->num_acked
= (chan
->num_acked
+ 1) % num_to_ack
;
4386 if (chan
->num_acked
== num_to_ack
- 1)
4387 l2cap_send_ack(chan
);
4389 __set_ack_timer(chan
);
4398 static inline void l2cap_data_channel_rrframe(struct l2cap_chan
*chan
, u32 rx_control
)
4400 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
,
4401 __get_reqseq(chan
, rx_control
), rx_control
);
4403 chan
->expected_ack_seq
= __get_reqseq(chan
, rx_control
);
4404 l2cap_drop_acked_frames(chan
);
4406 if (__is_ctrl_poll(chan
, rx_control
)) {
4407 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4408 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4409 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4410 (chan
->unacked_frames
> 0))
4411 __set_retrans_timer(chan
);
4413 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4414 l2cap_send_srejtail(chan
);
4416 l2cap_send_i_or_rr_or_rnr(chan
);
4419 } else if (__is_ctrl_final(chan
, rx_control
)) {
4420 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4422 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4423 l2cap_retransmit_frames(chan
);
4426 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4427 (chan
->unacked_frames
> 0))
4428 __set_retrans_timer(chan
);
4430 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4431 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
))
4432 l2cap_send_ack(chan
);
4434 l2cap_ertm_send(chan
);
4438 static inline void l2cap_data_channel_rejframe(struct l2cap_chan
*chan
, u32 rx_control
)
4440 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4442 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4444 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4446 chan
->expected_ack_seq
= tx_seq
;
4447 l2cap_drop_acked_frames(chan
);
4449 if (__is_ctrl_final(chan
, rx_control
)) {
4450 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4451 l2cap_retransmit_frames(chan
);
4453 l2cap_retransmit_frames(chan
);
4455 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
))
4456 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4459 static inline void l2cap_data_channel_srejframe(struct l2cap_chan
*chan
, u32 rx_control
)
4461 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4463 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4465 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4467 if (__is_ctrl_poll(chan
, rx_control
)) {
4468 chan
->expected_ack_seq
= tx_seq
;
4469 l2cap_drop_acked_frames(chan
);
4471 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4472 l2cap_retransmit_one_frame(chan
, tx_seq
);
4474 l2cap_ertm_send(chan
);
4476 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4477 chan
->srej_save_reqseq
= tx_seq
;
4478 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4480 } else if (__is_ctrl_final(chan
, rx_control
)) {
4481 if (test_bit(CONN_SREJ_ACT
, &chan
->conn_state
) &&
4482 chan
->srej_save_reqseq
== tx_seq
)
4483 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4485 l2cap_retransmit_one_frame(chan
, tx_seq
);
4487 l2cap_retransmit_one_frame(chan
, tx_seq
);
4488 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4489 chan
->srej_save_reqseq
= tx_seq
;
4490 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4495 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan
*chan
, u32 rx_control
)
4497 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4499 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4501 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4502 chan
->expected_ack_seq
= tx_seq
;
4503 l2cap_drop_acked_frames(chan
);
4505 if (__is_ctrl_poll(chan
, rx_control
))
4506 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4508 if (!test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4509 __clear_retrans_timer(chan
);
4510 if (__is_ctrl_poll(chan
, rx_control
))
4511 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_FINAL
);
4515 if (__is_ctrl_poll(chan
, rx_control
)) {
4516 l2cap_send_srejtail(chan
);
4518 rx_control
= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
4519 l2cap_send_sframe(chan
, rx_control
);
4523 static inline int l2cap_data_channel_sframe(struct l2cap_chan
*chan
, u32 rx_control
, struct sk_buff
*skb
)
4525 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan
, rx_control
, skb
->len
);
4527 if (__is_ctrl_final(chan
, rx_control
) &&
4528 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4529 __clear_monitor_timer(chan
);
4530 if (chan
->unacked_frames
> 0)
4531 __set_retrans_timer(chan
);
4532 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
4535 switch (__get_ctrl_super(chan
, rx_control
)) {
4536 case L2CAP_SUPER_RR
:
4537 l2cap_data_channel_rrframe(chan
, rx_control
);
4540 case L2CAP_SUPER_REJ
:
4541 l2cap_data_channel_rejframe(chan
, rx_control
);
4544 case L2CAP_SUPER_SREJ
:
4545 l2cap_data_channel_srejframe(chan
, rx_control
);
4548 case L2CAP_SUPER_RNR
:
4549 l2cap_data_channel_rnrframe(chan
, rx_control
);
4557 static int l2cap_ertm_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4561 int len
, next_tx_seq_offset
, req_seq_offset
;
4563 __unpack_control(chan
, skb
);
4565 control
= __get_control(chan
, skb
->data
);
4566 skb_pull(skb
, __ctrl_size(chan
));
4570 * We can just drop the corrupted I-frame here.
4571 * Receiver will miss it and start proper recovery
4572 * procedures and ask retransmission.
4574 if (l2cap_check_fcs(chan
, skb
))
4577 if (__is_sar_start(chan
, control
) && !__is_sframe(chan
, control
))
4578 len
-= L2CAP_SDULEN_SIZE
;
4580 if (chan
->fcs
== L2CAP_FCS_CRC16
)
4581 len
-= L2CAP_FCS_SIZE
;
4583 if (len
> chan
->mps
) {
4584 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4588 req_seq
= __get_reqseq(chan
, control
);
4590 req_seq_offset
= __seq_offset(chan
, req_seq
, chan
->expected_ack_seq
);
4592 next_tx_seq_offset
= __seq_offset(chan
, chan
->next_tx_seq
,
4593 chan
->expected_ack_seq
);
4595 /* check for invalid req-seq */
4596 if (req_seq_offset
> next_tx_seq_offset
) {
4597 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4601 if (!__is_sframe(chan
, control
)) {
4603 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4607 l2cap_data_channel_iframe(chan
, control
, skb
);
4611 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4615 l2cap_data_channel_sframe(chan
, control
, skb
);
4625 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4627 struct l2cap_chan
*chan
;
4632 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4634 BT_DBG("unknown cid 0x%4.4x", cid
);
4635 /* Drop packet and return */
4640 BT_DBG("chan %p, len %d", chan
, skb
->len
);
4642 if (chan
->state
!= BT_CONNECTED
)
4645 switch (chan
->mode
) {
4646 case L2CAP_MODE_BASIC
:
4647 /* If socket recv buffers overflows we drop data here
4648 * which is *bad* because L2CAP has to be reliable.
4649 * But we don't have any other choice. L2CAP doesn't
4650 * provide flow control mechanism. */
4652 if (chan
->imtu
< skb
->len
)
4655 if (!chan
->ops
->recv(chan
->data
, skb
))
4659 case L2CAP_MODE_ERTM
:
4660 l2cap_ertm_data_rcv(chan
, skb
);
4664 case L2CAP_MODE_STREAMING
:
4665 control
= __get_control(chan
, skb
->data
);
4666 skb_pull(skb
, __ctrl_size(chan
));
4669 if (l2cap_check_fcs(chan
, skb
))
4672 if (__is_sar_start(chan
, control
))
4673 len
-= L2CAP_SDULEN_SIZE
;
4675 if (chan
->fcs
== L2CAP_FCS_CRC16
)
4676 len
-= L2CAP_FCS_SIZE
;
4678 if (len
> chan
->mps
|| len
< 0 || __is_sframe(chan
, control
))
4681 tx_seq
= __get_txseq(chan
, control
);
4683 if (chan
->expected_tx_seq
!= tx_seq
) {
4684 /* Frame(s) missing - must discard partial SDU */
4685 kfree_skb(chan
->sdu
);
4687 chan
->sdu_last_frag
= NULL
;
4690 /* TODO: Notify userland of missing data */
4693 chan
->expected_tx_seq
= __next_seq(chan
, tx_seq
);
4695 if (l2cap_reassemble_sdu(chan
, skb
, control
) == -EMSGSIZE
)
4696 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4701 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
4709 l2cap_chan_unlock(chan
);
4714 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4716 struct l2cap_chan
*chan
;
4718 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
4722 BT_DBG("chan %p, len %d", chan
, skb
->len
);
4724 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
4727 if (chan
->imtu
< skb
->len
)
4730 if (!chan
->ops
->recv(chan
->data
, skb
))
4739 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
4740 struct sk_buff
*skb
)
4742 struct l2cap_chan
*chan
;
4744 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
4748 BT_DBG("chan %p, len %d", chan
, skb
->len
);
4750 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
4753 if (chan
->imtu
< skb
->len
)
4756 if (!chan
->ops
->recv(chan
->data
, skb
))
4765 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4767 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4771 skb_pull(skb
, L2CAP_HDR_SIZE
);
4772 cid
= __le16_to_cpu(lh
->cid
);
4773 len
= __le16_to_cpu(lh
->len
);
4775 if (len
!= skb
->len
) {
4780 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4783 case L2CAP_CID_LE_SIGNALING
:
4784 case L2CAP_CID_SIGNALING
:
4785 l2cap_sig_channel(conn
, skb
);
4788 case L2CAP_CID_CONN_LESS
:
4789 psm
= get_unaligned((__le16
*) skb
->data
);
4791 l2cap_conless_channel(conn
, psm
, skb
);
4794 case L2CAP_CID_LE_DATA
:
4795 l2cap_att_channel(conn
, cid
, skb
);
4799 if (smp_sig_channel(conn
, skb
))
4800 l2cap_conn_del(conn
->hcon
, EACCES
);
4804 l2cap_data_channel(conn
, cid
, skb
);
4809 /* ---- L2CAP interface with lower layer (HCI) ---- */
4811 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
4813 int exact
= 0, lm1
= 0, lm2
= 0;
4814 struct l2cap_chan
*c
;
4816 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4818 /* Find listening sockets and check their link_mode */
4819 read_lock(&chan_list_lock
);
4820 list_for_each_entry(c
, &chan_list
, global_l
) {
4821 struct sock
*sk
= c
->sk
;
4823 if (c
->state
!= BT_LISTEN
)
4826 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4827 lm1
|= HCI_LM_ACCEPT
;
4828 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4829 lm1
|= HCI_LM_MASTER
;
4831 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4832 lm2
|= HCI_LM_ACCEPT
;
4833 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4834 lm2
|= HCI_LM_MASTER
;
4837 read_unlock(&chan_list_lock
);
4839 return exact
? lm1
: lm2
;
4842 int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4844 struct l2cap_conn
*conn
;
4846 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4849 conn
= l2cap_conn_add(hcon
, status
);
4851 l2cap_conn_ready(conn
);
4853 l2cap_conn_del(hcon
, bt_to_errno(status
));
4858 int l2cap_disconn_ind(struct hci_conn
*hcon
)
4860 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4862 BT_DBG("hcon %p", hcon
);
4865 return HCI_ERROR_REMOTE_USER_TERM
;
4866 return conn
->disc_reason
;
4869 int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4871 BT_DBG("hcon %p reason %d", hcon
, reason
);
4873 l2cap_conn_del(hcon
, bt_to_errno(reason
));
4877 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
4879 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
4882 if (encrypt
== 0x00) {
4883 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
4884 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
4885 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
4886 l2cap_chan_close(chan
, ECONNREFUSED
);
4888 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
4889 __clear_chan_timer(chan
);
4893 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4895 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4896 struct l2cap_chan
*chan
;
4901 BT_DBG("conn %p", conn
);
4903 if (hcon
->type
== LE_LINK
) {
4904 if (!status
&& encrypt
)
4905 smp_distribute_keys(conn
, 0);
4906 cancel_delayed_work(&conn
->security_timer
);
4909 mutex_lock(&conn
->chan_lock
);
4911 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
4912 l2cap_chan_lock(chan
);
4914 BT_DBG("chan->scid %d", chan
->scid
);
4916 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
4917 if (!status
&& encrypt
) {
4918 chan
->sec_level
= hcon
->sec_level
;
4919 l2cap_chan_ready(chan
);
4922 l2cap_chan_unlock(chan
);
4926 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
4927 l2cap_chan_unlock(chan
);
4931 if (!status
&& (chan
->state
== BT_CONNECTED
||
4932 chan
->state
== BT_CONFIG
)) {
4933 struct sock
*sk
= chan
->sk
;
4935 clear_bit(BT_SK_SUSPEND
, &bt_sk(sk
)->flags
);
4936 sk
->sk_state_change(sk
);
4938 l2cap_check_encryption(chan
, encrypt
);
4939 l2cap_chan_unlock(chan
);
4943 if (chan
->state
== BT_CONNECT
) {
4945 l2cap_send_conn_req(chan
);
4947 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
4949 } else if (chan
->state
== BT_CONNECT2
) {
4950 struct sock
*sk
= chan
->sk
;
4951 struct l2cap_conn_rsp rsp
;
4957 if (test_bit(BT_SK_DEFER_SETUP
,
4958 &bt_sk(sk
)->flags
)) {
4959 struct sock
*parent
= bt_sk(sk
)->parent
;
4960 res
= L2CAP_CR_PEND
;
4961 stat
= L2CAP_CS_AUTHOR_PEND
;
4963 parent
->sk_data_ready(parent
, 0);
4965 __l2cap_state_change(chan
, BT_CONFIG
);
4966 res
= L2CAP_CR_SUCCESS
;
4967 stat
= L2CAP_CS_NO_INFO
;
4970 __l2cap_state_change(chan
, BT_DISCONN
);
4971 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
4972 res
= L2CAP_CR_SEC_BLOCK
;
4973 stat
= L2CAP_CS_NO_INFO
;
4978 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4979 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4980 rsp
.result
= cpu_to_le16(res
);
4981 rsp
.status
= cpu_to_le16(stat
);
4982 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
4986 l2cap_chan_unlock(chan
);
4989 mutex_unlock(&conn
->chan_lock
);
4994 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4996 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4999 conn
= l2cap_conn_add(hcon
, 0);
5004 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
5006 if (!(flags
& ACL_CONT
)) {
5007 struct l2cap_hdr
*hdr
;
5011 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
5012 kfree_skb(conn
->rx_skb
);
5013 conn
->rx_skb
= NULL
;
5015 l2cap_conn_unreliable(conn
, ECOMM
);
5018 /* Start fragment always begin with Basic L2CAP header */
5019 if (skb
->len
< L2CAP_HDR_SIZE
) {
5020 BT_ERR("Frame is too short (len %d)", skb
->len
);
5021 l2cap_conn_unreliable(conn
, ECOMM
);
5025 hdr
= (struct l2cap_hdr
*) skb
->data
;
5026 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
5028 if (len
== skb
->len
) {
5029 /* Complete frame received */
5030 l2cap_recv_frame(conn
, skb
);
5034 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
5036 if (skb
->len
> len
) {
5037 BT_ERR("Frame is too long (len %d, expected len %d)",
5039 l2cap_conn_unreliable(conn
, ECOMM
);
5043 /* Allocate skb for the complete frame (with header) */
5044 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
5048 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5050 conn
->rx_len
= len
- skb
->len
;
5052 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
5054 if (!conn
->rx_len
) {
5055 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
5056 l2cap_conn_unreliable(conn
, ECOMM
);
5060 if (skb
->len
> conn
->rx_len
) {
5061 BT_ERR("Fragment is too long (len %d, expected %d)",
5062 skb
->len
, conn
->rx_len
);
5063 kfree_skb(conn
->rx_skb
);
5064 conn
->rx_skb
= NULL
;
5066 l2cap_conn_unreliable(conn
, ECOMM
);
5070 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5072 conn
->rx_len
-= skb
->len
;
5074 if (!conn
->rx_len
) {
5075 /* Complete frame received */
5076 l2cap_recv_frame(conn
, conn
->rx_skb
);
5077 conn
->rx_skb
= NULL
;
5086 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
5088 struct l2cap_chan
*c
;
5090 read_lock(&chan_list_lock
);
5092 list_for_each_entry(c
, &chan_list
, global_l
) {
5093 struct sock
*sk
= c
->sk
;
5095 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5096 batostr(&bt_sk(sk
)->src
),
5097 batostr(&bt_sk(sk
)->dst
),
5098 c
->state
, __le16_to_cpu(c
->psm
),
5099 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
5100 c
->sec_level
, c
->mode
);
5103 read_unlock(&chan_list_lock
);
5108 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
5110 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
5113 static const struct file_operations l2cap_debugfs_fops
= {
5114 .open
= l2cap_debugfs_open
,
5116 .llseek
= seq_lseek
,
5117 .release
= single_release
,
/* Handle of the "l2cap" debugfs file, removed again in l2cap_exit(). */
static struct dentry *l2cap_debugfs;
5122 int __init
l2cap_init(void)
5126 err
= l2cap_init_sockets();
5131 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
5132 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
5134 BT_ERR("Failed to create L2CAP debug file");
5140 void l2cap_exit(void)
5142 debugfs_remove(l2cap_debugfs
);
5143 l2cap_cleanup_sockets();
5146 module_param(disable_ertm
, bool, 0644);
5147 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");