2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
43 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
44 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
46 static LIST_HEAD(chan_list
);
47 static DEFINE_RWLOCK(chan_list_lock
);
49 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
50 u8 code
, u8 ident
, u16 dlen
, void *data
);
51 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
53 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
54 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
55 struct l2cap_chan
*chan
, int err
);
57 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
58 struct sk_buff_head
*skbs
, u8 event
);
60 /* ---- L2CAP channels ---- */
62 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
66 list_for_each_entry(c
, &conn
->chan_l
, list
) {
73 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
77 list_for_each_entry(c
, &conn
->chan_l
, list
) {
84 /* Find channel with given SCID.
85 * Returns locked channel. */
86 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
90 mutex_lock(&conn
->chan_lock
);
91 c
= __l2cap_get_chan_by_scid(conn
, cid
);
94 mutex_unlock(&conn
->chan_lock
);
99 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
101 struct l2cap_chan
*c
;
103 list_for_each_entry(c
, &conn
->chan_l
, list
) {
104 if (c
->ident
== ident
)
110 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
112 struct l2cap_chan
*c
;
114 list_for_each_entry(c
, &chan_list
, global_l
) {
115 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
121 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
125 write_lock(&chan_list_lock
);
127 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
140 for (p
= 0x1001; p
< 0x1100; p
+= 2)
141 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
142 chan
->psm
= cpu_to_le16(p
);
143 chan
->sport
= cpu_to_le16(p
);
150 write_unlock(&chan_list_lock
);
154 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
156 write_lock(&chan_list_lock
);
160 write_unlock(&chan_list_lock
);
165 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
167 u16 cid
= L2CAP_CID_DYN_START
;
169 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
170 if (!__l2cap_get_chan_by_scid(conn
, cid
))
177 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
179 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
180 state_to_string(state
));
183 chan
->ops
->state_change(chan
, state
);
186 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
188 struct sock
*sk
= chan
->sk
;
191 __l2cap_state_change(chan
, state
);
195 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
197 struct sock
*sk
= chan
->sk
;
202 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
204 struct sock
*sk
= chan
->sk
;
207 __l2cap_chan_set_err(chan
, err
);
211 static void __set_retrans_timer(struct l2cap_chan
*chan
)
213 if (!delayed_work_pending(&chan
->monitor_timer
) &&
214 chan
->retrans_timeout
) {
215 l2cap_set_timer(chan
, &chan
->retrans_timer
,
216 msecs_to_jiffies(chan
->retrans_timeout
));
220 static void __set_monitor_timer(struct l2cap_chan
*chan
)
222 __clear_retrans_timer(chan
);
223 if (chan
->monitor_timeout
) {
224 l2cap_set_timer(chan
, &chan
->monitor_timer
,
225 msecs_to_jiffies(chan
->monitor_timeout
));
229 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
234 skb_queue_walk(head
, skb
) {
235 if (bt_cb(skb
)->control
.txseq
== seq
)
242 /* ---- L2CAP sequence number lists ---- */
244 /* For ERTM, ordered lists of sequence numbers must be tracked for
245 * SREJ requests that are received and for frames that are to be
246 * retransmitted. These seq_list functions implement a singly-linked
247 * list in an array, where membership in the list can also be checked
248 * in constant time. Items can also be added to the tail of the list
249 * and removed from the head in constant time, without further memory
253 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
255 size_t alloc_size
, i
;
257 /* Allocated size is a power of 2 to map sequence numbers
258 * (which may be up to 14 bits) in to a smaller array that is
259 * sized for the negotiated ERTM transmit windows.
261 alloc_size
= roundup_pow_of_two(size
);
263 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
267 seq_list
->mask
= alloc_size
- 1;
268 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
269 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
270 for (i
= 0; i
< alloc_size
; i
++)
271 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
276 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
278 kfree(seq_list
->list
);
281 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
284 /* Constant-time check for list membership */
285 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
288 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
290 u16 mask
= seq_list
->mask
;
292 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
293 /* In case someone tries to pop the head of an empty list */
294 return L2CAP_SEQ_LIST_CLEAR
;
295 } else if (seq_list
->head
== seq
) {
296 /* Head can be removed in constant time */
297 seq_list
->head
= seq_list
->list
[seq
& mask
];
298 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
300 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
301 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
302 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
305 /* Walk the list to find the sequence number */
306 u16 prev
= seq_list
->head
;
307 while (seq_list
->list
[prev
& mask
] != seq
) {
308 prev
= seq_list
->list
[prev
& mask
];
309 if (prev
== L2CAP_SEQ_LIST_TAIL
)
310 return L2CAP_SEQ_LIST_CLEAR
;
313 /* Unlink the number from the list and clear it */
314 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
315 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
316 if (seq_list
->tail
== seq
)
317 seq_list
->tail
= prev
;
322 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
324 /* Remove the head in constant time */
325 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
328 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
332 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
335 for (i
= 0; i
<= seq_list
->mask
; i
++)
336 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
338 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
339 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
342 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
344 u16 mask
= seq_list
->mask
;
346 /* All appends happen in constant time */
348 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
351 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
352 seq_list
->head
= seq
;
354 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
356 seq_list
->tail
= seq
;
357 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
/* Deferred-work handler for the channel timer: close the channel with an
 * error reason chosen from its current state.  Takes conn->chan_lock and
 * the channel lock; drops the timer's channel reference on exit. */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
390 struct l2cap_chan
*l2cap_chan_create(void)
392 struct l2cap_chan
*chan
;
394 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
398 mutex_init(&chan
->lock
);
400 write_lock(&chan_list_lock
);
401 list_add(&chan
->global_l
, &chan_list
);
402 write_unlock(&chan_list_lock
);
404 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
406 chan
->state
= BT_OPEN
;
408 atomic_set(&chan
->refcnt
, 1);
410 /* This flag is cleared in l2cap_chan_ready() */
411 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
413 BT_DBG("chan %p", chan
);
418 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
420 write_lock(&chan_list_lock
);
421 list_del(&chan
->global_l
);
422 write_unlock(&chan_list_lock
);
424 l2cap_chan_put(chan
);
427 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
429 chan
->fcs
= L2CAP_FCS_CRC16
;
430 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
431 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
432 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
433 chan
->sec_level
= BT_SECURITY_LOW
;
435 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
438 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
440 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
441 __le16_to_cpu(chan
->psm
), chan
->dcid
);
443 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
447 switch (chan
->chan_type
) {
448 case L2CAP_CHAN_CONN_ORIENTED
:
449 if (conn
->hcon
->type
== LE_LINK
) {
451 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
452 chan
->scid
= L2CAP_CID_LE_DATA
;
453 chan
->dcid
= L2CAP_CID_LE_DATA
;
455 /* Alloc CID for connection-oriented socket */
456 chan
->scid
= l2cap_alloc_cid(conn
);
457 chan
->omtu
= L2CAP_DEFAULT_MTU
;
461 case L2CAP_CHAN_CONN_LESS
:
462 /* Connectionless socket */
463 chan
->scid
= L2CAP_CID_CONN_LESS
;
464 chan
->dcid
= L2CAP_CID_CONN_LESS
;
465 chan
->omtu
= L2CAP_DEFAULT_MTU
;
468 case L2CAP_CHAN_CONN_FIX_A2MP
:
469 chan
->scid
= L2CAP_CID_A2MP
;
470 chan
->dcid
= L2CAP_CID_A2MP
;
471 chan
->omtu
= L2CAP_A2MP_DEFAULT_MTU
;
472 chan
->imtu
= L2CAP_A2MP_DEFAULT_MTU
;
476 /* Raw socket can send/recv signalling messages only */
477 chan
->scid
= L2CAP_CID_SIGNALING
;
478 chan
->dcid
= L2CAP_CID_SIGNALING
;
479 chan
->omtu
= L2CAP_DEFAULT_MTU
;
482 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
483 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
484 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
485 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
486 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
487 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
489 l2cap_chan_hold(chan
);
491 list_add(&chan
->list
, &conn
->chan_l
);
494 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
496 mutex_lock(&conn
->chan_lock
);
497 __l2cap_chan_add(conn
, chan
);
498 mutex_unlock(&conn
->chan_lock
);
501 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
503 struct l2cap_conn
*conn
= chan
->conn
;
505 __clear_chan_timer(chan
);
507 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
510 /* Delete from channel list */
511 list_del(&chan
->list
);
513 l2cap_chan_put(chan
);
516 hci_conn_put(conn
->hcon
);
519 if (chan
->ops
->teardown
)
520 chan
->ops
->teardown(chan
, err
);
522 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
526 case L2CAP_MODE_BASIC
:
529 case L2CAP_MODE_ERTM
:
530 __clear_retrans_timer(chan
);
531 __clear_monitor_timer(chan
);
532 __clear_ack_timer(chan
);
534 skb_queue_purge(&chan
->srej_q
);
536 l2cap_seq_list_free(&chan
->srej_list
);
537 l2cap_seq_list_free(&chan
->retrans_list
);
541 case L2CAP_MODE_STREAMING
:
542 skb_queue_purge(&chan
->tx_q
);
549 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
551 struct l2cap_conn
*conn
= chan
->conn
;
552 struct sock
*sk
= chan
->sk
;
554 BT_DBG("chan %p state %s sk %p", chan
,
555 state_to_string(chan
->state
), sk
);
557 switch (chan
->state
) {
559 if (chan
->ops
->teardown
)
560 chan
->ops
->teardown(chan
, 0);
565 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
566 conn
->hcon
->type
== ACL_LINK
) {
567 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
568 l2cap_send_disconn_req(conn
, chan
, reason
);
570 l2cap_chan_del(chan
, reason
);
574 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
575 conn
->hcon
->type
== ACL_LINK
) {
576 struct l2cap_conn_rsp rsp
;
579 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
))
580 result
= L2CAP_CR_SEC_BLOCK
;
582 result
= L2CAP_CR_BAD_PSM
;
583 l2cap_state_change(chan
, BT_DISCONN
);
585 rsp
.scid
= cpu_to_le16(chan
->dcid
);
586 rsp
.dcid
= cpu_to_le16(chan
->scid
);
587 rsp
.result
= cpu_to_le16(result
);
588 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
589 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
593 l2cap_chan_del(chan
, reason
);
598 l2cap_chan_del(chan
, reason
);
602 if (chan
->ops
->teardown
)
603 chan
->ops
->teardown(chan
, 0);
608 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
610 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
611 switch (chan
->sec_level
) {
612 case BT_SECURITY_HIGH
:
613 return HCI_AT_DEDICATED_BONDING_MITM
;
614 case BT_SECURITY_MEDIUM
:
615 return HCI_AT_DEDICATED_BONDING
;
617 return HCI_AT_NO_BONDING
;
619 } else if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_SDP
)) {
620 if (chan
->sec_level
== BT_SECURITY_LOW
)
621 chan
->sec_level
= BT_SECURITY_SDP
;
623 if (chan
->sec_level
== BT_SECURITY_HIGH
)
624 return HCI_AT_NO_BONDING_MITM
;
626 return HCI_AT_NO_BONDING
;
628 switch (chan
->sec_level
) {
629 case BT_SECURITY_HIGH
:
630 return HCI_AT_GENERAL_BONDING_MITM
;
631 case BT_SECURITY_MEDIUM
:
632 return HCI_AT_GENERAL_BONDING
;
634 return HCI_AT_NO_BONDING
;
639 /* Service level security */
640 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
642 struct l2cap_conn
*conn
= chan
->conn
;
645 auth_type
= l2cap_get_auth_type(chan
);
647 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
650 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
654 /* Get next available identificator.
655 * 1 - 128 are used by kernel.
656 * 129 - 199 are reserved.
657 * 200 - 254 are used by utilities like l2ping, etc.
660 spin_lock(&conn
->lock
);
662 if (++conn
->tx_ident
> 128)
667 spin_unlock(&conn
->lock
);
672 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
674 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
677 BT_DBG("code 0x%2.2x", code
);
682 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
683 flags
= ACL_START_NO_FLUSH
;
687 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
688 skb
->priority
= HCI_PRIO_MAX
;
690 hci_send_acl(conn
->hchan
, skb
, flags
);
693 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
695 struct hci_conn
*hcon
= chan
->conn
->hcon
;
698 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
701 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
702 lmp_no_flush_capable(hcon
->hdev
))
703 flags
= ACL_START_NO_FLUSH
;
707 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
708 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
711 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
713 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
714 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
716 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
719 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
720 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
727 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
728 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
735 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
737 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
738 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
740 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
743 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
744 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
751 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
752 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
759 static inline void __unpack_control(struct l2cap_chan
*chan
,
762 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
763 __unpack_extended_control(get_unaligned_le32(skb
->data
),
764 &bt_cb(skb
)->control
);
765 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
767 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
768 &bt_cb(skb
)->control
);
769 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
773 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
777 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
778 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
780 if (control
->sframe
) {
781 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
782 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
783 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
785 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
786 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
792 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
796 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
797 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
799 if (control
->sframe
) {
800 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
801 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
802 packed
|= L2CAP_CTRL_FRAME_TYPE
;
804 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
805 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
811 static inline void __pack_control(struct l2cap_chan
*chan
,
812 struct l2cap_ctrl
*control
,
815 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
816 put_unaligned_le32(__pack_extended_control(control
),
817 skb
->data
+ L2CAP_HDR_SIZE
);
819 put_unaligned_le16(__pack_enhanced_control(control
),
820 skb
->data
+ L2CAP_HDR_SIZE
);
824 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
828 struct l2cap_hdr
*lh
;
831 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
832 hlen
= L2CAP_EXT_HDR_SIZE
;
834 hlen
= L2CAP_ENH_HDR_SIZE
;
836 if (chan
->fcs
== L2CAP_FCS_CRC16
)
837 hlen
+= L2CAP_FCS_SIZE
;
839 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
842 return ERR_PTR(-ENOMEM
);
844 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
845 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
846 lh
->cid
= cpu_to_le16(chan
->dcid
);
848 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
849 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
851 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
853 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
854 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
855 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
858 skb
->priority
= HCI_PRIO_MAX
;
862 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
863 struct l2cap_ctrl
*control
)
868 BT_DBG("chan %p, control %p", chan
, control
);
870 if (!control
->sframe
)
873 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
877 if (control
->super
== L2CAP_SUPER_RR
)
878 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
879 else if (control
->super
== L2CAP_SUPER_RNR
)
880 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
882 if (control
->super
!= L2CAP_SUPER_SREJ
) {
883 chan
->last_acked_seq
= control
->reqseq
;
884 __clear_ack_timer(chan
);
887 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
888 control
->final
, control
->poll
, control
->super
);
890 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
891 control_field
= __pack_extended_control(control
);
893 control_field
= __pack_enhanced_control(control
);
895 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
897 l2cap_do_send(chan
, skb
);
900 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
902 struct l2cap_ctrl control
;
904 BT_DBG("chan %p, poll %d", chan
, poll
);
906 memset(&control
, 0, sizeof(control
));
910 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
911 control
.super
= L2CAP_SUPER_RNR
;
913 control
.super
= L2CAP_SUPER_RR
;
915 control
.reqseq
= chan
->buffer_seq
;
916 l2cap_send_sframe(chan
, &control
);
919 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
921 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
924 static void l2cap_send_conn_req(struct l2cap_chan
*chan
)
926 struct l2cap_conn
*conn
= chan
->conn
;
927 struct l2cap_conn_req req
;
929 req
.scid
= cpu_to_le16(chan
->scid
);
932 chan
->ident
= l2cap_get_ident(conn
);
934 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
936 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
939 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
941 /* This clears all conf flags, including CONF_NOT_COMPLETE */
942 chan
->conf_state
= 0;
943 __clear_chan_timer(chan
);
945 chan
->state
= BT_CONNECTED
;
947 if (chan
->ops
->ready
)
948 chan
->ops
->ready(chan
);
951 static void l2cap_do_start(struct l2cap_chan
*chan
)
953 struct l2cap_conn
*conn
= chan
->conn
;
955 if (conn
->hcon
->type
== LE_LINK
) {
956 l2cap_chan_ready(chan
);
960 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
961 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
964 if (l2cap_chan_check_security(chan
) &&
965 __l2cap_no_conn_pending(chan
))
966 l2cap_send_conn_req(chan
);
968 struct l2cap_info_req req
;
969 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
971 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
972 conn
->info_ident
= l2cap_get_ident(conn
);
974 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
976 l2cap_send_cmd(conn
, conn
->info_ident
,
977 L2CAP_INFO_REQ
, sizeof(req
), &req
);
981 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
983 u32 local_feat_mask
= l2cap_feat_mask
;
985 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
988 case L2CAP_MODE_ERTM
:
989 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
990 case L2CAP_MODE_STREAMING
:
991 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
997 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
999 struct sock
*sk
= chan
->sk
;
1000 struct l2cap_disconn_req req
;
1005 if (chan
->mode
== L2CAP_MODE_ERTM
) {
1006 __clear_retrans_timer(chan
);
1007 __clear_monitor_timer(chan
);
1008 __clear_ack_timer(chan
);
1011 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1012 __l2cap_state_change(chan
, BT_DISCONN
);
1016 req
.dcid
= cpu_to_le16(chan
->dcid
);
1017 req
.scid
= cpu_to_le16(chan
->scid
);
1018 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1019 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
1022 __l2cap_state_change(chan
, BT_DISCONN
);
1023 __l2cap_chan_set_err(chan
, err
);
1027 /* ---- L2CAP connections ---- */
1028 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1030 struct l2cap_chan
*chan
, *tmp
;
1032 BT_DBG("conn %p", conn
);
1034 mutex_lock(&conn
->chan_lock
);
1036 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1037 struct sock
*sk
= chan
->sk
;
1039 l2cap_chan_lock(chan
);
1041 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1042 l2cap_chan_unlock(chan
);
1046 if (chan
->state
== BT_CONNECT
) {
1047 if (!l2cap_chan_check_security(chan
) ||
1048 !__l2cap_no_conn_pending(chan
)) {
1049 l2cap_chan_unlock(chan
);
1053 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1054 && test_bit(CONF_STATE2_DEVICE
,
1055 &chan
->conf_state
)) {
1056 l2cap_chan_close(chan
, ECONNRESET
);
1057 l2cap_chan_unlock(chan
);
1061 l2cap_send_conn_req(chan
);
1063 } else if (chan
->state
== BT_CONNECT2
) {
1064 struct l2cap_conn_rsp rsp
;
1066 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1067 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1069 if (l2cap_chan_check_security(chan
)) {
1071 if (test_bit(BT_SK_DEFER_SETUP
,
1072 &bt_sk(sk
)->flags
)) {
1073 struct sock
*parent
= bt_sk(sk
)->parent
;
1074 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1075 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1077 parent
->sk_data_ready(parent
, 0);
1080 __l2cap_state_change(chan
, BT_CONFIG
);
1081 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
1082 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
1086 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1087 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1090 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1093 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1094 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1095 l2cap_chan_unlock(chan
);
1099 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1100 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1101 l2cap_build_conf_req(chan
, buf
), buf
);
1102 chan
->num_conf_req
++;
1105 l2cap_chan_unlock(chan
);
1108 mutex_unlock(&conn
->chan_lock
);
1111 /* Find socket with cid and source/destination bdaddr.
1112 * Returns closest match, locked.
1114 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1118 struct l2cap_chan
*c
, *c1
= NULL
;
1120 read_lock(&chan_list_lock
);
1122 list_for_each_entry(c
, &chan_list
, global_l
) {
1123 struct sock
*sk
= c
->sk
;
1125 if (state
&& c
->state
!= state
)
1128 if (c
->scid
== cid
) {
1129 int src_match
, dst_match
;
1130 int src_any
, dst_any
;
1133 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1134 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1135 if (src_match
&& dst_match
) {
1136 read_unlock(&chan_list_lock
);
1141 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1142 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1143 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1144 (src_any
&& dst_any
))
1149 read_unlock(&chan_list_lock
);
1154 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1156 struct sock
*parent
, *sk
;
1157 struct l2cap_chan
*chan
, *pchan
;
1161 /* Check if we have socket listening on cid */
1162 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1163 conn
->src
, conn
->dst
);
1171 chan
= pchan
->ops
->new_connection(pchan
);
1177 hci_conn_hold(conn
->hcon
);
1179 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1180 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1182 bt_accept_enqueue(parent
, sk
);
1184 l2cap_chan_add(conn
, chan
);
1186 l2cap_chan_ready(chan
);
1189 release_sock(parent
);
1192 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1194 struct l2cap_chan
*chan
;
1196 BT_DBG("conn %p", conn
);
1198 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1199 l2cap_le_conn_ready(conn
);
1201 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1202 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
1204 mutex_lock(&conn
->chan_lock
);
1206 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1208 l2cap_chan_lock(chan
);
1210 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1211 l2cap_chan_unlock(chan
);
1215 if (conn
->hcon
->type
== LE_LINK
) {
1216 if (smp_conn_security(conn
, chan
->sec_level
))
1217 l2cap_chan_ready(chan
);
1219 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1220 struct sock
*sk
= chan
->sk
;
1221 __clear_chan_timer(chan
);
1223 __l2cap_state_change(chan
, BT_CONNECTED
);
1224 sk
->sk_state_change(sk
);
1227 } else if (chan
->state
== BT_CONNECT
)
1228 l2cap_do_start(chan
);
1230 l2cap_chan_unlock(chan
);
1233 mutex_unlock(&conn
->chan_lock
);
1236 /* Notify sockets that we cannot guaranty reliability anymore */
1237 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1239 struct l2cap_chan
*chan
;
1241 BT_DBG("conn %p", conn
);
1243 mutex_lock(&conn
->chan_lock
);
1245 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1246 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1247 __l2cap_chan_set_err(chan
, err
);
1250 mutex_unlock(&conn
->chan_lock
);
1253 static void l2cap_info_timeout(struct work_struct
*work
)
1255 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1258 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1259 conn
->info_ident
= 0;
1261 l2cap_conn_start(conn
);
1264 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1266 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1267 struct l2cap_chan
*chan
, *l
;
1272 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1274 kfree_skb(conn
->rx_skb
);
1276 mutex_lock(&conn
->chan_lock
);
1279 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1280 l2cap_chan_hold(chan
);
1281 l2cap_chan_lock(chan
);
1283 l2cap_chan_del(chan
, err
);
1285 l2cap_chan_unlock(chan
);
1287 chan
->ops
->close(chan
);
1288 l2cap_chan_put(chan
);
1291 mutex_unlock(&conn
->chan_lock
);
1293 hci_chan_del(conn
->hchan
);
1295 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1296 cancel_delayed_work_sync(&conn
->info_timer
);
1298 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1299 cancel_delayed_work_sync(&conn
->security_timer
);
1300 smp_chan_destroy(conn
);
1303 hcon
->l2cap_data
= NULL
;
1307 static void security_timeout(struct work_struct
*work
)
1309 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1310 security_timer
.work
);
1312 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1315 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1317 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1318 struct hci_chan
*hchan
;
1323 hchan
= hci_chan_create(hcon
);
1327 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
1329 hci_chan_del(hchan
);
1333 hcon
->l2cap_data
= conn
;
1335 conn
->hchan
= hchan
;
1337 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1339 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
1340 conn
->mtu
= hcon
->hdev
->le_mtu
;
1342 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1344 conn
->src
= &hcon
->hdev
->bdaddr
;
1345 conn
->dst
= &hcon
->dst
;
1347 conn
->feat_mask
= 0;
1349 spin_lock_init(&conn
->lock
);
1350 mutex_init(&conn
->chan_lock
);
1352 INIT_LIST_HEAD(&conn
->chan_l
);
1354 if (hcon
->type
== LE_LINK
)
1355 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1357 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1359 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1364 /* ---- Socket interface ---- */
1366 /* Find socket with psm and source / destination bdaddr.
1367 * Returns closest match.
1369 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1373 struct l2cap_chan
*c
, *c1
= NULL
;
1375 read_lock(&chan_list_lock
);
1377 list_for_each_entry(c
, &chan_list
, global_l
) {
1378 struct sock
*sk
= c
->sk
;
1380 if (state
&& c
->state
!= state
)
1383 if (c
->psm
== psm
) {
1384 int src_match
, dst_match
;
1385 int src_any
, dst_any
;
1388 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1389 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1390 if (src_match
&& dst_match
) {
1391 read_unlock(&chan_list_lock
);
1396 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1397 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1398 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1399 (src_any
&& dst_any
))
1404 read_unlock(&chan_list_lock
);
1409 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1410 bdaddr_t
*dst
, u8 dst_type
)
1412 struct sock
*sk
= chan
->sk
;
1413 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1414 struct l2cap_conn
*conn
;
1415 struct hci_conn
*hcon
;
1416 struct hci_dev
*hdev
;
1420 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src
), batostr(dst
),
1421 dst_type
, __le16_to_cpu(chan
->psm
));
1423 hdev
= hci_get_route(dst
, src
);
1425 return -EHOSTUNREACH
;
1429 l2cap_chan_lock(chan
);
1431 /* PSM must be odd and lsb of upper byte must be 0 */
1432 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1433 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1438 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1443 switch (chan
->mode
) {
1444 case L2CAP_MODE_BASIC
:
1446 case L2CAP_MODE_ERTM
:
1447 case L2CAP_MODE_STREAMING
:
1456 switch (chan
->state
) {
1460 /* Already connecting */
1465 /* Already connected */
1479 /* Set destination address and psm */
1481 bacpy(&bt_sk(sk
)->dst
, dst
);
1487 auth_type
= l2cap_get_auth_type(chan
);
1489 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1490 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1491 chan
->sec_level
, auth_type
);
1493 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1494 chan
->sec_level
, auth_type
);
1497 err
= PTR_ERR(hcon
);
1501 conn
= l2cap_conn_add(hcon
, 0);
1508 if (hcon
->type
== LE_LINK
) {
1511 if (!list_empty(&conn
->chan_l
)) {
1520 /* Update source addr of the socket */
1521 bacpy(src
, conn
->src
);
1523 l2cap_chan_unlock(chan
);
1524 l2cap_chan_add(conn
, chan
);
1525 l2cap_chan_lock(chan
);
1527 l2cap_state_change(chan
, BT_CONNECT
);
1528 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1530 if (hcon
->state
== BT_CONNECTED
) {
1531 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1532 __clear_chan_timer(chan
);
1533 if (l2cap_chan_check_security(chan
))
1534 l2cap_state_change(chan
, BT_CONNECTED
);
1536 l2cap_do_start(chan
);
1542 l2cap_chan_unlock(chan
);
1543 hci_dev_unlock(hdev
);
1548 int __l2cap_wait_ack(struct sock
*sk
)
1550 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1551 DECLARE_WAITQUEUE(wait
, current
);
1555 add_wait_queue(sk_sleep(sk
), &wait
);
1556 set_current_state(TASK_INTERRUPTIBLE
);
1557 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1561 if (signal_pending(current
)) {
1562 err
= sock_intr_errno(timeo
);
1567 timeo
= schedule_timeout(timeo
);
1569 set_current_state(TASK_INTERRUPTIBLE
);
1571 err
= sock_error(sk
);
1575 set_current_state(TASK_RUNNING
);
1576 remove_wait_queue(sk_sleep(sk
), &wait
);
1580 static void l2cap_monitor_timeout(struct work_struct
*work
)
1582 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1583 monitor_timer
.work
);
1585 BT_DBG("chan %p", chan
);
1587 l2cap_chan_lock(chan
);
1590 l2cap_chan_unlock(chan
);
1591 l2cap_chan_put(chan
);
1595 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1597 l2cap_chan_unlock(chan
);
1598 l2cap_chan_put(chan
);
1601 static void l2cap_retrans_timeout(struct work_struct
*work
)
1603 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1604 retrans_timer
.work
);
1606 BT_DBG("chan %p", chan
);
1608 l2cap_chan_lock(chan
);
1611 l2cap_chan_unlock(chan
);
1612 l2cap_chan_put(chan
);
1616 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1617 l2cap_chan_unlock(chan
);
1618 l2cap_chan_put(chan
);
1621 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1622 struct sk_buff_head
*skbs
)
1624 struct sk_buff
*skb
;
1625 struct l2cap_ctrl
*control
;
1627 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1629 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1631 while (!skb_queue_empty(&chan
->tx_q
)) {
1633 skb
= skb_dequeue(&chan
->tx_q
);
1635 bt_cb(skb
)->control
.retries
= 1;
1636 control
= &bt_cb(skb
)->control
;
1638 control
->reqseq
= 0;
1639 control
->txseq
= chan
->next_tx_seq
;
1641 __pack_control(chan
, control
, skb
);
1643 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1644 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1645 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1648 l2cap_do_send(chan
, skb
);
1650 BT_DBG("Sent txseq %d", (int)control
->txseq
);
1652 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1653 chan
->frames_sent
++;
1657 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1659 struct sk_buff
*skb
, *tx_skb
;
1660 struct l2cap_ctrl
*control
;
1663 BT_DBG("chan %p", chan
);
1665 if (chan
->state
!= BT_CONNECTED
)
1668 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1671 while (chan
->tx_send_head
&&
1672 chan
->unacked_frames
< chan
->remote_tx_win
&&
1673 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1675 skb
= chan
->tx_send_head
;
1677 bt_cb(skb
)->control
.retries
= 1;
1678 control
= &bt_cb(skb
)->control
;
1680 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1683 control
->reqseq
= chan
->buffer_seq
;
1684 chan
->last_acked_seq
= chan
->buffer_seq
;
1685 control
->txseq
= chan
->next_tx_seq
;
1687 __pack_control(chan
, control
, skb
);
1689 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1690 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1691 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1694 /* Clone after data has been modified. Data is assumed to be
1695 read-only (for locking purposes) on cloned sk_buffs.
1697 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1702 __set_retrans_timer(chan
);
1704 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1705 chan
->unacked_frames
++;
1706 chan
->frames_sent
++;
1709 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1710 chan
->tx_send_head
= NULL
;
1712 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1714 l2cap_do_send(chan
, tx_skb
);
1715 BT_DBG("Sent txseq %d", (int)control
->txseq
);
1718 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent
,
1719 (int) chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1724 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1726 struct l2cap_ctrl control
;
1727 struct sk_buff
*skb
;
1728 struct sk_buff
*tx_skb
;
1731 BT_DBG("chan %p", chan
);
1733 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1736 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1737 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1739 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1741 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1746 bt_cb(skb
)->control
.retries
++;
1747 control
= bt_cb(skb
)->control
;
1749 if (chan
->max_tx
!= 0 &&
1750 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1751 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1752 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
1753 l2cap_seq_list_clear(&chan
->retrans_list
);
1757 control
.reqseq
= chan
->buffer_seq
;
1758 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1763 if (skb_cloned(skb
)) {
1764 /* Cloned sk_buffs are read-only, so we need a
1767 tx_skb
= skb_copy(skb
, GFP_ATOMIC
);
1769 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1773 l2cap_seq_list_clear(&chan
->retrans_list
);
1777 /* Update skb contents */
1778 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1779 put_unaligned_le32(__pack_extended_control(&control
),
1780 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1782 put_unaligned_le16(__pack_enhanced_control(&control
),
1783 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1786 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1787 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
1788 put_unaligned_le16(fcs
, skb_put(tx_skb
,
1792 l2cap_do_send(chan
, tx_skb
);
1794 BT_DBG("Resent txseq %d", control
.txseq
);
1796 chan
->last_acked_seq
= chan
->buffer_seq
;
1800 static void l2cap_retransmit(struct l2cap_chan
*chan
,
1801 struct l2cap_ctrl
*control
)
1803 BT_DBG("chan %p, control %p", chan
, control
);
1805 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
1806 l2cap_ertm_resend(chan
);
1809 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
1810 struct l2cap_ctrl
*control
)
1812 struct sk_buff
*skb
;
1814 BT_DBG("chan %p, control %p", chan
, control
);
1817 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
1819 l2cap_seq_list_clear(&chan
->retrans_list
);
1821 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1824 if (chan
->unacked_frames
) {
1825 skb_queue_walk(&chan
->tx_q
, skb
) {
1826 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
1827 skb
== chan
->tx_send_head
)
1831 skb_queue_walk_from(&chan
->tx_q
, skb
) {
1832 if (skb
== chan
->tx_send_head
)
1835 l2cap_seq_list_append(&chan
->retrans_list
,
1836 bt_cb(skb
)->control
.txseq
);
1839 l2cap_ertm_resend(chan
);
1843 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1845 struct l2cap_ctrl control
;
1846 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
1847 chan
->last_acked_seq
);
1850 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1851 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
1853 memset(&control
, 0, sizeof(control
));
1856 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
1857 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
1858 __clear_ack_timer(chan
);
1859 control
.super
= L2CAP_SUPER_RNR
;
1860 control
.reqseq
= chan
->buffer_seq
;
1861 l2cap_send_sframe(chan
, &control
);
1863 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
1864 l2cap_ertm_send(chan
);
1865 /* If any i-frames were sent, they included an ack */
1866 if (chan
->buffer_seq
== chan
->last_acked_seq
)
1870 /* Ack now if the tx window is 3/4ths full.
1871 * Calculate without mul or div
1873 threshold
= chan
->tx_win
;
1874 threshold
+= threshold
<< 1;
1877 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack
,
1880 if (frames_to_ack
>= threshold
) {
1881 __clear_ack_timer(chan
);
1882 control
.super
= L2CAP_SUPER_RR
;
1883 control
.reqseq
= chan
->buffer_seq
;
1884 l2cap_send_sframe(chan
, &control
);
1889 __set_ack_timer(chan
);
1893 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
1894 struct msghdr
*msg
, int len
,
1895 int count
, struct sk_buff
*skb
)
1897 struct l2cap_conn
*conn
= chan
->conn
;
1898 struct sk_buff
**frag
;
1901 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1907 /* Continuation fragments (no L2CAP header) */
1908 frag
= &skb_shinfo(skb
)->frag_list
;
1910 struct sk_buff
*tmp
;
1912 count
= min_t(unsigned int, conn
->mtu
, len
);
1914 tmp
= chan
->ops
->alloc_skb(chan
, count
,
1915 msg
->msg_flags
& MSG_DONTWAIT
);
1917 return PTR_ERR(tmp
);
1921 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1924 (*frag
)->priority
= skb
->priority
;
1929 skb
->len
+= (*frag
)->len
;
1930 skb
->data_len
+= (*frag
)->len
;
1932 frag
= &(*frag
)->next
;
1938 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
1939 struct msghdr
*msg
, size_t len
,
1942 struct l2cap_conn
*conn
= chan
->conn
;
1943 struct sk_buff
*skb
;
1944 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
1945 struct l2cap_hdr
*lh
;
1947 BT_DBG("chan %p len %d priority %u", chan
, (int)len
, priority
);
1949 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1951 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
1952 msg
->msg_flags
& MSG_DONTWAIT
);
1956 skb
->priority
= priority
;
1958 /* Create L2CAP header */
1959 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1960 lh
->cid
= cpu_to_le16(chan
->dcid
);
1961 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
1962 put_unaligned(chan
->psm
, skb_put(skb
, L2CAP_PSMLEN_SIZE
));
1964 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1965 if (unlikely(err
< 0)) {
1967 return ERR_PTR(err
);
1972 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
1973 struct msghdr
*msg
, size_t len
,
1976 struct l2cap_conn
*conn
= chan
->conn
;
1977 struct sk_buff
*skb
;
1979 struct l2cap_hdr
*lh
;
1981 BT_DBG("chan %p len %d", chan
, (int)len
);
1983 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
1985 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
1986 msg
->msg_flags
& MSG_DONTWAIT
);
1990 skb
->priority
= priority
;
1992 /* Create L2CAP header */
1993 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1994 lh
->cid
= cpu_to_le16(chan
->dcid
);
1995 lh
->len
= cpu_to_le16(len
);
1997 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1998 if (unlikely(err
< 0)) {
2000 return ERR_PTR(err
);
2005 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2006 struct msghdr
*msg
, size_t len
,
2009 struct l2cap_conn
*conn
= chan
->conn
;
2010 struct sk_buff
*skb
;
2011 int err
, count
, hlen
;
2012 struct l2cap_hdr
*lh
;
2014 BT_DBG("chan %p len %d", chan
, (int)len
);
2017 return ERR_PTR(-ENOTCONN
);
2019 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2020 hlen
= L2CAP_EXT_HDR_SIZE
;
2022 hlen
= L2CAP_ENH_HDR_SIZE
;
2025 hlen
+= L2CAP_SDULEN_SIZE
;
2027 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2028 hlen
+= L2CAP_FCS_SIZE
;
2030 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2032 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2033 msg
->msg_flags
& MSG_DONTWAIT
);
2037 /* Create L2CAP header */
2038 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2039 lh
->cid
= cpu_to_le16(chan
->dcid
);
2040 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2042 /* Control header is populated later */
2043 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2044 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2046 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2049 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2051 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2052 if (unlikely(err
< 0)) {
2054 return ERR_PTR(err
);
2057 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2058 bt_cb(skb
)->control
.retries
= 0;
2062 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2063 struct sk_buff_head
*seg_queue
,
2064 struct msghdr
*msg
, size_t len
)
2066 struct sk_buff
*skb
;
2072 BT_DBG("chan %p, msg %p, len %d", chan
, msg
, (int)len
);
2074 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2075 * so fragmented skbs are not used. The HCI layer's handling
2076 * of fragmented skbs is not compatible with ERTM's queueing.
2079 /* PDU size is derived from the HCI MTU */
2080 pdu_len
= chan
->conn
->mtu
;
2082 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2084 /* Adjust for largest possible L2CAP overhead. */
2086 pdu_len
-= L2CAP_FCS_SIZE
;
2088 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2089 pdu_len
-= L2CAP_EXT_HDR_SIZE
;
2091 pdu_len
-= L2CAP_ENH_HDR_SIZE
;
2093 /* Remote device may have requested smaller PDUs */
2094 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2096 if (len
<= pdu_len
) {
2097 sar
= L2CAP_SAR_UNSEGMENTED
;
2101 sar
= L2CAP_SAR_START
;
2103 pdu_len
-= L2CAP_SDULEN_SIZE
;
2107 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2110 __skb_queue_purge(seg_queue
);
2111 return PTR_ERR(skb
);
2114 bt_cb(skb
)->control
.sar
= sar
;
2115 __skb_queue_tail(seg_queue
, skb
);
2120 pdu_len
+= L2CAP_SDULEN_SIZE
;
2123 if (len
<= pdu_len
) {
2124 sar
= L2CAP_SAR_END
;
2127 sar
= L2CAP_SAR_CONTINUE
;
2134 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2137 struct sk_buff
*skb
;
2139 struct sk_buff_head seg_queue
;
2141 /* Connectionless channel */
2142 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2143 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2145 return PTR_ERR(skb
);
2147 l2cap_do_send(chan
, skb
);
2151 switch (chan
->mode
) {
2152 case L2CAP_MODE_BASIC
:
2153 /* Check outgoing MTU */
2154 if (len
> chan
->omtu
)
2157 /* Create a basic PDU */
2158 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2160 return PTR_ERR(skb
);
2162 l2cap_do_send(chan
, skb
);
2166 case L2CAP_MODE_ERTM
:
2167 case L2CAP_MODE_STREAMING
:
2168 /* Check outgoing MTU */
2169 if (len
> chan
->omtu
) {
2174 __skb_queue_head_init(&seg_queue
);
2176 /* Do segmentation before calling in to the state machine,
2177 * since it's possible to block while waiting for memory
2180 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2182 /* The channel could have been closed while segmenting,
2183 * check that it is still connected.
2185 if (chan
->state
!= BT_CONNECTED
) {
2186 __skb_queue_purge(&seg_queue
);
2193 if (chan
->mode
== L2CAP_MODE_ERTM
)
2194 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2196 l2cap_streaming_send(chan
, &seg_queue
);
2200 /* If the skbs were not queued for sending, they'll still be in
2201 * seg_queue and need to be purged.
2203 __skb_queue_purge(&seg_queue
);
2207 BT_DBG("bad state %1.1x", chan
->mode
);
2214 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2216 struct l2cap_ctrl control
;
2219 BT_DBG("chan %p, txseq %d", chan
, txseq
);
2221 memset(&control
, 0, sizeof(control
));
2223 control
.super
= L2CAP_SUPER_SREJ
;
2225 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2226 seq
= __next_seq(chan
, seq
)) {
2227 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2228 control
.reqseq
= seq
;
2229 l2cap_send_sframe(chan
, &control
);
2230 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2234 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2237 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2239 struct l2cap_ctrl control
;
2241 BT_DBG("chan %p", chan
);
2243 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2246 memset(&control
, 0, sizeof(control
));
2248 control
.super
= L2CAP_SUPER_SREJ
;
2249 control
.reqseq
= chan
->srej_list
.tail
;
2250 l2cap_send_sframe(chan
, &control
);
2253 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2255 struct l2cap_ctrl control
;
2259 BT_DBG("chan %p, txseq %d", chan
, txseq
);
2261 memset(&control
, 0, sizeof(control
));
2263 control
.super
= L2CAP_SUPER_SREJ
;
2265 /* Capture initial list head to allow only one pass through the list. */
2266 initial_head
= chan
->srej_list
.head
;
2269 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2270 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2273 control
.reqseq
= seq
;
2274 l2cap_send_sframe(chan
, &control
);
2275 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2276 } while (chan
->srej_list
.head
!= initial_head
);
2279 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2281 struct sk_buff
*acked_skb
;
2284 BT_DBG("chan %p, reqseq %d", chan
, reqseq
);
2286 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2289 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2290 chan
->expected_ack_seq
, chan
->unacked_frames
);
2292 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2293 ackseq
= __next_seq(chan
, ackseq
)) {
2295 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2297 skb_unlink(acked_skb
, &chan
->tx_q
);
2298 kfree_skb(acked_skb
);
2299 chan
->unacked_frames
--;
2303 chan
->expected_ack_seq
= reqseq
;
2305 if (chan
->unacked_frames
== 0)
2306 __clear_retrans_timer(chan
);
2308 BT_DBG("unacked_frames %d", (int) chan
->unacked_frames
);
2311 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2313 BT_DBG("chan %p", chan
);
2315 chan
->expected_tx_seq
= chan
->buffer_seq
;
2316 l2cap_seq_list_clear(&chan
->srej_list
);
2317 skb_queue_purge(&chan
->srej_q
);
2318 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2321 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2322 struct l2cap_ctrl
*control
,
2323 struct sk_buff_head
*skbs
, u8 event
)
2325 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2329 case L2CAP_EV_DATA_REQUEST
:
2330 if (chan
->tx_send_head
== NULL
)
2331 chan
->tx_send_head
= skb_peek(skbs
);
2333 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2334 l2cap_ertm_send(chan
);
2336 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2337 BT_DBG("Enter LOCAL_BUSY");
2338 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2340 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2341 /* The SREJ_SENT state must be aborted if we are to
2342 * enter the LOCAL_BUSY state.
2344 l2cap_abort_rx_srej_sent(chan
);
2347 l2cap_send_ack(chan
);
2350 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2351 BT_DBG("Exit LOCAL_BUSY");
2352 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2354 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2355 struct l2cap_ctrl local_control
;
2357 memset(&local_control
, 0, sizeof(local_control
));
2358 local_control
.sframe
= 1;
2359 local_control
.super
= L2CAP_SUPER_RR
;
2360 local_control
.poll
= 1;
2361 local_control
.reqseq
= chan
->buffer_seq
;
2362 l2cap_send_sframe(chan
, &local_control
);
2364 chan
->retry_count
= 1;
2365 __set_monitor_timer(chan
);
2366 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2369 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2370 l2cap_process_reqseq(chan
, control
->reqseq
);
2372 case L2CAP_EV_EXPLICIT_POLL
:
2373 l2cap_send_rr_or_rnr(chan
, 1);
2374 chan
->retry_count
= 1;
2375 __set_monitor_timer(chan
);
2376 __clear_ack_timer(chan
);
2377 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2379 case L2CAP_EV_RETRANS_TO
:
2380 l2cap_send_rr_or_rnr(chan
, 1);
2381 chan
->retry_count
= 1;
2382 __set_monitor_timer(chan
);
2383 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2385 case L2CAP_EV_RECV_FBIT
:
2386 /* Nothing to process */
2393 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2394 struct l2cap_ctrl
*control
,
2395 struct sk_buff_head
*skbs
, u8 event
)
2397 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2401 case L2CAP_EV_DATA_REQUEST
:
2402 if (chan
->tx_send_head
== NULL
)
2403 chan
->tx_send_head
= skb_peek(skbs
);
2404 /* Queue data, but don't send. */
2405 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2407 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2408 BT_DBG("Enter LOCAL_BUSY");
2409 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2411 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2412 /* The SREJ_SENT state must be aborted if we are to
2413 * enter the LOCAL_BUSY state.
2415 l2cap_abort_rx_srej_sent(chan
);
2418 l2cap_send_ack(chan
);
2421 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2422 BT_DBG("Exit LOCAL_BUSY");
2423 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2425 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2426 struct l2cap_ctrl local_control
;
2427 memset(&local_control
, 0, sizeof(local_control
));
2428 local_control
.sframe
= 1;
2429 local_control
.super
= L2CAP_SUPER_RR
;
2430 local_control
.poll
= 1;
2431 local_control
.reqseq
= chan
->buffer_seq
;
2432 l2cap_send_sframe(chan
, &local_control
);
2434 chan
->retry_count
= 1;
2435 __set_monitor_timer(chan
);
2436 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2439 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2440 l2cap_process_reqseq(chan
, control
->reqseq
);
2444 case L2CAP_EV_RECV_FBIT
:
2445 if (control
&& control
->final
) {
2446 __clear_monitor_timer(chan
);
2447 if (chan
->unacked_frames
> 0)
2448 __set_retrans_timer(chan
);
2449 chan
->retry_count
= 0;
2450 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2451 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2454 case L2CAP_EV_EXPLICIT_POLL
:
2457 case L2CAP_EV_MONITOR_TO
:
2458 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2459 l2cap_send_rr_or_rnr(chan
, 1);
2460 __set_monitor_timer(chan
);
2461 chan
->retry_count
++;
2463 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
2471 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2472 struct sk_buff_head
*skbs
, u8 event
)
2474 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2475 chan
, control
, skbs
, event
, chan
->tx_state
);
2477 switch (chan
->tx_state
) {
2478 case L2CAP_TX_STATE_XMIT
:
2479 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2481 case L2CAP_TX_STATE_WAIT_F
:
2482 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2490 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2491 struct l2cap_ctrl
*control
)
2493 BT_DBG("chan %p, control %p", chan
, control
);
2494 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2497 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2498 struct l2cap_ctrl
*control
)
2500 BT_DBG("chan %p, control %p", chan
, control
);
2501 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2504 /* Copy frame to all raw sockets on that connection */
2505 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2507 struct sk_buff
*nskb
;
2508 struct l2cap_chan
*chan
;
2510 BT_DBG("conn %p", conn
);
2512 mutex_lock(&conn
->chan_lock
);
2514 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2515 struct sock
*sk
= chan
->sk
;
2516 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2519 /* Don't send frame to the socket it came from */
2522 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2526 if (chan
->ops
->recv(chan
, nskb
))
2530 mutex_unlock(&conn
->chan_lock
);
2533 /* ---- L2CAP signalling commands ---- */
2534 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2535 u8 code
, u8 ident
, u16 dlen
, void *data
)
2537 struct sk_buff
*skb
, **frag
;
2538 struct l2cap_cmd_hdr
*cmd
;
2539 struct l2cap_hdr
*lh
;
2542 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2543 conn
, code
, ident
, dlen
);
2545 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2546 count
= min_t(unsigned int, conn
->mtu
, len
);
2548 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2552 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2553 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2555 if (conn
->hcon
->type
== LE_LINK
)
2556 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2558 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_SIGNALING
);
2560 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2563 cmd
->len
= cpu_to_le16(dlen
);
2566 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2567 memcpy(skb_put(skb
, count
), data
, count
);
2573 /* Continuation fragments (no L2CAP header) */
2574 frag
= &skb_shinfo(skb
)->frag_list
;
2576 count
= min_t(unsigned int, conn
->mtu
, len
);
2578 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2582 memcpy(skb_put(*frag
, count
), data
, count
);
2587 frag
= &(*frag
)->next
;
2597 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2599 struct l2cap_conf_opt
*opt
= *ptr
;
2602 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2610 *val
= *((u8
*) opt
->val
);
2614 *val
= get_unaligned_le16(opt
->val
);
2618 *val
= get_unaligned_le32(opt
->val
);
2622 *val
= (unsigned long) opt
->val
;
2626 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2630 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2632 struct l2cap_conf_opt
*opt
= *ptr
;
2634 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2641 *((u8
*) opt
->val
) = val
;
2645 put_unaligned_le16(val
, opt
->val
);
2649 put_unaligned_le32(val
, opt
->val
);
2653 memcpy(opt
->val
, (void *) val
, len
);
2657 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2660 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2662 struct l2cap_conf_efs efs
;
2664 switch (chan
->mode
) {
2665 case L2CAP_MODE_ERTM
:
2666 efs
.id
= chan
->local_id
;
2667 efs
.stype
= chan
->local_stype
;
2668 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2669 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2670 efs
.acc_lat
= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2671 efs
.flush_to
= __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO
);
2674 case L2CAP_MODE_STREAMING
:
2676 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2677 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2678 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2687 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
2688 (unsigned long) &efs
);
2691 static void l2cap_ack_timeout(struct work_struct
*work
)
2693 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2697 BT_DBG("chan %p", chan
);
2699 l2cap_chan_lock(chan
);
2701 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2702 chan
->last_acked_seq
);
2705 l2cap_send_rr_or_rnr(chan
, 0);
2707 l2cap_chan_unlock(chan
);
2708 l2cap_chan_put(chan
);
2711 int l2cap_ertm_init(struct l2cap_chan
*chan
)
2715 chan
->next_tx_seq
= 0;
2716 chan
->expected_tx_seq
= 0;
2717 chan
->expected_ack_seq
= 0;
2718 chan
->unacked_frames
= 0;
2719 chan
->buffer_seq
= 0;
2720 chan
->frames_sent
= 0;
2721 chan
->last_acked_seq
= 0;
2723 chan
->sdu_last_frag
= NULL
;
2726 skb_queue_head_init(&chan
->tx_q
);
2728 if (chan
->mode
!= L2CAP_MODE_ERTM
)
2731 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2732 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2734 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
2735 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
2736 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2738 skb_queue_head_init(&chan
->srej_q
);
2740 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
2744 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
2746 l2cap_seq_list_free(&chan
->srej_list
);
2751 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2754 case L2CAP_MODE_STREAMING
:
2755 case L2CAP_MODE_ERTM
:
2756 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2760 return L2CAP_MODE_BASIC
;
2764 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
2766 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
2769 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
2771 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
2774 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
2776 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
2777 __l2cap_ews_supported(chan
)) {
2778 /* use extended control field */
2779 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2780 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2782 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
2783 L2CAP_DEFAULT_TX_WINDOW
);
2784 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2788 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2790 struct l2cap_conf_req
*req
= data
;
2791 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2792 void *ptr
= req
->data
;
2795 BT_DBG("chan %p", chan
);
2797 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2800 switch (chan
->mode
) {
2801 case L2CAP_MODE_STREAMING
:
2802 case L2CAP_MODE_ERTM
:
2803 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2806 if (__l2cap_efs_supported(chan
))
2807 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2811 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2816 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2817 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2819 switch (chan
->mode
) {
2820 case L2CAP_MODE_BASIC
:
2821 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2822 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2825 rfc
.mode
= L2CAP_MODE_BASIC
;
2827 rfc
.max_transmit
= 0;
2828 rfc
.retrans_timeout
= 0;
2829 rfc
.monitor_timeout
= 0;
2830 rfc
.max_pdu_size
= 0;
2832 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2833 (unsigned long) &rfc
);
2836 case L2CAP_MODE_ERTM
:
2837 rfc
.mode
= L2CAP_MODE_ERTM
;
2838 rfc
.max_transmit
= chan
->max_tx
;
2839 rfc
.retrans_timeout
= 0;
2840 rfc
.monitor_timeout
= 0;
2842 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2843 L2CAP_EXT_HDR_SIZE
-
2846 rfc
.max_pdu_size
= cpu_to_le16(size
);
2848 l2cap_txwin_setup(chan
);
2850 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2851 L2CAP_DEFAULT_TX_WINDOW
);
2853 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2854 (unsigned long) &rfc
);
2856 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2857 l2cap_add_opt_efs(&ptr
, chan
);
2859 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2862 if (chan
->fcs
== L2CAP_FCS_NONE
||
2863 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2864 chan
->fcs
= L2CAP_FCS_NONE
;
2865 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2868 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2869 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2873 case L2CAP_MODE_STREAMING
:
2874 l2cap_txwin_setup(chan
);
2875 rfc
.mode
= L2CAP_MODE_STREAMING
;
2877 rfc
.max_transmit
= 0;
2878 rfc
.retrans_timeout
= 0;
2879 rfc
.monitor_timeout
= 0;
2881 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2882 L2CAP_EXT_HDR_SIZE
-
2885 rfc
.max_pdu_size
= cpu_to_le16(size
);
2887 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2888 (unsigned long) &rfc
);
2890 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2891 l2cap_add_opt_efs(&ptr
, chan
);
2893 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2896 if (chan
->fcs
== L2CAP_FCS_NONE
||
2897 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2898 chan
->fcs
= L2CAP_FCS_NONE
;
2899 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2904 req
->dcid
= cpu_to_le16(chan
->dcid
);
2905 req
->flags
= __constant_cpu_to_le16(0);
2910 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
2912 struct l2cap_conf_rsp
*rsp
= data
;
2913 void *ptr
= rsp
->data
;
2914 void *req
= chan
->conf_req
;
2915 int len
= chan
->conf_len
;
2916 int type
, hint
, olen
;
2918 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2919 struct l2cap_conf_efs efs
;
2921 u16 mtu
= L2CAP_DEFAULT_MTU
;
2922 u16 result
= L2CAP_CONF_SUCCESS
;
2925 BT_DBG("chan %p", chan
);
2927 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2928 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2930 hint
= type
& L2CAP_CONF_HINT
;
2931 type
&= L2CAP_CONF_MASK
;
2934 case L2CAP_CONF_MTU
:
2938 case L2CAP_CONF_FLUSH_TO
:
2939 chan
->flush_to
= val
;
2942 case L2CAP_CONF_QOS
:
2945 case L2CAP_CONF_RFC
:
2946 if (olen
== sizeof(rfc
))
2947 memcpy(&rfc
, (void *) val
, olen
);
2950 case L2CAP_CONF_FCS
:
2951 if (val
== L2CAP_FCS_NONE
)
2952 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
2955 case L2CAP_CONF_EFS
:
2957 if (olen
== sizeof(efs
))
2958 memcpy(&efs
, (void *) val
, olen
);
2961 case L2CAP_CONF_EWS
:
2963 return -ECONNREFUSED
;
2965 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2966 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
2967 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2968 chan
->remote_tx_win
= val
;
2975 result
= L2CAP_CONF_UNKNOWN
;
2976 *((u8
*) ptr
++) = type
;
2981 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
2984 switch (chan
->mode
) {
2985 case L2CAP_MODE_STREAMING
:
2986 case L2CAP_MODE_ERTM
:
2987 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
2988 chan
->mode
= l2cap_select_mode(rfc
.mode
,
2989 chan
->conn
->feat_mask
);
2994 if (__l2cap_efs_supported(chan
))
2995 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2997 return -ECONNREFUSED
;
3000 if (chan
->mode
!= rfc
.mode
)
3001 return -ECONNREFUSED
;
3007 if (chan
->mode
!= rfc
.mode
) {
3008 result
= L2CAP_CONF_UNACCEPT
;
3009 rfc
.mode
= chan
->mode
;
3011 if (chan
->num_conf_rsp
== 1)
3012 return -ECONNREFUSED
;
3014 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3015 sizeof(rfc
), (unsigned long) &rfc
);
3018 if (result
== L2CAP_CONF_SUCCESS
) {
3019 /* Configure output options and let the other side know
3020 * which ones we don't like. */
3022 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
3023 result
= L2CAP_CONF_UNACCEPT
;
3026 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
3028 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
3031 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3032 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3033 efs
.stype
!= chan
->local_stype
) {
3035 result
= L2CAP_CONF_UNACCEPT
;
3037 if (chan
->num_conf_req
>= 1)
3038 return -ECONNREFUSED
;
3040 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3042 (unsigned long) &efs
);
3044 /* Send PENDING Conf Rsp */
3045 result
= L2CAP_CONF_PENDING
;
3046 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3051 case L2CAP_MODE_BASIC
:
3052 chan
->fcs
= L2CAP_FCS_NONE
;
3053 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3056 case L2CAP_MODE_ERTM
:
3057 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
3058 chan
->remote_tx_win
= rfc
.txwin_size
;
3060 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
3062 chan
->remote_max_tx
= rfc
.max_transmit
;
3064 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3066 L2CAP_EXT_HDR_SIZE
-
3069 rfc
.max_pdu_size
= cpu_to_le16(size
);
3070 chan
->remote_mps
= size
;
3072 rfc
.retrans_timeout
=
3073 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3074 rfc
.monitor_timeout
=
3075 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3077 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3079 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3080 sizeof(rfc
), (unsigned long) &rfc
);
3082 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3083 chan
->remote_id
= efs
.id
;
3084 chan
->remote_stype
= efs
.stype
;
3085 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3086 chan
->remote_flush_to
=
3087 le32_to_cpu(efs
.flush_to
);
3088 chan
->remote_acc_lat
=
3089 le32_to_cpu(efs
.acc_lat
);
3090 chan
->remote_sdu_itime
=
3091 le32_to_cpu(efs
.sdu_itime
);
3092 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3093 sizeof(efs
), (unsigned long) &efs
);
3097 case L2CAP_MODE_STREAMING
:
3098 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3100 L2CAP_EXT_HDR_SIZE
-
3103 rfc
.max_pdu_size
= cpu_to_le16(size
);
3104 chan
->remote_mps
= size
;
3106 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3108 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3109 sizeof(rfc
), (unsigned long) &rfc
);
3114 result
= L2CAP_CONF_UNACCEPT
;
3116 memset(&rfc
, 0, sizeof(rfc
));
3117 rfc
.mode
= chan
->mode
;
3120 if (result
== L2CAP_CONF_SUCCESS
)
3121 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3123 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3124 rsp
->result
= cpu_to_le16(result
);
3125 rsp
->flags
= __constant_cpu_to_le16(0);
3130 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
3132 struct l2cap_conf_req
*req
= data
;
3133 void *ptr
= req
->data
;
3136 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3137 struct l2cap_conf_efs efs
;
3139 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3141 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3142 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3145 case L2CAP_CONF_MTU
:
3146 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3147 *result
= L2CAP_CONF_UNACCEPT
;
3148 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3151 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3154 case L2CAP_CONF_FLUSH_TO
:
3155 chan
->flush_to
= val
;
3156 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3160 case L2CAP_CONF_RFC
:
3161 if (olen
== sizeof(rfc
))
3162 memcpy(&rfc
, (void *)val
, olen
);
3164 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3165 rfc
.mode
!= chan
->mode
)
3166 return -ECONNREFUSED
;
3170 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3171 sizeof(rfc
), (unsigned long) &rfc
);
3174 case L2CAP_CONF_EWS
:
3175 chan
->tx_win
= min_t(u16
, val
,
3176 L2CAP_DEFAULT_EXT_WINDOW
);
3177 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3181 case L2CAP_CONF_EFS
:
3182 if (olen
== sizeof(efs
))
3183 memcpy(&efs
, (void *)val
, olen
);
3185 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3186 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3187 efs
.stype
!= chan
->local_stype
)
3188 return -ECONNREFUSED
;
3190 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3191 sizeof(efs
), (unsigned long) &efs
);
3196 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3197 return -ECONNREFUSED
;
3199 chan
->mode
= rfc
.mode
;
3201 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3203 case L2CAP_MODE_ERTM
:
3204 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3205 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3206 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3208 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3209 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3210 chan
->local_sdu_itime
=
3211 le32_to_cpu(efs
.sdu_itime
);
3212 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3213 chan
->local_flush_to
=
3214 le32_to_cpu(efs
.flush_to
);
3218 case L2CAP_MODE_STREAMING
:
3219 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3223 req
->dcid
= cpu_to_le16(chan
->dcid
);
3224 req
->flags
= __constant_cpu_to_le16(0);
3229 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
3231 struct l2cap_conf_rsp
*rsp
= data
;
3232 void *ptr
= rsp
->data
;
3234 BT_DBG("chan %p", chan
);
3236 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3237 rsp
->result
= cpu_to_le16(result
);
3238 rsp
->flags
= cpu_to_le16(flags
);
3243 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3245 struct l2cap_conn_rsp rsp
;
3246 struct l2cap_conn
*conn
= chan
->conn
;
3249 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3250 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3251 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3252 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3253 l2cap_send_cmd(conn
, chan
->ident
,
3254 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3256 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3259 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3260 l2cap_build_conf_req(chan
, buf
), buf
);
3261 chan
->num_conf_req
++;
3264 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3268 struct l2cap_conf_rfc rfc
;
3270 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3272 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3275 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3276 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3279 case L2CAP_CONF_RFC
:
3280 if (olen
== sizeof(rfc
))
3281 memcpy(&rfc
, (void *)val
, olen
);
3286 /* Use sane default values in case a misbehaving remote device
3287 * did not send an RFC option.
3289 rfc
.mode
= chan
->mode
;
3290 rfc
.retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3291 rfc
.monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3292 rfc
.max_pdu_size
= cpu_to_le16(chan
->imtu
);
3294 BT_ERR("Expected RFC option was not found, using defaults");
3298 case L2CAP_MODE_ERTM
:
3299 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3300 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3301 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3303 case L2CAP_MODE_STREAMING
:
3304 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3308 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3310 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3312 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3315 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3316 cmd
->ident
== conn
->info_ident
) {
3317 cancel_delayed_work(&conn
->info_timer
);
3319 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3320 conn
->info_ident
= 0;
3322 l2cap_conn_start(conn
);
3328 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3330 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3331 struct l2cap_conn_rsp rsp
;
3332 struct l2cap_chan
*chan
= NULL
, *pchan
;
3333 struct sock
*parent
, *sk
= NULL
;
3334 int result
, status
= L2CAP_CS_NO_INFO
;
3336 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3337 __le16 psm
= req
->psm
;
3339 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3341 /* Check if we have socket listening on psm */
3342 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
3344 result
= L2CAP_CR_BAD_PSM
;
3350 mutex_lock(&conn
->chan_lock
);
3353 /* Check if the ACL is secure enough (if not SDP) */
3354 if (psm
!= __constant_cpu_to_le16(L2CAP_PSM_SDP
) &&
3355 !hci_conn_check_link_mode(conn
->hcon
)) {
3356 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3357 result
= L2CAP_CR_SEC_BLOCK
;
3361 result
= L2CAP_CR_NO_MEM
;
3363 /* Check if we already have channel with that dcid */
3364 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3367 chan
= pchan
->ops
->new_connection(pchan
);
3373 hci_conn_hold(conn
->hcon
);
3375 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3376 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3380 bt_accept_enqueue(parent
, sk
);
3382 __l2cap_chan_add(conn
, chan
);
3386 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3388 chan
->ident
= cmd
->ident
;
3390 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3391 if (l2cap_chan_check_security(chan
)) {
3392 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
)) {
3393 __l2cap_state_change(chan
, BT_CONNECT2
);
3394 result
= L2CAP_CR_PEND
;
3395 status
= L2CAP_CS_AUTHOR_PEND
;
3396 parent
->sk_data_ready(parent
, 0);
3398 __l2cap_state_change(chan
, BT_CONFIG
);
3399 result
= L2CAP_CR_SUCCESS
;
3400 status
= L2CAP_CS_NO_INFO
;
3403 __l2cap_state_change(chan
, BT_CONNECT2
);
3404 result
= L2CAP_CR_PEND
;
3405 status
= L2CAP_CS_AUTHEN_PEND
;
3408 __l2cap_state_change(chan
, BT_CONNECT2
);
3409 result
= L2CAP_CR_PEND
;
3410 status
= L2CAP_CS_NO_INFO
;
3414 release_sock(parent
);
3415 mutex_unlock(&conn
->chan_lock
);
3418 rsp
.scid
= cpu_to_le16(scid
);
3419 rsp
.dcid
= cpu_to_le16(dcid
);
3420 rsp
.result
= cpu_to_le16(result
);
3421 rsp
.status
= cpu_to_le16(status
);
3422 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3424 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3425 struct l2cap_info_req info
;
3426 info
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3428 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3429 conn
->info_ident
= l2cap_get_ident(conn
);
3431 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3433 l2cap_send_cmd(conn
, conn
->info_ident
,
3434 L2CAP_INFO_REQ
, sizeof(info
), &info
);
3437 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3438 result
== L2CAP_CR_SUCCESS
) {
3440 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3441 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3442 l2cap_build_conf_req(chan
, buf
), buf
);
3443 chan
->num_conf_req
++;
3449 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3451 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3452 u16 scid
, dcid
, result
, status
;
3453 struct l2cap_chan
*chan
;
3457 scid
= __le16_to_cpu(rsp
->scid
);
3458 dcid
= __le16_to_cpu(rsp
->dcid
);
3459 result
= __le16_to_cpu(rsp
->result
);
3460 status
= __le16_to_cpu(rsp
->status
);
3462 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3463 dcid
, scid
, result
, status
);
3465 mutex_lock(&conn
->chan_lock
);
3468 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3474 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3483 l2cap_chan_lock(chan
);
3486 case L2CAP_CR_SUCCESS
:
3487 l2cap_state_change(chan
, BT_CONFIG
);
3490 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3492 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3495 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3496 l2cap_build_conf_req(chan
, req
), req
);
3497 chan
->num_conf_req
++;
3501 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3505 l2cap_chan_del(chan
, ECONNREFUSED
);
3509 l2cap_chan_unlock(chan
);
3512 mutex_unlock(&conn
->chan_lock
);
3517 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3519 /* FCS is enabled only in ERTM or streaming mode, if one or both
3522 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3523 chan
->fcs
= L2CAP_FCS_NONE
;
3524 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3525 chan
->fcs
= L2CAP_FCS_CRC16
;
3528 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3530 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3533 struct l2cap_chan
*chan
;
3536 dcid
= __le16_to_cpu(req
->dcid
);
3537 flags
= __le16_to_cpu(req
->flags
);
3539 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3541 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3545 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3546 struct l2cap_cmd_rej_cid rej
;
3548 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3549 rej
.scid
= cpu_to_le16(chan
->scid
);
3550 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3552 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3557 /* Reject if config buffer is too small. */
3558 len
= cmd_len
- sizeof(*req
);
3559 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3560 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3561 l2cap_build_conf_rsp(chan
, rsp
,
3562 L2CAP_CONF_REJECT
, flags
), rsp
);
3567 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3568 chan
->conf_len
+= len
;
3570 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
3571 /* Incomplete config. Send empty response. */
3572 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3573 l2cap_build_conf_rsp(chan
, rsp
,
3574 L2CAP_CONF_SUCCESS
, flags
), rsp
);
3578 /* Complete config. */
3579 len
= l2cap_parse_conf_req(chan
, rsp
);
3581 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3585 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3586 chan
->num_conf_rsp
++;
3588 /* Reset config buffer. */
3591 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3594 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3595 set_default_fcs(chan
);
3597 if (chan
->mode
== L2CAP_MODE_ERTM
||
3598 chan
->mode
== L2CAP_MODE_STREAMING
)
3599 err
= l2cap_ertm_init(chan
);
3602 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3604 l2cap_chan_ready(chan
);
3609 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3611 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3612 l2cap_build_conf_req(chan
, buf
), buf
);
3613 chan
->num_conf_req
++;
3616 /* Got Conf Rsp PENDING from remote side and asume we sent
3617 Conf Rsp PENDING in the code above */
3618 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3619 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3621 /* check compatibility */
3623 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3624 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3626 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3627 l2cap_build_conf_rsp(chan
, rsp
,
3628 L2CAP_CONF_SUCCESS
, flags
), rsp
);
3632 l2cap_chan_unlock(chan
);
3636 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3638 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3639 u16 scid
, flags
, result
;
3640 struct l2cap_chan
*chan
;
3641 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3644 scid
= __le16_to_cpu(rsp
->scid
);
3645 flags
= __le16_to_cpu(rsp
->flags
);
3646 result
= __le16_to_cpu(rsp
->result
);
3648 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3651 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3656 case L2CAP_CONF_SUCCESS
:
3657 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3658 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3661 case L2CAP_CONF_PENDING
:
3662 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3664 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3667 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3670 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3674 /* check compatibility */
3676 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3677 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3679 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3680 l2cap_build_conf_rsp(chan
, buf
,
3681 L2CAP_CONF_SUCCESS
, 0x0000), buf
);
3685 case L2CAP_CONF_UNACCEPT
:
3686 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3689 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3690 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3694 /* throw out any old stored conf requests */
3695 result
= L2CAP_CONF_SUCCESS
;
3696 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3699 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3703 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3704 L2CAP_CONF_REQ
, len
, req
);
3705 chan
->num_conf_req
++;
3706 if (result
!= L2CAP_CONF_SUCCESS
)
3712 l2cap_chan_set_err(chan
, ECONNRESET
);
3714 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
3715 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3719 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
3722 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3724 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3725 set_default_fcs(chan
);
3727 if (chan
->mode
== L2CAP_MODE_ERTM
||
3728 chan
->mode
== L2CAP_MODE_STREAMING
)
3729 err
= l2cap_ertm_init(chan
);
3732 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3734 l2cap_chan_ready(chan
);
3738 l2cap_chan_unlock(chan
);
3742 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3744 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3745 struct l2cap_disconn_rsp rsp
;
3747 struct l2cap_chan
*chan
;
3750 scid
= __le16_to_cpu(req
->scid
);
3751 dcid
= __le16_to_cpu(req
->dcid
);
3753 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3755 mutex_lock(&conn
->chan_lock
);
3757 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
3759 mutex_unlock(&conn
->chan_lock
);
3763 l2cap_chan_lock(chan
);
3767 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3768 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3769 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3772 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3775 l2cap_chan_hold(chan
);
3776 l2cap_chan_del(chan
, ECONNRESET
);
3778 l2cap_chan_unlock(chan
);
3780 chan
->ops
->close(chan
);
3781 l2cap_chan_put(chan
);
3783 mutex_unlock(&conn
->chan_lock
);
3788 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3790 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3792 struct l2cap_chan
*chan
;
3794 scid
= __le16_to_cpu(rsp
->scid
);
3795 dcid
= __le16_to_cpu(rsp
->dcid
);
3797 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3799 mutex_lock(&conn
->chan_lock
);
3801 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3803 mutex_unlock(&conn
->chan_lock
);
3807 l2cap_chan_lock(chan
);
3809 l2cap_chan_hold(chan
);
3810 l2cap_chan_del(chan
, 0);
3812 l2cap_chan_unlock(chan
);
3814 chan
->ops
->close(chan
);
3815 l2cap_chan_put(chan
);
3817 mutex_unlock(&conn
->chan_lock
);
3822 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3824 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3827 type
= __le16_to_cpu(req
->type
);
3829 BT_DBG("type 0x%4.4x", type
);
3831 if (type
== L2CAP_IT_FEAT_MASK
) {
3833 u32 feat_mask
= l2cap_feat_mask
;
3834 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3835 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3836 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
3838 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3841 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3842 | L2CAP_FEAT_EXT_WINDOW
;
3844 put_unaligned_le32(feat_mask
, rsp
->data
);
3845 l2cap_send_cmd(conn
, cmd
->ident
,
3846 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3847 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3849 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3852 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3854 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3856 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3857 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
3858 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3859 l2cap_send_cmd(conn
, cmd
->ident
,
3860 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3862 struct l2cap_info_rsp rsp
;
3863 rsp
.type
= cpu_to_le16(type
);
3864 rsp
.result
= __constant_cpu_to_le16(L2CAP_IR_NOTSUPP
);
3865 l2cap_send_cmd(conn
, cmd
->ident
,
3866 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3872 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3874 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3877 type
= __le16_to_cpu(rsp
->type
);
3878 result
= __le16_to_cpu(rsp
->result
);
3880 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3882 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3883 if (cmd
->ident
!= conn
->info_ident
||
3884 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3887 cancel_delayed_work(&conn
->info_timer
);
3889 if (result
!= L2CAP_IR_SUCCESS
) {
3890 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3891 conn
->info_ident
= 0;
3893 l2cap_conn_start(conn
);
3899 case L2CAP_IT_FEAT_MASK
:
3900 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3902 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3903 struct l2cap_info_req req
;
3904 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3906 conn
->info_ident
= l2cap_get_ident(conn
);
3908 l2cap_send_cmd(conn
, conn
->info_ident
,
3909 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3911 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3912 conn
->info_ident
= 0;
3914 l2cap_conn_start(conn
);
3918 case L2CAP_IT_FIXED_CHAN
:
3919 conn
->fixed_chan_mask
= rsp
->data
[0];
3920 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3921 conn
->info_ident
= 0;
3923 l2cap_conn_start(conn
);
3930 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
3931 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3934 struct l2cap_create_chan_req
*req
= data
;
3935 struct l2cap_create_chan_rsp rsp
;
3938 if (cmd_len
!= sizeof(*req
))
3944 psm
= le16_to_cpu(req
->psm
);
3945 scid
= le16_to_cpu(req
->scid
);
3947 BT_DBG("psm %d, scid %d, amp_id %d", psm
, scid
, req
->amp_id
);
3949 /* Placeholder: Always reject */
3951 rsp
.scid
= cpu_to_le16(scid
);
3952 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
3953 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3955 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
3961 static inline int l2cap_create_channel_rsp(struct l2cap_conn
*conn
,
3962 struct l2cap_cmd_hdr
*cmd
, void *data
)
3964 BT_DBG("conn %p", conn
);
3966 return l2cap_connect_rsp(conn
, cmd
, data
);
3969 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
3970 u16 icid
, u16 result
)
3972 struct l2cap_move_chan_rsp rsp
;
3974 BT_DBG("icid %d, result %d", icid
, result
);
3976 rsp
.icid
= cpu_to_le16(icid
);
3977 rsp
.result
= cpu_to_le16(result
);
3979 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
3982 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
3983 struct l2cap_chan
*chan
, u16 icid
, u16 result
)
3985 struct l2cap_move_chan_cfm cfm
;
3988 BT_DBG("icid %d, result %d", icid
, result
);
3990 ident
= l2cap_get_ident(conn
);
3992 chan
->ident
= ident
;
3994 cfm
.icid
= cpu_to_le16(icid
);
3995 cfm
.result
= cpu_to_le16(result
);
3997 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
4000 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4003 struct l2cap_move_chan_cfm_rsp rsp
;
4005 BT_DBG("icid %d", icid
);
4007 rsp
.icid
= cpu_to_le16(icid
);
4008 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4011 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4012 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4014 struct l2cap_move_chan_req
*req
= data
;
4016 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4018 if (cmd_len
!= sizeof(*req
))
4021 icid
= le16_to_cpu(req
->icid
);
4023 BT_DBG("icid %d, dest_amp_id %d", icid
, req
->dest_amp_id
);
4028 /* Placeholder: Always refuse */
4029 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
4034 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
4035 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4037 struct l2cap_move_chan_rsp
*rsp
= data
;
4040 if (cmd_len
!= sizeof(*rsp
))
4043 icid
= le16_to_cpu(rsp
->icid
);
4044 result
= le16_to_cpu(rsp
->result
);
4046 BT_DBG("icid %d, result %d", icid
, result
);
4048 /* Placeholder: Always unconfirmed */
4049 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
4054 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
4055 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4057 struct l2cap_move_chan_cfm
*cfm
= data
;
4060 if (cmd_len
!= sizeof(*cfm
))
4063 icid
= le16_to_cpu(cfm
->icid
);
4064 result
= le16_to_cpu(cfm
->result
);
4066 BT_DBG("icid %d, result %d", icid
, result
);
4068 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
4073 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
4074 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4076 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
4079 if (cmd_len
!= sizeof(*rsp
))
4082 icid
= le16_to_cpu(rsp
->icid
);
4084 BT_DBG("icid %d", icid
);
4089 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
4094 if (min
> max
|| min
< 6 || max
> 3200)
4097 if (to_multiplier
< 10 || to_multiplier
> 3200)
4100 if (max
>= to_multiplier
* 8)
4103 max_latency
= (to_multiplier
* 8 / max
) - 1;
4104 if (latency
> 499 || latency
> max_latency
)
4110 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
4111 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4113 struct hci_conn
*hcon
= conn
->hcon
;
4114 struct l2cap_conn_param_update_req
*req
;
4115 struct l2cap_conn_param_update_rsp rsp
;
4116 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
4119 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
4122 cmd_len
= __le16_to_cpu(cmd
->len
);
4123 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
4126 req
= (struct l2cap_conn_param_update_req
*) data
;
4127 min
= __le16_to_cpu(req
->min
);
4128 max
= __le16_to_cpu(req
->max
);
4129 latency
= __le16_to_cpu(req
->latency
);
4130 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
4132 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4133 min
, max
, latency
, to_multiplier
);
4135 memset(&rsp
, 0, sizeof(rsp
));
4137 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
4139 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
4141 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
4143 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
4147 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
4152 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
4153 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
4157 switch (cmd
->code
) {
4158 case L2CAP_COMMAND_REJ
:
4159 l2cap_command_rej(conn
, cmd
, data
);
4162 case L2CAP_CONN_REQ
:
4163 err
= l2cap_connect_req(conn
, cmd
, data
);
4166 case L2CAP_CONN_RSP
:
4167 err
= l2cap_connect_rsp(conn
, cmd
, data
);
4170 case L2CAP_CONF_REQ
:
4171 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
4174 case L2CAP_CONF_RSP
:
4175 err
= l2cap_config_rsp(conn
, cmd
, data
);
4178 case L2CAP_DISCONN_REQ
:
4179 err
= l2cap_disconnect_req(conn
, cmd
, data
);
4182 case L2CAP_DISCONN_RSP
:
4183 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
4186 case L2CAP_ECHO_REQ
:
4187 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
4190 case L2CAP_ECHO_RSP
:
4193 case L2CAP_INFO_REQ
:
4194 err
= l2cap_information_req(conn
, cmd
, data
);
4197 case L2CAP_INFO_RSP
:
4198 err
= l2cap_information_rsp(conn
, cmd
, data
);
4201 case L2CAP_CREATE_CHAN_REQ
:
4202 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
4205 case L2CAP_CREATE_CHAN_RSP
:
4206 err
= l2cap_create_channel_rsp(conn
, cmd
, data
);
4209 case L2CAP_MOVE_CHAN_REQ
:
4210 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
4213 case L2CAP_MOVE_CHAN_RSP
:
4214 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
4217 case L2CAP_MOVE_CHAN_CFM
:
4218 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
4221 case L2CAP_MOVE_CHAN_CFM_RSP
:
4222 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
4226 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
4234 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
4235 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4237 switch (cmd
->code
) {
4238 case L2CAP_COMMAND_REJ
:
4241 case L2CAP_CONN_PARAM_UPDATE_REQ
:
4242 return l2cap_conn_param_update_req(conn
, cmd
, data
);
4244 case L2CAP_CONN_PARAM_UPDATE_RSP
:
4248 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
4253 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
4254 struct sk_buff
*skb
)
4256 u8
*data
= skb
->data
;
4258 struct l2cap_cmd_hdr cmd
;
4261 l2cap_raw_recv(conn
, skb
);
4263 while (len
>= L2CAP_CMD_HDR_SIZE
) {
4265 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
4266 data
+= L2CAP_CMD_HDR_SIZE
;
4267 len
-= L2CAP_CMD_HDR_SIZE
;
4269 cmd_len
= le16_to_cpu(cmd
.len
);
4271 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
4273 if (cmd_len
> len
|| !cmd
.ident
) {
4274 BT_DBG("corrupted command");
4278 if (conn
->hcon
->type
== LE_LINK
)
4279 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
4281 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
4284 struct l2cap_cmd_rej_unk rej
;
4286 BT_ERR("Wrong link type (%d)", err
);
4288 /* FIXME: Map err to a valid reason */
4289 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
4290 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4300 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4302 u16 our_fcs
, rcv_fcs
;
4305 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
4306 hdr_size
= L2CAP_EXT_HDR_SIZE
;
4308 hdr_size
= L2CAP_ENH_HDR_SIZE
;
4310 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
4311 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
4312 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
4313 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
4315 if (our_fcs
!= rcv_fcs
)
4321 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
4323 struct l2cap_ctrl control
;
4325 BT_DBG("chan %p", chan
);
4327 memset(&control
, 0, sizeof(control
));
4330 control
.reqseq
= chan
->buffer_seq
;
4331 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4333 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4334 control
.super
= L2CAP_SUPER_RNR
;
4335 l2cap_send_sframe(chan
, &control
);
4338 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4339 chan
->unacked_frames
> 0)
4340 __set_retrans_timer(chan
);
4342 /* Send pending iframes */
4343 l2cap_ertm_send(chan
);
4345 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
4346 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
4347 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4350 control
.super
= L2CAP_SUPER_RR
;
4351 l2cap_send_sframe(chan
, &control
);
4355 static void append_skb_frag(struct sk_buff
*skb
,
4356 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
4358 /* skb->len reflects data in skb as well as all fragments
4359 * skb->data_len reflects only data in fragments
4361 if (!skb_has_frag_list(skb
))
4362 skb_shinfo(skb
)->frag_list
= new_frag
;
4364 new_frag
->next
= NULL
;
4366 (*last_frag
)->next
= new_frag
;
4367 *last_frag
= new_frag
;
4369 skb
->len
+= new_frag
->len
;
4370 skb
->data_len
+= new_frag
->len
;
4371 skb
->truesize
+= new_frag
->truesize
;
4374 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
4375 struct l2cap_ctrl
*control
)
4379 switch (control
->sar
) {
4380 case L2CAP_SAR_UNSEGMENTED
:
4384 err
= chan
->ops
->recv(chan
, skb
);
4387 case L2CAP_SAR_START
:
4391 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
4392 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
4394 if (chan
->sdu_len
> chan
->imtu
) {
4399 if (skb
->len
>= chan
->sdu_len
)
4403 chan
->sdu_last_frag
= skb
;
4409 case L2CAP_SAR_CONTINUE
:
4413 append_skb_frag(chan
->sdu
, skb
,
4414 &chan
->sdu_last_frag
);
4417 if (chan
->sdu
->len
>= chan
->sdu_len
)
4427 append_skb_frag(chan
->sdu
, skb
,
4428 &chan
->sdu_last_frag
);
4431 if (chan
->sdu
->len
!= chan
->sdu_len
)
4434 err
= chan
->ops
->recv(chan
, chan
->sdu
);
4437 /* Reassembly complete */
4439 chan
->sdu_last_frag
= NULL
;
4447 kfree_skb(chan
->sdu
);
4449 chan
->sdu_last_frag
= NULL
;
4456 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
4460 if (chan
->mode
!= L2CAP_MODE_ERTM
)
4463 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
4464 l2cap_tx(chan
, NULL
, NULL
, event
);
4467 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
4470 /* Pass sequential frames to l2cap_reassemble_sdu()
4471 * until a gap is encountered.
4474 BT_DBG("chan %p", chan
);
4476 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4477 struct sk_buff
*skb
;
4478 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4479 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
4481 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
4486 skb_unlink(skb
, &chan
->srej_q
);
4487 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
4488 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
4493 if (skb_queue_empty(&chan
->srej_q
)) {
4494 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
4495 l2cap_send_ack(chan
);
4501 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
4502 struct l2cap_ctrl
*control
)
4504 struct sk_buff
*skb
;
4506 BT_DBG("chan %p, control %p", chan
, control
);
4508 if (control
->reqseq
== chan
->next_tx_seq
) {
4509 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4510 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4514 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4517 BT_DBG("Seq %d not available for retransmission",
4522 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4523 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4524 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4528 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4530 if (control
->poll
) {
4531 l2cap_pass_to_tx(chan
, control
);
4533 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4534 l2cap_retransmit(chan
, control
);
4535 l2cap_ertm_send(chan
);
4537 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4538 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4539 chan
->srej_save_reqseq
= control
->reqseq
;
4542 l2cap_pass_to_tx_fbit(chan
, control
);
4544 if (control
->final
) {
4545 if (chan
->srej_save_reqseq
!= control
->reqseq
||
4546 !test_and_clear_bit(CONN_SREJ_ACT
,
4548 l2cap_retransmit(chan
, control
);
4550 l2cap_retransmit(chan
, control
);
4551 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4552 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4553 chan
->srej_save_reqseq
= control
->reqseq
;
4559 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
4560 struct l2cap_ctrl
*control
)
4562 struct sk_buff
*skb
;
4564 BT_DBG("chan %p, control %p", chan
, control
);
4566 if (control
->reqseq
== chan
->next_tx_seq
) {
4567 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4568 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4572 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4574 if (chan
->max_tx
&& skb
&&
4575 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4576 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4577 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4581 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4583 l2cap_pass_to_tx(chan
, control
);
4585 if (control
->final
) {
4586 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4587 l2cap_retransmit_all(chan
, control
);
4589 l2cap_retransmit_all(chan
, control
);
4590 l2cap_ertm_send(chan
);
4591 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
4592 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4596 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
4598 BT_DBG("chan %p, txseq %d", chan
, txseq
);
4600 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
4601 chan
->expected_tx_seq
);
4603 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
4604 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4606 /* See notes below regarding "double poll" and
4609 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4610 BT_DBG("Invalid/Ignore - after SREJ");
4611 return L2CAP_TXSEQ_INVALID_IGNORE
;
4613 BT_DBG("Invalid - in window after SREJ sent");
4614 return L2CAP_TXSEQ_INVALID
;
4618 if (chan
->srej_list
.head
== txseq
) {
4619 BT_DBG("Expected SREJ");
4620 return L2CAP_TXSEQ_EXPECTED_SREJ
;
4623 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
4624 BT_DBG("Duplicate SREJ - txseq already stored");
4625 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
4628 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
4629 BT_DBG("Unexpected SREJ - not requested");
4630 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
4634 if (chan
->expected_tx_seq
== txseq
) {
4635 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4637 BT_DBG("Invalid - txseq outside tx window");
4638 return L2CAP_TXSEQ_INVALID
;
4641 return L2CAP_TXSEQ_EXPECTED
;
4645 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
4646 __seq_offset(chan
, chan
->expected_tx_seq
,
4647 chan
->last_acked_seq
)){
4648 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4649 return L2CAP_TXSEQ_DUPLICATE
;
4652 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
4653 /* A source of invalid packets is a "double poll" condition,
4654 * where delays cause us to send multiple poll packets. If
4655 * the remote stack receives and processes both polls,
4656 * sequence numbers can wrap around in such a way that a
4657 * resent frame has a sequence number that looks like new data
4658 * with a sequence gap. This would trigger an erroneous SREJ
4661 * Fortunately, this is impossible with a tx window that's
4662 * less than half of the maximum sequence number, which allows
4663 * invalid frames to be safely ignored.
4665 * With tx window sizes greater than half of the tx window
4666 * maximum, the frame is invalid and cannot be ignored. This
4667 * causes a disconnect.
4670 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4671 BT_DBG("Invalid/Ignore - txseq outside tx window");
4672 return L2CAP_TXSEQ_INVALID_IGNORE
;
4674 BT_DBG("Invalid - txseq outside tx window");
4675 return L2CAP_TXSEQ_INVALID
;
4678 BT_DBG("Unexpected - txseq indicates missing frames");
4679 return L2CAP_TXSEQ_UNEXPECTED
;
4683 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
4684 struct l2cap_ctrl
*control
,
4685 struct sk_buff
*skb
, u8 event
)
4688 bool skb_in_use
= 0;
4690 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4694 case L2CAP_EV_RECV_IFRAME
:
4695 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
4696 case L2CAP_TXSEQ_EXPECTED
:
4697 l2cap_pass_to_tx(chan
, control
);
4699 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4700 BT_DBG("Busy, discarding expected seq %d",
4705 chan
->expected_tx_seq
= __next_seq(chan
,
4708 chan
->buffer_seq
= chan
->expected_tx_seq
;
4711 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
4715 if (control
->final
) {
4716 if (!test_and_clear_bit(CONN_REJ_ACT
,
4717 &chan
->conn_state
)) {
4719 l2cap_retransmit_all(chan
, control
);
4720 l2cap_ertm_send(chan
);
4724 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
4725 l2cap_send_ack(chan
);
4727 case L2CAP_TXSEQ_UNEXPECTED
:
4728 l2cap_pass_to_tx(chan
, control
);
4730 /* Can't issue SREJ frames in the local busy state.
4731 * Drop this frame, it will be seen as missing
4732 * when local busy is exited.
4734 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4735 BT_DBG("Busy, discarding unexpected seq %d",
4740 /* There was a gap in the sequence, so an SREJ
4741 * must be sent for each missing frame. The
4742 * current frame is stored for later use.
4744 skb_queue_tail(&chan
->srej_q
, skb
);
4746 BT_DBG("Queued %p (queue len %d)", skb
,
4747 skb_queue_len(&chan
->srej_q
));
4749 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4750 l2cap_seq_list_clear(&chan
->srej_list
);
4751 l2cap_send_srej(chan
, control
->txseq
);
4753 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
4755 case L2CAP_TXSEQ_DUPLICATE
:
4756 l2cap_pass_to_tx(chan
, control
);
4758 case L2CAP_TXSEQ_INVALID_IGNORE
:
4760 case L2CAP_TXSEQ_INVALID
:
4762 l2cap_send_disconn_req(chan
->conn
, chan
,
4767 case L2CAP_EV_RECV_RR
:
4768 l2cap_pass_to_tx(chan
, control
);
4769 if (control
->final
) {
4770 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4772 if (!test_and_clear_bit(CONN_REJ_ACT
,
4773 &chan
->conn_state
)) {
4775 l2cap_retransmit_all(chan
, control
);
4778 l2cap_ertm_send(chan
);
4779 } else if (control
->poll
) {
4780 l2cap_send_i_or_rr_or_rnr(chan
);
4782 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4783 &chan
->conn_state
) &&
4784 chan
->unacked_frames
)
4785 __set_retrans_timer(chan
);
4787 l2cap_ertm_send(chan
);
4790 case L2CAP_EV_RECV_RNR
:
4791 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4792 l2cap_pass_to_tx(chan
, control
);
4793 if (control
&& control
->poll
) {
4794 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4795 l2cap_send_rr_or_rnr(chan
, 0);
4797 __clear_retrans_timer(chan
);
4798 l2cap_seq_list_clear(&chan
->retrans_list
);
4800 case L2CAP_EV_RECV_REJ
:
4801 l2cap_handle_rej(chan
, control
);
4803 case L2CAP_EV_RECV_SREJ
:
4804 l2cap_handle_srej(chan
, control
);
4810 if (skb
&& !skb_in_use
) {
4811 BT_DBG("Freeing %p", skb
);
4818 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
4819 struct l2cap_ctrl
*control
,
4820 struct sk_buff
*skb
, u8 event
)
4823 u16 txseq
= control
->txseq
;
4824 bool skb_in_use
= 0;
4826 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4830 case L2CAP_EV_RECV_IFRAME
:
4831 switch (l2cap_classify_txseq(chan
, txseq
)) {
4832 case L2CAP_TXSEQ_EXPECTED
:
4833 /* Keep frame for reassembly later */
4834 l2cap_pass_to_tx(chan
, control
);
4835 skb_queue_tail(&chan
->srej_q
, skb
);
4837 BT_DBG("Queued %p (queue len %d)", skb
,
4838 skb_queue_len(&chan
->srej_q
));
4840 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
4842 case L2CAP_TXSEQ_EXPECTED_SREJ
:
4843 l2cap_seq_list_pop(&chan
->srej_list
);
4845 l2cap_pass_to_tx(chan
, control
);
4846 skb_queue_tail(&chan
->srej_q
, skb
);
4848 BT_DBG("Queued %p (queue len %d)", skb
,
4849 skb_queue_len(&chan
->srej_q
));
4851 err
= l2cap_rx_queued_iframes(chan
);
4856 case L2CAP_TXSEQ_UNEXPECTED
:
4857 /* Got a frame that can't be reassembled yet.
4858 * Save it for later, and send SREJs to cover
4859 * the missing frames.
4861 skb_queue_tail(&chan
->srej_q
, skb
);
4863 BT_DBG("Queued %p (queue len %d)", skb
,
4864 skb_queue_len(&chan
->srej_q
));
4866 l2cap_pass_to_tx(chan
, control
);
4867 l2cap_send_srej(chan
, control
->txseq
);
4869 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
4870 /* This frame was requested with an SREJ, but
4871 * some expected retransmitted frames are
4872 * missing. Request retransmission of missing
4875 skb_queue_tail(&chan
->srej_q
, skb
);
4877 BT_DBG("Queued %p (queue len %d)", skb
,
4878 skb_queue_len(&chan
->srej_q
));
4880 l2cap_pass_to_tx(chan
, control
);
4881 l2cap_send_srej_list(chan
, control
->txseq
);
4883 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
4884 /* We've already queued this frame. Drop this copy. */
4885 l2cap_pass_to_tx(chan
, control
);
4887 case L2CAP_TXSEQ_DUPLICATE
:
4888 /* Expecting a later sequence number, so this frame
4889 * was already received. Ignore it completely.
4892 case L2CAP_TXSEQ_INVALID_IGNORE
:
4894 case L2CAP_TXSEQ_INVALID
:
4896 l2cap_send_disconn_req(chan
->conn
, chan
,
4901 case L2CAP_EV_RECV_RR
:
4902 l2cap_pass_to_tx(chan
, control
);
4903 if (control
->final
) {
4904 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4906 if (!test_and_clear_bit(CONN_REJ_ACT
,
4907 &chan
->conn_state
)) {
4909 l2cap_retransmit_all(chan
, control
);
4912 l2cap_ertm_send(chan
);
4913 } else if (control
->poll
) {
4914 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4915 &chan
->conn_state
) &&
4916 chan
->unacked_frames
) {
4917 __set_retrans_timer(chan
);
4920 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4921 l2cap_send_srej_tail(chan
);
4923 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4924 &chan
->conn_state
) &&
4925 chan
->unacked_frames
)
4926 __set_retrans_timer(chan
);
4928 l2cap_send_ack(chan
);
4931 case L2CAP_EV_RECV_RNR
:
4932 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4933 l2cap_pass_to_tx(chan
, control
);
4934 if (control
->poll
) {
4935 l2cap_send_srej_tail(chan
);
4937 struct l2cap_ctrl rr_control
;
4938 memset(&rr_control
, 0, sizeof(rr_control
));
4939 rr_control
.sframe
= 1;
4940 rr_control
.super
= L2CAP_SUPER_RR
;
4941 rr_control
.reqseq
= chan
->buffer_seq
;
4942 l2cap_send_sframe(chan
, &rr_control
);
4946 case L2CAP_EV_RECV_REJ
:
4947 l2cap_handle_rej(chan
, control
);
4949 case L2CAP_EV_RECV_SREJ
:
4950 l2cap_handle_srej(chan
, control
);
4954 if (skb
&& !skb_in_use
) {
4955 BT_DBG("Freeing %p", skb
);
4962 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
4964 /* Make sure reqseq is for a packet that has been sent but not acked */
4967 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
4968 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
4971 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
4972 struct sk_buff
*skb
, u8 event
)
4976 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
4977 control
, skb
, event
, chan
->rx_state
);
4979 if (__valid_reqseq(chan
, control
->reqseq
)) {
4980 switch (chan
->rx_state
) {
4981 case L2CAP_RX_STATE_RECV
:
4982 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
4984 case L2CAP_RX_STATE_SREJ_SENT
:
4985 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
4993 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
4994 control
->reqseq
, chan
->next_tx_seq
,
4995 chan
->expected_ack_seq
);
4996 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5002 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5003 struct sk_buff
*skb
)
5007 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
5010 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
5011 L2CAP_TXSEQ_EXPECTED
) {
5012 l2cap_pass_to_tx(chan
, control
);
5014 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
5015 __next_seq(chan
, chan
->buffer_seq
));
5017 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5019 l2cap_reassemble_sdu(chan
, skb
, control
);
5022 kfree_skb(chan
->sdu
);
5025 chan
->sdu_last_frag
= NULL
;
5029 BT_DBG("Freeing %p", skb
);
5034 chan
->last_acked_seq
= control
->txseq
;
5035 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
5040 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5042 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
5046 __unpack_control(chan
, skb
);
5051 * We can just drop the corrupted I-frame here.
5052 * Receiver will miss it and start proper recovery
5053 * procedures and ask for retransmission.
5055 if (l2cap_check_fcs(chan
, skb
))
5058 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
5059 len
-= L2CAP_SDULEN_SIZE
;
5061 if (chan
->fcs
== L2CAP_FCS_CRC16
)
5062 len
-= L2CAP_FCS_SIZE
;
5064 if (len
> chan
->mps
) {
5065 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5069 if (!control
->sframe
) {
5072 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5073 control
->sar
, control
->reqseq
, control
->final
,
5076 /* Validate F-bit - F=0 always valid, F=1 only
5077 * valid in TX WAIT_F
5079 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
5082 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
5083 event
= L2CAP_EV_RECV_IFRAME
;
5084 err
= l2cap_rx(chan
, control
, skb
, event
);
5086 err
= l2cap_stream_rx(chan
, control
, skb
);
5090 l2cap_send_disconn_req(chan
->conn
, chan
,
5093 const u8 rx_func_to_event
[4] = {
5094 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
5095 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
5098 /* Only I-frames are expected in streaming mode */
5099 if (chan
->mode
== L2CAP_MODE_STREAMING
)
5102 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5103 control
->reqseq
, control
->final
, control
->poll
,
5108 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5112 /* Validate F and P bits */
5113 if (control
->final
&& (control
->poll
||
5114 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
5117 event
= rx_func_to_event
[control
->super
];
5118 if (l2cap_rx(chan
, control
, skb
, event
))
5119 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5129 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
5131 struct l2cap_chan
*chan
;
5133 chan
= l2cap_get_chan_by_scid(conn
, cid
);
5135 BT_DBG("unknown cid 0x%4.4x", cid
);
5136 /* Drop packet and return */
5141 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5143 if (chan
->state
!= BT_CONNECTED
)
5146 switch (chan
->mode
) {
5147 case L2CAP_MODE_BASIC
:
5148 /* If socket recv buffers overflows we drop data here
5149 * which is *bad* because L2CAP has to be reliable.
5150 * But we don't have any other choice. L2CAP doesn't
5151 * provide flow control mechanism. */
5153 if (chan
->imtu
< skb
->len
)
5156 if (!chan
->ops
->recv(chan
, skb
))
5160 case L2CAP_MODE_ERTM
:
5161 case L2CAP_MODE_STREAMING
:
5162 l2cap_data_rcv(chan
, skb
);
5166 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
5174 l2cap_chan_unlock(chan
);
5179 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
5181 struct l2cap_chan
*chan
;
5183 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
5187 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5189 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5192 if (chan
->imtu
< skb
->len
)
5195 if (!chan
->ops
->recv(chan
, skb
))
5204 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
5205 struct sk_buff
*skb
)
5207 struct l2cap_chan
*chan
;
5209 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
5213 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5215 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5218 if (chan
->imtu
< skb
->len
)
5221 if (!chan
->ops
->recv(chan
, skb
))
5230 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
5232 struct l2cap_hdr
*lh
= (void *) skb
->data
;
5236 skb_pull(skb
, L2CAP_HDR_SIZE
);
5237 cid
= __le16_to_cpu(lh
->cid
);
5238 len
= __le16_to_cpu(lh
->len
);
5240 if (len
!= skb
->len
) {
5245 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
5248 case L2CAP_CID_LE_SIGNALING
:
5249 case L2CAP_CID_SIGNALING
:
5250 l2cap_sig_channel(conn
, skb
);
5253 case L2CAP_CID_CONN_LESS
:
5254 psm
= get_unaligned((__le16
*) skb
->data
);
5255 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
5256 l2cap_conless_channel(conn
, psm
, skb
);
5259 case L2CAP_CID_LE_DATA
:
5260 l2cap_att_channel(conn
, cid
, skb
);
5264 if (smp_sig_channel(conn
, skb
))
5265 l2cap_conn_del(conn
->hcon
, EACCES
);
5269 l2cap_data_channel(conn
, cid
, skb
);
5274 /* ---- L2CAP interface with lower layer (HCI) ---- */
5276 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
5278 int exact
= 0, lm1
= 0, lm2
= 0;
5279 struct l2cap_chan
*c
;
5281 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
5283 /* Find listening sockets and check their link_mode */
5284 read_lock(&chan_list_lock
);
5285 list_for_each_entry(c
, &chan_list
, global_l
) {
5286 struct sock
*sk
= c
->sk
;
5288 if (c
->state
!= BT_LISTEN
)
5291 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
5292 lm1
|= HCI_LM_ACCEPT
;
5293 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5294 lm1
|= HCI_LM_MASTER
;
5296 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
5297 lm2
|= HCI_LM_ACCEPT
;
5298 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5299 lm2
|= HCI_LM_MASTER
;
5302 read_unlock(&chan_list_lock
);
5304 return exact
? lm1
: lm2
;
5307 int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
5309 struct l2cap_conn
*conn
;
5311 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
5314 conn
= l2cap_conn_add(hcon
, status
);
5316 l2cap_conn_ready(conn
);
5318 l2cap_conn_del(hcon
, bt_to_errno(status
));
5323 int l2cap_disconn_ind(struct hci_conn
*hcon
)
5325 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5327 BT_DBG("hcon %p", hcon
);
5330 return HCI_ERROR_REMOTE_USER_TERM
;
5331 return conn
->disc_reason
;
5334 int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
5336 BT_DBG("hcon %p reason %d", hcon
, reason
);
5338 l2cap_conn_del(hcon
, bt_to_errno(reason
));
5342 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
5344 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
5347 if (encrypt
== 0x00) {
5348 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
5349 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
5350 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
5351 l2cap_chan_close(chan
, ECONNREFUSED
);
5353 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
5354 __clear_chan_timer(chan
);
5358 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
5360 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5361 struct l2cap_chan
*chan
;
5366 BT_DBG("conn %p", conn
);
5368 if (hcon
->type
== LE_LINK
) {
5369 if (!status
&& encrypt
)
5370 smp_distribute_keys(conn
, 0);
5371 cancel_delayed_work(&conn
->security_timer
);
5374 mutex_lock(&conn
->chan_lock
);
5376 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
5377 l2cap_chan_lock(chan
);
5379 BT_DBG("chan->scid %d", chan
->scid
);
5381 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
5382 if (!status
&& encrypt
) {
5383 chan
->sec_level
= hcon
->sec_level
;
5384 l2cap_chan_ready(chan
);
5387 l2cap_chan_unlock(chan
);
5391 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
5392 l2cap_chan_unlock(chan
);
5396 if (!status
&& (chan
->state
== BT_CONNECTED
||
5397 chan
->state
== BT_CONFIG
)) {
5398 struct sock
*sk
= chan
->sk
;
5400 clear_bit(BT_SK_SUSPEND
, &bt_sk(sk
)->flags
);
5401 sk
->sk_state_change(sk
);
5403 l2cap_check_encryption(chan
, encrypt
);
5404 l2cap_chan_unlock(chan
);
5408 if (chan
->state
== BT_CONNECT
) {
5410 l2cap_send_conn_req(chan
);
5412 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5414 } else if (chan
->state
== BT_CONNECT2
) {
5415 struct sock
*sk
= chan
->sk
;
5416 struct l2cap_conn_rsp rsp
;
5422 if (test_bit(BT_SK_DEFER_SETUP
,
5423 &bt_sk(sk
)->flags
)) {
5424 struct sock
*parent
= bt_sk(sk
)->parent
;
5425 res
= L2CAP_CR_PEND
;
5426 stat
= L2CAP_CS_AUTHOR_PEND
;
5428 parent
->sk_data_ready(parent
, 0);
5430 __l2cap_state_change(chan
, BT_CONFIG
);
5431 res
= L2CAP_CR_SUCCESS
;
5432 stat
= L2CAP_CS_NO_INFO
;
5435 __l2cap_state_change(chan
, BT_DISCONN
);
5436 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5437 res
= L2CAP_CR_SEC_BLOCK
;
5438 stat
= L2CAP_CS_NO_INFO
;
5443 rsp
.scid
= cpu_to_le16(chan
->dcid
);
5444 rsp
.dcid
= cpu_to_le16(chan
->scid
);
5445 rsp
.result
= cpu_to_le16(res
);
5446 rsp
.status
= cpu_to_le16(stat
);
5447 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
5450 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
5451 res
== L2CAP_CR_SUCCESS
) {
5453 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
5454 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
5456 l2cap_build_conf_req(chan
, buf
),
5458 chan
->num_conf_req
++;
5462 l2cap_chan_unlock(chan
);
5465 mutex_unlock(&conn
->chan_lock
);
5470 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
5472 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5475 conn
= l2cap_conn_add(hcon
, 0);
5480 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
5482 if (!(flags
& ACL_CONT
)) {
5483 struct l2cap_hdr
*hdr
;
5487 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
5488 kfree_skb(conn
->rx_skb
);
5489 conn
->rx_skb
= NULL
;
5491 l2cap_conn_unreliable(conn
, ECOMM
);
5494 /* Start fragment always begin with Basic L2CAP header */
5495 if (skb
->len
< L2CAP_HDR_SIZE
) {
5496 BT_ERR("Frame is too short (len %d)", skb
->len
);
5497 l2cap_conn_unreliable(conn
, ECOMM
);
5501 hdr
= (struct l2cap_hdr
*) skb
->data
;
5502 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
5504 if (len
== skb
->len
) {
5505 /* Complete frame received */
5506 l2cap_recv_frame(conn
, skb
);
5510 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
5512 if (skb
->len
> len
) {
5513 BT_ERR("Frame is too long (len %d, expected len %d)",
5515 l2cap_conn_unreliable(conn
, ECOMM
);
5519 /* Allocate skb for the complete frame (with header) */
5520 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
5524 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5526 conn
->rx_len
= len
- skb
->len
;
5528 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
5530 if (!conn
->rx_len
) {
5531 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
5532 l2cap_conn_unreliable(conn
, ECOMM
);
5536 if (skb
->len
> conn
->rx_len
) {
5537 BT_ERR("Fragment is too long (len %d, expected %d)",
5538 skb
->len
, conn
->rx_len
);
5539 kfree_skb(conn
->rx_skb
);
5540 conn
->rx_skb
= NULL
;
5542 l2cap_conn_unreliable(conn
, ECOMM
);
5546 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5548 conn
->rx_len
-= skb
->len
;
5550 if (!conn
->rx_len
) {
5551 /* Complete frame received */
5552 l2cap_recv_frame(conn
, conn
->rx_skb
);
5553 conn
->rx_skb
= NULL
;
5562 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
5564 struct l2cap_chan
*c
;
5566 read_lock(&chan_list_lock
);
5568 list_for_each_entry(c
, &chan_list
, global_l
) {
5569 struct sock
*sk
= c
->sk
;
5571 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5572 batostr(&bt_sk(sk
)->src
),
5573 batostr(&bt_sk(sk
)->dst
),
5574 c
->state
, __le16_to_cpu(c
->psm
),
5575 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
5576 c
->sec_level
, c
->mode
);
5579 read_unlock(&chan_list_lock
);
5584 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
5586 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
5589 static const struct file_operations l2cap_debugfs_fops
= {
5590 .open
= l2cap_debugfs_open
,
5592 .llseek
= seq_lseek
,
5593 .release
= single_release
,
5596 static struct dentry
*l2cap_debugfs
;
5598 int __init
l2cap_init(void)
5602 err
= l2cap_init_sockets();
5607 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
5608 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
5610 BT_ERR("Failed to create L2CAP debug file");
5616 void l2cap_exit(void)
5618 debugfs_remove(l2cap_debugfs
);
5619 l2cap_cleanup_sockets();
5622 module_param(disable_ertm
, bool, 0644);
5623 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");