2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
44 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
45 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
47 static LIST_HEAD(chan_list
);
48 static DEFINE_RWLOCK(chan_list_lock
);
50 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
51 u8 code
, u8 ident
, u16 dlen
, void *data
);
52 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
54 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
55 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
56 struct l2cap_chan
*chan
, int err
);
58 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
59 struct sk_buff_head
*skbs
, u8 event
);
61 /* ---- L2CAP channels ---- */
63 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
68 list_for_each_entry(c
, &conn
->chan_l
, list
) {
75 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
80 list_for_each_entry(c
, &conn
->chan_l
, list
) {
87 /* Find channel with given SCID.
88 * Returns locked channel. */
89 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
94 mutex_lock(&conn
->chan_lock
);
95 c
= __l2cap_get_chan_by_scid(conn
, cid
);
98 mutex_unlock(&conn
->chan_lock
);
103 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
106 struct l2cap_chan
*c
;
108 list_for_each_entry(c
, &conn
->chan_l
, list
) {
109 if (c
->ident
== ident
)
115 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
117 struct l2cap_chan
*c
;
119 list_for_each_entry(c
, &chan_list
, global_l
) {
120 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
126 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
130 write_lock(&chan_list_lock
);
132 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
145 for (p
= 0x1001; p
< 0x1100; p
+= 2)
146 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
147 chan
->psm
= cpu_to_le16(p
);
148 chan
->sport
= cpu_to_le16(p
);
155 write_unlock(&chan_list_lock
);
159 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
161 write_lock(&chan_list_lock
);
165 write_unlock(&chan_list_lock
);
170 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
172 u16 cid
= L2CAP_CID_DYN_START
;
174 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
175 if (!__l2cap_get_chan_by_scid(conn
, cid
))
182 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
184 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
185 state_to_string(state
));
188 chan
->ops
->state_change(chan
, state
);
191 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
193 struct sock
*sk
= chan
->sk
;
196 __l2cap_state_change(chan
, state
);
200 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
202 struct sock
*sk
= chan
->sk
;
207 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
209 struct sock
*sk
= chan
->sk
;
212 __l2cap_chan_set_err(chan
, err
);
216 static void __set_retrans_timer(struct l2cap_chan
*chan
)
218 if (!delayed_work_pending(&chan
->monitor_timer
) &&
219 chan
->retrans_timeout
) {
220 l2cap_set_timer(chan
, &chan
->retrans_timer
,
221 msecs_to_jiffies(chan
->retrans_timeout
));
225 static void __set_monitor_timer(struct l2cap_chan
*chan
)
227 __clear_retrans_timer(chan
);
228 if (chan
->monitor_timeout
) {
229 l2cap_set_timer(chan
, &chan
->monitor_timer
,
230 msecs_to_jiffies(chan
->monitor_timeout
));
234 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
239 skb_queue_walk(head
, skb
) {
240 if (bt_cb(skb
)->control
.txseq
== seq
)
247 /* ---- L2CAP sequence number lists ---- */
249 /* For ERTM, ordered lists of sequence numbers must be tracked for
250 * SREJ requests that are received and for frames that are to be
251 * retransmitted. These seq_list functions implement a singly-linked
252 * list in an array, where membership in the list can also be checked
253 * in constant time. Items can also be added to the tail of the list
254 * and removed from the head in constant time, without further memory
258 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
260 size_t alloc_size
, i
;
262 /* Allocated size is a power of 2 to map sequence numbers
263 * (which may be up to 14 bits) in to a smaller array that is
264 * sized for the negotiated ERTM transmit windows.
266 alloc_size
= roundup_pow_of_two(size
);
268 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
272 seq_list
->mask
= alloc_size
- 1;
273 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
274 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
275 for (i
= 0; i
< alloc_size
; i
++)
276 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
281 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
283 kfree(seq_list
->list
);
286 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
289 /* Constant-time check for list membership */
290 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
293 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
295 u16 mask
= seq_list
->mask
;
297 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
298 /* In case someone tries to pop the head of an empty list */
299 return L2CAP_SEQ_LIST_CLEAR
;
300 } else if (seq_list
->head
== seq
) {
301 /* Head can be removed in constant time */
302 seq_list
->head
= seq_list
->list
[seq
& mask
];
303 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
305 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
306 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
307 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
310 /* Walk the list to find the sequence number */
311 u16 prev
= seq_list
->head
;
312 while (seq_list
->list
[prev
& mask
] != seq
) {
313 prev
= seq_list
->list
[prev
& mask
];
314 if (prev
== L2CAP_SEQ_LIST_TAIL
)
315 return L2CAP_SEQ_LIST_CLEAR
;
318 /* Unlink the number from the list and clear it */
319 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
320 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
321 if (seq_list
->tail
== seq
)
322 seq_list
->tail
= prev
;
327 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
329 /* Remove the head in constant time */
330 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
333 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
337 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
340 for (i
= 0; i
<= seq_list
->mask
; i
++)
341 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
343 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
344 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
347 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
349 u16 mask
= seq_list
->mask
;
351 /* All appends happen in constant time */
353 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
356 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
357 seq_list
->head
= seq
;
359 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
361 seq_list
->tail
= seq
;
362 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
365 static void l2cap_chan_timeout(struct work_struct
*work
)
367 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
369 struct l2cap_conn
*conn
= chan
->conn
;
372 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
374 mutex_lock(&conn
->chan_lock
);
375 l2cap_chan_lock(chan
);
377 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
378 reason
= ECONNREFUSED
;
379 else if (chan
->state
== BT_CONNECT
&&
380 chan
->sec_level
!= BT_SECURITY_SDP
)
381 reason
= ECONNREFUSED
;
385 l2cap_chan_close(chan
, reason
);
387 l2cap_chan_unlock(chan
);
389 chan
->ops
->close(chan
);
390 mutex_unlock(&conn
->chan_lock
);
392 l2cap_chan_put(chan
);
395 struct l2cap_chan
*l2cap_chan_create(void)
397 struct l2cap_chan
*chan
;
399 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
403 mutex_init(&chan
->lock
);
405 write_lock(&chan_list_lock
);
406 list_add(&chan
->global_l
, &chan_list
);
407 write_unlock(&chan_list_lock
);
409 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
411 chan
->state
= BT_OPEN
;
413 kref_init(&chan
->kref
);
415 /* This flag is cleared in l2cap_chan_ready() */
416 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
418 BT_DBG("chan %p", chan
);
423 static void l2cap_chan_destroy(struct kref
*kref
)
425 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
427 BT_DBG("chan %p", chan
);
429 write_lock(&chan_list_lock
);
430 list_del(&chan
->global_l
);
431 write_unlock(&chan_list_lock
);
436 void l2cap_chan_hold(struct l2cap_chan
*c
)
438 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
443 void l2cap_chan_put(struct l2cap_chan
*c
)
445 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
447 kref_put(&c
->kref
, l2cap_chan_destroy
);
450 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
452 chan
->fcs
= L2CAP_FCS_CRC16
;
453 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
454 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
455 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
456 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
457 chan
->sec_level
= BT_SECURITY_LOW
;
459 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
462 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
464 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
465 __le16_to_cpu(chan
->psm
), chan
->dcid
);
467 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
471 switch (chan
->chan_type
) {
472 case L2CAP_CHAN_CONN_ORIENTED
:
473 if (conn
->hcon
->type
== LE_LINK
) {
475 chan
->omtu
= L2CAP_DEFAULT_MTU
;
476 chan
->scid
= L2CAP_CID_LE_DATA
;
477 chan
->dcid
= L2CAP_CID_LE_DATA
;
479 /* Alloc CID for connection-oriented socket */
480 chan
->scid
= l2cap_alloc_cid(conn
);
481 chan
->omtu
= L2CAP_DEFAULT_MTU
;
485 case L2CAP_CHAN_CONN_LESS
:
486 /* Connectionless socket */
487 chan
->scid
= L2CAP_CID_CONN_LESS
;
488 chan
->dcid
= L2CAP_CID_CONN_LESS
;
489 chan
->omtu
= L2CAP_DEFAULT_MTU
;
492 case L2CAP_CHAN_CONN_FIX_A2MP
:
493 chan
->scid
= L2CAP_CID_A2MP
;
494 chan
->dcid
= L2CAP_CID_A2MP
;
495 chan
->omtu
= L2CAP_A2MP_DEFAULT_MTU
;
496 chan
->imtu
= L2CAP_A2MP_DEFAULT_MTU
;
500 /* Raw socket can send/recv signalling messages only */
501 chan
->scid
= L2CAP_CID_SIGNALING
;
502 chan
->dcid
= L2CAP_CID_SIGNALING
;
503 chan
->omtu
= L2CAP_DEFAULT_MTU
;
506 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
507 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
508 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
509 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
510 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
511 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
513 l2cap_chan_hold(chan
);
515 list_add(&chan
->list
, &conn
->chan_l
);
518 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
520 mutex_lock(&conn
->chan_lock
);
521 __l2cap_chan_add(conn
, chan
);
522 mutex_unlock(&conn
->chan_lock
);
525 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
527 struct l2cap_conn
*conn
= chan
->conn
;
529 __clear_chan_timer(chan
);
531 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
534 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
535 /* Delete from channel list */
536 list_del(&chan
->list
);
538 l2cap_chan_put(chan
);
542 if (chan
->chan_type
!= L2CAP_CHAN_CONN_FIX_A2MP
)
543 hci_conn_put(conn
->hcon
);
545 if (mgr
&& mgr
->bredr_chan
== chan
)
546 mgr
->bredr_chan
= NULL
;
549 chan
->ops
->teardown(chan
, err
);
551 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
555 case L2CAP_MODE_BASIC
:
558 case L2CAP_MODE_ERTM
:
559 __clear_retrans_timer(chan
);
560 __clear_monitor_timer(chan
);
561 __clear_ack_timer(chan
);
563 skb_queue_purge(&chan
->srej_q
);
565 l2cap_seq_list_free(&chan
->srej_list
);
566 l2cap_seq_list_free(&chan
->retrans_list
);
570 case L2CAP_MODE_STREAMING
:
571 skb_queue_purge(&chan
->tx_q
);
578 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
580 struct l2cap_conn
*conn
= chan
->conn
;
581 struct sock
*sk
= chan
->sk
;
583 BT_DBG("chan %p state %s sk %p", chan
, state_to_string(chan
->state
),
586 switch (chan
->state
) {
588 chan
->ops
->teardown(chan
, 0);
593 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
594 conn
->hcon
->type
== ACL_LINK
) {
595 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
596 l2cap_send_disconn_req(conn
, chan
, reason
);
598 l2cap_chan_del(chan
, reason
);
602 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
603 conn
->hcon
->type
== ACL_LINK
) {
604 struct l2cap_conn_rsp rsp
;
607 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
))
608 result
= L2CAP_CR_SEC_BLOCK
;
610 result
= L2CAP_CR_BAD_PSM
;
611 l2cap_state_change(chan
, BT_DISCONN
);
613 rsp
.scid
= cpu_to_le16(chan
->dcid
);
614 rsp
.dcid
= cpu_to_le16(chan
->scid
);
615 rsp
.result
= cpu_to_le16(result
);
616 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
617 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
621 l2cap_chan_del(chan
, reason
);
626 l2cap_chan_del(chan
, reason
);
630 chan
->ops
->teardown(chan
, 0);
635 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
637 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
638 switch (chan
->sec_level
) {
639 case BT_SECURITY_HIGH
:
640 return HCI_AT_DEDICATED_BONDING_MITM
;
641 case BT_SECURITY_MEDIUM
:
642 return HCI_AT_DEDICATED_BONDING
;
644 return HCI_AT_NO_BONDING
;
646 } else if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_SDP
)) {
647 if (chan
->sec_level
== BT_SECURITY_LOW
)
648 chan
->sec_level
= BT_SECURITY_SDP
;
650 if (chan
->sec_level
== BT_SECURITY_HIGH
)
651 return HCI_AT_NO_BONDING_MITM
;
653 return HCI_AT_NO_BONDING
;
655 switch (chan
->sec_level
) {
656 case BT_SECURITY_HIGH
:
657 return HCI_AT_GENERAL_BONDING_MITM
;
658 case BT_SECURITY_MEDIUM
:
659 return HCI_AT_GENERAL_BONDING
;
661 return HCI_AT_NO_BONDING
;
666 /* Service level security */
667 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
669 struct l2cap_conn
*conn
= chan
->conn
;
672 auth_type
= l2cap_get_auth_type(chan
);
674 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
677 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
681 /* Get next available identificator.
682 * 1 - 128 are used by kernel.
683 * 129 - 199 are reserved.
684 * 200 - 254 are used by utilities like l2ping, etc.
687 spin_lock(&conn
->lock
);
689 if (++conn
->tx_ident
> 128)
694 spin_unlock(&conn
->lock
);
699 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
702 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
705 BT_DBG("code 0x%2.2x", code
);
710 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
711 flags
= ACL_START_NO_FLUSH
;
715 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
716 skb
->priority
= HCI_PRIO_MAX
;
718 hci_send_acl(conn
->hchan
, skb
, flags
);
721 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
723 struct hci_conn
*hcon
= chan
->conn
->hcon
;
726 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
729 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
730 lmp_no_flush_capable(hcon
->hdev
))
731 flags
= ACL_START_NO_FLUSH
;
735 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
736 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
739 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
741 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
742 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
744 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
747 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
748 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
755 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
756 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
763 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
765 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
766 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
768 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
771 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
772 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
779 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
780 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
787 static inline void __unpack_control(struct l2cap_chan
*chan
,
790 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
791 __unpack_extended_control(get_unaligned_le32(skb
->data
),
792 &bt_cb(skb
)->control
);
793 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
795 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
796 &bt_cb(skb
)->control
);
797 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
801 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
805 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
806 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
808 if (control
->sframe
) {
809 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
810 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
811 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
813 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
814 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
820 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
824 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
825 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
827 if (control
->sframe
) {
828 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
829 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
830 packed
|= L2CAP_CTRL_FRAME_TYPE
;
832 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
833 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
839 static inline void __pack_control(struct l2cap_chan
*chan
,
840 struct l2cap_ctrl
*control
,
843 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
844 put_unaligned_le32(__pack_extended_control(control
),
845 skb
->data
+ L2CAP_HDR_SIZE
);
847 put_unaligned_le16(__pack_enhanced_control(control
),
848 skb
->data
+ L2CAP_HDR_SIZE
);
852 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
854 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
855 return L2CAP_EXT_HDR_SIZE
;
857 return L2CAP_ENH_HDR_SIZE
;
860 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
864 struct l2cap_hdr
*lh
;
865 int hlen
= __ertm_hdr_size(chan
);
867 if (chan
->fcs
== L2CAP_FCS_CRC16
)
868 hlen
+= L2CAP_FCS_SIZE
;
870 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
873 return ERR_PTR(-ENOMEM
);
875 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
876 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
877 lh
->cid
= cpu_to_le16(chan
->dcid
);
879 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
880 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
882 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
884 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
885 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
886 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
889 skb
->priority
= HCI_PRIO_MAX
;
893 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
894 struct l2cap_ctrl
*control
)
899 BT_DBG("chan %p, control %p", chan
, control
);
901 if (!control
->sframe
)
904 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
908 if (control
->super
== L2CAP_SUPER_RR
)
909 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
910 else if (control
->super
== L2CAP_SUPER_RNR
)
911 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
913 if (control
->super
!= L2CAP_SUPER_SREJ
) {
914 chan
->last_acked_seq
= control
->reqseq
;
915 __clear_ack_timer(chan
);
918 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
919 control
->final
, control
->poll
, control
->super
);
921 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
922 control_field
= __pack_extended_control(control
);
924 control_field
= __pack_enhanced_control(control
);
926 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
928 l2cap_do_send(chan
, skb
);
931 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
933 struct l2cap_ctrl control
;
935 BT_DBG("chan %p, poll %d", chan
, poll
);
937 memset(&control
, 0, sizeof(control
));
941 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
942 control
.super
= L2CAP_SUPER_RNR
;
944 control
.super
= L2CAP_SUPER_RR
;
946 control
.reqseq
= chan
->buffer_seq
;
947 l2cap_send_sframe(chan
, &control
);
950 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
952 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
955 static bool __amp_capable(struct l2cap_chan
*chan
)
957 struct l2cap_conn
*conn
= chan
->conn
;
960 chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
&&
961 conn
->fixed_chan_mask
& L2CAP_FC_A2MP
)
967 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
969 struct l2cap_conn
*conn
= chan
->conn
;
970 struct l2cap_conn_req req
;
972 req
.scid
= cpu_to_le16(chan
->scid
);
975 chan
->ident
= l2cap_get_ident(conn
);
977 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
979 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
982 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
984 /* This clears all conf flags, including CONF_NOT_COMPLETE */
985 chan
->conf_state
= 0;
986 __clear_chan_timer(chan
);
988 chan
->state
= BT_CONNECTED
;
990 chan
->ops
->ready(chan
);
993 static void l2cap_start_connection(struct l2cap_chan
*chan
)
995 if (__amp_capable(chan
)) {
996 BT_DBG("chan %p AMP capable: discover AMPs", chan
);
997 a2mp_discover_amp(chan
);
999 l2cap_send_conn_req(chan
);
1003 static void l2cap_do_start(struct l2cap_chan
*chan
)
1005 struct l2cap_conn
*conn
= chan
->conn
;
1007 if (conn
->hcon
->type
== LE_LINK
) {
1008 l2cap_chan_ready(chan
);
1012 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1013 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1016 if (l2cap_chan_check_security(chan
) &&
1017 __l2cap_no_conn_pending(chan
)) {
1018 l2cap_start_connection(chan
);
1021 struct l2cap_info_req req
;
1022 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1024 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1025 conn
->info_ident
= l2cap_get_ident(conn
);
1027 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1029 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1034 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1036 u32 local_feat_mask
= l2cap_feat_mask
;
1038 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1041 case L2CAP_MODE_ERTM
:
1042 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1043 case L2CAP_MODE_STREAMING
:
1044 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1050 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
1051 struct l2cap_chan
*chan
, int err
)
1053 struct sock
*sk
= chan
->sk
;
1054 struct l2cap_disconn_req req
;
1059 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1060 __clear_retrans_timer(chan
);
1061 __clear_monitor_timer(chan
);
1062 __clear_ack_timer(chan
);
1065 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1066 l2cap_state_change(chan
, BT_DISCONN
);
1070 req
.dcid
= cpu_to_le16(chan
->dcid
);
1071 req
.scid
= cpu_to_le16(chan
->scid
);
1072 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1076 __l2cap_state_change(chan
, BT_DISCONN
);
1077 __l2cap_chan_set_err(chan
, err
);
1081 /* ---- L2CAP connections ---- */
1082 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1084 struct l2cap_chan
*chan
, *tmp
;
1086 BT_DBG("conn %p", conn
);
1088 mutex_lock(&conn
->chan_lock
);
1090 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1091 struct sock
*sk
= chan
->sk
;
1093 l2cap_chan_lock(chan
);
1095 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1096 l2cap_chan_unlock(chan
);
1100 if (chan
->state
== BT_CONNECT
) {
1101 if (!l2cap_chan_check_security(chan
) ||
1102 !__l2cap_no_conn_pending(chan
)) {
1103 l2cap_chan_unlock(chan
);
1107 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1108 && test_bit(CONF_STATE2_DEVICE
,
1109 &chan
->conf_state
)) {
1110 l2cap_chan_close(chan
, ECONNRESET
);
1111 l2cap_chan_unlock(chan
);
1115 l2cap_start_connection(chan
);
1117 } else if (chan
->state
== BT_CONNECT2
) {
1118 struct l2cap_conn_rsp rsp
;
1120 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1121 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1123 if (l2cap_chan_check_security(chan
)) {
1125 if (test_bit(BT_SK_DEFER_SETUP
,
1126 &bt_sk(sk
)->flags
)) {
1127 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1128 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1129 chan
->ops
->defer(chan
);
1132 __l2cap_state_change(chan
, BT_CONFIG
);
1133 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
1134 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
1138 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1139 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1142 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1145 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1146 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1147 l2cap_chan_unlock(chan
);
1151 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1152 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1153 l2cap_build_conf_req(chan
, buf
), buf
);
1154 chan
->num_conf_req
++;
1157 l2cap_chan_unlock(chan
);
1160 mutex_unlock(&conn
->chan_lock
);
1163 /* Find socket with cid and source/destination bdaddr.
1164 * Returns closest match, locked.
1166 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1170 struct l2cap_chan
*c
, *c1
= NULL
;
1172 read_lock(&chan_list_lock
);
1174 list_for_each_entry(c
, &chan_list
, global_l
) {
1175 struct sock
*sk
= c
->sk
;
1177 if (state
&& c
->state
!= state
)
1180 if (c
->scid
== cid
) {
1181 int src_match
, dst_match
;
1182 int src_any
, dst_any
;
1185 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1186 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1187 if (src_match
&& dst_match
) {
1188 read_unlock(&chan_list_lock
);
1193 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1194 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1195 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1196 (src_any
&& dst_any
))
1201 read_unlock(&chan_list_lock
);
1206 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1208 struct sock
*parent
, *sk
;
1209 struct l2cap_chan
*chan
, *pchan
;
1213 /* Check if we have socket listening on cid */
1214 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1215 conn
->src
, conn
->dst
);
1223 chan
= pchan
->ops
->new_connection(pchan
);
1229 hci_conn_hold(conn
->hcon
);
1230 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
1232 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1233 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1235 l2cap_chan_add(conn
, chan
);
1237 l2cap_chan_ready(chan
);
1240 release_sock(parent
);
1243 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1245 struct l2cap_chan
*chan
;
1246 struct hci_conn
*hcon
= conn
->hcon
;
1248 BT_DBG("conn %p", conn
);
1250 if (!hcon
->out
&& hcon
->type
== LE_LINK
)
1251 l2cap_le_conn_ready(conn
);
1253 if (hcon
->out
&& hcon
->type
== LE_LINK
)
1254 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1256 mutex_lock(&conn
->chan_lock
);
1258 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1260 l2cap_chan_lock(chan
);
1262 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1263 l2cap_chan_unlock(chan
);
1267 if (hcon
->type
== LE_LINK
) {
1268 if (smp_conn_security(hcon
, chan
->sec_level
))
1269 l2cap_chan_ready(chan
);
1271 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1272 struct sock
*sk
= chan
->sk
;
1273 __clear_chan_timer(chan
);
1275 __l2cap_state_change(chan
, BT_CONNECTED
);
1276 sk
->sk_state_change(sk
);
1279 } else if (chan
->state
== BT_CONNECT
)
1280 l2cap_do_start(chan
);
1282 l2cap_chan_unlock(chan
);
1285 mutex_unlock(&conn
->chan_lock
);
1288 /* Notify sockets that we cannot guaranty reliability anymore */
1289 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1291 struct l2cap_chan
*chan
;
1293 BT_DBG("conn %p", conn
);
1295 mutex_lock(&conn
->chan_lock
);
1297 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1298 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1299 l2cap_chan_set_err(chan
, err
);
1302 mutex_unlock(&conn
->chan_lock
);
1305 static void l2cap_info_timeout(struct work_struct
*work
)
1307 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1310 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1311 conn
->info_ident
= 0;
1313 l2cap_conn_start(conn
);
1316 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1318 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1319 struct l2cap_chan
*chan
, *l
;
1324 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1326 kfree_skb(conn
->rx_skb
);
1328 mutex_lock(&conn
->chan_lock
);
1331 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1332 l2cap_chan_hold(chan
);
1333 l2cap_chan_lock(chan
);
1335 l2cap_chan_del(chan
, err
);
1337 l2cap_chan_unlock(chan
);
1339 chan
->ops
->close(chan
);
1340 l2cap_chan_put(chan
);
1343 mutex_unlock(&conn
->chan_lock
);
1345 hci_chan_del(conn
->hchan
);
1347 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1348 cancel_delayed_work_sync(&conn
->info_timer
);
1350 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1351 cancel_delayed_work_sync(&conn
->security_timer
);
1352 smp_chan_destroy(conn
);
1355 hcon
->l2cap_data
= NULL
;
1359 static void security_timeout(struct work_struct
*work
)
1361 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1362 security_timer
.work
);
1364 BT_DBG("conn %p", conn
);
1366 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &conn
->hcon
->flags
)) {
1367 smp_chan_destroy(conn
);
1368 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1372 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1374 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1375 struct hci_chan
*hchan
;
1380 hchan
= hci_chan_create(hcon
);
1384 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_KERNEL
);
1386 hci_chan_del(hchan
);
1390 hcon
->l2cap_data
= conn
;
1392 conn
->hchan
= hchan
;
1394 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1396 switch (hcon
->type
) {
1398 conn
->mtu
= hcon
->hdev
->block_mtu
;
1402 if (hcon
->hdev
->le_mtu
) {
1403 conn
->mtu
= hcon
->hdev
->le_mtu
;
1409 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1413 conn
->src
= &hcon
->hdev
->bdaddr
;
1414 conn
->dst
= &hcon
->dst
;
1416 conn
->feat_mask
= 0;
1418 spin_lock_init(&conn
->lock
);
1419 mutex_init(&conn
->chan_lock
);
1421 INIT_LIST_HEAD(&conn
->chan_l
);
1423 if (hcon
->type
== LE_LINK
)
1424 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1426 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1428 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1433 /* ---- Socket interface ---- */
1435 /* Find socket with psm and source / destination bdaddr.
1436 * Returns closest match.
1438 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1442 struct l2cap_chan
*c
, *c1
= NULL
;
1444 read_lock(&chan_list_lock
);
1446 list_for_each_entry(c
, &chan_list
, global_l
) {
1447 struct sock
*sk
= c
->sk
;
1449 if (state
&& c
->state
!= state
)
1452 if (c
->psm
== psm
) {
1453 int src_match
, dst_match
;
1454 int src_any
, dst_any
;
1457 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1458 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1459 if (src_match
&& dst_match
) {
1460 read_unlock(&chan_list_lock
);
1465 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1466 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1467 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1468 (src_any
&& dst_any
))
1473 read_unlock(&chan_list_lock
);
1478 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1479 bdaddr_t
*dst
, u8 dst_type
)
1481 struct sock
*sk
= chan
->sk
;
1482 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1483 struct l2cap_conn
*conn
;
1484 struct hci_conn
*hcon
;
1485 struct hci_dev
*hdev
;
1489 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src
, dst
,
1490 dst_type
, __le16_to_cpu(psm
));
1492 hdev
= hci_get_route(dst
, src
);
1494 return -EHOSTUNREACH
;
1498 l2cap_chan_lock(chan
);
1500 /* PSM must be odd and lsb of upper byte must be 0 */
1501 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1502 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1507 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1512 switch (chan
->mode
) {
1513 case L2CAP_MODE_BASIC
:
1515 case L2CAP_MODE_ERTM
:
1516 case L2CAP_MODE_STREAMING
:
1525 switch (chan
->state
) {
1529 /* Already connecting */
1534 /* Already connected */
1548 /* Set destination address and psm */
1550 bacpy(&bt_sk(sk
)->dst
, dst
);
1556 auth_type
= l2cap_get_auth_type(chan
);
1558 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1559 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1560 chan
->sec_level
, auth_type
);
1562 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1563 chan
->sec_level
, auth_type
);
1566 err
= PTR_ERR(hcon
);
1570 conn
= l2cap_conn_add(hcon
, 0);
1577 if (hcon
->type
== LE_LINK
) {
1580 if (!list_empty(&conn
->chan_l
)) {
1589 /* Update source addr of the socket */
1590 bacpy(src
, conn
->src
);
1592 l2cap_chan_unlock(chan
);
1593 l2cap_chan_add(conn
, chan
);
1594 l2cap_chan_lock(chan
);
1596 l2cap_state_change(chan
, BT_CONNECT
);
1597 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1599 if (hcon
->state
== BT_CONNECTED
) {
1600 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1601 __clear_chan_timer(chan
);
1602 if (l2cap_chan_check_security(chan
))
1603 l2cap_state_change(chan
, BT_CONNECTED
);
1605 l2cap_do_start(chan
);
1611 l2cap_chan_unlock(chan
);
1612 hci_dev_unlock(hdev
);
1617 int __l2cap_wait_ack(struct sock
*sk
)
1619 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1620 DECLARE_WAITQUEUE(wait
, current
);
1624 add_wait_queue(sk_sleep(sk
), &wait
);
1625 set_current_state(TASK_INTERRUPTIBLE
);
1626 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1630 if (signal_pending(current
)) {
1631 err
= sock_intr_errno(timeo
);
1636 timeo
= schedule_timeout(timeo
);
1638 set_current_state(TASK_INTERRUPTIBLE
);
1640 err
= sock_error(sk
);
1644 set_current_state(TASK_RUNNING
);
1645 remove_wait_queue(sk_sleep(sk
), &wait
);
1649 static void l2cap_monitor_timeout(struct work_struct
*work
)
1651 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1652 monitor_timer
.work
);
1654 BT_DBG("chan %p", chan
);
1656 l2cap_chan_lock(chan
);
1659 l2cap_chan_unlock(chan
);
1660 l2cap_chan_put(chan
);
1664 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1666 l2cap_chan_unlock(chan
);
1667 l2cap_chan_put(chan
);
1670 static void l2cap_retrans_timeout(struct work_struct
*work
)
1672 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1673 retrans_timer
.work
);
1675 BT_DBG("chan %p", chan
);
1677 l2cap_chan_lock(chan
);
1680 l2cap_chan_unlock(chan
);
1681 l2cap_chan_put(chan
);
1685 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1686 l2cap_chan_unlock(chan
);
1687 l2cap_chan_put(chan
);
1690 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1691 struct sk_buff_head
*skbs
)
1693 struct sk_buff
*skb
;
1694 struct l2cap_ctrl
*control
;
1696 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1698 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1700 while (!skb_queue_empty(&chan
->tx_q
)) {
1702 skb
= skb_dequeue(&chan
->tx_q
);
1704 bt_cb(skb
)->control
.retries
= 1;
1705 control
= &bt_cb(skb
)->control
;
1707 control
->reqseq
= 0;
1708 control
->txseq
= chan
->next_tx_seq
;
1710 __pack_control(chan
, control
, skb
);
1712 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1713 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1714 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1717 l2cap_do_send(chan
, skb
);
1719 BT_DBG("Sent txseq %u", control
->txseq
);
1721 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1722 chan
->frames_sent
++;
1726 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1728 struct sk_buff
*skb
, *tx_skb
;
1729 struct l2cap_ctrl
*control
;
1732 BT_DBG("chan %p", chan
);
1734 if (chan
->state
!= BT_CONNECTED
)
1737 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1740 while (chan
->tx_send_head
&&
1741 chan
->unacked_frames
< chan
->remote_tx_win
&&
1742 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1744 skb
= chan
->tx_send_head
;
1746 bt_cb(skb
)->control
.retries
= 1;
1747 control
= &bt_cb(skb
)->control
;
1749 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1752 control
->reqseq
= chan
->buffer_seq
;
1753 chan
->last_acked_seq
= chan
->buffer_seq
;
1754 control
->txseq
= chan
->next_tx_seq
;
1756 __pack_control(chan
, control
, skb
);
1758 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1759 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1760 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1763 /* Clone after data has been modified. Data is assumed to be
1764 read-only (for locking purposes) on cloned sk_buffs.
1766 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1771 __set_retrans_timer(chan
);
1773 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1774 chan
->unacked_frames
++;
1775 chan
->frames_sent
++;
1778 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1779 chan
->tx_send_head
= NULL
;
1781 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1783 l2cap_do_send(chan
, tx_skb
);
1784 BT_DBG("Sent txseq %u", control
->txseq
);
1787 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
1788 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1793 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1795 struct l2cap_ctrl control
;
1796 struct sk_buff
*skb
;
1797 struct sk_buff
*tx_skb
;
1800 BT_DBG("chan %p", chan
);
1802 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1805 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1806 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1808 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1810 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1815 bt_cb(skb
)->control
.retries
++;
1816 control
= bt_cb(skb
)->control
;
1818 if (chan
->max_tx
!= 0 &&
1819 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1820 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1821 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
1822 l2cap_seq_list_clear(&chan
->retrans_list
);
1826 control
.reqseq
= chan
->buffer_seq
;
1827 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1832 if (skb_cloned(skb
)) {
1833 /* Cloned sk_buffs are read-only, so we need a
1836 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
1838 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1842 l2cap_seq_list_clear(&chan
->retrans_list
);
1846 /* Update skb contents */
1847 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1848 put_unaligned_le32(__pack_extended_control(&control
),
1849 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1851 put_unaligned_le16(__pack_enhanced_control(&control
),
1852 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1855 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1856 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
1857 put_unaligned_le16(fcs
, skb_put(tx_skb
,
1861 l2cap_do_send(chan
, tx_skb
);
1863 BT_DBG("Resent txseq %d", control
.txseq
);
1865 chan
->last_acked_seq
= chan
->buffer_seq
;
1869 static void l2cap_retransmit(struct l2cap_chan
*chan
,
1870 struct l2cap_ctrl
*control
)
1872 BT_DBG("chan %p, control %p", chan
, control
);
1874 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
1875 l2cap_ertm_resend(chan
);
1878 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
1879 struct l2cap_ctrl
*control
)
1881 struct sk_buff
*skb
;
1883 BT_DBG("chan %p, control %p", chan
, control
);
1886 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
1888 l2cap_seq_list_clear(&chan
->retrans_list
);
1890 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1893 if (chan
->unacked_frames
) {
1894 skb_queue_walk(&chan
->tx_q
, skb
) {
1895 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
1896 skb
== chan
->tx_send_head
)
1900 skb_queue_walk_from(&chan
->tx_q
, skb
) {
1901 if (skb
== chan
->tx_send_head
)
1904 l2cap_seq_list_append(&chan
->retrans_list
,
1905 bt_cb(skb
)->control
.txseq
);
1908 l2cap_ertm_resend(chan
);
1912 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1914 struct l2cap_ctrl control
;
1915 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
1916 chan
->last_acked_seq
);
1919 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1920 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
1922 memset(&control
, 0, sizeof(control
));
1925 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
1926 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
1927 __clear_ack_timer(chan
);
1928 control
.super
= L2CAP_SUPER_RNR
;
1929 control
.reqseq
= chan
->buffer_seq
;
1930 l2cap_send_sframe(chan
, &control
);
1932 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
1933 l2cap_ertm_send(chan
);
1934 /* If any i-frames were sent, they included an ack */
1935 if (chan
->buffer_seq
== chan
->last_acked_seq
)
1939 /* Ack now if the window is 3/4ths full.
1940 * Calculate without mul or div
1942 threshold
= chan
->ack_win
;
1943 threshold
+= threshold
<< 1;
1946 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
1949 if (frames_to_ack
>= threshold
) {
1950 __clear_ack_timer(chan
);
1951 control
.super
= L2CAP_SUPER_RR
;
1952 control
.reqseq
= chan
->buffer_seq
;
1953 l2cap_send_sframe(chan
, &control
);
1958 __set_ack_timer(chan
);
1962 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
1963 struct msghdr
*msg
, int len
,
1964 int count
, struct sk_buff
*skb
)
1966 struct l2cap_conn
*conn
= chan
->conn
;
1967 struct sk_buff
**frag
;
1970 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1976 /* Continuation fragments (no L2CAP header) */
1977 frag
= &skb_shinfo(skb
)->frag_list
;
1979 struct sk_buff
*tmp
;
1981 count
= min_t(unsigned int, conn
->mtu
, len
);
1983 tmp
= chan
->ops
->alloc_skb(chan
, count
,
1984 msg
->msg_flags
& MSG_DONTWAIT
);
1986 return PTR_ERR(tmp
);
1990 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1993 (*frag
)->priority
= skb
->priority
;
1998 skb
->len
+= (*frag
)->len
;
1999 skb
->data_len
+= (*frag
)->len
;
2001 frag
= &(*frag
)->next
;
2007 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2008 struct msghdr
*msg
, size_t len
,
2011 struct l2cap_conn
*conn
= chan
->conn
;
2012 struct sk_buff
*skb
;
2013 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2014 struct l2cap_hdr
*lh
;
2016 BT_DBG("chan %p len %zu priority %u", chan
, len
, priority
);
2018 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2020 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2021 msg
->msg_flags
& MSG_DONTWAIT
);
2025 skb
->priority
= priority
;
2027 /* Create L2CAP header */
2028 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2029 lh
->cid
= cpu_to_le16(chan
->dcid
);
2030 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2031 put_unaligned(chan
->psm
, skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2033 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2034 if (unlikely(err
< 0)) {
2036 return ERR_PTR(err
);
2041 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2042 struct msghdr
*msg
, size_t len
,
2045 struct l2cap_conn
*conn
= chan
->conn
;
2046 struct sk_buff
*skb
;
2048 struct l2cap_hdr
*lh
;
2050 BT_DBG("chan %p len %zu", chan
, len
);
2052 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2054 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
2055 msg
->msg_flags
& MSG_DONTWAIT
);
2059 skb
->priority
= priority
;
2061 /* Create L2CAP header */
2062 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2063 lh
->cid
= cpu_to_le16(chan
->dcid
);
2064 lh
->len
= cpu_to_le16(len
);
2066 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2067 if (unlikely(err
< 0)) {
2069 return ERR_PTR(err
);
2074 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2075 struct msghdr
*msg
, size_t len
,
2078 struct l2cap_conn
*conn
= chan
->conn
;
2079 struct sk_buff
*skb
;
2080 int err
, count
, hlen
;
2081 struct l2cap_hdr
*lh
;
2083 BT_DBG("chan %p len %zu", chan
, len
);
2086 return ERR_PTR(-ENOTCONN
);
2088 hlen
= __ertm_hdr_size(chan
);
2091 hlen
+= L2CAP_SDULEN_SIZE
;
2093 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2094 hlen
+= L2CAP_FCS_SIZE
;
2096 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2098 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2099 msg
->msg_flags
& MSG_DONTWAIT
);
2103 /* Create L2CAP header */
2104 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2105 lh
->cid
= cpu_to_le16(chan
->dcid
);
2106 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2108 /* Control header is populated later */
2109 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2110 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2112 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2115 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2117 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2118 if (unlikely(err
< 0)) {
2120 return ERR_PTR(err
);
2123 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2124 bt_cb(skb
)->control
.retries
= 0;
2128 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2129 struct sk_buff_head
*seg_queue
,
2130 struct msghdr
*msg
, size_t len
)
2132 struct sk_buff
*skb
;
2137 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2139 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2140 * so fragmented skbs are not used. The HCI layer's handling
2141 * of fragmented skbs is not compatible with ERTM's queueing.
2144 /* PDU size is derived from the HCI MTU */
2145 pdu_len
= chan
->conn
->mtu
;
2147 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2149 /* Adjust for largest possible L2CAP overhead. */
2151 pdu_len
-= L2CAP_FCS_SIZE
;
2153 pdu_len
-= __ertm_hdr_size(chan
);
2155 /* Remote device may have requested smaller PDUs */
2156 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2158 if (len
<= pdu_len
) {
2159 sar
= L2CAP_SAR_UNSEGMENTED
;
2163 sar
= L2CAP_SAR_START
;
2165 pdu_len
-= L2CAP_SDULEN_SIZE
;
2169 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2172 __skb_queue_purge(seg_queue
);
2173 return PTR_ERR(skb
);
2176 bt_cb(skb
)->control
.sar
= sar
;
2177 __skb_queue_tail(seg_queue
, skb
);
2182 pdu_len
+= L2CAP_SDULEN_SIZE
;
2185 if (len
<= pdu_len
) {
2186 sar
= L2CAP_SAR_END
;
2189 sar
= L2CAP_SAR_CONTINUE
;
2196 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2199 struct sk_buff
*skb
;
2201 struct sk_buff_head seg_queue
;
2203 /* Connectionless channel */
2204 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2205 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2207 return PTR_ERR(skb
);
2209 l2cap_do_send(chan
, skb
);
2213 switch (chan
->mode
) {
2214 case L2CAP_MODE_BASIC
:
2215 /* Check outgoing MTU */
2216 if (len
> chan
->omtu
)
2219 /* Create a basic PDU */
2220 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2222 return PTR_ERR(skb
);
2224 l2cap_do_send(chan
, skb
);
2228 case L2CAP_MODE_ERTM
:
2229 case L2CAP_MODE_STREAMING
:
2230 /* Check outgoing MTU */
2231 if (len
> chan
->omtu
) {
2236 __skb_queue_head_init(&seg_queue
);
2238 /* Do segmentation before calling in to the state machine,
2239 * since it's possible to block while waiting for memory
2242 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2244 /* The channel could have been closed while segmenting,
2245 * check that it is still connected.
2247 if (chan
->state
!= BT_CONNECTED
) {
2248 __skb_queue_purge(&seg_queue
);
2255 if (chan
->mode
== L2CAP_MODE_ERTM
)
2256 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2258 l2cap_streaming_send(chan
, &seg_queue
);
2262 /* If the skbs were not queued for sending, they'll still be in
2263 * seg_queue and need to be purged.
2265 __skb_queue_purge(&seg_queue
);
2269 BT_DBG("bad state %1.1x", chan
->mode
);
2276 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2278 struct l2cap_ctrl control
;
2281 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2283 memset(&control
, 0, sizeof(control
));
2285 control
.super
= L2CAP_SUPER_SREJ
;
2287 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2288 seq
= __next_seq(chan
, seq
)) {
2289 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2290 control
.reqseq
= seq
;
2291 l2cap_send_sframe(chan
, &control
);
2292 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2296 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2299 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2301 struct l2cap_ctrl control
;
2303 BT_DBG("chan %p", chan
);
2305 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2308 memset(&control
, 0, sizeof(control
));
2310 control
.super
= L2CAP_SUPER_SREJ
;
2311 control
.reqseq
= chan
->srej_list
.tail
;
2312 l2cap_send_sframe(chan
, &control
);
2315 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2317 struct l2cap_ctrl control
;
2321 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2323 memset(&control
, 0, sizeof(control
));
2325 control
.super
= L2CAP_SUPER_SREJ
;
2327 /* Capture initial list head to allow only one pass through the list. */
2328 initial_head
= chan
->srej_list
.head
;
2331 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2332 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2335 control
.reqseq
= seq
;
2336 l2cap_send_sframe(chan
, &control
);
2337 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2338 } while (chan
->srej_list
.head
!= initial_head
);
2341 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2343 struct sk_buff
*acked_skb
;
2346 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2348 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2351 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2352 chan
->expected_ack_seq
, chan
->unacked_frames
);
2354 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2355 ackseq
= __next_seq(chan
, ackseq
)) {
2357 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2359 skb_unlink(acked_skb
, &chan
->tx_q
);
2360 kfree_skb(acked_skb
);
2361 chan
->unacked_frames
--;
2365 chan
->expected_ack_seq
= reqseq
;
2367 if (chan
->unacked_frames
== 0)
2368 __clear_retrans_timer(chan
);
2370 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2373 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2375 BT_DBG("chan %p", chan
);
2377 chan
->expected_tx_seq
= chan
->buffer_seq
;
2378 l2cap_seq_list_clear(&chan
->srej_list
);
2379 skb_queue_purge(&chan
->srej_q
);
2380 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2383 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2384 struct l2cap_ctrl
*control
,
2385 struct sk_buff_head
*skbs
, u8 event
)
2387 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2391 case L2CAP_EV_DATA_REQUEST
:
2392 if (chan
->tx_send_head
== NULL
)
2393 chan
->tx_send_head
= skb_peek(skbs
);
2395 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2396 l2cap_ertm_send(chan
);
2398 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2399 BT_DBG("Enter LOCAL_BUSY");
2400 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2402 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2403 /* The SREJ_SENT state must be aborted if we are to
2404 * enter the LOCAL_BUSY state.
2406 l2cap_abort_rx_srej_sent(chan
);
2409 l2cap_send_ack(chan
);
2412 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2413 BT_DBG("Exit LOCAL_BUSY");
2414 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2416 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2417 struct l2cap_ctrl local_control
;
2419 memset(&local_control
, 0, sizeof(local_control
));
2420 local_control
.sframe
= 1;
2421 local_control
.super
= L2CAP_SUPER_RR
;
2422 local_control
.poll
= 1;
2423 local_control
.reqseq
= chan
->buffer_seq
;
2424 l2cap_send_sframe(chan
, &local_control
);
2426 chan
->retry_count
= 1;
2427 __set_monitor_timer(chan
);
2428 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2431 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2432 l2cap_process_reqseq(chan
, control
->reqseq
);
2434 case L2CAP_EV_EXPLICIT_POLL
:
2435 l2cap_send_rr_or_rnr(chan
, 1);
2436 chan
->retry_count
= 1;
2437 __set_monitor_timer(chan
);
2438 __clear_ack_timer(chan
);
2439 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2441 case L2CAP_EV_RETRANS_TO
:
2442 l2cap_send_rr_or_rnr(chan
, 1);
2443 chan
->retry_count
= 1;
2444 __set_monitor_timer(chan
);
2445 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2447 case L2CAP_EV_RECV_FBIT
:
2448 /* Nothing to process */
2455 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2456 struct l2cap_ctrl
*control
,
2457 struct sk_buff_head
*skbs
, u8 event
)
2459 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2463 case L2CAP_EV_DATA_REQUEST
:
2464 if (chan
->tx_send_head
== NULL
)
2465 chan
->tx_send_head
= skb_peek(skbs
);
2466 /* Queue data, but don't send. */
2467 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2469 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2470 BT_DBG("Enter LOCAL_BUSY");
2471 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2473 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2474 /* The SREJ_SENT state must be aborted if we are to
2475 * enter the LOCAL_BUSY state.
2477 l2cap_abort_rx_srej_sent(chan
);
2480 l2cap_send_ack(chan
);
2483 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2484 BT_DBG("Exit LOCAL_BUSY");
2485 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2487 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2488 struct l2cap_ctrl local_control
;
2489 memset(&local_control
, 0, sizeof(local_control
));
2490 local_control
.sframe
= 1;
2491 local_control
.super
= L2CAP_SUPER_RR
;
2492 local_control
.poll
= 1;
2493 local_control
.reqseq
= chan
->buffer_seq
;
2494 l2cap_send_sframe(chan
, &local_control
);
2496 chan
->retry_count
= 1;
2497 __set_monitor_timer(chan
);
2498 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2501 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2502 l2cap_process_reqseq(chan
, control
->reqseq
);
2506 case L2CAP_EV_RECV_FBIT
:
2507 if (control
&& control
->final
) {
2508 __clear_monitor_timer(chan
);
2509 if (chan
->unacked_frames
> 0)
2510 __set_retrans_timer(chan
);
2511 chan
->retry_count
= 0;
2512 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2513 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2516 case L2CAP_EV_EXPLICIT_POLL
:
2519 case L2CAP_EV_MONITOR_TO
:
2520 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2521 l2cap_send_rr_or_rnr(chan
, 1);
2522 __set_monitor_timer(chan
);
2523 chan
->retry_count
++;
2525 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
2533 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2534 struct sk_buff_head
*skbs
, u8 event
)
2536 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2537 chan
, control
, skbs
, event
, chan
->tx_state
);
2539 switch (chan
->tx_state
) {
2540 case L2CAP_TX_STATE_XMIT
:
2541 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2543 case L2CAP_TX_STATE_WAIT_F
:
2544 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2552 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2553 struct l2cap_ctrl
*control
)
2555 BT_DBG("chan %p, control %p", chan
, control
);
2556 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2559 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2560 struct l2cap_ctrl
*control
)
2562 BT_DBG("chan %p, control %p", chan
, control
);
2563 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2566 /* Copy frame to all raw sockets on that connection */
2567 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2569 struct sk_buff
*nskb
;
2570 struct l2cap_chan
*chan
;
2572 BT_DBG("conn %p", conn
);
2574 mutex_lock(&conn
->chan_lock
);
2576 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2577 struct sock
*sk
= chan
->sk
;
2578 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2581 /* Don't send frame to the socket it came from */
2584 nskb
= skb_clone(skb
, GFP_KERNEL
);
2588 if (chan
->ops
->recv(chan
, nskb
))
2592 mutex_unlock(&conn
->chan_lock
);
2595 /* ---- L2CAP signalling commands ---- */
2596 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
2597 u8 ident
, u16 dlen
, void *data
)
2599 struct sk_buff
*skb
, **frag
;
2600 struct l2cap_cmd_hdr
*cmd
;
2601 struct l2cap_hdr
*lh
;
2604 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2605 conn
, code
, ident
, dlen
);
2607 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2608 count
= min_t(unsigned int, conn
->mtu
, len
);
2610 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
2614 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2615 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2617 if (conn
->hcon
->type
== LE_LINK
)
2618 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2620 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_SIGNALING
);
2622 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2625 cmd
->len
= cpu_to_le16(dlen
);
2628 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2629 memcpy(skb_put(skb
, count
), data
, count
);
2635 /* Continuation fragments (no L2CAP header) */
2636 frag
= &skb_shinfo(skb
)->frag_list
;
2638 count
= min_t(unsigned int, conn
->mtu
, len
);
2640 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
2644 memcpy(skb_put(*frag
, count
), data
, count
);
2649 frag
= &(*frag
)->next
;
2659 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
,
2662 struct l2cap_conf_opt
*opt
= *ptr
;
2665 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2673 *val
= *((u8
*) opt
->val
);
2677 *val
= get_unaligned_le16(opt
->val
);
2681 *val
= get_unaligned_le32(opt
->val
);
2685 *val
= (unsigned long) opt
->val
;
2689 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
2693 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2695 struct l2cap_conf_opt
*opt
= *ptr
;
2697 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
2704 *((u8
*) opt
->val
) = val
;
2708 put_unaligned_le16(val
, opt
->val
);
2712 put_unaligned_le32(val
, opt
->val
);
2716 memcpy(opt
->val
, (void *) val
, len
);
2720 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2723 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2725 struct l2cap_conf_efs efs
;
2727 switch (chan
->mode
) {
2728 case L2CAP_MODE_ERTM
:
2729 efs
.id
= chan
->local_id
;
2730 efs
.stype
= chan
->local_stype
;
2731 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2732 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2733 efs
.acc_lat
= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2734 efs
.flush_to
= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
2737 case L2CAP_MODE_STREAMING
:
2739 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2740 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2741 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2750 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
2751 (unsigned long) &efs
);
2754 static void l2cap_ack_timeout(struct work_struct
*work
)
2756 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2760 BT_DBG("chan %p", chan
);
2762 l2cap_chan_lock(chan
);
2764 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2765 chan
->last_acked_seq
);
2768 l2cap_send_rr_or_rnr(chan
, 0);
2770 l2cap_chan_unlock(chan
);
2771 l2cap_chan_put(chan
);
2774 int l2cap_ertm_init(struct l2cap_chan
*chan
)
2778 chan
->next_tx_seq
= 0;
2779 chan
->expected_tx_seq
= 0;
2780 chan
->expected_ack_seq
= 0;
2781 chan
->unacked_frames
= 0;
2782 chan
->buffer_seq
= 0;
2783 chan
->frames_sent
= 0;
2784 chan
->last_acked_seq
= 0;
2786 chan
->sdu_last_frag
= NULL
;
2789 skb_queue_head_init(&chan
->tx_q
);
2791 chan
->local_amp_id
= 0;
2793 chan
->move_state
= L2CAP_MOVE_STABLE
;
2794 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
2796 if (chan
->mode
!= L2CAP_MODE_ERTM
)
2799 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2800 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2802 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
2803 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
2804 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2806 skb_queue_head_init(&chan
->srej_q
);
2808 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
2812 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
2814 l2cap_seq_list_free(&chan
->srej_list
);
2819 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2822 case L2CAP_MODE_STREAMING
:
2823 case L2CAP_MODE_ERTM
:
2824 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2828 return L2CAP_MODE_BASIC
;
2832 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
2834 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
2837 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
2839 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
2842 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
2844 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
2845 __l2cap_ews_supported(chan
)) {
2846 /* use extended control field */
2847 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2848 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2850 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
2851 L2CAP_DEFAULT_TX_WINDOW
);
2852 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2854 chan
->ack_win
= chan
->tx_win
;
2857 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2859 struct l2cap_conf_req
*req
= data
;
2860 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2861 void *ptr
= req
->data
;
2864 BT_DBG("chan %p", chan
);
2866 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2869 switch (chan
->mode
) {
2870 case L2CAP_MODE_STREAMING
:
2871 case L2CAP_MODE_ERTM
:
2872 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2875 if (__l2cap_efs_supported(chan
))
2876 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2880 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2885 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2886 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2888 switch (chan
->mode
) {
2889 case L2CAP_MODE_BASIC
:
2890 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2891 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2894 rfc
.mode
= L2CAP_MODE_BASIC
;
2896 rfc
.max_transmit
= 0;
2897 rfc
.retrans_timeout
= 0;
2898 rfc
.monitor_timeout
= 0;
2899 rfc
.max_pdu_size
= 0;
2901 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2902 (unsigned long) &rfc
);
2905 case L2CAP_MODE_ERTM
:
2906 rfc
.mode
= L2CAP_MODE_ERTM
;
2907 rfc
.max_transmit
= chan
->max_tx
;
2908 rfc
.retrans_timeout
= 0;
2909 rfc
.monitor_timeout
= 0;
2911 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2912 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
2914 rfc
.max_pdu_size
= cpu_to_le16(size
);
2916 l2cap_txwin_setup(chan
);
2918 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2919 L2CAP_DEFAULT_TX_WINDOW
);
2921 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2922 (unsigned long) &rfc
);
2924 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2925 l2cap_add_opt_efs(&ptr
, chan
);
2927 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2930 if (chan
->fcs
== L2CAP_FCS_NONE
||
2931 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2932 chan
->fcs
= L2CAP_FCS_NONE
;
2933 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2936 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2937 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2941 case L2CAP_MODE_STREAMING
:
2942 l2cap_txwin_setup(chan
);
2943 rfc
.mode
= L2CAP_MODE_STREAMING
;
2945 rfc
.max_transmit
= 0;
2946 rfc
.retrans_timeout
= 0;
2947 rfc
.monitor_timeout
= 0;
2949 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2950 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
2952 rfc
.max_pdu_size
= cpu_to_le16(size
);
2954 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2955 (unsigned long) &rfc
);
2957 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2958 l2cap_add_opt_efs(&ptr
, chan
);
2960 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2963 if (chan
->fcs
== L2CAP_FCS_NONE
||
2964 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2965 chan
->fcs
= L2CAP_FCS_NONE
;
2966 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2971 req
->dcid
= cpu_to_le16(chan
->dcid
);
2972 req
->flags
= __constant_cpu_to_le16(0);
2977 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
2979 struct l2cap_conf_rsp
*rsp
= data
;
2980 void *ptr
= rsp
->data
;
2981 void *req
= chan
->conf_req
;
2982 int len
= chan
->conf_len
;
2983 int type
, hint
, olen
;
2985 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2986 struct l2cap_conf_efs efs
;
2988 u16 mtu
= L2CAP_DEFAULT_MTU
;
2989 u16 result
= L2CAP_CONF_SUCCESS
;
2992 BT_DBG("chan %p", chan
);
2994 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2995 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2997 hint
= type
& L2CAP_CONF_HINT
;
2998 type
&= L2CAP_CONF_MASK
;
3001 case L2CAP_CONF_MTU
:
3005 case L2CAP_CONF_FLUSH_TO
:
3006 chan
->flush_to
= val
;
3009 case L2CAP_CONF_QOS
:
3012 case L2CAP_CONF_RFC
:
3013 if (olen
== sizeof(rfc
))
3014 memcpy(&rfc
, (void *) val
, olen
);
3017 case L2CAP_CONF_FCS
:
3018 if (val
== L2CAP_FCS_NONE
)
3019 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
3022 case L2CAP_CONF_EFS
:
3024 if (olen
== sizeof(efs
))
3025 memcpy(&efs
, (void *) val
, olen
);
3028 case L2CAP_CONF_EWS
:
3030 return -ECONNREFUSED
;
3032 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3033 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
3034 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3035 chan
->remote_tx_win
= val
;
3042 result
= L2CAP_CONF_UNKNOWN
;
3043 *((u8
*) ptr
++) = type
;
3048 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
3051 switch (chan
->mode
) {
3052 case L2CAP_MODE_STREAMING
:
3053 case L2CAP_MODE_ERTM
:
3054 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
3055 chan
->mode
= l2cap_select_mode(rfc
.mode
,
3056 chan
->conn
->feat_mask
);
3061 if (__l2cap_efs_supported(chan
))
3062 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3064 return -ECONNREFUSED
;
3067 if (chan
->mode
!= rfc
.mode
)
3068 return -ECONNREFUSED
;
3074 if (chan
->mode
!= rfc
.mode
) {
3075 result
= L2CAP_CONF_UNACCEPT
;
3076 rfc
.mode
= chan
->mode
;
3078 if (chan
->num_conf_rsp
== 1)
3079 return -ECONNREFUSED
;
3081 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3082 (unsigned long) &rfc
);
3085 if (result
== L2CAP_CONF_SUCCESS
) {
3086 /* Configure output options and let the other side know
3087 * which ones we don't like. */
3089 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
3090 result
= L2CAP_CONF_UNACCEPT
;
3093 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
3095 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
3098 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3099 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3100 efs
.stype
!= chan
->local_stype
) {
3102 result
= L2CAP_CONF_UNACCEPT
;
3104 if (chan
->num_conf_req
>= 1)
3105 return -ECONNREFUSED
;
3107 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3109 (unsigned long) &efs
);
3111 /* Send PENDING Conf Rsp */
3112 result
= L2CAP_CONF_PENDING
;
3113 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3118 case L2CAP_MODE_BASIC
:
3119 chan
->fcs
= L2CAP_FCS_NONE
;
3120 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3123 case L2CAP_MODE_ERTM
:
3124 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
3125 chan
->remote_tx_win
= rfc
.txwin_size
;
3127 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
3129 chan
->remote_max_tx
= rfc
.max_transmit
;
3131 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3132 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3133 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3134 rfc
.max_pdu_size
= cpu_to_le16(size
);
3135 chan
->remote_mps
= size
;
3137 rfc
.retrans_timeout
=
3138 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3139 rfc
.monitor_timeout
=
3140 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3142 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3144 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3145 sizeof(rfc
), (unsigned long) &rfc
);
3147 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3148 chan
->remote_id
= efs
.id
;
3149 chan
->remote_stype
= efs
.stype
;
3150 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3151 chan
->remote_flush_to
=
3152 le32_to_cpu(efs
.flush_to
);
3153 chan
->remote_acc_lat
=
3154 le32_to_cpu(efs
.acc_lat
);
3155 chan
->remote_sdu_itime
=
3156 le32_to_cpu(efs
.sdu_itime
);
3157 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3159 (unsigned long) &efs
);
3163 case L2CAP_MODE_STREAMING
:
3164 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3165 chan
->conn
->mtu
- L2CAP_EXT_HDR_SIZE
-
3166 L2CAP_SDULEN_SIZE
- L2CAP_FCS_SIZE
);
3167 rfc
.max_pdu_size
= cpu_to_le16(size
);
3168 chan
->remote_mps
= size
;
3170 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3172 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3173 (unsigned long) &rfc
);
3178 result
= L2CAP_CONF_UNACCEPT
;
3180 memset(&rfc
, 0, sizeof(rfc
));
3181 rfc
.mode
= chan
->mode
;
3184 if (result
== L2CAP_CONF_SUCCESS
)
3185 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3187 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3188 rsp
->result
= cpu_to_le16(result
);
3189 rsp
->flags
= __constant_cpu_to_le16(0);
3194 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3195 void *data
, u16
*result
)
3197 struct l2cap_conf_req
*req
= data
;
3198 void *ptr
= req
->data
;
3201 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3202 struct l2cap_conf_efs efs
;
3204 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3206 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3207 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3210 case L2CAP_CONF_MTU
:
3211 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3212 *result
= L2CAP_CONF_UNACCEPT
;
3213 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3216 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3219 case L2CAP_CONF_FLUSH_TO
:
3220 chan
->flush_to
= val
;
3221 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3225 case L2CAP_CONF_RFC
:
3226 if (olen
== sizeof(rfc
))
3227 memcpy(&rfc
, (void *)val
, olen
);
3229 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3230 rfc
.mode
!= chan
->mode
)
3231 return -ECONNREFUSED
;
3235 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3236 sizeof(rfc
), (unsigned long) &rfc
);
3239 case L2CAP_CONF_EWS
:
3240 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3241 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3245 case L2CAP_CONF_EFS
:
3246 if (olen
== sizeof(efs
))
3247 memcpy(&efs
, (void *)val
, olen
);
3249 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3250 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3251 efs
.stype
!= chan
->local_stype
)
3252 return -ECONNREFUSED
;
3254 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3255 (unsigned long) &efs
);
3260 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3261 return -ECONNREFUSED
;
3263 chan
->mode
= rfc
.mode
;
3265 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3267 case L2CAP_MODE_ERTM
:
3268 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3269 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3270 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3271 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3272 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3275 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3276 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3277 chan
->local_sdu_itime
=
3278 le32_to_cpu(efs
.sdu_itime
);
3279 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3280 chan
->local_flush_to
=
3281 le32_to_cpu(efs
.flush_to
);
3285 case L2CAP_MODE_STREAMING
:
3286 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3290 req
->dcid
= cpu_to_le16(chan
->dcid
);
3291 req
->flags
= __constant_cpu_to_le16(0);
3296 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3297 u16 result
, u16 flags
)
3299 struct l2cap_conf_rsp
*rsp
= data
;
3300 void *ptr
= rsp
->data
;
3302 BT_DBG("chan %p", chan
);
3304 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3305 rsp
->result
= cpu_to_le16(result
);
3306 rsp
->flags
= cpu_to_le16(flags
);
3311 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3313 struct l2cap_conn_rsp rsp
;
3314 struct l2cap_conn
*conn
= chan
->conn
;
3317 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3318 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3319 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3320 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3321 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3323 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3326 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3327 l2cap_build_conf_req(chan
, buf
), buf
);
3328 chan
->num_conf_req
++;
3331 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3335 /* Use sane default values in case a misbehaving remote device
3336 * did not send an RFC or extended window size option.
3338 u16 txwin_ext
= chan
->ack_win
;
3339 struct l2cap_conf_rfc rfc
= {
3341 .retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3342 .monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3343 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3344 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3347 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3349 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3352 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3353 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3356 case L2CAP_CONF_RFC
:
3357 if (olen
== sizeof(rfc
))
3358 memcpy(&rfc
, (void *)val
, olen
);
3360 case L2CAP_CONF_EWS
:
3367 case L2CAP_MODE_ERTM
:
3368 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3369 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3370 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3371 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3372 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3374 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3377 case L2CAP_MODE_STREAMING
:
3378 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3382 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3383 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3385 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3387 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3390 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3391 cmd
->ident
== conn
->info_ident
) {
3392 cancel_delayed_work(&conn
->info_timer
);
3394 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3395 conn
->info_ident
= 0;
3397 l2cap_conn_start(conn
);
3403 static void l2cap_connect(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
,
3404 u8
*data
, u8 rsp_code
, u8 amp_id
)
3406 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3407 struct l2cap_conn_rsp rsp
;
3408 struct l2cap_chan
*chan
= NULL
, *pchan
;
3409 struct sock
*parent
, *sk
= NULL
;
3410 int result
, status
= L2CAP_CS_NO_INFO
;
3412 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3413 __le16 psm
= req
->psm
;
3415 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3417 /* Check if we have socket listening on psm */
3418 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
3420 result
= L2CAP_CR_BAD_PSM
;
3426 mutex_lock(&conn
->chan_lock
);
3429 /* Check if the ACL is secure enough (if not SDP) */
3430 if (psm
!= __constant_cpu_to_le16(L2CAP_PSM_SDP
) &&
3431 !hci_conn_check_link_mode(conn
->hcon
)) {
3432 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3433 result
= L2CAP_CR_SEC_BLOCK
;
3437 result
= L2CAP_CR_NO_MEM
;
3439 /* Check if we already have channel with that dcid */
3440 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3443 chan
= pchan
->ops
->new_connection(pchan
);
3449 hci_conn_hold(conn
->hcon
);
3451 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3452 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3456 __l2cap_chan_add(conn
, chan
);
3460 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3462 chan
->ident
= cmd
->ident
;
3464 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3465 if (l2cap_chan_check_security(chan
)) {
3466 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
)) {
3467 __l2cap_state_change(chan
, BT_CONNECT2
);
3468 result
= L2CAP_CR_PEND
;
3469 status
= L2CAP_CS_AUTHOR_PEND
;
3470 chan
->ops
->defer(chan
);
3472 __l2cap_state_change(chan
, BT_CONFIG
);
3473 result
= L2CAP_CR_SUCCESS
;
3474 status
= L2CAP_CS_NO_INFO
;
3477 __l2cap_state_change(chan
, BT_CONNECT2
);
3478 result
= L2CAP_CR_PEND
;
3479 status
= L2CAP_CS_AUTHEN_PEND
;
3482 __l2cap_state_change(chan
, BT_CONNECT2
);
3483 result
= L2CAP_CR_PEND
;
3484 status
= L2CAP_CS_NO_INFO
;
3488 release_sock(parent
);
3489 mutex_unlock(&conn
->chan_lock
);
3492 rsp
.scid
= cpu_to_le16(scid
);
3493 rsp
.dcid
= cpu_to_le16(dcid
);
3494 rsp
.result
= cpu_to_le16(result
);
3495 rsp
.status
= cpu_to_le16(status
);
3496 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3498 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3499 struct l2cap_info_req info
;
3500 info
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3502 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3503 conn
->info_ident
= l2cap_get_ident(conn
);
3505 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3507 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3508 sizeof(info
), &info
);
3511 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3512 result
== L2CAP_CR_SUCCESS
) {
3514 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3515 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3516 l2cap_build_conf_req(chan
, buf
), buf
);
3517 chan
->num_conf_req
++;
3521 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3522 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3524 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3528 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
,
3529 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3531 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3532 u16 scid
, dcid
, result
, status
;
3533 struct l2cap_chan
*chan
;
3537 scid
= __le16_to_cpu(rsp
->scid
);
3538 dcid
= __le16_to_cpu(rsp
->dcid
);
3539 result
= __le16_to_cpu(rsp
->result
);
3540 status
= __le16_to_cpu(rsp
->status
);
3542 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3543 dcid
, scid
, result
, status
);
3545 mutex_lock(&conn
->chan_lock
);
3548 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3554 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3563 l2cap_chan_lock(chan
);
3566 case L2CAP_CR_SUCCESS
:
3567 l2cap_state_change(chan
, BT_CONFIG
);
3570 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3572 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3575 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3576 l2cap_build_conf_req(chan
, req
), req
);
3577 chan
->num_conf_req
++;
3581 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3585 l2cap_chan_del(chan
, ECONNREFUSED
);
3589 l2cap_chan_unlock(chan
);
3592 mutex_unlock(&conn
->chan_lock
);
3597 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3599 /* FCS is enabled only in ERTM or streaming mode, if one or both
3602 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3603 chan
->fcs
= L2CAP_FCS_NONE
;
3604 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3605 chan
->fcs
= L2CAP_FCS_CRC16
;
3608 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3609 u8 ident
, u16 flags
)
3611 struct l2cap_conn
*conn
= chan
->conn
;
3613 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
3616 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3617 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3619 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
3620 l2cap_build_conf_rsp(chan
, data
,
3621 L2CAP_CONF_SUCCESS
, flags
), data
);
3624 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
3625 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3628 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3631 struct l2cap_chan
*chan
;
3634 dcid
= __le16_to_cpu(req
->dcid
);
3635 flags
= __le16_to_cpu(req
->flags
);
3637 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3639 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3643 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3644 struct l2cap_cmd_rej_cid rej
;
3646 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3647 rej
.scid
= cpu_to_le16(chan
->scid
);
3648 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3650 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3655 /* Reject if config buffer is too small. */
3656 len
= cmd_len
- sizeof(*req
);
3657 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3658 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3659 l2cap_build_conf_rsp(chan
, rsp
,
3660 L2CAP_CONF_REJECT
, flags
), rsp
);
3665 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3666 chan
->conf_len
+= len
;
3668 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
3669 /* Incomplete config. Send empty response. */
3670 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3671 l2cap_build_conf_rsp(chan
, rsp
,
3672 L2CAP_CONF_SUCCESS
, flags
), rsp
);
3676 /* Complete config. */
3677 len
= l2cap_parse_conf_req(chan
, rsp
);
3679 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3683 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3684 chan
->num_conf_rsp
++;
3686 /* Reset config buffer. */
3689 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3692 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3693 set_default_fcs(chan
);
3695 if (chan
->mode
== L2CAP_MODE_ERTM
||
3696 chan
->mode
== L2CAP_MODE_STREAMING
)
3697 err
= l2cap_ertm_init(chan
);
3700 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3702 l2cap_chan_ready(chan
);
3707 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3709 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3710 l2cap_build_conf_req(chan
, buf
), buf
);
3711 chan
->num_conf_req
++;
3714 /* Got Conf Rsp PENDING from remote side and asume we sent
3715 Conf Rsp PENDING in the code above */
3716 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3717 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3719 /* check compatibility */
3721 /* Send rsp for BR/EDR channel */
3723 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
3725 chan
->ident
= cmd
->ident
;
3729 l2cap_chan_unlock(chan
);
3733 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
3734 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3736 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3737 u16 scid
, flags
, result
;
3738 struct l2cap_chan
*chan
;
3739 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3742 scid
= __le16_to_cpu(rsp
->scid
);
3743 flags
= __le16_to_cpu(rsp
->flags
);
3744 result
= __le16_to_cpu(rsp
->result
);
3746 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3749 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3754 case L2CAP_CONF_SUCCESS
:
3755 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3756 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3759 case L2CAP_CONF_PENDING
:
3760 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3762 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3765 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3768 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3772 /* check compatibility */
3775 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
3778 chan
->ident
= cmd
->ident
;
3782 case L2CAP_CONF_UNACCEPT
:
3783 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3786 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3787 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3791 /* throw out any old stored conf requests */
3792 result
= L2CAP_CONF_SUCCESS
;
3793 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3796 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3800 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3801 L2CAP_CONF_REQ
, len
, req
);
3802 chan
->num_conf_req
++;
3803 if (result
!= L2CAP_CONF_SUCCESS
)
3809 l2cap_chan_set_err(chan
, ECONNRESET
);
3811 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
3812 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3816 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
3819 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3821 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3822 set_default_fcs(chan
);
3824 if (chan
->mode
== L2CAP_MODE_ERTM
||
3825 chan
->mode
== L2CAP_MODE_STREAMING
)
3826 err
= l2cap_ertm_init(chan
);
3829 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3831 l2cap_chan_ready(chan
);
3835 l2cap_chan_unlock(chan
);
3839 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
3840 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3842 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3843 struct l2cap_disconn_rsp rsp
;
3845 struct l2cap_chan
*chan
;
3848 scid
= __le16_to_cpu(req
->scid
);
3849 dcid
= __le16_to_cpu(req
->dcid
);
3851 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3853 mutex_lock(&conn
->chan_lock
);
3855 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
3857 mutex_unlock(&conn
->chan_lock
);
3861 l2cap_chan_lock(chan
);
3865 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3866 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3867 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3870 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3873 l2cap_chan_hold(chan
);
3874 l2cap_chan_del(chan
, ECONNRESET
);
3876 l2cap_chan_unlock(chan
);
3878 chan
->ops
->close(chan
);
3879 l2cap_chan_put(chan
);
3881 mutex_unlock(&conn
->chan_lock
);
3886 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
3887 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3889 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3891 struct l2cap_chan
*chan
;
3893 scid
= __le16_to_cpu(rsp
->scid
);
3894 dcid
= __le16_to_cpu(rsp
->dcid
);
3896 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3898 mutex_lock(&conn
->chan_lock
);
3900 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3902 mutex_unlock(&conn
->chan_lock
);
3906 l2cap_chan_lock(chan
);
3908 l2cap_chan_hold(chan
);
3909 l2cap_chan_del(chan
, 0);
3911 l2cap_chan_unlock(chan
);
3913 chan
->ops
->close(chan
);
3914 l2cap_chan_put(chan
);
3916 mutex_unlock(&conn
->chan_lock
);
3921 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
3922 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3924 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3927 type
= __le16_to_cpu(req
->type
);
3929 BT_DBG("type 0x%4.4x", type
);
3931 if (type
== L2CAP_IT_FEAT_MASK
) {
3933 u32 feat_mask
= l2cap_feat_mask
;
3934 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3935 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3936 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
3938 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3941 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3942 | L2CAP_FEAT_EXT_WINDOW
;
3944 put_unaligned_le32(feat_mask
, rsp
->data
);
3945 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
3947 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3949 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3952 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3954 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3956 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3957 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
3958 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3959 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
3962 struct l2cap_info_rsp rsp
;
3963 rsp
.type
= cpu_to_le16(type
);
3964 rsp
.result
= __constant_cpu_to_le16(L2CAP_IR_NOTSUPP
);
3965 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
3972 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
3973 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3975 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3978 type
= __le16_to_cpu(rsp
->type
);
3979 result
= __le16_to_cpu(rsp
->result
);
3981 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3983 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3984 if (cmd
->ident
!= conn
->info_ident
||
3985 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3988 cancel_delayed_work(&conn
->info_timer
);
3990 if (result
!= L2CAP_IR_SUCCESS
) {
3991 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3992 conn
->info_ident
= 0;
3994 l2cap_conn_start(conn
);
4000 case L2CAP_IT_FEAT_MASK
:
4001 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4003 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4004 struct l2cap_info_req req
;
4005 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4007 conn
->info_ident
= l2cap_get_ident(conn
);
4009 l2cap_send_cmd(conn
, conn
->info_ident
,
4010 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4012 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4013 conn
->info_ident
= 0;
4015 l2cap_conn_start(conn
);
4019 case L2CAP_IT_FIXED_CHAN
:
4020 conn
->fixed_chan_mask
= rsp
->data
[0];
4021 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4022 conn
->info_ident
= 0;
4024 l2cap_conn_start(conn
);
4031 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4032 struct l2cap_cmd_hdr
*cmd
,
4033 u16 cmd_len
, void *data
)
4035 struct l2cap_create_chan_req
*req
= data
;
4036 struct l2cap_create_chan_rsp rsp
;
4039 if (cmd_len
!= sizeof(*req
))
4045 psm
= le16_to_cpu(req
->psm
);
4046 scid
= le16_to_cpu(req
->scid
);
4048 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4050 /* Placeholder: Always reject */
4052 rsp
.scid
= cpu_to_le16(scid
);
4053 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
4054 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4056 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
/* Handle an AMP Create Channel Response.  The PDU layout matches a
 * Connection Response, so reuse that handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					   struct l2cap_cmd_hdr *cmd,
					   void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4071 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
4072 u16 icid
, u16 result
)
4074 struct l2cap_move_chan_rsp rsp
;
4076 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4078 rsp
.icid
= cpu_to_le16(icid
);
4079 rsp
.result
= cpu_to_le16(result
);
4081 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
4084 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
4085 struct l2cap_chan
*chan
,
4086 u16 icid
, u16 result
)
4088 struct l2cap_move_chan_cfm cfm
;
4091 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4093 ident
= l2cap_get_ident(conn
);
4095 chan
->ident
= ident
;
4097 cfm
.icid
= cpu_to_le16(icid
);
4098 cfm
.result
= cpu_to_le16(result
);
4100 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
4103 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4106 struct l2cap_move_chan_cfm_rsp rsp
;
4108 BT_DBG("icid 0x%4.4x", icid
);
4110 rsp
.icid
= cpu_to_le16(icid
);
4111 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4114 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4115 struct l2cap_cmd_hdr
*cmd
,
4116 u16 cmd_len
, void *data
)
4118 struct l2cap_move_chan_req
*req
= data
;
4120 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4122 if (cmd_len
!= sizeof(*req
))
4125 icid
= le16_to_cpu(req
->icid
);
4127 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4132 /* Placeholder: Always refuse */
4133 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
4138 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
4139 struct l2cap_cmd_hdr
*cmd
,
4140 u16 cmd_len
, void *data
)
4142 struct l2cap_move_chan_rsp
*rsp
= data
;
4145 if (cmd_len
!= sizeof(*rsp
))
4148 icid
= le16_to_cpu(rsp
->icid
);
4149 result
= le16_to_cpu(rsp
->result
);
4151 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4153 /* Placeholder: Always unconfirmed */
4154 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
4159 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
4160 struct l2cap_cmd_hdr
*cmd
,
4161 u16 cmd_len
, void *data
)
4163 struct l2cap_move_chan_cfm
*cfm
= data
;
4166 if (cmd_len
!= sizeof(*cfm
))
4169 icid
= le16_to_cpu(cfm
->icid
);
4170 result
= le16_to_cpu(cfm
->result
);
4172 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4174 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
4179 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
4180 struct l2cap_cmd_hdr
*cmd
,
4181 u16 cmd_len
, void *data
)
4183 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
4186 if (cmd_len
!= sizeof(*rsp
))
4189 icid
= le16_to_cpu(rsp
->icid
);
4191 BT_DBG("icid 0x%4.4x", icid
);
4196 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
4201 if (min
> max
|| min
< 6 || max
> 3200)
4204 if (to_multiplier
< 10 || to_multiplier
> 3200)
4207 if (max
>= to_multiplier
* 8)
4210 max_latency
= (to_multiplier
* 8 / max
) - 1;
4211 if (latency
> 499 || latency
> max_latency
)
4217 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
4218 struct l2cap_cmd_hdr
*cmd
,
4221 struct hci_conn
*hcon
= conn
->hcon
;
4222 struct l2cap_conn_param_update_req
*req
;
4223 struct l2cap_conn_param_update_rsp rsp
;
4224 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
4227 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
4230 cmd_len
= __le16_to_cpu(cmd
->len
);
4231 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
4234 req
= (struct l2cap_conn_param_update_req
*) data
;
4235 min
= __le16_to_cpu(req
->min
);
4236 max
= __le16_to_cpu(req
->max
);
4237 latency
= __le16_to_cpu(req
->latency
);
4238 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
4240 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4241 min
, max
, latency
, to_multiplier
);
4243 memset(&rsp
, 0, sizeof(rsp
));
4245 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
4247 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
4249 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
4251 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
4255 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
4260 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
4261 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4266 switch (cmd
->code
) {
4267 case L2CAP_COMMAND_REJ
:
4268 l2cap_command_rej(conn
, cmd
, data
);
4271 case L2CAP_CONN_REQ
:
4272 err
= l2cap_connect_req(conn
, cmd
, data
);
4275 case L2CAP_CONN_RSP
:
4276 case L2CAP_CREATE_CHAN_RSP
:
4277 err
= l2cap_connect_rsp(conn
, cmd
, data
);
4280 case L2CAP_CONF_REQ
:
4281 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
4284 case L2CAP_CONF_RSP
:
4285 err
= l2cap_config_rsp(conn
, cmd
, data
);
4288 case L2CAP_DISCONN_REQ
:
4289 err
= l2cap_disconnect_req(conn
, cmd
, data
);
4292 case L2CAP_DISCONN_RSP
:
4293 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
4296 case L2CAP_ECHO_REQ
:
4297 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
4300 case L2CAP_ECHO_RSP
:
4303 case L2CAP_INFO_REQ
:
4304 err
= l2cap_information_req(conn
, cmd
, data
);
4307 case L2CAP_INFO_RSP
:
4308 err
= l2cap_information_rsp(conn
, cmd
, data
);
4311 case L2CAP_CREATE_CHAN_REQ
:
4312 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
4315 case L2CAP_MOVE_CHAN_REQ
:
4316 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
4319 case L2CAP_MOVE_CHAN_RSP
:
4320 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
4323 case L2CAP_MOVE_CHAN_CFM
:
4324 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
4327 case L2CAP_MOVE_CHAN_CFM_RSP
:
4328 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
4332 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
4340 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
4341 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4343 switch (cmd
->code
) {
4344 case L2CAP_COMMAND_REJ
:
4347 case L2CAP_CONN_PARAM_UPDATE_REQ
:
4348 return l2cap_conn_param_update_req(conn
, cmd
, data
);
4350 case L2CAP_CONN_PARAM_UPDATE_RSP
:
4354 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
4359 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
4360 struct sk_buff
*skb
)
4362 u8
*data
= skb
->data
;
4364 struct l2cap_cmd_hdr cmd
;
4367 l2cap_raw_recv(conn
, skb
);
4369 while (len
>= L2CAP_CMD_HDR_SIZE
) {
4371 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
4372 data
+= L2CAP_CMD_HDR_SIZE
;
4373 len
-= L2CAP_CMD_HDR_SIZE
;
4375 cmd_len
= le16_to_cpu(cmd
.len
);
4377 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
4380 if (cmd_len
> len
|| !cmd
.ident
) {
4381 BT_DBG("corrupted command");
4385 if (conn
->hcon
->type
== LE_LINK
)
4386 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
4388 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
4391 struct l2cap_cmd_rej_unk rej
;
4393 BT_ERR("Wrong link type (%d)", err
);
4395 /* FIXME: Map err to a valid reason */
4396 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
4397 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
4408 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4410 u16 our_fcs
, rcv_fcs
;
4413 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
4414 hdr_size
= L2CAP_EXT_HDR_SIZE
;
4416 hdr_size
= L2CAP_ENH_HDR_SIZE
;
4418 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
4419 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
4420 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
4421 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
4423 if (our_fcs
!= rcv_fcs
)
4429 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
4431 struct l2cap_ctrl control
;
4433 BT_DBG("chan %p", chan
);
4435 memset(&control
, 0, sizeof(control
));
4438 control
.reqseq
= chan
->buffer_seq
;
4439 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4441 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4442 control
.super
= L2CAP_SUPER_RNR
;
4443 l2cap_send_sframe(chan
, &control
);
4446 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4447 chan
->unacked_frames
> 0)
4448 __set_retrans_timer(chan
);
4450 /* Send pending iframes */
4451 l2cap_ertm_send(chan
);
4453 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
4454 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
4455 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4458 control
.super
= L2CAP_SUPER_RR
;
4459 l2cap_send_sframe(chan
, &control
);
4463 static void append_skb_frag(struct sk_buff
*skb
, struct sk_buff
*new_frag
,
4464 struct sk_buff
**last_frag
)
4466 /* skb->len reflects data in skb as well as all fragments
4467 * skb->data_len reflects only data in fragments
4469 if (!skb_has_frag_list(skb
))
4470 skb_shinfo(skb
)->frag_list
= new_frag
;
4472 new_frag
->next
= NULL
;
4474 (*last_frag
)->next
= new_frag
;
4475 *last_frag
= new_frag
;
4477 skb
->len
+= new_frag
->len
;
4478 skb
->data_len
+= new_frag
->len
;
4479 skb
->truesize
+= new_frag
->truesize
;
4482 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
4483 struct l2cap_ctrl
*control
)
4487 switch (control
->sar
) {
4488 case L2CAP_SAR_UNSEGMENTED
:
4492 err
= chan
->ops
->recv(chan
, skb
);
4495 case L2CAP_SAR_START
:
4499 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
4500 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
4502 if (chan
->sdu_len
> chan
->imtu
) {
4507 if (skb
->len
>= chan
->sdu_len
)
4511 chan
->sdu_last_frag
= skb
;
4517 case L2CAP_SAR_CONTINUE
:
4521 append_skb_frag(chan
->sdu
, skb
,
4522 &chan
->sdu_last_frag
);
4525 if (chan
->sdu
->len
>= chan
->sdu_len
)
4535 append_skb_frag(chan
->sdu
, skb
,
4536 &chan
->sdu_last_frag
);
4539 if (chan
->sdu
->len
!= chan
->sdu_len
)
4542 err
= chan
->ops
->recv(chan
, chan
->sdu
);
4545 /* Reassembly complete */
4547 chan
->sdu_last_frag
= NULL
;
4555 kfree_skb(chan
->sdu
);
4557 chan
->sdu_last_frag
= NULL
;
4564 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
4568 if (chan
->mode
!= L2CAP_MODE_ERTM
)
4571 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
4572 l2cap_tx(chan
, NULL
, NULL
, event
);
4575 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
4578 /* Pass sequential frames to l2cap_reassemble_sdu()
4579 * until a gap is encountered.
4582 BT_DBG("chan %p", chan
);
4584 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4585 struct sk_buff
*skb
;
4586 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4587 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
4589 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
4594 skb_unlink(skb
, &chan
->srej_q
);
4595 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
4596 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
4601 if (skb_queue_empty(&chan
->srej_q
)) {
4602 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
4603 l2cap_send_ack(chan
);
4609 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
4610 struct l2cap_ctrl
*control
)
4612 struct sk_buff
*skb
;
4614 BT_DBG("chan %p, control %p", chan
, control
);
4616 if (control
->reqseq
== chan
->next_tx_seq
) {
4617 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4618 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4622 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4625 BT_DBG("Seq %d not available for retransmission",
4630 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4631 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4632 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4636 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4638 if (control
->poll
) {
4639 l2cap_pass_to_tx(chan
, control
);
4641 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4642 l2cap_retransmit(chan
, control
);
4643 l2cap_ertm_send(chan
);
4645 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4646 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4647 chan
->srej_save_reqseq
= control
->reqseq
;
4650 l2cap_pass_to_tx_fbit(chan
, control
);
4652 if (control
->final
) {
4653 if (chan
->srej_save_reqseq
!= control
->reqseq
||
4654 !test_and_clear_bit(CONN_SREJ_ACT
,
4656 l2cap_retransmit(chan
, control
);
4658 l2cap_retransmit(chan
, control
);
4659 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4660 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4661 chan
->srej_save_reqseq
= control
->reqseq
;
4667 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
4668 struct l2cap_ctrl
*control
)
4670 struct sk_buff
*skb
;
4672 BT_DBG("chan %p, control %p", chan
, control
);
4674 if (control
->reqseq
== chan
->next_tx_seq
) {
4675 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4676 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4680 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4682 if (chan
->max_tx
&& skb
&&
4683 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4684 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4685 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4689 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4691 l2cap_pass_to_tx(chan
, control
);
4693 if (control
->final
) {
4694 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4695 l2cap_retransmit_all(chan
, control
);
4697 l2cap_retransmit_all(chan
, control
);
4698 l2cap_ertm_send(chan
);
4699 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
4700 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4704 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
4706 BT_DBG("chan %p, txseq %d", chan
, txseq
);
4708 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
4709 chan
->expected_tx_seq
);
4711 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
4712 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4714 /* See notes below regarding "double poll" and
4717 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4718 BT_DBG("Invalid/Ignore - after SREJ");
4719 return L2CAP_TXSEQ_INVALID_IGNORE
;
4721 BT_DBG("Invalid - in window after SREJ sent");
4722 return L2CAP_TXSEQ_INVALID
;
4726 if (chan
->srej_list
.head
== txseq
) {
4727 BT_DBG("Expected SREJ");
4728 return L2CAP_TXSEQ_EXPECTED_SREJ
;
4731 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
4732 BT_DBG("Duplicate SREJ - txseq already stored");
4733 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
4736 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
4737 BT_DBG("Unexpected SREJ - not requested");
4738 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
4742 if (chan
->expected_tx_seq
== txseq
) {
4743 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4745 BT_DBG("Invalid - txseq outside tx window");
4746 return L2CAP_TXSEQ_INVALID
;
4749 return L2CAP_TXSEQ_EXPECTED
;
4753 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
4754 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
4755 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4756 return L2CAP_TXSEQ_DUPLICATE
;
4759 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
4760 /* A source of invalid packets is a "double poll" condition,
4761 * where delays cause us to send multiple poll packets. If
4762 * the remote stack receives and processes both polls,
4763 * sequence numbers can wrap around in such a way that a
4764 * resent frame has a sequence number that looks like new data
4765 * with a sequence gap. This would trigger an erroneous SREJ
4768 * Fortunately, this is impossible with a tx window that's
4769 * less than half of the maximum sequence number, which allows
4770 * invalid frames to be safely ignored.
4772 * With tx window sizes greater than half of the tx window
4773 * maximum, the frame is invalid and cannot be ignored. This
4774 * causes a disconnect.
4777 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4778 BT_DBG("Invalid/Ignore - txseq outside tx window");
4779 return L2CAP_TXSEQ_INVALID_IGNORE
;
4781 BT_DBG("Invalid - txseq outside tx window");
4782 return L2CAP_TXSEQ_INVALID
;
4785 BT_DBG("Unexpected - txseq indicates missing frames");
4786 return L2CAP_TXSEQ_UNEXPECTED
;
4790 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
4791 struct l2cap_ctrl
*control
,
4792 struct sk_buff
*skb
, u8 event
)
4795 bool skb_in_use
= 0;
4797 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4801 case L2CAP_EV_RECV_IFRAME
:
4802 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
4803 case L2CAP_TXSEQ_EXPECTED
:
4804 l2cap_pass_to_tx(chan
, control
);
4806 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4807 BT_DBG("Busy, discarding expected seq %d",
4812 chan
->expected_tx_seq
= __next_seq(chan
,
4815 chan
->buffer_seq
= chan
->expected_tx_seq
;
4818 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
4822 if (control
->final
) {
4823 if (!test_and_clear_bit(CONN_REJ_ACT
,
4824 &chan
->conn_state
)) {
4826 l2cap_retransmit_all(chan
, control
);
4827 l2cap_ertm_send(chan
);
4831 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
4832 l2cap_send_ack(chan
);
4834 case L2CAP_TXSEQ_UNEXPECTED
:
4835 l2cap_pass_to_tx(chan
, control
);
4837 /* Can't issue SREJ frames in the local busy state.
4838 * Drop this frame, it will be seen as missing
4839 * when local busy is exited.
4841 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4842 BT_DBG("Busy, discarding unexpected seq %d",
4847 /* There was a gap in the sequence, so an SREJ
4848 * must be sent for each missing frame. The
4849 * current frame is stored for later use.
4851 skb_queue_tail(&chan
->srej_q
, skb
);
4853 BT_DBG("Queued %p (queue len %d)", skb
,
4854 skb_queue_len(&chan
->srej_q
));
4856 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4857 l2cap_seq_list_clear(&chan
->srej_list
);
4858 l2cap_send_srej(chan
, control
->txseq
);
4860 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
4862 case L2CAP_TXSEQ_DUPLICATE
:
4863 l2cap_pass_to_tx(chan
, control
);
4865 case L2CAP_TXSEQ_INVALID_IGNORE
:
4867 case L2CAP_TXSEQ_INVALID
:
4869 l2cap_send_disconn_req(chan
->conn
, chan
,
4874 case L2CAP_EV_RECV_RR
:
4875 l2cap_pass_to_tx(chan
, control
);
4876 if (control
->final
) {
4877 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4879 if (!test_and_clear_bit(CONN_REJ_ACT
,
4880 &chan
->conn_state
)) {
4882 l2cap_retransmit_all(chan
, control
);
4885 l2cap_ertm_send(chan
);
4886 } else if (control
->poll
) {
4887 l2cap_send_i_or_rr_or_rnr(chan
);
4889 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4890 &chan
->conn_state
) &&
4891 chan
->unacked_frames
)
4892 __set_retrans_timer(chan
);
4894 l2cap_ertm_send(chan
);
4897 case L2CAP_EV_RECV_RNR
:
4898 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4899 l2cap_pass_to_tx(chan
, control
);
4900 if (control
&& control
->poll
) {
4901 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4902 l2cap_send_rr_or_rnr(chan
, 0);
4904 __clear_retrans_timer(chan
);
4905 l2cap_seq_list_clear(&chan
->retrans_list
);
4907 case L2CAP_EV_RECV_REJ
:
4908 l2cap_handle_rej(chan
, control
);
4910 case L2CAP_EV_RECV_SREJ
:
4911 l2cap_handle_srej(chan
, control
);
4917 if (skb
&& !skb_in_use
) {
4918 BT_DBG("Freeing %p", skb
);
4925 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
4926 struct l2cap_ctrl
*control
,
4927 struct sk_buff
*skb
, u8 event
)
4930 u16 txseq
= control
->txseq
;
4931 bool skb_in_use
= 0;
4933 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4937 case L2CAP_EV_RECV_IFRAME
:
4938 switch (l2cap_classify_txseq(chan
, txseq
)) {
4939 case L2CAP_TXSEQ_EXPECTED
:
4940 /* Keep frame for reassembly later */
4941 l2cap_pass_to_tx(chan
, control
);
4942 skb_queue_tail(&chan
->srej_q
, skb
);
4944 BT_DBG("Queued %p (queue len %d)", skb
,
4945 skb_queue_len(&chan
->srej_q
));
4947 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
4949 case L2CAP_TXSEQ_EXPECTED_SREJ
:
4950 l2cap_seq_list_pop(&chan
->srej_list
);
4952 l2cap_pass_to_tx(chan
, control
);
4953 skb_queue_tail(&chan
->srej_q
, skb
);
4955 BT_DBG("Queued %p (queue len %d)", skb
,
4956 skb_queue_len(&chan
->srej_q
));
4958 err
= l2cap_rx_queued_iframes(chan
);
4963 case L2CAP_TXSEQ_UNEXPECTED
:
4964 /* Got a frame that can't be reassembled yet.
4965 * Save it for later, and send SREJs to cover
4966 * the missing frames.
4968 skb_queue_tail(&chan
->srej_q
, skb
);
4970 BT_DBG("Queued %p (queue len %d)", skb
,
4971 skb_queue_len(&chan
->srej_q
));
4973 l2cap_pass_to_tx(chan
, control
);
4974 l2cap_send_srej(chan
, control
->txseq
);
4976 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
4977 /* This frame was requested with an SREJ, but
4978 * some expected retransmitted frames are
4979 * missing. Request retransmission of missing
4982 skb_queue_tail(&chan
->srej_q
, skb
);
4984 BT_DBG("Queued %p (queue len %d)", skb
,
4985 skb_queue_len(&chan
->srej_q
));
4987 l2cap_pass_to_tx(chan
, control
);
4988 l2cap_send_srej_list(chan
, control
->txseq
);
4990 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
4991 /* We've already queued this frame. Drop this copy. */
4992 l2cap_pass_to_tx(chan
, control
);
4994 case L2CAP_TXSEQ_DUPLICATE
:
4995 /* Expecting a later sequence number, so this frame
4996 * was already received. Ignore it completely.
4999 case L2CAP_TXSEQ_INVALID_IGNORE
:
5001 case L2CAP_TXSEQ_INVALID
:
5003 l2cap_send_disconn_req(chan
->conn
, chan
,
5008 case L2CAP_EV_RECV_RR
:
5009 l2cap_pass_to_tx(chan
, control
);
5010 if (control
->final
) {
5011 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5013 if (!test_and_clear_bit(CONN_REJ_ACT
,
5014 &chan
->conn_state
)) {
5016 l2cap_retransmit_all(chan
, control
);
5019 l2cap_ertm_send(chan
);
5020 } else if (control
->poll
) {
5021 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5022 &chan
->conn_state
) &&
5023 chan
->unacked_frames
) {
5024 __set_retrans_timer(chan
);
5027 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5028 l2cap_send_srej_tail(chan
);
5030 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5031 &chan
->conn_state
) &&
5032 chan
->unacked_frames
)
5033 __set_retrans_timer(chan
);
5035 l2cap_send_ack(chan
);
5038 case L2CAP_EV_RECV_RNR
:
5039 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5040 l2cap_pass_to_tx(chan
, control
);
5041 if (control
->poll
) {
5042 l2cap_send_srej_tail(chan
);
5044 struct l2cap_ctrl rr_control
;
5045 memset(&rr_control
, 0, sizeof(rr_control
));
5046 rr_control
.sframe
= 1;
5047 rr_control
.super
= L2CAP_SUPER_RR
;
5048 rr_control
.reqseq
= chan
->buffer_seq
;
5049 l2cap_send_sframe(chan
, &rr_control
);
5053 case L2CAP_EV_RECV_REJ
:
5054 l2cap_handle_rej(chan
, control
);
5056 case L2CAP_EV_RECV_SREJ
:
5057 l2cap_handle_srej(chan
, control
);
5061 if (skb
&& !skb_in_use
) {
5062 BT_DBG("Freeing %p", skb
);
5069 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
5071 /* Make sure reqseq is for a packet that has been sent but not acked */
5074 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
5075 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
5078 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5079 struct sk_buff
*skb
, u8 event
)
5083 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
5084 control
, skb
, event
, chan
->rx_state
);
5086 if (__valid_reqseq(chan
, control
->reqseq
)) {
5087 switch (chan
->rx_state
) {
5088 case L2CAP_RX_STATE_RECV
:
5089 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
5091 case L2CAP_RX_STATE_SREJ_SENT
:
5092 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
5100 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5101 control
->reqseq
, chan
->next_tx_seq
,
5102 chan
->expected_ack_seq
);
5103 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5109 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5110 struct sk_buff
*skb
)
5114 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
5117 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
5118 L2CAP_TXSEQ_EXPECTED
) {
5119 l2cap_pass_to_tx(chan
, control
);
5121 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
5122 __next_seq(chan
, chan
->buffer_seq
));
5124 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5126 l2cap_reassemble_sdu(chan
, skb
, control
);
5129 kfree_skb(chan
->sdu
);
5132 chan
->sdu_last_frag
= NULL
;
5136 BT_DBG("Freeing %p", skb
);
5141 chan
->last_acked_seq
= control
->txseq
;
5142 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
5147 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5149 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
5153 __unpack_control(chan
, skb
);
5158 * We can just drop the corrupted I-frame here.
5159 * Receiver will miss it and start proper recovery
5160 * procedures and ask for retransmission.
5162 if (l2cap_check_fcs(chan
, skb
))
5165 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
5166 len
-= L2CAP_SDULEN_SIZE
;
5168 if (chan
->fcs
== L2CAP_FCS_CRC16
)
5169 len
-= L2CAP_FCS_SIZE
;
5171 if (len
> chan
->mps
) {
5172 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5176 if (!control
->sframe
) {
5179 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5180 control
->sar
, control
->reqseq
, control
->final
,
5183 /* Validate F-bit - F=0 always valid, F=1 only
5184 * valid in TX WAIT_F
5186 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
5189 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
5190 event
= L2CAP_EV_RECV_IFRAME
;
5191 err
= l2cap_rx(chan
, control
, skb
, event
);
5193 err
= l2cap_stream_rx(chan
, control
, skb
);
5197 l2cap_send_disconn_req(chan
->conn
, chan
,
5200 const u8 rx_func_to_event
[4] = {
5201 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
5202 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
5205 /* Only I-frames are expected in streaming mode */
5206 if (chan
->mode
== L2CAP_MODE_STREAMING
)
5209 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5210 control
->reqseq
, control
->final
, control
->poll
,
5215 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5219 /* Validate F and P bits */
5220 if (control
->final
&& (control
->poll
||
5221 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
5224 event
= rx_func_to_event
[control
->super
];
5225 if (l2cap_rx(chan
, control
, skb
, event
))
5226 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5236 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
5237 struct sk_buff
*skb
)
5239 struct l2cap_chan
*chan
;
5241 chan
= l2cap_get_chan_by_scid(conn
, cid
);
5243 if (cid
== L2CAP_CID_A2MP
) {
5244 chan
= a2mp_channel_create(conn
, skb
);
5250 l2cap_chan_lock(chan
);
5252 BT_DBG("unknown cid 0x%4.4x", cid
);
5253 /* Drop packet and return */
5259 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5261 if (chan
->state
!= BT_CONNECTED
)
5264 switch (chan
->mode
) {
5265 case L2CAP_MODE_BASIC
:
5266 /* If socket recv buffers overflows we drop data here
5267 * which is *bad* because L2CAP has to be reliable.
5268 * But we don't have any other choice. L2CAP doesn't
5269 * provide flow control mechanism. */
5271 if (chan
->imtu
< skb
->len
)
5274 if (!chan
->ops
->recv(chan
, skb
))
5278 case L2CAP_MODE_ERTM
:
5279 case L2CAP_MODE_STREAMING
:
5280 l2cap_data_rcv(chan
, skb
);
5284 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
5292 l2cap_chan_unlock(chan
);
5295 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
5296 struct sk_buff
*skb
)
5298 struct l2cap_chan
*chan
;
5300 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
5304 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5306 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5309 if (chan
->imtu
< skb
->len
)
5312 if (!chan
->ops
->recv(chan
, skb
))
5319 static void l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
5320 struct sk_buff
*skb
)
5322 struct l2cap_chan
*chan
;
5324 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
5328 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5330 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5333 if (chan
->imtu
< skb
->len
)
5336 if (!chan
->ops
->recv(chan
, skb
))
5343 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
5345 struct l2cap_hdr
*lh
= (void *) skb
->data
;
5349 skb_pull(skb
, L2CAP_HDR_SIZE
);
5350 cid
= __le16_to_cpu(lh
->cid
);
5351 len
= __le16_to_cpu(lh
->len
);
5353 if (len
!= skb
->len
) {
5358 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
5361 case L2CAP_CID_LE_SIGNALING
:
5362 case L2CAP_CID_SIGNALING
:
5363 l2cap_sig_channel(conn
, skb
);
5366 case L2CAP_CID_CONN_LESS
:
5367 psm
= get_unaligned((__le16
*) skb
->data
);
5368 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
5369 l2cap_conless_channel(conn
, psm
, skb
);
5372 case L2CAP_CID_LE_DATA
:
5373 l2cap_att_channel(conn
, cid
, skb
);
5377 if (smp_sig_channel(conn
, skb
))
5378 l2cap_conn_del(conn
->hcon
, EACCES
);
5382 l2cap_data_channel(conn
, cid
, skb
);
5387 /* ---- L2CAP interface with lower layer (HCI) ---- */
5389 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
5391 int exact
= 0, lm1
= 0, lm2
= 0;
5392 struct l2cap_chan
*c
;
5394 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
5396 /* Find listening sockets and check their link_mode */
5397 read_lock(&chan_list_lock
);
5398 list_for_each_entry(c
, &chan_list
, global_l
) {
5399 struct sock
*sk
= c
->sk
;
5401 if (c
->state
!= BT_LISTEN
)
5404 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
5405 lm1
|= HCI_LM_ACCEPT
;
5406 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5407 lm1
|= HCI_LM_MASTER
;
5409 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
5410 lm2
|= HCI_LM_ACCEPT
;
5411 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5412 lm2
|= HCI_LM_MASTER
;
5415 read_unlock(&chan_list_lock
);
5417 return exact
? lm1
: lm2
;
5420 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
5422 struct l2cap_conn
*conn
;
5424 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
5427 conn
= l2cap_conn_add(hcon
, status
);
5429 l2cap_conn_ready(conn
);
5431 l2cap_conn_del(hcon
, bt_to_errno(status
));
5435 int l2cap_disconn_ind(struct hci_conn
*hcon
)
5437 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5439 BT_DBG("hcon %p", hcon
);
5442 return HCI_ERROR_REMOTE_USER_TERM
;
5443 return conn
->disc_reason
;
5446 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
5448 BT_DBG("hcon %p reason %d", hcon
, reason
);
5450 l2cap_conn_del(hcon
, bt_to_errno(reason
));
5453 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
5455 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
5458 if (encrypt
== 0x00) {
5459 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
5460 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
5461 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
5462 l2cap_chan_close(chan
, ECONNREFUSED
);
5464 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
5465 __clear_chan_timer(chan
);
5469 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
5471 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5472 struct l2cap_chan
*chan
;
5477 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
5479 if (hcon
->type
== LE_LINK
) {
5480 if (!status
&& encrypt
)
5481 smp_distribute_keys(conn
, 0);
5482 cancel_delayed_work(&conn
->security_timer
);
5485 mutex_lock(&conn
->chan_lock
);
5487 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
5488 l2cap_chan_lock(chan
);
5490 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
5491 state_to_string(chan
->state
));
5493 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
5494 l2cap_chan_unlock(chan
);
5498 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
5499 if (!status
&& encrypt
) {
5500 chan
->sec_level
= hcon
->sec_level
;
5501 l2cap_chan_ready(chan
);
5504 l2cap_chan_unlock(chan
);
5508 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
5509 l2cap_chan_unlock(chan
);
5513 if (!status
&& (chan
->state
== BT_CONNECTED
||
5514 chan
->state
== BT_CONFIG
)) {
5515 struct sock
*sk
= chan
->sk
;
5517 clear_bit(BT_SK_SUSPEND
, &bt_sk(sk
)->flags
);
5518 sk
->sk_state_change(sk
);
5520 l2cap_check_encryption(chan
, encrypt
);
5521 l2cap_chan_unlock(chan
);
5525 if (chan
->state
== BT_CONNECT
) {
5527 l2cap_start_connection(chan
);
5529 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5531 } else if (chan
->state
== BT_CONNECT2
) {
5532 struct sock
*sk
= chan
->sk
;
5533 struct l2cap_conn_rsp rsp
;
5539 if (test_bit(BT_SK_DEFER_SETUP
,
5540 &bt_sk(sk
)->flags
)) {
5541 res
= L2CAP_CR_PEND
;
5542 stat
= L2CAP_CS_AUTHOR_PEND
;
5543 chan
->ops
->defer(chan
);
5545 __l2cap_state_change(chan
, BT_CONFIG
);
5546 res
= L2CAP_CR_SUCCESS
;
5547 stat
= L2CAP_CS_NO_INFO
;
5550 __l2cap_state_change(chan
, BT_DISCONN
);
5551 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5552 res
= L2CAP_CR_SEC_BLOCK
;
5553 stat
= L2CAP_CS_NO_INFO
;
5558 rsp
.scid
= cpu_to_le16(chan
->dcid
);
5559 rsp
.dcid
= cpu_to_le16(chan
->scid
);
5560 rsp
.result
= cpu_to_le16(res
);
5561 rsp
.status
= cpu_to_le16(stat
);
5562 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
5565 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
5566 res
== L2CAP_CR_SUCCESS
) {
5568 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
5569 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
5571 l2cap_build_conf_req(chan
, buf
),
5573 chan
->num_conf_req
++;
5577 l2cap_chan_unlock(chan
);
5580 mutex_unlock(&conn
->chan_lock
);
5585 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
5587 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5588 struct l2cap_hdr
*hdr
;
5591 /* For AMP controller do not create l2cap conn */
5592 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_BREDR
)
5596 conn
= l2cap_conn_add(hcon
, 0);
5601 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
5605 case ACL_START_NO_FLUSH
:
5608 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
5609 kfree_skb(conn
->rx_skb
);
5610 conn
->rx_skb
= NULL
;
5612 l2cap_conn_unreliable(conn
, ECOMM
);
5615 /* Start fragment always begin with Basic L2CAP header */
5616 if (skb
->len
< L2CAP_HDR_SIZE
) {
5617 BT_ERR("Frame is too short (len %d)", skb
->len
);
5618 l2cap_conn_unreliable(conn
, ECOMM
);
5622 hdr
= (struct l2cap_hdr
*) skb
->data
;
5623 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
5625 if (len
== skb
->len
) {
5626 /* Complete frame received */
5627 l2cap_recv_frame(conn
, skb
);
5631 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
5633 if (skb
->len
> len
) {
5634 BT_ERR("Frame is too long (len %d, expected len %d)",
5636 l2cap_conn_unreliable(conn
, ECOMM
);
5640 /* Allocate skb for the complete frame (with header) */
5641 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
5645 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5647 conn
->rx_len
= len
- skb
->len
;
5651 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
5653 if (!conn
->rx_len
) {
5654 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
5655 l2cap_conn_unreliable(conn
, ECOMM
);
5659 if (skb
->len
> conn
->rx_len
) {
5660 BT_ERR("Fragment is too long (len %d, expected %d)",
5661 skb
->len
, conn
->rx_len
);
5662 kfree_skb(conn
->rx_skb
);
5663 conn
->rx_skb
= NULL
;
5665 l2cap_conn_unreliable(conn
, ECOMM
);
5669 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5671 conn
->rx_len
-= skb
->len
;
5673 if (!conn
->rx_len
) {
5674 /* Complete frame received */
5675 l2cap_recv_frame(conn
, conn
->rx_skb
);
5676 conn
->rx_skb
= NULL
;
5686 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
5688 struct l2cap_chan
*c
;
5690 read_lock(&chan_list_lock
);
5692 list_for_each_entry(c
, &chan_list
, global_l
) {
5693 struct sock
*sk
= c
->sk
;
5695 seq_printf(f
, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5696 &bt_sk(sk
)->src
, &bt_sk(sk
)->dst
,
5697 c
->state
, __le16_to_cpu(c
->psm
),
5698 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
5699 c
->sec_level
, c
->mode
);
5702 read_unlock(&chan_list_lock
);
5707 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
5709 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
5712 static const struct file_operations l2cap_debugfs_fops
= {
5713 .open
= l2cap_debugfs_open
,
5715 .llseek
= seq_lseek
,
5716 .release
= single_release
,
5719 static struct dentry
*l2cap_debugfs
;
5721 int __init
l2cap_init(void)
5725 err
= l2cap_init_sockets();
5730 l2cap_debugfs
= debugfs_create_file("l2cap", 0444, bt_debugfs
,
5731 NULL
, &l2cap_debugfs_fops
);
5733 BT_ERR("Failed to create L2CAP debug file");
5739 void l2cap_exit(void)
5741 debugfs_remove(l2cap_debugfs
);
5742 l2cap_cleanup_sockets();
5745 module_param(disable_ertm
, bool, 0644);
5746 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");