2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
44 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
45 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
47 static LIST_HEAD(chan_list
);
48 static DEFINE_RWLOCK(chan_list_lock
);
50 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
51 u8 code
, u8 ident
, u16 dlen
, void *data
);
52 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
54 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
55 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
56 struct l2cap_chan
*chan
, int err
);
58 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
59 struct sk_buff_head
*skbs
, u8 event
);
61 /* ---- L2CAP channels ---- */
63 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
67 list_for_each_entry(c
, &conn
->chan_l
, list
) {
74 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
78 list_for_each_entry(c
, &conn
->chan_l
, list
) {
85 /* Find channel with given SCID.
86 * Returns locked channel. */
87 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
91 mutex_lock(&conn
->chan_lock
);
92 c
= __l2cap_get_chan_by_scid(conn
, cid
);
95 mutex_unlock(&conn
->chan_lock
);
100 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
102 struct l2cap_chan
*c
;
104 list_for_each_entry(c
, &conn
->chan_l
, list
) {
105 if (c
->ident
== ident
)
111 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
113 struct l2cap_chan
*c
;
115 list_for_each_entry(c
, &chan_list
, global_l
) {
116 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
122 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
126 write_lock(&chan_list_lock
);
128 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
141 for (p
= 0x1001; p
< 0x1100; p
+= 2)
142 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
143 chan
->psm
= cpu_to_le16(p
);
144 chan
->sport
= cpu_to_le16(p
);
151 write_unlock(&chan_list_lock
);
155 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
157 write_lock(&chan_list_lock
);
161 write_unlock(&chan_list_lock
);
166 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
168 u16 cid
= L2CAP_CID_DYN_START
;
170 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
171 if (!__l2cap_get_chan_by_scid(conn
, cid
))
178 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
180 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
181 state_to_string(state
));
184 chan
->ops
->state_change(chan
, state
);
187 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
189 struct sock
*sk
= chan
->sk
;
192 __l2cap_state_change(chan
, state
);
196 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
198 struct sock
*sk
= chan
->sk
;
203 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
205 struct sock
*sk
= chan
->sk
;
208 __l2cap_chan_set_err(chan
, err
);
212 static void __set_retrans_timer(struct l2cap_chan
*chan
)
214 if (!delayed_work_pending(&chan
->monitor_timer
) &&
215 chan
->retrans_timeout
) {
216 l2cap_set_timer(chan
, &chan
->retrans_timer
,
217 msecs_to_jiffies(chan
->retrans_timeout
));
221 static void __set_monitor_timer(struct l2cap_chan
*chan
)
223 __clear_retrans_timer(chan
);
224 if (chan
->monitor_timeout
) {
225 l2cap_set_timer(chan
, &chan
->monitor_timer
,
226 msecs_to_jiffies(chan
->monitor_timeout
));
230 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
235 skb_queue_walk(head
, skb
) {
236 if (bt_cb(skb
)->control
.txseq
== seq
)
243 /* ---- L2CAP sequence number lists ---- */
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
254 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
256 size_t alloc_size
, i
;
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
262 alloc_size
= roundup_pow_of_two(size
);
264 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
268 seq_list
->mask
= alloc_size
- 1;
269 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
270 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
271 for (i
= 0; i
< alloc_size
; i
++)
272 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
279 kfree(seq_list
->list
);
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
285 /* Constant-time check for list membership */
286 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
289 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
291 u16 mask
= seq_list
->mask
;
293 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR
;
296 } else if (seq_list
->head
== seq
) {
297 /* Head can be removed in constant time */
298 seq_list
->head
= seq_list
->list
[seq
& mask
];
299 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
301 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
302 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
303 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
306 /* Walk the list to find the sequence number */
307 u16 prev
= seq_list
->head
;
308 while (seq_list
->list
[prev
& mask
] != seq
) {
309 prev
= seq_list
->list
[prev
& mask
];
310 if (prev
== L2CAP_SEQ_LIST_TAIL
)
311 return L2CAP_SEQ_LIST_CLEAR
;
314 /* Unlink the number from the list and clear it */
315 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
316 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
317 if (seq_list
->tail
== seq
)
318 seq_list
->tail
= prev
;
323 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
329 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
333 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
336 for (i
= 0; i
<= seq_list
->mask
; i
++)
337 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
339 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
340 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
343 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
345 u16 mask
= seq_list
->mask
;
347 /* All appends happen in constant time */
349 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
352 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
353 seq_list
->head
= seq
;
355 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
357 seq_list
->tail
= seq
;
358 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
361 static void l2cap_chan_timeout(struct work_struct
*work
)
363 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
365 struct l2cap_conn
*conn
= chan
->conn
;
368 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
370 mutex_lock(&conn
->chan_lock
);
371 l2cap_chan_lock(chan
);
373 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
374 reason
= ECONNREFUSED
;
375 else if (chan
->state
== BT_CONNECT
&&
376 chan
->sec_level
!= BT_SECURITY_SDP
)
377 reason
= ECONNREFUSED
;
381 l2cap_chan_close(chan
, reason
);
383 l2cap_chan_unlock(chan
);
385 chan
->ops
->close(chan
);
386 mutex_unlock(&conn
->chan_lock
);
388 l2cap_chan_put(chan
);
391 struct l2cap_chan
*l2cap_chan_create(void)
393 struct l2cap_chan
*chan
;
395 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
399 mutex_init(&chan
->lock
);
401 write_lock(&chan_list_lock
);
402 list_add(&chan
->global_l
, &chan_list
);
403 write_unlock(&chan_list_lock
);
405 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
407 chan
->state
= BT_OPEN
;
409 atomic_set(&chan
->refcnt
, 1);
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
414 BT_DBG("chan %p", chan
);
419 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
421 write_lock(&chan_list_lock
);
422 list_del(&chan
->global_l
);
423 write_unlock(&chan_list_lock
);
425 l2cap_chan_put(chan
);
428 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
430 chan
->fcs
= L2CAP_FCS_CRC16
;
431 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
432 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
433 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
434 chan
->sec_level
= BT_SECURITY_LOW
;
436 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
439 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
441 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
442 __le16_to_cpu(chan
->psm
), chan
->dcid
);
444 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
448 switch (chan
->chan_type
) {
449 case L2CAP_CHAN_CONN_ORIENTED
:
450 if (conn
->hcon
->type
== LE_LINK
) {
452 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
453 chan
->scid
= L2CAP_CID_LE_DATA
;
454 chan
->dcid
= L2CAP_CID_LE_DATA
;
456 /* Alloc CID for connection-oriented socket */
457 chan
->scid
= l2cap_alloc_cid(conn
);
458 chan
->omtu
= L2CAP_DEFAULT_MTU
;
462 case L2CAP_CHAN_CONN_LESS
:
463 /* Connectionless socket */
464 chan
->scid
= L2CAP_CID_CONN_LESS
;
465 chan
->dcid
= L2CAP_CID_CONN_LESS
;
466 chan
->omtu
= L2CAP_DEFAULT_MTU
;
469 case L2CAP_CHAN_CONN_FIX_A2MP
:
470 chan
->scid
= L2CAP_CID_A2MP
;
471 chan
->dcid
= L2CAP_CID_A2MP
;
472 chan
->omtu
= L2CAP_A2MP_DEFAULT_MTU
;
473 chan
->imtu
= L2CAP_A2MP_DEFAULT_MTU
;
477 /* Raw socket can send/recv signalling messages only */
478 chan
->scid
= L2CAP_CID_SIGNALING
;
479 chan
->dcid
= L2CAP_CID_SIGNALING
;
480 chan
->omtu
= L2CAP_DEFAULT_MTU
;
483 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
484 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
485 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
486 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
487 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
488 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
490 l2cap_chan_hold(chan
);
492 list_add(&chan
->list
, &conn
->chan_l
);
495 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
497 mutex_lock(&conn
->chan_lock
);
498 __l2cap_chan_add(conn
, chan
);
499 mutex_unlock(&conn
->chan_lock
);
502 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
504 struct l2cap_conn
*conn
= chan
->conn
;
506 __clear_chan_timer(chan
);
508 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
511 /* Delete from channel list */
512 list_del(&chan
->list
);
514 l2cap_chan_put(chan
);
517 hci_conn_put(conn
->hcon
);
520 if (chan
->ops
->teardown
)
521 chan
->ops
->teardown(chan
, err
);
523 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
527 case L2CAP_MODE_BASIC
:
530 case L2CAP_MODE_ERTM
:
531 __clear_retrans_timer(chan
);
532 __clear_monitor_timer(chan
);
533 __clear_ack_timer(chan
);
535 skb_queue_purge(&chan
->srej_q
);
537 l2cap_seq_list_free(&chan
->srej_list
);
538 l2cap_seq_list_free(&chan
->retrans_list
);
542 case L2CAP_MODE_STREAMING
:
543 skb_queue_purge(&chan
->tx_q
);
550 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
552 struct l2cap_conn
*conn
= chan
->conn
;
553 struct sock
*sk
= chan
->sk
;
555 BT_DBG("chan %p state %s sk %p", chan
,
556 state_to_string(chan
->state
), sk
);
558 switch (chan
->state
) {
560 if (chan
->ops
->teardown
)
561 chan
->ops
->teardown(chan
, 0);
566 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
567 conn
->hcon
->type
== ACL_LINK
) {
568 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
569 l2cap_send_disconn_req(conn
, chan
, reason
);
571 l2cap_chan_del(chan
, reason
);
575 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
576 conn
->hcon
->type
== ACL_LINK
) {
577 struct l2cap_conn_rsp rsp
;
580 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
))
581 result
= L2CAP_CR_SEC_BLOCK
;
583 result
= L2CAP_CR_BAD_PSM
;
584 l2cap_state_change(chan
, BT_DISCONN
);
586 rsp
.scid
= cpu_to_le16(chan
->dcid
);
587 rsp
.dcid
= cpu_to_le16(chan
->scid
);
588 rsp
.result
= cpu_to_le16(result
);
589 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
590 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
594 l2cap_chan_del(chan
, reason
);
599 l2cap_chan_del(chan
, reason
);
603 if (chan
->ops
->teardown
)
604 chan
->ops
->teardown(chan
, 0);
609 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
611 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
612 switch (chan
->sec_level
) {
613 case BT_SECURITY_HIGH
:
614 return HCI_AT_DEDICATED_BONDING_MITM
;
615 case BT_SECURITY_MEDIUM
:
616 return HCI_AT_DEDICATED_BONDING
;
618 return HCI_AT_NO_BONDING
;
620 } else if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_SDP
)) {
621 if (chan
->sec_level
== BT_SECURITY_LOW
)
622 chan
->sec_level
= BT_SECURITY_SDP
;
624 if (chan
->sec_level
== BT_SECURITY_HIGH
)
625 return HCI_AT_NO_BONDING_MITM
;
627 return HCI_AT_NO_BONDING
;
629 switch (chan
->sec_level
) {
630 case BT_SECURITY_HIGH
:
631 return HCI_AT_GENERAL_BONDING_MITM
;
632 case BT_SECURITY_MEDIUM
:
633 return HCI_AT_GENERAL_BONDING
;
635 return HCI_AT_NO_BONDING
;
640 /* Service level security */
641 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
643 struct l2cap_conn
*conn
= chan
->conn
;
646 auth_type
= l2cap_get_auth_type(chan
);
648 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
651 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
655 /* Get next available identificator.
656 * 1 - 128 are used by kernel.
657 * 129 - 199 are reserved.
658 * 200 - 254 are used by utilities like l2ping, etc.
661 spin_lock(&conn
->lock
);
663 if (++conn
->tx_ident
> 128)
668 spin_unlock(&conn
->lock
);
673 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
675 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
678 BT_DBG("code 0x%2.2x", code
);
683 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
684 flags
= ACL_START_NO_FLUSH
;
688 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
689 skb
->priority
= HCI_PRIO_MAX
;
691 hci_send_acl(conn
->hchan
, skb
, flags
);
694 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
696 struct hci_conn
*hcon
= chan
->conn
->hcon
;
699 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
702 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
703 lmp_no_flush_capable(hcon
->hdev
))
704 flags
= ACL_START_NO_FLUSH
;
708 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
709 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
712 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
714 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
715 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
717 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
720 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
721 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
728 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
729 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
736 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
738 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
739 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
741 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
744 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
745 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
752 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
753 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
760 static inline void __unpack_control(struct l2cap_chan
*chan
,
763 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
764 __unpack_extended_control(get_unaligned_le32(skb
->data
),
765 &bt_cb(skb
)->control
);
766 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
768 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
769 &bt_cb(skb
)->control
);
770 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
774 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
778 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
779 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
781 if (control
->sframe
) {
782 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
783 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
784 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
786 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
787 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
793 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
797 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
798 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
800 if (control
->sframe
) {
801 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
802 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
803 packed
|= L2CAP_CTRL_FRAME_TYPE
;
805 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
806 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
812 static inline void __pack_control(struct l2cap_chan
*chan
,
813 struct l2cap_ctrl
*control
,
816 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
817 put_unaligned_le32(__pack_extended_control(control
),
818 skb
->data
+ L2CAP_HDR_SIZE
);
820 put_unaligned_le16(__pack_enhanced_control(control
),
821 skb
->data
+ L2CAP_HDR_SIZE
);
825 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
829 struct l2cap_hdr
*lh
;
832 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
833 hlen
= L2CAP_EXT_HDR_SIZE
;
835 hlen
= L2CAP_ENH_HDR_SIZE
;
837 if (chan
->fcs
== L2CAP_FCS_CRC16
)
838 hlen
+= L2CAP_FCS_SIZE
;
840 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
843 return ERR_PTR(-ENOMEM
);
845 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
846 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
847 lh
->cid
= cpu_to_le16(chan
->dcid
);
849 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
850 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
852 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
854 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
855 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
856 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
859 skb
->priority
= HCI_PRIO_MAX
;
863 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
864 struct l2cap_ctrl
*control
)
869 BT_DBG("chan %p, control %p", chan
, control
);
871 if (!control
->sframe
)
874 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
878 if (control
->super
== L2CAP_SUPER_RR
)
879 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
880 else if (control
->super
== L2CAP_SUPER_RNR
)
881 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
883 if (control
->super
!= L2CAP_SUPER_SREJ
) {
884 chan
->last_acked_seq
= control
->reqseq
;
885 __clear_ack_timer(chan
);
888 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
889 control
->final
, control
->poll
, control
->super
);
891 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
892 control_field
= __pack_extended_control(control
);
894 control_field
= __pack_enhanced_control(control
);
896 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
898 l2cap_do_send(chan
, skb
);
901 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
903 struct l2cap_ctrl control
;
905 BT_DBG("chan %p, poll %d", chan
, poll
);
907 memset(&control
, 0, sizeof(control
));
911 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
912 control
.super
= L2CAP_SUPER_RNR
;
914 control
.super
= L2CAP_SUPER_RR
;
916 control
.reqseq
= chan
->buffer_seq
;
917 l2cap_send_sframe(chan
, &control
);
920 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
922 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
925 static void l2cap_send_conn_req(struct l2cap_chan
*chan
)
927 struct l2cap_conn
*conn
= chan
->conn
;
928 struct l2cap_conn_req req
;
930 req
.scid
= cpu_to_le16(chan
->scid
);
933 chan
->ident
= l2cap_get_ident(conn
);
935 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
937 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
940 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
942 /* This clears all conf flags, including CONF_NOT_COMPLETE */
943 chan
->conf_state
= 0;
944 __clear_chan_timer(chan
);
946 chan
->state
= BT_CONNECTED
;
948 if (chan
->ops
->ready
)
949 chan
->ops
->ready(chan
);
952 static void l2cap_do_start(struct l2cap_chan
*chan
)
954 struct l2cap_conn
*conn
= chan
->conn
;
956 if (conn
->hcon
->type
== LE_LINK
) {
957 l2cap_chan_ready(chan
);
961 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
962 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
965 if (l2cap_chan_check_security(chan
) &&
966 __l2cap_no_conn_pending(chan
))
967 l2cap_send_conn_req(chan
);
969 struct l2cap_info_req req
;
970 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
972 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
973 conn
->info_ident
= l2cap_get_ident(conn
);
975 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
977 l2cap_send_cmd(conn
, conn
->info_ident
,
978 L2CAP_INFO_REQ
, sizeof(req
), &req
);
982 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
984 u32 local_feat_mask
= l2cap_feat_mask
;
986 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
989 case L2CAP_MODE_ERTM
:
990 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
991 case L2CAP_MODE_STREAMING
:
992 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
998 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
1000 struct sock
*sk
= chan
->sk
;
1001 struct l2cap_disconn_req req
;
1006 if (chan
->mode
== L2CAP_MODE_ERTM
) {
1007 __clear_retrans_timer(chan
);
1008 __clear_monitor_timer(chan
);
1009 __clear_ack_timer(chan
);
1012 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1013 __l2cap_state_change(chan
, BT_DISCONN
);
1017 req
.dcid
= cpu_to_le16(chan
->dcid
);
1018 req
.scid
= cpu_to_le16(chan
->scid
);
1019 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1020 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
1023 __l2cap_state_change(chan
, BT_DISCONN
);
1024 __l2cap_chan_set_err(chan
, err
);
1028 /* ---- L2CAP connections ---- */
1029 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1031 struct l2cap_chan
*chan
, *tmp
;
1033 BT_DBG("conn %p", conn
);
1035 mutex_lock(&conn
->chan_lock
);
1037 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1038 struct sock
*sk
= chan
->sk
;
1040 l2cap_chan_lock(chan
);
1042 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1043 l2cap_chan_unlock(chan
);
1047 if (chan
->state
== BT_CONNECT
) {
1048 if (!l2cap_chan_check_security(chan
) ||
1049 !__l2cap_no_conn_pending(chan
)) {
1050 l2cap_chan_unlock(chan
);
1054 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1055 && test_bit(CONF_STATE2_DEVICE
,
1056 &chan
->conf_state
)) {
1057 l2cap_chan_close(chan
, ECONNRESET
);
1058 l2cap_chan_unlock(chan
);
1062 l2cap_send_conn_req(chan
);
1064 } else if (chan
->state
== BT_CONNECT2
) {
1065 struct l2cap_conn_rsp rsp
;
1067 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1068 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1070 if (l2cap_chan_check_security(chan
)) {
1072 if (test_bit(BT_SK_DEFER_SETUP
,
1073 &bt_sk(sk
)->flags
)) {
1074 struct sock
*parent
= bt_sk(sk
)->parent
;
1075 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1076 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1078 parent
->sk_data_ready(parent
, 0);
1081 __l2cap_state_change(chan
, BT_CONFIG
);
1082 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
1083 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
1087 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1088 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1091 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1094 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1095 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1096 l2cap_chan_unlock(chan
);
1100 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1101 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1102 l2cap_build_conf_req(chan
, buf
), buf
);
1103 chan
->num_conf_req
++;
1106 l2cap_chan_unlock(chan
);
1109 mutex_unlock(&conn
->chan_lock
);
1112 /* Find socket with cid and source/destination bdaddr.
1113 * Returns closest match, locked.
1115 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1119 struct l2cap_chan
*c
, *c1
= NULL
;
1121 read_lock(&chan_list_lock
);
1123 list_for_each_entry(c
, &chan_list
, global_l
) {
1124 struct sock
*sk
= c
->sk
;
1126 if (state
&& c
->state
!= state
)
1129 if (c
->scid
== cid
) {
1130 int src_match
, dst_match
;
1131 int src_any
, dst_any
;
1134 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1135 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1136 if (src_match
&& dst_match
) {
1137 read_unlock(&chan_list_lock
);
1142 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1143 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1144 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1145 (src_any
&& dst_any
))
1150 read_unlock(&chan_list_lock
);
1155 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1157 struct sock
*parent
, *sk
;
1158 struct l2cap_chan
*chan
, *pchan
;
1162 /* Check if we have socket listening on cid */
1163 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1164 conn
->src
, conn
->dst
);
1172 chan
= pchan
->ops
->new_connection(pchan
);
1178 hci_conn_hold(conn
->hcon
);
1180 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1181 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1183 bt_accept_enqueue(parent
, sk
);
1185 l2cap_chan_add(conn
, chan
);
1187 l2cap_chan_ready(chan
);
1190 release_sock(parent
);
1193 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1195 struct l2cap_chan
*chan
;
1197 BT_DBG("conn %p", conn
);
1199 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1200 l2cap_le_conn_ready(conn
);
1202 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1203 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
1205 mutex_lock(&conn
->chan_lock
);
1207 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1209 l2cap_chan_lock(chan
);
1211 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1212 l2cap_chan_unlock(chan
);
1216 if (conn
->hcon
->type
== LE_LINK
) {
1217 if (smp_conn_security(conn
, chan
->sec_level
))
1218 l2cap_chan_ready(chan
);
1220 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1221 struct sock
*sk
= chan
->sk
;
1222 __clear_chan_timer(chan
);
1224 __l2cap_state_change(chan
, BT_CONNECTED
);
1225 sk
->sk_state_change(sk
);
1228 } else if (chan
->state
== BT_CONNECT
)
1229 l2cap_do_start(chan
);
1231 l2cap_chan_unlock(chan
);
1234 mutex_unlock(&conn
->chan_lock
);
1237 /* Notify sockets that we cannot guarantee reliability anymore */
1238 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1240 struct l2cap_chan
*chan
;
1242 BT_DBG("conn %p", conn
);
1244 mutex_lock(&conn
->chan_lock
);
1246 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1247 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1248 __l2cap_chan_set_err(chan
, err
);
1251 mutex_unlock(&conn
->chan_lock
);
1254 static void l2cap_info_timeout(struct work_struct
*work
)
1256 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1259 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1260 conn
->info_ident
= 0;
1262 l2cap_conn_start(conn
);
1265 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1267 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1268 struct l2cap_chan
*chan
, *l
;
1273 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1275 kfree_skb(conn
->rx_skb
);
1277 mutex_lock(&conn
->chan_lock
);
1280 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1281 l2cap_chan_hold(chan
);
1282 l2cap_chan_lock(chan
);
1284 l2cap_chan_del(chan
, err
);
1286 l2cap_chan_unlock(chan
);
1288 chan
->ops
->close(chan
);
1289 l2cap_chan_put(chan
);
1292 mutex_unlock(&conn
->chan_lock
);
1294 hci_chan_del(conn
->hchan
);
1296 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1297 cancel_delayed_work_sync(&conn
->info_timer
);
1299 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1300 cancel_delayed_work_sync(&conn
->security_timer
);
1301 smp_chan_destroy(conn
);
1304 hcon
->l2cap_data
= NULL
;
1308 static void security_timeout(struct work_struct
*work
)
1310 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1311 security_timer
.work
);
1313 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1316 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1318 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1319 struct hci_chan
*hchan
;
1324 hchan
= hci_chan_create(hcon
);
1328 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
1330 hci_chan_del(hchan
);
1334 hcon
->l2cap_data
= conn
;
1336 conn
->hchan
= hchan
;
1338 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1340 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
1341 conn
->mtu
= hcon
->hdev
->le_mtu
;
1343 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1345 conn
->src
= &hcon
->hdev
->bdaddr
;
1346 conn
->dst
= &hcon
->dst
;
1348 conn
->feat_mask
= 0;
1350 spin_lock_init(&conn
->lock
);
1351 mutex_init(&conn
->chan_lock
);
1353 INIT_LIST_HEAD(&conn
->chan_l
);
1355 if (hcon
->type
== LE_LINK
)
1356 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1358 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1360 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1365 /* ---- Socket interface ---- */
1367 /* Find socket with psm and source / destination bdaddr.
1368 * Returns closest match.
1370 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1374 struct l2cap_chan
*c
, *c1
= NULL
;
1376 read_lock(&chan_list_lock
);
1378 list_for_each_entry(c
, &chan_list
, global_l
) {
1379 struct sock
*sk
= c
->sk
;
1381 if (state
&& c
->state
!= state
)
1384 if (c
->psm
== psm
) {
1385 int src_match
, dst_match
;
1386 int src_any
, dst_any
;
1389 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1390 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1391 if (src_match
&& dst_match
) {
1392 read_unlock(&chan_list_lock
);
1397 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1398 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1399 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1400 (src_any
&& dst_any
))
1405 read_unlock(&chan_list_lock
);
1410 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1411 bdaddr_t
*dst
, u8 dst_type
)
1413 struct sock
*sk
= chan
->sk
;
1414 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1415 struct l2cap_conn
*conn
;
1416 struct hci_conn
*hcon
;
1417 struct hci_dev
*hdev
;
1421 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src
), batostr(dst
),
1422 dst_type
, __le16_to_cpu(chan
->psm
));
1424 hdev
= hci_get_route(dst
, src
);
1426 return -EHOSTUNREACH
;
1430 l2cap_chan_lock(chan
);
1432 /* PSM must be odd and lsb of upper byte must be 0 */
1433 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1434 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1439 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1444 switch (chan
->mode
) {
1445 case L2CAP_MODE_BASIC
:
1447 case L2CAP_MODE_ERTM
:
1448 case L2CAP_MODE_STREAMING
:
1457 switch (chan
->state
) {
1461 /* Already connecting */
1466 /* Already connected */
1480 /* Set destination address and psm */
1482 bacpy(&bt_sk(sk
)->dst
, dst
);
1488 auth_type
= l2cap_get_auth_type(chan
);
1490 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1491 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1492 chan
->sec_level
, auth_type
);
1494 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1495 chan
->sec_level
, auth_type
);
1498 err
= PTR_ERR(hcon
);
1502 conn
= l2cap_conn_add(hcon
, 0);
1509 if (hcon
->type
== LE_LINK
) {
1512 if (!list_empty(&conn
->chan_l
)) {
1521 /* Update source addr of the socket */
1522 bacpy(src
, conn
->src
);
1524 l2cap_chan_unlock(chan
);
1525 l2cap_chan_add(conn
, chan
);
1526 l2cap_chan_lock(chan
);
1528 l2cap_state_change(chan
, BT_CONNECT
);
1529 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1531 if (hcon
->state
== BT_CONNECTED
) {
1532 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1533 __clear_chan_timer(chan
);
1534 if (l2cap_chan_check_security(chan
))
1535 l2cap_state_change(chan
, BT_CONNECTED
);
1537 l2cap_do_start(chan
);
1543 l2cap_chan_unlock(chan
);
1544 hci_dev_unlock(hdev
);
1549 int __l2cap_wait_ack(struct sock
*sk
)
1551 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1552 DECLARE_WAITQUEUE(wait
, current
);
1556 add_wait_queue(sk_sleep(sk
), &wait
);
1557 set_current_state(TASK_INTERRUPTIBLE
);
1558 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1562 if (signal_pending(current
)) {
1563 err
= sock_intr_errno(timeo
);
1568 timeo
= schedule_timeout(timeo
);
1570 set_current_state(TASK_INTERRUPTIBLE
);
1572 err
= sock_error(sk
);
1576 set_current_state(TASK_RUNNING
);
1577 remove_wait_queue(sk_sleep(sk
), &wait
);
1581 static void l2cap_monitor_timeout(struct work_struct
*work
)
1583 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1584 monitor_timer
.work
);
1586 BT_DBG("chan %p", chan
);
1588 l2cap_chan_lock(chan
);
1591 l2cap_chan_unlock(chan
);
1592 l2cap_chan_put(chan
);
1596 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1598 l2cap_chan_unlock(chan
);
1599 l2cap_chan_put(chan
);
1602 static void l2cap_retrans_timeout(struct work_struct
*work
)
1604 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1605 retrans_timer
.work
);
1607 BT_DBG("chan %p", chan
);
1609 l2cap_chan_lock(chan
);
1612 l2cap_chan_unlock(chan
);
1613 l2cap_chan_put(chan
);
1617 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1618 l2cap_chan_unlock(chan
);
1619 l2cap_chan_put(chan
);
/* Transmit pre-segmented SDU fragments in streaming mode.
 *
 * @skbs is spliced onto the channel's tx queue and every queued frame
 * is sent immediately: streaming mode carries no acknowledgements, so
 * each frame is numbered with the next tx sequence and sent once.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->control.retries = 1;
		control = &bt_cb(skb)->control;

		/* reqseq stays 0: nothing is acknowledged in streaming mode */
		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		/* Append FCS after the control field has been written */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1658 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1660 struct sk_buff
*skb
, *tx_skb
;
1661 struct l2cap_ctrl
*control
;
1664 BT_DBG("chan %p", chan
);
1666 if (chan
->state
!= BT_CONNECTED
)
1669 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1672 while (chan
->tx_send_head
&&
1673 chan
->unacked_frames
< chan
->remote_tx_win
&&
1674 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1676 skb
= chan
->tx_send_head
;
1678 bt_cb(skb
)->control
.retries
= 1;
1679 control
= &bt_cb(skb
)->control
;
1681 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1684 control
->reqseq
= chan
->buffer_seq
;
1685 chan
->last_acked_seq
= chan
->buffer_seq
;
1686 control
->txseq
= chan
->next_tx_seq
;
1688 __pack_control(chan
, control
, skb
);
1690 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1691 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1692 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1695 /* Clone after data has been modified. Data is assumed to be
1696 read-only (for locking purposes) on cloned sk_buffs.
1698 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1703 __set_retrans_timer(chan
);
1705 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1706 chan
->unacked_frames
++;
1707 chan
->frames_sent
++;
1710 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1711 chan
->tx_send_head
= NULL
;
1713 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1715 l2cap_do_send(chan
, tx_skb
);
1716 BT_DBG("Sent txseq %d", (int)control
->txseq
);
1719 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent
,
1720 (int) chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1725 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1727 struct l2cap_ctrl control
;
1728 struct sk_buff
*skb
;
1729 struct sk_buff
*tx_skb
;
1732 BT_DBG("chan %p", chan
);
1734 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1737 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1738 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1740 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1742 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1747 bt_cb(skb
)->control
.retries
++;
1748 control
= bt_cb(skb
)->control
;
1750 if (chan
->max_tx
!= 0 &&
1751 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1752 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1753 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
1754 l2cap_seq_list_clear(&chan
->retrans_list
);
1758 control
.reqseq
= chan
->buffer_seq
;
1759 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1764 if (skb_cloned(skb
)) {
1765 /* Cloned sk_buffs are read-only, so we need a
1768 tx_skb
= skb_copy(skb
, GFP_ATOMIC
);
1770 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1774 l2cap_seq_list_clear(&chan
->retrans_list
);
1778 /* Update skb contents */
1779 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1780 put_unaligned_le32(__pack_extended_control(&control
),
1781 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1783 put_unaligned_le16(__pack_enhanced_control(&control
),
1784 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1787 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1788 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
1789 put_unaligned_le16(fcs
, skb_put(tx_skb
,
1793 l2cap_do_send(chan
, tx_skb
);
1795 BT_DBG("Resent txseq %d", control
.txseq
);
1797 chan
->last_acked_seq
= chan
->buffer_seq
;
/* Retransmit the single I-frame the peer asked for via SREJ.
 *
 * The requested sequence number (@control->reqseq) is appended to the
 * channel's retransmit list and the resend machinery is kicked.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
1810 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
1811 struct l2cap_ctrl
*control
)
1813 struct sk_buff
*skb
;
1815 BT_DBG("chan %p, control %p", chan
, control
);
1818 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
1820 l2cap_seq_list_clear(&chan
->retrans_list
);
1822 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1825 if (chan
->unacked_frames
) {
1826 skb_queue_walk(&chan
->tx_q
, skb
) {
1827 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
1828 skb
== chan
->tx_send_head
)
1832 skb_queue_walk_from(&chan
->tx_q
, skb
) {
1833 if (skb
== chan
->tx_send_head
)
1836 l2cap_seq_list_append(&chan
->retrans_list
,
1837 bt_cb(skb
)->control
.txseq
);
1840 l2cap_ertm_resend(chan
);
1844 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1846 struct l2cap_ctrl control
;
1847 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
1848 chan
->last_acked_seq
);
1851 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1852 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
1854 memset(&control
, 0, sizeof(control
));
1857 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
1858 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
1859 __clear_ack_timer(chan
);
1860 control
.super
= L2CAP_SUPER_RNR
;
1861 control
.reqseq
= chan
->buffer_seq
;
1862 l2cap_send_sframe(chan
, &control
);
1864 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
1865 l2cap_ertm_send(chan
);
1866 /* If any i-frames were sent, they included an ack */
1867 if (chan
->buffer_seq
== chan
->last_acked_seq
)
1871 /* Ack now if the tx window is 3/4ths full.
1872 * Calculate without mul or div
1874 threshold
= chan
->tx_win
;
1875 threshold
+= threshold
<< 1;
1878 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack
,
1881 if (frames_to_ack
>= threshold
) {
1882 __clear_ack_timer(chan
);
1883 control
.super
= L2CAP_SUPER_RR
;
1884 control
.reqseq
= chan
->buffer_seq
;
1885 l2cap_send_sframe(chan
, &control
);
1890 __set_ack_timer(chan
);
1894 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
1895 struct msghdr
*msg
, int len
,
1896 int count
, struct sk_buff
*skb
)
1898 struct l2cap_conn
*conn
= chan
->conn
;
1899 struct sk_buff
**frag
;
1902 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1908 /* Continuation fragments (no L2CAP header) */
1909 frag
= &skb_shinfo(skb
)->frag_list
;
1911 struct sk_buff
*tmp
;
1913 count
= min_t(unsigned int, conn
->mtu
, len
);
1915 tmp
= chan
->ops
->alloc_skb(chan
, count
,
1916 msg
->msg_flags
& MSG_DONTWAIT
);
1918 return PTR_ERR(tmp
);
1922 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1925 (*frag
)->priority
= skb
->priority
;
1930 skb
->len
+= (*frag
)->len
;
1931 skb
->data_len
+= (*frag
)->len
;
1933 frag
= &(*frag
)->next
;
1939 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
1940 struct msghdr
*msg
, size_t len
,
1943 struct l2cap_conn
*conn
= chan
->conn
;
1944 struct sk_buff
*skb
;
1945 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
1946 struct l2cap_hdr
*lh
;
1948 BT_DBG("chan %p len %d priority %u", chan
, (int)len
, priority
);
1950 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1952 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
1953 msg
->msg_flags
& MSG_DONTWAIT
);
1957 skb
->priority
= priority
;
1959 /* Create L2CAP header */
1960 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1961 lh
->cid
= cpu_to_le16(chan
->dcid
);
1962 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
1963 put_unaligned(chan
->psm
, skb_put(skb
, L2CAP_PSMLEN_SIZE
));
1965 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1966 if (unlikely(err
< 0)) {
1968 return ERR_PTR(err
);
1973 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
1974 struct msghdr
*msg
, size_t len
,
1977 struct l2cap_conn
*conn
= chan
->conn
;
1978 struct sk_buff
*skb
;
1980 struct l2cap_hdr
*lh
;
1982 BT_DBG("chan %p len %d", chan
, (int)len
);
1984 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
1986 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
1987 msg
->msg_flags
& MSG_DONTWAIT
);
1991 skb
->priority
= priority
;
1993 /* Create L2CAP header */
1994 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1995 lh
->cid
= cpu_to_le16(chan
->dcid
);
1996 lh
->len
= cpu_to_le16(len
);
1998 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1999 if (unlikely(err
< 0)) {
2001 return ERR_PTR(err
);
2006 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2007 struct msghdr
*msg
, size_t len
,
2010 struct l2cap_conn
*conn
= chan
->conn
;
2011 struct sk_buff
*skb
;
2012 int err
, count
, hlen
;
2013 struct l2cap_hdr
*lh
;
2015 BT_DBG("chan %p len %d", chan
, (int)len
);
2018 return ERR_PTR(-ENOTCONN
);
2020 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2021 hlen
= L2CAP_EXT_HDR_SIZE
;
2023 hlen
= L2CAP_ENH_HDR_SIZE
;
2026 hlen
+= L2CAP_SDULEN_SIZE
;
2028 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2029 hlen
+= L2CAP_FCS_SIZE
;
2031 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2033 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2034 msg
->msg_flags
& MSG_DONTWAIT
);
2038 /* Create L2CAP header */
2039 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2040 lh
->cid
= cpu_to_le16(chan
->dcid
);
2041 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2043 /* Control header is populated later */
2044 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2045 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2047 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2050 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2052 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2053 if (unlikely(err
< 0)) {
2055 return ERR_PTR(err
);
2058 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2059 bt_cb(skb
)->control
.retries
= 0;
2063 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2064 struct sk_buff_head
*seg_queue
,
2065 struct msghdr
*msg
, size_t len
)
2067 struct sk_buff
*skb
;
2073 BT_DBG("chan %p, msg %p, len %d", chan
, msg
, (int)len
);
2075 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2076 * so fragmented skbs are not used. The HCI layer's handling
2077 * of fragmented skbs is not compatible with ERTM's queueing.
2080 /* PDU size is derived from the HCI MTU */
2081 pdu_len
= chan
->conn
->mtu
;
2083 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2085 /* Adjust for largest possible L2CAP overhead. */
2087 pdu_len
-= L2CAP_FCS_SIZE
;
2089 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2090 pdu_len
-= L2CAP_EXT_HDR_SIZE
;
2092 pdu_len
-= L2CAP_ENH_HDR_SIZE
;
2094 /* Remote device may have requested smaller PDUs */
2095 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2097 if (len
<= pdu_len
) {
2098 sar
= L2CAP_SAR_UNSEGMENTED
;
2102 sar
= L2CAP_SAR_START
;
2104 pdu_len
-= L2CAP_SDULEN_SIZE
;
2108 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2111 __skb_queue_purge(seg_queue
);
2112 return PTR_ERR(skb
);
2115 bt_cb(skb
)->control
.sar
= sar
;
2116 __skb_queue_tail(seg_queue
, skb
);
2121 pdu_len
+= L2CAP_SDULEN_SIZE
;
2124 if (len
<= pdu_len
) {
2125 sar
= L2CAP_SAR_END
;
2128 sar
= L2CAP_SAR_CONTINUE
;
2135 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2138 struct sk_buff
*skb
;
2140 struct sk_buff_head seg_queue
;
2142 /* Connectionless channel */
2143 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2144 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2146 return PTR_ERR(skb
);
2148 l2cap_do_send(chan
, skb
);
2152 switch (chan
->mode
) {
2153 case L2CAP_MODE_BASIC
:
2154 /* Check outgoing MTU */
2155 if (len
> chan
->omtu
)
2158 /* Create a basic PDU */
2159 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2161 return PTR_ERR(skb
);
2163 l2cap_do_send(chan
, skb
);
2167 case L2CAP_MODE_ERTM
:
2168 case L2CAP_MODE_STREAMING
:
2169 /* Check outgoing MTU */
2170 if (len
> chan
->omtu
) {
2175 __skb_queue_head_init(&seg_queue
);
2177 /* Do segmentation before calling in to the state machine,
2178 * since it's possible to block while waiting for memory
2181 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2183 /* The channel could have been closed while segmenting,
2184 * check that it is still connected.
2186 if (chan
->state
!= BT_CONNECTED
) {
2187 __skb_queue_purge(&seg_queue
);
2194 if (chan
->mode
== L2CAP_MODE_ERTM
)
2195 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2197 l2cap_streaming_send(chan
, &seg_queue
);
2201 /* If the skbs were not queued for sending, they'll still be in
2202 * seg_queue and need to be purged.
2204 __skb_queue_purge(&seg_queue
);
2208 BT_DBG("bad state %1.1x", chan
->mode
);
2215 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2217 struct l2cap_ctrl control
;
2220 BT_DBG("chan %p, txseq %d", chan
, txseq
);
2222 memset(&control
, 0, sizeof(control
));
2224 control
.super
= L2CAP_SUPER_SREJ
;
2226 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2227 seq
= __next_seq(chan
, seq
)) {
2228 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2229 control
.reqseq
= seq
;
2230 l2cap_send_sframe(chan
, &control
);
2231 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2235 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2238 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2240 struct l2cap_ctrl control
;
2242 BT_DBG("chan %p", chan
);
2244 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2247 memset(&control
, 0, sizeof(control
));
2249 control
.super
= L2CAP_SUPER_SREJ
;
2250 control
.reqseq
= chan
->srej_list
.tail
;
2251 l2cap_send_sframe(chan
, &control
);
2254 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2256 struct l2cap_ctrl control
;
2260 BT_DBG("chan %p, txseq %d", chan
, txseq
);
2262 memset(&control
, 0, sizeof(control
));
2264 control
.super
= L2CAP_SUPER_SREJ
;
2266 /* Capture initial list head to allow only one pass through the list. */
2267 initial_head
= chan
->srej_list
.head
;
2270 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2271 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2274 control
.reqseq
= seq
;
2275 l2cap_send_sframe(chan
, &control
);
2276 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2277 } while (chan
->srej_list
.head
!= initial_head
);
2280 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2282 struct sk_buff
*acked_skb
;
2285 BT_DBG("chan %p, reqseq %d", chan
, reqseq
);
2287 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2290 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2291 chan
->expected_ack_seq
, chan
->unacked_frames
);
2293 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2294 ackseq
= __next_seq(chan
, ackseq
)) {
2296 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2298 skb_unlink(acked_skb
, &chan
->tx_q
);
2299 kfree_skb(acked_skb
);
2300 chan
->unacked_frames
--;
2304 chan
->expected_ack_seq
= reqseq
;
2306 if (chan
->unacked_frames
== 0)
2307 __clear_retrans_timer(chan
);
2309 BT_DBG("unacked_frames %d", (int) chan
->unacked_frames
);
/* Abandon the SREJ_SENT receive state.
 *
 * Rewinds the expected tx sequence back to buffer_seq, forgets every
 * outstanding SREJ request, drops all out-of-order frames buffered in
 * srej_q and returns the receiver to the plain RECV state.
 */
static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
{
	BT_DBG("chan %p", chan);

	chan->expected_tx_seq = chan->buffer_seq;
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);
	chan->rx_state = L2CAP_RX_STATE_RECV;
}
2322 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2323 struct l2cap_ctrl
*control
,
2324 struct sk_buff_head
*skbs
, u8 event
)
2326 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2330 case L2CAP_EV_DATA_REQUEST
:
2331 if (chan
->tx_send_head
== NULL
)
2332 chan
->tx_send_head
= skb_peek(skbs
);
2334 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2335 l2cap_ertm_send(chan
);
2337 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2338 BT_DBG("Enter LOCAL_BUSY");
2339 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2341 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2342 /* The SREJ_SENT state must be aborted if we are to
2343 * enter the LOCAL_BUSY state.
2345 l2cap_abort_rx_srej_sent(chan
);
2348 l2cap_send_ack(chan
);
2351 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2352 BT_DBG("Exit LOCAL_BUSY");
2353 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2355 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2356 struct l2cap_ctrl local_control
;
2358 memset(&local_control
, 0, sizeof(local_control
));
2359 local_control
.sframe
= 1;
2360 local_control
.super
= L2CAP_SUPER_RR
;
2361 local_control
.poll
= 1;
2362 local_control
.reqseq
= chan
->buffer_seq
;
2363 l2cap_send_sframe(chan
, &local_control
);
2365 chan
->retry_count
= 1;
2366 __set_monitor_timer(chan
);
2367 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2370 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2371 l2cap_process_reqseq(chan
, control
->reqseq
);
2373 case L2CAP_EV_EXPLICIT_POLL
:
2374 l2cap_send_rr_or_rnr(chan
, 1);
2375 chan
->retry_count
= 1;
2376 __set_monitor_timer(chan
);
2377 __clear_ack_timer(chan
);
2378 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2380 case L2CAP_EV_RETRANS_TO
:
2381 l2cap_send_rr_or_rnr(chan
, 1);
2382 chan
->retry_count
= 1;
2383 __set_monitor_timer(chan
);
2384 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2386 case L2CAP_EV_RECV_FBIT
:
2387 /* Nothing to process */
2394 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2395 struct l2cap_ctrl
*control
,
2396 struct sk_buff_head
*skbs
, u8 event
)
2398 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2402 case L2CAP_EV_DATA_REQUEST
:
2403 if (chan
->tx_send_head
== NULL
)
2404 chan
->tx_send_head
= skb_peek(skbs
);
2405 /* Queue data, but don't send. */
2406 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2408 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2409 BT_DBG("Enter LOCAL_BUSY");
2410 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2412 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2413 /* The SREJ_SENT state must be aborted if we are to
2414 * enter the LOCAL_BUSY state.
2416 l2cap_abort_rx_srej_sent(chan
);
2419 l2cap_send_ack(chan
);
2422 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2423 BT_DBG("Exit LOCAL_BUSY");
2424 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2426 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2427 struct l2cap_ctrl local_control
;
2428 memset(&local_control
, 0, sizeof(local_control
));
2429 local_control
.sframe
= 1;
2430 local_control
.super
= L2CAP_SUPER_RR
;
2431 local_control
.poll
= 1;
2432 local_control
.reqseq
= chan
->buffer_seq
;
2433 l2cap_send_sframe(chan
, &local_control
);
2435 chan
->retry_count
= 1;
2436 __set_monitor_timer(chan
);
2437 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2440 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2441 l2cap_process_reqseq(chan
, control
->reqseq
);
2445 case L2CAP_EV_RECV_FBIT
:
2446 if (control
&& control
->final
) {
2447 __clear_monitor_timer(chan
);
2448 if (chan
->unacked_frames
> 0)
2449 __set_retrans_timer(chan
);
2450 chan
->retry_count
= 0;
2451 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2452 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2455 case L2CAP_EV_EXPLICIT_POLL
:
2458 case L2CAP_EV_MONITOR_TO
:
2459 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2460 l2cap_send_rr_or_rnr(chan
, 1);
2461 __set_monitor_timer(chan
);
2462 chan
->retry_count
++;
2464 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
2472 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2473 struct sk_buff_head
*skbs
, u8 event
)
2475 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2476 chan
, control
, skbs
, event
, chan
->tx_state
);
2478 switch (chan
->tx_state
) {
2479 case L2CAP_TX_STATE_XMIT
:
2480 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2482 case L2CAP_TX_STATE_WAIT_F
:
2483 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
/* Hand a received control field to the TX state machine so it can
 * process the acknowledgement number (reqseq) and the F-bit.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
/* Hand a received control field to the TX state machine for F-bit
 * processing only (no reqseq handling).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2505 /* Copy frame to all raw sockets on that connection */
2506 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2508 struct sk_buff
*nskb
;
2509 struct l2cap_chan
*chan
;
2511 BT_DBG("conn %p", conn
);
2513 mutex_lock(&conn
->chan_lock
);
2515 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2516 struct sock
*sk
= chan
->sk
;
2517 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2520 /* Don't send frame to the socket it came from */
2523 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2527 if (chan
->ops
->recv(chan
, nskb
))
2531 mutex_unlock(&conn
->chan_lock
);
2534 /* ---- L2CAP signalling commands ---- */
2535 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2536 u8 code
, u8 ident
, u16 dlen
, void *data
)
2538 struct sk_buff
*skb
, **frag
;
2539 struct l2cap_cmd_hdr
*cmd
;
2540 struct l2cap_hdr
*lh
;
2543 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2544 conn
, code
, ident
, dlen
);
2546 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2547 count
= min_t(unsigned int, conn
->mtu
, len
);
2549 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2553 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2554 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2556 if (conn
->hcon
->type
== LE_LINK
)
2557 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2559 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_SIGNALING
);
2561 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2564 cmd
->len
= cpu_to_le16(dlen
);
2567 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2568 memcpy(skb_put(skb
, count
), data
, count
);
2574 /* Continuation fragments (no L2CAP header) */
2575 frag
= &skb_shinfo(skb
)->frag_list
;
2577 count
= min_t(unsigned int, conn
->mtu
, len
);
2579 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2583 memcpy(skb_put(*frag
, count
), data
, count
);
2588 frag
= &(*frag
)->next
;
2598 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2600 struct l2cap_conf_opt
*opt
= *ptr
;
2603 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2611 *val
= *((u8
*) opt
->val
);
2615 *val
= get_unaligned_le16(opt
->val
);
2619 *val
= get_unaligned_le32(opt
->val
);
2623 *val
= (unsigned long) opt
->val
;
2627 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2631 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2633 struct l2cap_conf_opt
*opt
= *ptr
;
2635 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2642 *((u8
*) opt
->val
) = val
;
2646 put_unaligned_le16(val
, opt
->val
);
2650 put_unaligned_le32(val
, opt
->val
);
2654 memcpy(opt
->val
, (void *) val
, len
);
2658 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2661 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2663 struct l2cap_conf_efs efs
;
2665 switch (chan
->mode
) {
2666 case L2CAP_MODE_ERTM
:
2667 efs
.id
= chan
->local_id
;
2668 efs
.stype
= chan
->local_stype
;
2669 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2670 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2671 efs
.acc_lat
= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2672 efs
.flush_to
= __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO
);
2675 case L2CAP_MODE_STREAMING
:
2677 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2678 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2679 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2688 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
2689 (unsigned long) &efs
);
2692 static void l2cap_ack_timeout(struct work_struct
*work
)
2694 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2698 BT_DBG("chan %p", chan
);
2700 l2cap_chan_lock(chan
);
2702 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2703 chan
->last_acked_seq
);
2706 l2cap_send_rr_or_rnr(chan
, 0);
2708 l2cap_chan_unlock(chan
);
2709 l2cap_chan_put(chan
);
2712 int l2cap_ertm_init(struct l2cap_chan
*chan
)
2716 chan
->next_tx_seq
= 0;
2717 chan
->expected_tx_seq
= 0;
2718 chan
->expected_ack_seq
= 0;
2719 chan
->unacked_frames
= 0;
2720 chan
->buffer_seq
= 0;
2721 chan
->frames_sent
= 0;
2722 chan
->last_acked_seq
= 0;
2724 chan
->sdu_last_frag
= NULL
;
2727 skb_queue_head_init(&chan
->tx_q
);
2729 if (chan
->mode
!= L2CAP_MODE_ERTM
)
2732 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2733 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2735 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
2736 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
2737 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2739 skb_queue_head_init(&chan
->srej_q
);
2741 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
2745 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
2747 l2cap_seq_list_free(&chan
->srej_list
);
2752 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2755 case L2CAP_MODE_STREAMING
:
2756 case L2CAP_MODE_ERTM
:
2757 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2761 return L2CAP_MODE_BASIC
;
/* Extended window size is usable only when high-speed support is
 * enabled and the remote advertised L2CAP_FEAT_EXT_WINDOW in its
 * feature mask.
 */
static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
}
/* Extended flow specification is usable only when high-speed support
 * is enabled and the remote advertised L2CAP_FEAT_EXT_FLOW in its
 * feature mask.
 */
static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
{
	return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
}
2775 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
2777 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
2778 __l2cap_ews_supported(chan
)) {
2779 /* use extended control field */
2780 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2781 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2783 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
2784 L2CAP_DEFAULT_TX_WINDOW
);
2785 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2789 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2791 struct l2cap_conf_req
*req
= data
;
2792 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2793 void *ptr
= req
->data
;
2796 BT_DBG("chan %p", chan
);
2798 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2801 switch (chan
->mode
) {
2802 case L2CAP_MODE_STREAMING
:
2803 case L2CAP_MODE_ERTM
:
2804 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2807 if (__l2cap_efs_supported(chan
))
2808 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2812 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2817 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2818 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2820 switch (chan
->mode
) {
2821 case L2CAP_MODE_BASIC
:
2822 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2823 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2826 rfc
.mode
= L2CAP_MODE_BASIC
;
2828 rfc
.max_transmit
= 0;
2829 rfc
.retrans_timeout
= 0;
2830 rfc
.monitor_timeout
= 0;
2831 rfc
.max_pdu_size
= 0;
2833 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2834 (unsigned long) &rfc
);
2837 case L2CAP_MODE_ERTM
:
2838 rfc
.mode
= L2CAP_MODE_ERTM
;
2839 rfc
.max_transmit
= chan
->max_tx
;
2840 rfc
.retrans_timeout
= 0;
2841 rfc
.monitor_timeout
= 0;
2843 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2844 L2CAP_EXT_HDR_SIZE
-
2847 rfc
.max_pdu_size
= cpu_to_le16(size
);
2849 l2cap_txwin_setup(chan
);
2851 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2852 L2CAP_DEFAULT_TX_WINDOW
);
2854 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2855 (unsigned long) &rfc
);
2857 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2858 l2cap_add_opt_efs(&ptr
, chan
);
2860 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2863 if (chan
->fcs
== L2CAP_FCS_NONE
||
2864 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2865 chan
->fcs
= L2CAP_FCS_NONE
;
2866 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2869 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2870 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2874 case L2CAP_MODE_STREAMING
:
2875 l2cap_txwin_setup(chan
);
2876 rfc
.mode
= L2CAP_MODE_STREAMING
;
2878 rfc
.max_transmit
= 0;
2879 rfc
.retrans_timeout
= 0;
2880 rfc
.monitor_timeout
= 0;
2882 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2883 L2CAP_EXT_HDR_SIZE
-
2886 rfc
.max_pdu_size
= cpu_to_le16(size
);
2888 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2889 (unsigned long) &rfc
);
2891 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2892 l2cap_add_opt_efs(&ptr
, chan
);
2894 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2897 if (chan
->fcs
== L2CAP_FCS_NONE
||
2898 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2899 chan
->fcs
= L2CAP_FCS_NONE
;
2900 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2905 req
->dcid
= cpu_to_le16(chan
->dcid
);
2906 req
->flags
= __constant_cpu_to_le16(0);
2911 static int l2cap_parse_conf_req(struct l2cap_chan
*chan
, void *data
)
2913 struct l2cap_conf_rsp
*rsp
= data
;
2914 void *ptr
= rsp
->data
;
2915 void *req
= chan
->conf_req
;
2916 int len
= chan
->conf_len
;
2917 int type
, hint
, olen
;
2919 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2920 struct l2cap_conf_efs efs
;
2922 u16 mtu
= L2CAP_DEFAULT_MTU
;
2923 u16 result
= L2CAP_CONF_SUCCESS
;
2926 BT_DBG("chan %p", chan
);
2928 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2929 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2931 hint
= type
& L2CAP_CONF_HINT
;
2932 type
&= L2CAP_CONF_MASK
;
2935 case L2CAP_CONF_MTU
:
2939 case L2CAP_CONF_FLUSH_TO
:
2940 chan
->flush_to
= val
;
2943 case L2CAP_CONF_QOS
:
2946 case L2CAP_CONF_RFC
:
2947 if (olen
== sizeof(rfc
))
2948 memcpy(&rfc
, (void *) val
, olen
);
2951 case L2CAP_CONF_FCS
:
2952 if (val
== L2CAP_FCS_NONE
)
2953 set_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
);
2956 case L2CAP_CONF_EFS
:
2958 if (olen
== sizeof(efs
))
2959 memcpy(&efs
, (void *) val
, olen
);
2962 case L2CAP_CONF_EWS
:
2964 return -ECONNREFUSED
;
2966 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2967 set_bit(CONF_EWS_RECV
, &chan
->conf_state
);
2968 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2969 chan
->remote_tx_win
= val
;
2976 result
= L2CAP_CONF_UNKNOWN
;
2977 *((u8
*) ptr
++) = type
;
2982 if (chan
->num_conf_rsp
|| chan
->num_conf_req
> 1)
2985 switch (chan
->mode
) {
2986 case L2CAP_MODE_STREAMING
:
2987 case L2CAP_MODE_ERTM
:
2988 if (!test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
)) {
2989 chan
->mode
= l2cap_select_mode(rfc
.mode
,
2990 chan
->conn
->feat_mask
);
2995 if (__l2cap_efs_supported(chan
))
2996 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2998 return -ECONNREFUSED
;
3001 if (chan
->mode
!= rfc
.mode
)
3002 return -ECONNREFUSED
;
3008 if (chan
->mode
!= rfc
.mode
) {
3009 result
= L2CAP_CONF_UNACCEPT
;
3010 rfc
.mode
= chan
->mode
;
3012 if (chan
->num_conf_rsp
== 1)
3013 return -ECONNREFUSED
;
3015 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3016 sizeof(rfc
), (unsigned long) &rfc
);
3019 if (result
== L2CAP_CONF_SUCCESS
) {
3020 /* Configure output options and let the other side know
3021 * which ones we don't like. */
3023 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
3024 result
= L2CAP_CONF_UNACCEPT
;
3027 set_bit(CONF_MTU_DONE
, &chan
->conf_state
);
3029 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->omtu
);
3032 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3033 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3034 efs
.stype
!= chan
->local_stype
) {
3036 result
= L2CAP_CONF_UNACCEPT
;
3038 if (chan
->num_conf_req
>= 1)
3039 return -ECONNREFUSED
;
3041 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3043 (unsigned long) &efs
);
3045 /* Send PENDING Conf Rsp */
3046 result
= L2CAP_CONF_PENDING
;
3047 set_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3052 case L2CAP_MODE_BASIC
:
3053 chan
->fcs
= L2CAP_FCS_NONE
;
3054 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3057 case L2CAP_MODE_ERTM
:
3058 if (!test_bit(CONF_EWS_RECV
, &chan
->conf_state
))
3059 chan
->remote_tx_win
= rfc
.txwin_size
;
3061 rfc
.txwin_size
= L2CAP_DEFAULT_TX_WINDOW
;
3063 chan
->remote_max_tx
= rfc
.max_transmit
;
3065 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3067 L2CAP_EXT_HDR_SIZE
-
3070 rfc
.max_pdu_size
= cpu_to_le16(size
);
3071 chan
->remote_mps
= size
;
3073 rfc
.retrans_timeout
=
3074 __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3075 rfc
.monitor_timeout
=
3076 __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3078 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3080 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3081 sizeof(rfc
), (unsigned long) &rfc
);
3083 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3084 chan
->remote_id
= efs
.id
;
3085 chan
->remote_stype
= efs
.stype
;
3086 chan
->remote_msdu
= le16_to_cpu(efs
.msdu
);
3087 chan
->remote_flush_to
=
3088 le32_to_cpu(efs
.flush_to
);
3089 chan
->remote_acc_lat
=
3090 le32_to_cpu(efs
.acc_lat
);
3091 chan
->remote_sdu_itime
=
3092 le32_to_cpu(efs
.sdu_itime
);
3093 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3094 sizeof(efs
), (unsigned long) &efs
);
3098 case L2CAP_MODE_STREAMING
:
3099 size
= min_t(u16
, le16_to_cpu(rfc
.max_pdu_size
),
3101 L2CAP_EXT_HDR_SIZE
-
3104 rfc
.max_pdu_size
= cpu_to_le16(size
);
3105 chan
->remote_mps
= size
;
3107 set_bit(CONF_MODE_DONE
, &chan
->conf_state
);
3109 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3110 sizeof(rfc
), (unsigned long) &rfc
);
3115 result
= L2CAP_CONF_UNACCEPT
;
3117 memset(&rfc
, 0, sizeof(rfc
));
3118 rfc
.mode
= chan
->mode
;
3121 if (result
== L2CAP_CONF_SUCCESS
)
3122 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3124 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3125 rsp
->result
= cpu_to_le16(result
);
3126 rsp
->flags
= __constant_cpu_to_le16(0);
3131 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
3133 struct l2cap_conf_req
*req
= data
;
3134 void *ptr
= req
->data
;
3137 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3138 struct l2cap_conf_efs efs
;
3140 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3142 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3143 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3146 case L2CAP_CONF_MTU
:
3147 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3148 *result
= L2CAP_CONF_UNACCEPT
;
3149 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3152 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3155 case L2CAP_CONF_FLUSH_TO
:
3156 chan
->flush_to
= val
;
3157 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3161 case L2CAP_CONF_RFC
:
3162 if (olen
== sizeof(rfc
))
3163 memcpy(&rfc
, (void *)val
, olen
);
3165 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3166 rfc
.mode
!= chan
->mode
)
3167 return -ECONNREFUSED
;
3171 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3172 sizeof(rfc
), (unsigned long) &rfc
);
3175 case L2CAP_CONF_EWS
:
3176 chan
->tx_win
= min_t(u16
, val
,
3177 L2CAP_DEFAULT_EXT_WINDOW
);
3178 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3182 case L2CAP_CONF_EFS
:
3183 if (olen
== sizeof(efs
))
3184 memcpy(&efs
, (void *)val
, olen
);
3186 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3187 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3188 efs
.stype
!= chan
->local_stype
)
3189 return -ECONNREFUSED
;
3191 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3192 sizeof(efs
), (unsigned long) &efs
);
3197 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3198 return -ECONNREFUSED
;
3200 chan
->mode
= rfc
.mode
;
3202 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3204 case L2CAP_MODE_ERTM
:
3205 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3206 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3207 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3209 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3210 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3211 chan
->local_sdu_itime
=
3212 le32_to_cpu(efs
.sdu_itime
);
3213 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3214 chan
->local_flush_to
=
3215 le32_to_cpu(efs
.flush_to
);
3219 case L2CAP_MODE_STREAMING
:
3220 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3224 req
->dcid
= cpu_to_le16(chan
->dcid
);
3225 req
->flags
= __constant_cpu_to_le16(0);
3230 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
3232 struct l2cap_conf_rsp
*rsp
= data
;
3233 void *ptr
= rsp
->data
;
3235 BT_DBG("chan %p", chan
);
3237 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3238 rsp
->result
= cpu_to_le16(result
);
3239 rsp
->flags
= cpu_to_le16(flags
);
3244 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3246 struct l2cap_conn_rsp rsp
;
3247 struct l2cap_conn
*conn
= chan
->conn
;
3250 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3251 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3252 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3253 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3254 l2cap_send_cmd(conn
, chan
->ident
,
3255 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3257 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3260 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3261 l2cap_build_conf_req(chan
, buf
), buf
);
3262 chan
->num_conf_req
++;
3265 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3269 struct l2cap_conf_rfc rfc
;
3271 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3273 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3276 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3277 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3280 case L2CAP_CONF_RFC
:
3281 if (olen
== sizeof(rfc
))
3282 memcpy(&rfc
, (void *)val
, olen
);
3287 /* Use sane default values in case a misbehaving remote device
3288 * did not send an RFC option.
3290 rfc
.mode
= chan
->mode
;
3291 rfc
.retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3292 rfc
.monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3293 rfc
.max_pdu_size
= cpu_to_le16(chan
->imtu
);
3295 BT_ERR("Expected RFC option was not found, using defaults");
3299 case L2CAP_MODE_ERTM
:
3300 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3301 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3302 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3304 case L2CAP_MODE_STREAMING
:
3305 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3309 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3311 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3313 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3316 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3317 cmd
->ident
== conn
->info_ident
) {
3318 cancel_delayed_work(&conn
->info_timer
);
3320 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3321 conn
->info_ident
= 0;
3323 l2cap_conn_start(conn
);
3329 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3331 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3332 struct l2cap_conn_rsp rsp
;
3333 struct l2cap_chan
*chan
= NULL
, *pchan
;
3334 struct sock
*parent
, *sk
= NULL
;
3335 int result
, status
= L2CAP_CS_NO_INFO
;
3337 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3338 __le16 psm
= req
->psm
;
3340 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3342 /* Check if we have socket listening on psm */
3343 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
3345 result
= L2CAP_CR_BAD_PSM
;
3351 mutex_lock(&conn
->chan_lock
);
3354 /* Check if the ACL is secure enough (if not SDP) */
3355 if (psm
!= __constant_cpu_to_le16(L2CAP_PSM_SDP
) &&
3356 !hci_conn_check_link_mode(conn
->hcon
)) {
3357 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3358 result
= L2CAP_CR_SEC_BLOCK
;
3362 result
= L2CAP_CR_NO_MEM
;
3364 /* Check if we already have channel with that dcid */
3365 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3368 chan
= pchan
->ops
->new_connection(pchan
);
3374 hci_conn_hold(conn
->hcon
);
3376 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3377 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3381 bt_accept_enqueue(parent
, sk
);
3383 __l2cap_chan_add(conn
, chan
);
3387 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3389 chan
->ident
= cmd
->ident
;
3391 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3392 if (l2cap_chan_check_security(chan
)) {
3393 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
)) {
3394 __l2cap_state_change(chan
, BT_CONNECT2
);
3395 result
= L2CAP_CR_PEND
;
3396 status
= L2CAP_CS_AUTHOR_PEND
;
3397 parent
->sk_data_ready(parent
, 0);
3399 __l2cap_state_change(chan
, BT_CONFIG
);
3400 result
= L2CAP_CR_SUCCESS
;
3401 status
= L2CAP_CS_NO_INFO
;
3404 __l2cap_state_change(chan
, BT_CONNECT2
);
3405 result
= L2CAP_CR_PEND
;
3406 status
= L2CAP_CS_AUTHEN_PEND
;
3409 __l2cap_state_change(chan
, BT_CONNECT2
);
3410 result
= L2CAP_CR_PEND
;
3411 status
= L2CAP_CS_NO_INFO
;
3415 release_sock(parent
);
3416 mutex_unlock(&conn
->chan_lock
);
3419 rsp
.scid
= cpu_to_le16(scid
);
3420 rsp
.dcid
= cpu_to_le16(dcid
);
3421 rsp
.result
= cpu_to_le16(result
);
3422 rsp
.status
= cpu_to_le16(status
);
3423 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3425 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3426 struct l2cap_info_req info
;
3427 info
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3429 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3430 conn
->info_ident
= l2cap_get_ident(conn
);
3432 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3434 l2cap_send_cmd(conn
, conn
->info_ident
,
3435 L2CAP_INFO_REQ
, sizeof(info
), &info
);
3438 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3439 result
== L2CAP_CR_SUCCESS
) {
3441 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3442 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3443 l2cap_build_conf_req(chan
, buf
), buf
);
3444 chan
->num_conf_req
++;
3450 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3452 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3453 u16 scid
, dcid
, result
, status
;
3454 struct l2cap_chan
*chan
;
3458 scid
= __le16_to_cpu(rsp
->scid
);
3459 dcid
= __le16_to_cpu(rsp
->dcid
);
3460 result
= __le16_to_cpu(rsp
->result
);
3461 status
= __le16_to_cpu(rsp
->status
);
3463 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3464 dcid
, scid
, result
, status
);
3466 mutex_lock(&conn
->chan_lock
);
3469 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3475 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3484 l2cap_chan_lock(chan
);
3487 case L2CAP_CR_SUCCESS
:
3488 l2cap_state_change(chan
, BT_CONFIG
);
3491 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3493 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3496 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3497 l2cap_build_conf_req(chan
, req
), req
);
3498 chan
->num_conf_req
++;
3502 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3506 l2cap_chan_del(chan
, ECONNREFUSED
);
3510 l2cap_chan_unlock(chan
);
3513 mutex_unlock(&conn
->chan_lock
);
3518 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3520 /* FCS is enabled only in ERTM or streaming mode, if one or both
3523 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3524 chan
->fcs
= L2CAP_FCS_NONE
;
3525 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3526 chan
->fcs
= L2CAP_FCS_CRC16
;
3529 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3531 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3534 struct l2cap_chan
*chan
;
3537 dcid
= __le16_to_cpu(req
->dcid
);
3538 flags
= __le16_to_cpu(req
->flags
);
3540 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3542 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3546 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3547 struct l2cap_cmd_rej_cid rej
;
3549 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3550 rej
.scid
= cpu_to_le16(chan
->scid
);
3551 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3553 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3558 /* Reject if config buffer is too small. */
3559 len
= cmd_len
- sizeof(*req
);
3560 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3561 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3562 l2cap_build_conf_rsp(chan
, rsp
,
3563 L2CAP_CONF_REJECT
, flags
), rsp
);
3568 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3569 chan
->conf_len
+= len
;
3571 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
3572 /* Incomplete config. Send empty response. */
3573 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3574 l2cap_build_conf_rsp(chan
, rsp
,
3575 L2CAP_CONF_SUCCESS
, flags
), rsp
);
3579 /* Complete config. */
3580 len
= l2cap_parse_conf_req(chan
, rsp
);
3582 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3586 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3587 chan
->num_conf_rsp
++;
3589 /* Reset config buffer. */
3592 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3595 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3596 set_default_fcs(chan
);
3598 if (chan
->mode
== L2CAP_MODE_ERTM
||
3599 chan
->mode
== L2CAP_MODE_STREAMING
)
3600 err
= l2cap_ertm_init(chan
);
3603 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3605 l2cap_chan_ready(chan
);
3610 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3612 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3613 l2cap_build_conf_req(chan
, buf
), buf
);
3614 chan
->num_conf_req
++;
3617 /* Got Conf Rsp PENDING from remote side and asume we sent
3618 Conf Rsp PENDING in the code above */
3619 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3620 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3622 /* check compatibility */
3624 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3625 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3627 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3628 l2cap_build_conf_rsp(chan
, rsp
,
3629 L2CAP_CONF_SUCCESS
, flags
), rsp
);
3633 l2cap_chan_unlock(chan
);
3637 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3639 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3640 u16 scid
, flags
, result
;
3641 struct l2cap_chan
*chan
;
3642 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3645 scid
= __le16_to_cpu(rsp
->scid
);
3646 flags
= __le16_to_cpu(rsp
->flags
);
3647 result
= __le16_to_cpu(rsp
->result
);
3649 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3652 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3657 case L2CAP_CONF_SUCCESS
:
3658 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3659 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3662 case L2CAP_CONF_PENDING
:
3663 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3665 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3668 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3671 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3675 /* check compatibility */
3677 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3678 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3680 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3681 l2cap_build_conf_rsp(chan
, buf
,
3682 L2CAP_CONF_SUCCESS
, 0x0000), buf
);
3686 case L2CAP_CONF_UNACCEPT
:
3687 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3690 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3691 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3695 /* throw out any old stored conf requests */
3696 result
= L2CAP_CONF_SUCCESS
;
3697 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3700 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3704 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3705 L2CAP_CONF_REQ
, len
, req
);
3706 chan
->num_conf_req
++;
3707 if (result
!= L2CAP_CONF_SUCCESS
)
3713 l2cap_chan_set_err(chan
, ECONNRESET
);
3715 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
3716 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3720 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
3723 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3725 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3726 set_default_fcs(chan
);
3728 if (chan
->mode
== L2CAP_MODE_ERTM
||
3729 chan
->mode
== L2CAP_MODE_STREAMING
)
3730 err
= l2cap_ertm_init(chan
);
3733 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3735 l2cap_chan_ready(chan
);
3739 l2cap_chan_unlock(chan
);
3743 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3745 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3746 struct l2cap_disconn_rsp rsp
;
3748 struct l2cap_chan
*chan
;
3751 scid
= __le16_to_cpu(req
->scid
);
3752 dcid
= __le16_to_cpu(req
->dcid
);
3754 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3756 mutex_lock(&conn
->chan_lock
);
3758 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
3760 mutex_unlock(&conn
->chan_lock
);
3764 l2cap_chan_lock(chan
);
3768 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3769 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3770 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3773 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3776 l2cap_chan_hold(chan
);
3777 l2cap_chan_del(chan
, ECONNRESET
);
3779 l2cap_chan_unlock(chan
);
3781 chan
->ops
->close(chan
);
3782 l2cap_chan_put(chan
);
3784 mutex_unlock(&conn
->chan_lock
);
3789 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3791 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3793 struct l2cap_chan
*chan
;
3795 scid
= __le16_to_cpu(rsp
->scid
);
3796 dcid
= __le16_to_cpu(rsp
->dcid
);
3798 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3800 mutex_lock(&conn
->chan_lock
);
3802 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3804 mutex_unlock(&conn
->chan_lock
);
3808 l2cap_chan_lock(chan
);
3810 l2cap_chan_hold(chan
);
3811 l2cap_chan_del(chan
, 0);
3813 l2cap_chan_unlock(chan
);
3815 chan
->ops
->close(chan
);
3816 l2cap_chan_put(chan
);
3818 mutex_unlock(&conn
->chan_lock
);
3823 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3825 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3828 type
= __le16_to_cpu(req
->type
);
3830 BT_DBG("type 0x%4.4x", type
);
3832 if (type
== L2CAP_IT_FEAT_MASK
) {
3834 u32 feat_mask
= l2cap_feat_mask
;
3835 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3836 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3837 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
3839 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3842 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3843 | L2CAP_FEAT_EXT_WINDOW
;
3845 put_unaligned_le32(feat_mask
, rsp
->data
);
3846 l2cap_send_cmd(conn
, cmd
->ident
,
3847 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3848 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3850 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3853 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3855 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3857 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3858 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
3859 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3860 l2cap_send_cmd(conn
, cmd
->ident
,
3861 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3863 struct l2cap_info_rsp rsp
;
3864 rsp
.type
= cpu_to_le16(type
);
3865 rsp
.result
= __constant_cpu_to_le16(L2CAP_IR_NOTSUPP
);
3866 l2cap_send_cmd(conn
, cmd
->ident
,
3867 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3873 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3875 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3878 type
= __le16_to_cpu(rsp
->type
);
3879 result
= __le16_to_cpu(rsp
->result
);
3881 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3883 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3884 if (cmd
->ident
!= conn
->info_ident
||
3885 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3888 cancel_delayed_work(&conn
->info_timer
);
3890 if (result
!= L2CAP_IR_SUCCESS
) {
3891 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3892 conn
->info_ident
= 0;
3894 l2cap_conn_start(conn
);
3900 case L2CAP_IT_FEAT_MASK
:
3901 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3903 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3904 struct l2cap_info_req req
;
3905 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3907 conn
->info_ident
= l2cap_get_ident(conn
);
3909 l2cap_send_cmd(conn
, conn
->info_ident
,
3910 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3912 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3913 conn
->info_ident
= 0;
3915 l2cap_conn_start(conn
);
3919 case L2CAP_IT_FIXED_CHAN
:
3920 conn
->fixed_chan_mask
= rsp
->data
[0];
3921 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3922 conn
->info_ident
= 0;
3924 l2cap_conn_start(conn
);
3931 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
3932 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3935 struct l2cap_create_chan_req
*req
= data
;
3936 struct l2cap_create_chan_rsp rsp
;
3939 if (cmd_len
!= sizeof(*req
))
3945 psm
= le16_to_cpu(req
->psm
);
3946 scid
= le16_to_cpu(req
->scid
);
3948 BT_DBG("psm %d, scid %d, amp_id %d", psm
, scid
, req
->amp_id
);
3950 /* Placeholder: Always reject */
3952 rsp
.scid
= cpu_to_le16(scid
);
3953 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
3954 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3956 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
/* An AMP Create Channel Response carries the same payload layout as a
 * regular Connect Response, so simply delegate to that handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					   struct l2cap_cmd_hdr *cmd,
					   void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3970 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
3971 u16 icid
, u16 result
)
3973 struct l2cap_move_chan_rsp rsp
;
3975 BT_DBG("icid %d, result %d", icid
, result
);
3977 rsp
.icid
= cpu_to_le16(icid
);
3978 rsp
.result
= cpu_to_le16(result
);
3980 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
3983 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
3984 struct l2cap_chan
*chan
, u16 icid
, u16 result
)
3986 struct l2cap_move_chan_cfm cfm
;
3989 BT_DBG("icid %d, result %d", icid
, result
);
3991 ident
= l2cap_get_ident(conn
);
3993 chan
->ident
= ident
;
3995 cfm
.icid
= cpu_to_le16(icid
);
3996 cfm
.result
= cpu_to_le16(result
);
3998 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
4001 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4004 struct l2cap_move_chan_cfm_rsp rsp
;
4006 BT_DBG("icid %d", icid
);
4008 rsp
.icid
= cpu_to_le16(icid
);
4009 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4012 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4013 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4015 struct l2cap_move_chan_req
*req
= data
;
4017 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4019 if (cmd_len
!= sizeof(*req
))
4022 icid
= le16_to_cpu(req
->icid
);
4024 BT_DBG("icid %d, dest_amp_id %d", icid
, req
->dest_amp_id
);
4029 /* Placeholder: Always refuse */
4030 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
4035 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
4036 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4038 struct l2cap_move_chan_rsp
*rsp
= data
;
4041 if (cmd_len
!= sizeof(*rsp
))
4044 icid
= le16_to_cpu(rsp
->icid
);
4045 result
= le16_to_cpu(rsp
->result
);
4047 BT_DBG("icid %d, result %d", icid
, result
);
4049 /* Placeholder: Always unconfirmed */
4050 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
4055 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
4056 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4058 struct l2cap_move_chan_cfm
*cfm
= data
;
4061 if (cmd_len
!= sizeof(*cfm
))
4064 icid
= le16_to_cpu(cfm
->icid
);
4065 result
= le16_to_cpu(cfm
->result
);
4067 BT_DBG("icid %d, result %d", icid
, result
);
4069 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
4074 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
4075 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4077 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
4080 if (cmd_len
!= sizeof(*rsp
))
4083 icid
= le16_to_cpu(rsp
->icid
);
4085 BT_DBG("icid %d", icid
);
4090 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
4095 if (min
> max
|| min
< 6 || max
> 3200)
4098 if (to_multiplier
< 10 || to_multiplier
> 3200)
4101 if (max
>= to_multiplier
* 8)
4104 max_latency
= (to_multiplier
* 8 / max
) - 1;
4105 if (latency
> 499 || latency
> max_latency
)
4111 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
4112 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4114 struct hci_conn
*hcon
= conn
->hcon
;
4115 struct l2cap_conn_param_update_req
*req
;
4116 struct l2cap_conn_param_update_rsp rsp
;
4117 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
4120 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
4123 cmd_len
= __le16_to_cpu(cmd
->len
);
4124 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
4127 req
= (struct l2cap_conn_param_update_req
*) data
;
4128 min
= __le16_to_cpu(req
->min
);
4129 max
= __le16_to_cpu(req
->max
);
4130 latency
= __le16_to_cpu(req
->latency
);
4131 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
4133 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4134 min
, max
, latency
, to_multiplier
);
4136 memset(&rsp
, 0, sizeof(rsp
));
4138 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
4140 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
4142 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
4144 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
4148 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
4153 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
4154 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
4158 switch (cmd
->code
) {
4159 case L2CAP_COMMAND_REJ
:
4160 l2cap_command_rej(conn
, cmd
, data
);
4163 case L2CAP_CONN_REQ
:
4164 err
= l2cap_connect_req(conn
, cmd
, data
);
4167 case L2CAP_CONN_RSP
:
4168 err
= l2cap_connect_rsp(conn
, cmd
, data
);
4171 case L2CAP_CONF_REQ
:
4172 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
4175 case L2CAP_CONF_RSP
:
4176 err
= l2cap_config_rsp(conn
, cmd
, data
);
4179 case L2CAP_DISCONN_REQ
:
4180 err
= l2cap_disconnect_req(conn
, cmd
, data
);
4183 case L2CAP_DISCONN_RSP
:
4184 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
4187 case L2CAP_ECHO_REQ
:
4188 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
4191 case L2CAP_ECHO_RSP
:
4194 case L2CAP_INFO_REQ
:
4195 err
= l2cap_information_req(conn
, cmd
, data
);
4198 case L2CAP_INFO_RSP
:
4199 err
= l2cap_information_rsp(conn
, cmd
, data
);
4202 case L2CAP_CREATE_CHAN_REQ
:
4203 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
4206 case L2CAP_CREATE_CHAN_RSP
:
4207 err
= l2cap_create_channel_rsp(conn
, cmd
, data
);
4210 case L2CAP_MOVE_CHAN_REQ
:
4211 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
4214 case L2CAP_MOVE_CHAN_RSP
:
4215 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
4218 case L2CAP_MOVE_CHAN_CFM
:
4219 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
4222 case L2CAP_MOVE_CHAN_CFM_RSP
:
4223 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
4227 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
4235 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
4236 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4238 switch (cmd
->code
) {
4239 case L2CAP_COMMAND_REJ
:
4242 case L2CAP_CONN_PARAM_UPDATE_REQ
:
4243 return l2cap_conn_param_update_req(conn
, cmd
, data
);
4245 case L2CAP_CONN_PARAM_UPDATE_RSP
:
4249 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
4254 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
4255 struct sk_buff
*skb
)
4257 u8
*data
= skb
->data
;
4259 struct l2cap_cmd_hdr cmd
;
4262 l2cap_raw_recv(conn
, skb
);
4264 while (len
>= L2CAP_CMD_HDR_SIZE
) {
4266 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
4267 data
+= L2CAP_CMD_HDR_SIZE
;
4268 len
-= L2CAP_CMD_HDR_SIZE
;
4270 cmd_len
= le16_to_cpu(cmd
.len
);
4272 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
4274 if (cmd_len
> len
|| !cmd
.ident
) {
4275 BT_DBG("corrupted command");
4279 if (conn
->hcon
->type
== LE_LINK
)
4280 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
4282 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
4285 struct l2cap_cmd_rej_unk rej
;
4287 BT_ERR("Wrong link type (%d)", err
);
4289 /* FIXME: Map err to a valid reason */
4290 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
4291 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4301 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4303 u16 our_fcs
, rcv_fcs
;
4306 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
4307 hdr_size
= L2CAP_EXT_HDR_SIZE
;
4309 hdr_size
= L2CAP_ENH_HDR_SIZE
;
4311 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
4312 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
4313 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
4314 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
4316 if (our_fcs
!= rcv_fcs
)
4322 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
4324 struct l2cap_ctrl control
;
4326 BT_DBG("chan %p", chan
);
4328 memset(&control
, 0, sizeof(control
));
4331 control
.reqseq
= chan
->buffer_seq
;
4332 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4334 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4335 control
.super
= L2CAP_SUPER_RNR
;
4336 l2cap_send_sframe(chan
, &control
);
4339 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4340 chan
->unacked_frames
> 0)
4341 __set_retrans_timer(chan
);
4343 /* Send pending iframes */
4344 l2cap_ertm_send(chan
);
4346 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
4347 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
4348 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4351 control
.super
= L2CAP_SUPER_RR
;
4352 l2cap_send_sframe(chan
, &control
);
4356 static void append_skb_frag(struct sk_buff
*skb
,
4357 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
4359 /* skb->len reflects data in skb as well as all fragments
4360 * skb->data_len reflects only data in fragments
4362 if (!skb_has_frag_list(skb
))
4363 skb_shinfo(skb
)->frag_list
= new_frag
;
4365 new_frag
->next
= NULL
;
4367 (*last_frag
)->next
= new_frag
;
4368 *last_frag
= new_frag
;
4370 skb
->len
+= new_frag
->len
;
4371 skb
->data_len
+= new_frag
->len
;
4372 skb
->truesize
+= new_frag
->truesize
;
4375 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
4376 struct l2cap_ctrl
*control
)
4380 switch (control
->sar
) {
4381 case L2CAP_SAR_UNSEGMENTED
:
4385 err
= chan
->ops
->recv(chan
, skb
);
4388 case L2CAP_SAR_START
:
4392 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
4393 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
4395 if (chan
->sdu_len
> chan
->imtu
) {
4400 if (skb
->len
>= chan
->sdu_len
)
4404 chan
->sdu_last_frag
= skb
;
4410 case L2CAP_SAR_CONTINUE
:
4414 append_skb_frag(chan
->sdu
, skb
,
4415 &chan
->sdu_last_frag
);
4418 if (chan
->sdu
->len
>= chan
->sdu_len
)
4428 append_skb_frag(chan
->sdu
, skb
,
4429 &chan
->sdu_last_frag
);
4432 if (chan
->sdu
->len
!= chan
->sdu_len
)
4435 err
= chan
->ops
->recv(chan
, chan
->sdu
);
4438 /* Reassembly complete */
4440 chan
->sdu_last_frag
= NULL
;
4448 kfree_skb(chan
->sdu
);
4450 chan
->sdu_last_frag
= NULL
;
4457 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
4461 if (chan
->mode
!= L2CAP_MODE_ERTM
)
4464 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
4465 l2cap_tx(chan
, NULL
, NULL
, event
);
4468 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
4471 /* Pass sequential frames to l2cap_reassemble_sdu()
4472 * until a gap is encountered.
4475 BT_DBG("chan %p", chan
);
4477 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4478 struct sk_buff
*skb
;
4479 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4480 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
4482 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
4487 skb_unlink(skb
, &chan
->srej_q
);
4488 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
4489 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
4494 if (skb_queue_empty(&chan
->srej_q
)) {
4495 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
4496 l2cap_send_ack(chan
);
4502 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
4503 struct l2cap_ctrl
*control
)
4505 struct sk_buff
*skb
;
4507 BT_DBG("chan %p, control %p", chan
, control
);
4509 if (control
->reqseq
== chan
->next_tx_seq
) {
4510 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4511 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4515 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4518 BT_DBG("Seq %d not available for retransmission",
4523 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4524 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4525 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4529 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4531 if (control
->poll
) {
4532 l2cap_pass_to_tx(chan
, control
);
4534 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4535 l2cap_retransmit(chan
, control
);
4536 l2cap_ertm_send(chan
);
4538 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4539 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4540 chan
->srej_save_reqseq
= control
->reqseq
;
4543 l2cap_pass_to_tx_fbit(chan
, control
);
4545 if (control
->final
) {
4546 if (chan
->srej_save_reqseq
!= control
->reqseq
||
4547 !test_and_clear_bit(CONN_SREJ_ACT
,
4549 l2cap_retransmit(chan
, control
);
4551 l2cap_retransmit(chan
, control
);
4552 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4553 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4554 chan
->srej_save_reqseq
= control
->reqseq
;
4560 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
4561 struct l2cap_ctrl
*control
)
4563 struct sk_buff
*skb
;
4565 BT_DBG("chan %p, control %p", chan
, control
);
4567 if (control
->reqseq
== chan
->next_tx_seq
) {
4568 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4569 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4573 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4575 if (chan
->max_tx
&& skb
&&
4576 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4577 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4578 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4582 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4584 l2cap_pass_to_tx(chan
, control
);
4586 if (control
->final
) {
4587 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4588 l2cap_retransmit_all(chan
, control
);
4590 l2cap_retransmit_all(chan
, control
);
4591 l2cap_ertm_send(chan
);
4592 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
4593 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4597 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
4599 BT_DBG("chan %p, txseq %d", chan
, txseq
);
4601 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
4602 chan
->expected_tx_seq
);
4604 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
4605 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4607 /* See notes below regarding "double poll" and
4610 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4611 BT_DBG("Invalid/Ignore - after SREJ");
4612 return L2CAP_TXSEQ_INVALID_IGNORE
;
4614 BT_DBG("Invalid - in window after SREJ sent");
4615 return L2CAP_TXSEQ_INVALID
;
4619 if (chan
->srej_list
.head
== txseq
) {
4620 BT_DBG("Expected SREJ");
4621 return L2CAP_TXSEQ_EXPECTED_SREJ
;
4624 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
4625 BT_DBG("Duplicate SREJ - txseq already stored");
4626 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
4629 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
4630 BT_DBG("Unexpected SREJ - not requested");
4631 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
4635 if (chan
->expected_tx_seq
== txseq
) {
4636 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4638 BT_DBG("Invalid - txseq outside tx window");
4639 return L2CAP_TXSEQ_INVALID
;
4642 return L2CAP_TXSEQ_EXPECTED
;
4646 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
4647 __seq_offset(chan
, chan
->expected_tx_seq
,
4648 chan
->last_acked_seq
)){
4649 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4650 return L2CAP_TXSEQ_DUPLICATE
;
4653 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
4654 /* A source of invalid packets is a "double poll" condition,
4655 * where delays cause us to send multiple poll packets. If
4656 * the remote stack receives and processes both polls,
4657 * sequence numbers can wrap around in such a way that a
4658 * resent frame has a sequence number that looks like new data
4659 * with a sequence gap. This would trigger an erroneous SREJ
4662 * Fortunately, this is impossible with a tx window that's
4663 * less than half of the maximum sequence number, which allows
4664 * invalid frames to be safely ignored.
4666 * With tx window sizes greater than half of the tx window
4667 * maximum, the frame is invalid and cannot be ignored. This
4668 * causes a disconnect.
4671 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4672 BT_DBG("Invalid/Ignore - txseq outside tx window");
4673 return L2CAP_TXSEQ_INVALID_IGNORE
;
4675 BT_DBG("Invalid - txseq outside tx window");
4676 return L2CAP_TXSEQ_INVALID
;
4679 BT_DBG("Unexpected - txseq indicates missing frames");
4680 return L2CAP_TXSEQ_UNEXPECTED
;
4684 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
4685 struct l2cap_ctrl
*control
,
4686 struct sk_buff
*skb
, u8 event
)
4689 bool skb_in_use
= 0;
4691 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4695 case L2CAP_EV_RECV_IFRAME
:
4696 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
4697 case L2CAP_TXSEQ_EXPECTED
:
4698 l2cap_pass_to_tx(chan
, control
);
4700 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4701 BT_DBG("Busy, discarding expected seq %d",
4706 chan
->expected_tx_seq
= __next_seq(chan
,
4709 chan
->buffer_seq
= chan
->expected_tx_seq
;
4712 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
4716 if (control
->final
) {
4717 if (!test_and_clear_bit(CONN_REJ_ACT
,
4718 &chan
->conn_state
)) {
4720 l2cap_retransmit_all(chan
, control
);
4721 l2cap_ertm_send(chan
);
4725 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
4726 l2cap_send_ack(chan
);
4728 case L2CAP_TXSEQ_UNEXPECTED
:
4729 l2cap_pass_to_tx(chan
, control
);
4731 /* Can't issue SREJ frames in the local busy state.
4732 * Drop this frame, it will be seen as missing
4733 * when local busy is exited.
4735 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4736 BT_DBG("Busy, discarding unexpected seq %d",
4741 /* There was a gap in the sequence, so an SREJ
4742 * must be sent for each missing frame. The
4743 * current frame is stored for later use.
4745 skb_queue_tail(&chan
->srej_q
, skb
);
4747 BT_DBG("Queued %p (queue len %d)", skb
,
4748 skb_queue_len(&chan
->srej_q
));
4750 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4751 l2cap_seq_list_clear(&chan
->srej_list
);
4752 l2cap_send_srej(chan
, control
->txseq
);
4754 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
4756 case L2CAP_TXSEQ_DUPLICATE
:
4757 l2cap_pass_to_tx(chan
, control
);
4759 case L2CAP_TXSEQ_INVALID_IGNORE
:
4761 case L2CAP_TXSEQ_INVALID
:
4763 l2cap_send_disconn_req(chan
->conn
, chan
,
4768 case L2CAP_EV_RECV_RR
:
4769 l2cap_pass_to_tx(chan
, control
);
4770 if (control
->final
) {
4771 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4773 if (!test_and_clear_bit(CONN_REJ_ACT
,
4774 &chan
->conn_state
)) {
4776 l2cap_retransmit_all(chan
, control
);
4779 l2cap_ertm_send(chan
);
4780 } else if (control
->poll
) {
4781 l2cap_send_i_or_rr_or_rnr(chan
);
4783 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4784 &chan
->conn_state
) &&
4785 chan
->unacked_frames
)
4786 __set_retrans_timer(chan
);
4788 l2cap_ertm_send(chan
);
4791 case L2CAP_EV_RECV_RNR
:
4792 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4793 l2cap_pass_to_tx(chan
, control
);
4794 if (control
&& control
->poll
) {
4795 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4796 l2cap_send_rr_or_rnr(chan
, 0);
4798 __clear_retrans_timer(chan
);
4799 l2cap_seq_list_clear(&chan
->retrans_list
);
4801 case L2CAP_EV_RECV_REJ
:
4802 l2cap_handle_rej(chan
, control
);
4804 case L2CAP_EV_RECV_SREJ
:
4805 l2cap_handle_srej(chan
, control
);
4811 if (skb
&& !skb_in_use
) {
4812 BT_DBG("Freeing %p", skb
);
4819 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
4820 struct l2cap_ctrl
*control
,
4821 struct sk_buff
*skb
, u8 event
)
4824 u16 txseq
= control
->txseq
;
4825 bool skb_in_use
= 0;
4827 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4831 case L2CAP_EV_RECV_IFRAME
:
4832 switch (l2cap_classify_txseq(chan
, txseq
)) {
4833 case L2CAP_TXSEQ_EXPECTED
:
4834 /* Keep frame for reassembly later */
4835 l2cap_pass_to_tx(chan
, control
);
4836 skb_queue_tail(&chan
->srej_q
, skb
);
4838 BT_DBG("Queued %p (queue len %d)", skb
,
4839 skb_queue_len(&chan
->srej_q
));
4841 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
4843 case L2CAP_TXSEQ_EXPECTED_SREJ
:
4844 l2cap_seq_list_pop(&chan
->srej_list
);
4846 l2cap_pass_to_tx(chan
, control
);
4847 skb_queue_tail(&chan
->srej_q
, skb
);
4849 BT_DBG("Queued %p (queue len %d)", skb
,
4850 skb_queue_len(&chan
->srej_q
));
4852 err
= l2cap_rx_queued_iframes(chan
);
4857 case L2CAP_TXSEQ_UNEXPECTED
:
4858 /* Got a frame that can't be reassembled yet.
4859 * Save it for later, and send SREJs to cover
4860 * the missing frames.
4862 skb_queue_tail(&chan
->srej_q
, skb
);
4864 BT_DBG("Queued %p (queue len %d)", skb
,
4865 skb_queue_len(&chan
->srej_q
));
4867 l2cap_pass_to_tx(chan
, control
);
4868 l2cap_send_srej(chan
, control
->txseq
);
4870 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
4871 /* This frame was requested with an SREJ, but
4872 * some expected retransmitted frames are
4873 * missing. Request retransmission of missing
4876 skb_queue_tail(&chan
->srej_q
, skb
);
4878 BT_DBG("Queued %p (queue len %d)", skb
,
4879 skb_queue_len(&chan
->srej_q
));
4881 l2cap_pass_to_tx(chan
, control
);
4882 l2cap_send_srej_list(chan
, control
->txseq
);
4884 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
4885 /* We've already queued this frame. Drop this copy. */
4886 l2cap_pass_to_tx(chan
, control
);
4888 case L2CAP_TXSEQ_DUPLICATE
:
4889 /* Expecting a later sequence number, so this frame
4890 * was already received. Ignore it completely.
4893 case L2CAP_TXSEQ_INVALID_IGNORE
:
4895 case L2CAP_TXSEQ_INVALID
:
4897 l2cap_send_disconn_req(chan
->conn
, chan
,
4902 case L2CAP_EV_RECV_RR
:
4903 l2cap_pass_to_tx(chan
, control
);
4904 if (control
->final
) {
4905 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4907 if (!test_and_clear_bit(CONN_REJ_ACT
,
4908 &chan
->conn_state
)) {
4910 l2cap_retransmit_all(chan
, control
);
4913 l2cap_ertm_send(chan
);
4914 } else if (control
->poll
) {
4915 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4916 &chan
->conn_state
) &&
4917 chan
->unacked_frames
) {
4918 __set_retrans_timer(chan
);
4921 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4922 l2cap_send_srej_tail(chan
);
4924 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4925 &chan
->conn_state
) &&
4926 chan
->unacked_frames
)
4927 __set_retrans_timer(chan
);
4929 l2cap_send_ack(chan
);
4932 case L2CAP_EV_RECV_RNR
:
4933 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4934 l2cap_pass_to_tx(chan
, control
);
4935 if (control
->poll
) {
4936 l2cap_send_srej_tail(chan
);
4938 struct l2cap_ctrl rr_control
;
4939 memset(&rr_control
, 0, sizeof(rr_control
));
4940 rr_control
.sframe
= 1;
4941 rr_control
.super
= L2CAP_SUPER_RR
;
4942 rr_control
.reqseq
= chan
->buffer_seq
;
4943 l2cap_send_sframe(chan
, &rr_control
);
4947 case L2CAP_EV_RECV_REJ
:
4948 l2cap_handle_rej(chan
, control
);
4950 case L2CAP_EV_RECV_SREJ
:
4951 l2cap_handle_srej(chan
, control
);
4955 if (skb
&& !skb_in_use
) {
4956 BT_DBG("Freeing %p", skb
);
4963 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
4965 /* Make sure reqseq is for a packet that has been sent but not acked */
4968 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
4969 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
4972 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
4973 struct sk_buff
*skb
, u8 event
)
4977 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
4978 control
, skb
, event
, chan
->rx_state
);
4980 if (__valid_reqseq(chan
, control
->reqseq
)) {
4981 switch (chan
->rx_state
) {
4982 case L2CAP_RX_STATE_RECV
:
4983 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
4985 case L2CAP_RX_STATE_SREJ_SENT
:
4986 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
4994 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
4995 control
->reqseq
, chan
->next_tx_seq
,
4996 chan
->expected_ack_seq
);
4997 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5003 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5004 struct sk_buff
*skb
)
5008 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
5011 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
5012 L2CAP_TXSEQ_EXPECTED
) {
5013 l2cap_pass_to_tx(chan
, control
);
5015 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
5016 __next_seq(chan
, chan
->buffer_seq
));
5018 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5020 l2cap_reassemble_sdu(chan
, skb
, control
);
5023 kfree_skb(chan
->sdu
);
5026 chan
->sdu_last_frag
= NULL
;
5030 BT_DBG("Freeing %p", skb
);
5035 chan
->last_acked_seq
= control
->txseq
;
5036 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
5041 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5043 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
5047 __unpack_control(chan
, skb
);
5052 * We can just drop the corrupted I-frame here.
5053 * Receiver will miss it and start proper recovery
5054 * procedures and ask for retransmission.
5056 if (l2cap_check_fcs(chan
, skb
))
5059 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
5060 len
-= L2CAP_SDULEN_SIZE
;
5062 if (chan
->fcs
== L2CAP_FCS_CRC16
)
5063 len
-= L2CAP_FCS_SIZE
;
5065 if (len
> chan
->mps
) {
5066 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5070 if (!control
->sframe
) {
5073 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5074 control
->sar
, control
->reqseq
, control
->final
,
5077 /* Validate F-bit - F=0 always valid, F=1 only
5078 * valid in TX WAIT_F
5080 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
5083 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
5084 event
= L2CAP_EV_RECV_IFRAME
;
5085 err
= l2cap_rx(chan
, control
, skb
, event
);
5087 err
= l2cap_stream_rx(chan
, control
, skb
);
5091 l2cap_send_disconn_req(chan
->conn
, chan
,
5094 const u8 rx_func_to_event
[4] = {
5095 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
5096 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
5099 /* Only I-frames are expected in streaming mode */
5100 if (chan
->mode
== L2CAP_MODE_STREAMING
)
5103 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5104 control
->reqseq
, control
->final
, control
->poll
,
5109 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5113 /* Validate F and P bits */
5114 if (control
->final
&& (control
->poll
||
5115 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
5118 event
= rx_func_to_event
[control
->super
];
5119 if (l2cap_rx(chan
, control
, skb
, event
))
5120 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5130 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
5132 struct l2cap_chan
*chan
;
5134 chan
= l2cap_get_chan_by_scid(conn
, cid
);
5136 if (cid
== L2CAP_CID_A2MP
) {
5137 chan
= a2mp_channel_create(conn
, skb
);
5143 l2cap_chan_lock(chan
);
5145 BT_DBG("unknown cid 0x%4.4x", cid
);
5146 /* Drop packet and return */
5152 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5154 if (chan
->state
!= BT_CONNECTED
)
5157 switch (chan
->mode
) {
5158 case L2CAP_MODE_BASIC
:
5159 /* If socket recv buffers overflows we drop data here
5160 * which is *bad* because L2CAP has to be reliable.
5161 * But we don't have any other choice. L2CAP doesn't
5162 * provide flow control mechanism. */
5164 if (chan
->imtu
< skb
->len
)
5167 if (!chan
->ops
->recv(chan
, skb
))
5171 case L2CAP_MODE_ERTM
:
5172 case L2CAP_MODE_STREAMING
:
5173 l2cap_data_rcv(chan
, skb
);
5177 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
5185 l2cap_chan_unlock(chan
);
5190 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
5192 struct l2cap_chan
*chan
;
5194 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
5198 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5200 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5203 if (chan
->imtu
< skb
->len
)
5206 if (!chan
->ops
->recv(chan
, skb
))
5215 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
5216 struct sk_buff
*skb
)
5218 struct l2cap_chan
*chan
;
5220 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
5224 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5226 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5229 if (chan
->imtu
< skb
->len
)
5232 if (!chan
->ops
->recv(chan
, skb
))
5241 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
5243 struct l2cap_hdr
*lh
= (void *) skb
->data
;
5247 skb_pull(skb
, L2CAP_HDR_SIZE
);
5248 cid
= __le16_to_cpu(lh
->cid
);
5249 len
= __le16_to_cpu(lh
->len
);
5251 if (len
!= skb
->len
) {
5256 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
5259 case L2CAP_CID_LE_SIGNALING
:
5260 case L2CAP_CID_SIGNALING
:
5261 l2cap_sig_channel(conn
, skb
);
5264 case L2CAP_CID_CONN_LESS
:
5265 psm
= get_unaligned((__le16
*) skb
->data
);
5266 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
5267 l2cap_conless_channel(conn
, psm
, skb
);
5270 case L2CAP_CID_LE_DATA
:
5271 l2cap_att_channel(conn
, cid
, skb
);
5275 if (smp_sig_channel(conn
, skb
))
5276 l2cap_conn_del(conn
->hcon
, EACCES
);
5280 l2cap_data_channel(conn
, cid
, skb
);
5285 /* ---- L2CAP interface with lower layer (HCI) ---- */
5287 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
5289 int exact
= 0, lm1
= 0, lm2
= 0;
5290 struct l2cap_chan
*c
;
5292 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
5294 /* Find listening sockets and check their link_mode */
5295 read_lock(&chan_list_lock
);
5296 list_for_each_entry(c
, &chan_list
, global_l
) {
5297 struct sock
*sk
= c
->sk
;
5299 if (c
->state
!= BT_LISTEN
)
5302 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
5303 lm1
|= HCI_LM_ACCEPT
;
5304 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5305 lm1
|= HCI_LM_MASTER
;
5307 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
5308 lm2
|= HCI_LM_ACCEPT
;
5309 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5310 lm2
|= HCI_LM_MASTER
;
5313 read_unlock(&chan_list_lock
);
5315 return exact
? lm1
: lm2
;
5318 int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
5320 struct l2cap_conn
*conn
;
5322 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
5325 conn
= l2cap_conn_add(hcon
, status
);
5327 l2cap_conn_ready(conn
);
5329 l2cap_conn_del(hcon
, bt_to_errno(status
));
5334 int l2cap_disconn_ind(struct hci_conn
*hcon
)
5336 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5338 BT_DBG("hcon %p", hcon
);
5341 return HCI_ERROR_REMOTE_USER_TERM
;
5342 return conn
->disc_reason
;
5345 int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
5347 BT_DBG("hcon %p reason %d", hcon
, reason
);
5349 l2cap_conn_del(hcon
, bt_to_errno(reason
));
5353 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
5355 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
5358 if (encrypt
== 0x00) {
5359 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
5360 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
5361 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
5362 l2cap_chan_close(chan
, ECONNREFUSED
);
5364 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
5365 __clear_chan_timer(chan
);
5369 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
5371 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5372 struct l2cap_chan
*chan
;
5377 BT_DBG("conn %p", conn
);
5379 if (hcon
->type
== LE_LINK
) {
5380 if (!status
&& encrypt
)
5381 smp_distribute_keys(conn
, 0);
5382 cancel_delayed_work(&conn
->security_timer
);
5385 mutex_lock(&conn
->chan_lock
);
5387 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
5388 l2cap_chan_lock(chan
);
5390 BT_DBG("chan->scid %d", chan
->scid
);
5392 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
5393 if (!status
&& encrypt
) {
5394 chan
->sec_level
= hcon
->sec_level
;
5395 l2cap_chan_ready(chan
);
5398 l2cap_chan_unlock(chan
);
5402 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
5403 l2cap_chan_unlock(chan
);
5407 if (!status
&& (chan
->state
== BT_CONNECTED
||
5408 chan
->state
== BT_CONFIG
)) {
5409 struct sock
*sk
= chan
->sk
;
5411 clear_bit(BT_SK_SUSPEND
, &bt_sk(sk
)->flags
);
5412 sk
->sk_state_change(sk
);
5414 l2cap_check_encryption(chan
, encrypt
);
5415 l2cap_chan_unlock(chan
);
5419 if (chan
->state
== BT_CONNECT
) {
5421 l2cap_send_conn_req(chan
);
5423 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5425 } else if (chan
->state
== BT_CONNECT2
) {
5426 struct sock
*sk
= chan
->sk
;
5427 struct l2cap_conn_rsp rsp
;
5433 if (test_bit(BT_SK_DEFER_SETUP
,
5434 &bt_sk(sk
)->flags
)) {
5435 struct sock
*parent
= bt_sk(sk
)->parent
;
5436 res
= L2CAP_CR_PEND
;
5437 stat
= L2CAP_CS_AUTHOR_PEND
;
5439 parent
->sk_data_ready(parent
, 0);
5441 __l2cap_state_change(chan
, BT_CONFIG
);
5442 res
= L2CAP_CR_SUCCESS
;
5443 stat
= L2CAP_CS_NO_INFO
;
5446 __l2cap_state_change(chan
, BT_DISCONN
);
5447 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5448 res
= L2CAP_CR_SEC_BLOCK
;
5449 stat
= L2CAP_CS_NO_INFO
;
5454 rsp
.scid
= cpu_to_le16(chan
->dcid
);
5455 rsp
.dcid
= cpu_to_le16(chan
->scid
);
5456 rsp
.result
= cpu_to_le16(res
);
5457 rsp
.status
= cpu_to_le16(stat
);
5458 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
5461 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
5462 res
== L2CAP_CR_SUCCESS
) {
5464 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
5465 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
5467 l2cap_build_conf_req(chan
, buf
),
5469 chan
->num_conf_req
++;
5473 l2cap_chan_unlock(chan
);
5476 mutex_unlock(&conn
->chan_lock
);
5481 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
5483 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5486 conn
= l2cap_conn_add(hcon
, 0);
5491 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
5493 if (!(flags
& ACL_CONT
)) {
5494 struct l2cap_hdr
*hdr
;
5498 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
5499 kfree_skb(conn
->rx_skb
);
5500 conn
->rx_skb
= NULL
;
5502 l2cap_conn_unreliable(conn
, ECOMM
);
5505 /* Start fragment always begin with Basic L2CAP header */
5506 if (skb
->len
< L2CAP_HDR_SIZE
) {
5507 BT_ERR("Frame is too short (len %d)", skb
->len
);
5508 l2cap_conn_unreliable(conn
, ECOMM
);
5512 hdr
= (struct l2cap_hdr
*) skb
->data
;
5513 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
5515 if (len
== skb
->len
) {
5516 /* Complete frame received */
5517 l2cap_recv_frame(conn
, skb
);
5521 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
5523 if (skb
->len
> len
) {
5524 BT_ERR("Frame is too long (len %d, expected len %d)",
5526 l2cap_conn_unreliable(conn
, ECOMM
);
5530 /* Allocate skb for the complete frame (with header) */
5531 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
5535 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5537 conn
->rx_len
= len
- skb
->len
;
5539 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
5541 if (!conn
->rx_len
) {
5542 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
5543 l2cap_conn_unreliable(conn
, ECOMM
);
5547 if (skb
->len
> conn
->rx_len
) {
5548 BT_ERR("Fragment is too long (len %d, expected %d)",
5549 skb
->len
, conn
->rx_len
);
5550 kfree_skb(conn
->rx_skb
);
5551 conn
->rx_skb
= NULL
;
5553 l2cap_conn_unreliable(conn
, ECOMM
);
5557 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5559 conn
->rx_len
-= skb
->len
;
5561 if (!conn
->rx_len
) {
5562 /* Complete frame received */
5563 l2cap_recv_frame(conn
, conn
->rx_skb
);
5564 conn
->rx_skb
= NULL
;
5573 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
5575 struct l2cap_chan
*c
;
5577 read_lock(&chan_list_lock
);
5579 list_for_each_entry(c
, &chan_list
, global_l
) {
5580 struct sock
*sk
= c
->sk
;
5582 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5583 batostr(&bt_sk(sk
)->src
),
5584 batostr(&bt_sk(sk
)->dst
),
5585 c
->state
, __le16_to_cpu(c
->psm
),
5586 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
5587 c
->sec_level
, c
->mode
);
5590 read_unlock(&chan_list_lock
);
5595 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
5597 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
5600 static const struct file_operations l2cap_debugfs_fops
= {
5601 .open
= l2cap_debugfs_open
,
5603 .llseek
= seq_lseek
,
5604 .release
= single_release
,
5607 static struct dentry
*l2cap_debugfs
;
5609 int __init
l2cap_init(void)
5613 err
= l2cap_init_sockets();
5618 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
5619 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
5621 BT_ERR("Failed to create L2CAP debug file");
5627 void l2cap_exit(void)
5629 debugfs_remove(l2cap_debugfs
);
5630 l2cap_cleanup_sockets();
5633 module_param(disable_ertm
, bool, 0644);
5634 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");