2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
44 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
45 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
47 static LIST_HEAD(chan_list
);
48 static DEFINE_RWLOCK(chan_list_lock
);
50 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
51 u8 code
, u8 ident
, u16 dlen
, void *data
);
52 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
54 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
55 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
56 struct l2cap_chan
*chan
, int err
);
58 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
59 struct sk_buff_head
*skbs
, u8 event
);
61 /* ---- L2CAP channels ---- */
63 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
67 list_for_each_entry(c
, &conn
->chan_l
, list
) {
74 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
78 list_for_each_entry(c
, &conn
->chan_l
, list
) {
85 /* Find channel with given SCID.
86 * Returns locked channel. */
87 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
91 mutex_lock(&conn
->chan_lock
);
92 c
= __l2cap_get_chan_by_scid(conn
, cid
);
95 mutex_unlock(&conn
->chan_lock
);
100 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
102 struct l2cap_chan
*c
;
104 list_for_each_entry(c
, &conn
->chan_l
, list
) {
105 if (c
->ident
== ident
)
111 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
113 struct l2cap_chan
*c
;
115 list_for_each_entry(c
, &chan_list
, global_l
) {
116 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
122 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
126 write_lock(&chan_list_lock
);
128 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
141 for (p
= 0x1001; p
< 0x1100; p
+= 2)
142 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
143 chan
->psm
= cpu_to_le16(p
);
144 chan
->sport
= cpu_to_le16(p
);
151 write_unlock(&chan_list_lock
);
155 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
157 write_lock(&chan_list_lock
);
161 write_unlock(&chan_list_lock
);
166 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
168 u16 cid
= L2CAP_CID_DYN_START
;
170 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
171 if (!__l2cap_get_chan_by_scid(conn
, cid
))
178 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
180 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
181 state_to_string(state
));
184 chan
->ops
->state_change(chan
, state
);
187 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
189 struct sock
*sk
= chan
->sk
;
192 __l2cap_state_change(chan
, state
);
196 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
198 struct sock
*sk
= chan
->sk
;
203 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
205 struct sock
*sk
= chan
->sk
;
208 __l2cap_chan_set_err(chan
, err
);
212 static void __set_retrans_timer(struct l2cap_chan
*chan
)
214 if (!delayed_work_pending(&chan
->monitor_timer
) &&
215 chan
->retrans_timeout
) {
216 l2cap_set_timer(chan
, &chan
->retrans_timer
,
217 msecs_to_jiffies(chan
->retrans_timeout
));
221 static void __set_monitor_timer(struct l2cap_chan
*chan
)
223 __clear_retrans_timer(chan
);
224 if (chan
->monitor_timeout
) {
225 l2cap_set_timer(chan
, &chan
->monitor_timer
,
226 msecs_to_jiffies(chan
->monitor_timeout
));
230 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
235 skb_queue_walk(head
, skb
) {
236 if (bt_cb(skb
)->control
.txseq
== seq
)
243 /* ---- L2CAP sequence number lists ---- */
245 /* For ERTM, ordered lists of sequence numbers must be tracked for
246 * SREJ requests that are received and for frames that are to be
247 * retransmitted. These seq_list functions implement a singly-linked
248 * list in an array, where membership in the list can also be checked
249 * in constant time. Items can also be added to the tail of the list
250 * and removed from the head in constant time, without further memory
254 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
256 size_t alloc_size
, i
;
258 /* Allocated size is a power of 2 to map sequence numbers
259 * (which may be up to 14 bits) in to a smaller array that is
260 * sized for the negotiated ERTM transmit windows.
262 alloc_size
= roundup_pow_of_two(size
);
264 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
268 seq_list
->mask
= alloc_size
- 1;
269 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
270 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
271 for (i
= 0; i
< alloc_size
; i
++)
272 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
277 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
279 kfree(seq_list
->list
);
282 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
285 /* Constant-time check for list membership */
286 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
289 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
291 u16 mask
= seq_list
->mask
;
293 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
294 /* In case someone tries to pop the head of an empty list */
295 return L2CAP_SEQ_LIST_CLEAR
;
296 } else if (seq_list
->head
== seq
) {
297 /* Head can be removed in constant time */
298 seq_list
->head
= seq_list
->list
[seq
& mask
];
299 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
301 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
302 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
303 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
306 /* Walk the list to find the sequence number */
307 u16 prev
= seq_list
->head
;
308 while (seq_list
->list
[prev
& mask
] != seq
) {
309 prev
= seq_list
->list
[prev
& mask
];
310 if (prev
== L2CAP_SEQ_LIST_TAIL
)
311 return L2CAP_SEQ_LIST_CLEAR
;
314 /* Unlink the number from the list and clear it */
315 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
316 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
317 if (seq_list
->tail
== seq
)
318 seq_list
->tail
= prev
;
323 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
325 /* Remove the head in constant time */
326 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
329 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
333 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
336 for (i
= 0; i
<= seq_list
->mask
; i
++)
337 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
339 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
340 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
343 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
345 u16 mask
= seq_list
->mask
;
347 /* All appends happen in constant time */
349 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
352 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
353 seq_list
->head
= seq
;
355 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
357 seq_list
->tail
= seq
;
358 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
361 static void l2cap_chan_timeout(struct work_struct
*work
)
363 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
365 struct l2cap_conn
*conn
= chan
->conn
;
368 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
370 mutex_lock(&conn
->chan_lock
);
371 l2cap_chan_lock(chan
);
373 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
374 reason
= ECONNREFUSED
;
375 else if (chan
->state
== BT_CONNECT
&&
376 chan
->sec_level
!= BT_SECURITY_SDP
)
377 reason
= ECONNREFUSED
;
381 l2cap_chan_close(chan
, reason
);
383 l2cap_chan_unlock(chan
);
385 chan
->ops
->close(chan
);
386 mutex_unlock(&conn
->chan_lock
);
388 l2cap_chan_put(chan
);
391 struct l2cap_chan
*l2cap_chan_create(void)
393 struct l2cap_chan
*chan
;
395 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
399 mutex_init(&chan
->lock
);
401 write_lock(&chan_list_lock
);
402 list_add(&chan
->global_l
, &chan_list
);
403 write_unlock(&chan_list_lock
);
405 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
407 chan
->state
= BT_OPEN
;
409 atomic_set(&chan
->refcnt
, 1);
411 /* This flag is cleared in l2cap_chan_ready() */
412 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
414 BT_DBG("chan %p", chan
);
419 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
421 write_lock(&chan_list_lock
);
422 list_del(&chan
->global_l
);
423 write_unlock(&chan_list_lock
);
425 l2cap_chan_put(chan
);
428 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
430 chan
->fcs
= L2CAP_FCS_CRC16
;
431 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
432 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
433 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
434 chan
->sec_level
= BT_SECURITY_LOW
;
436 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
439 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
441 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
442 __le16_to_cpu(chan
->psm
), chan
->dcid
);
444 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
448 switch (chan
->chan_type
) {
449 case L2CAP_CHAN_CONN_ORIENTED
:
450 if (conn
->hcon
->type
== LE_LINK
) {
452 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
453 chan
->scid
= L2CAP_CID_LE_DATA
;
454 chan
->dcid
= L2CAP_CID_LE_DATA
;
456 /* Alloc CID for connection-oriented socket */
457 chan
->scid
= l2cap_alloc_cid(conn
);
458 chan
->omtu
= L2CAP_DEFAULT_MTU
;
462 case L2CAP_CHAN_CONN_LESS
:
463 /* Connectionless socket */
464 chan
->scid
= L2CAP_CID_CONN_LESS
;
465 chan
->dcid
= L2CAP_CID_CONN_LESS
;
466 chan
->omtu
= L2CAP_DEFAULT_MTU
;
469 case L2CAP_CHAN_CONN_FIX_A2MP
:
470 chan
->scid
= L2CAP_CID_A2MP
;
471 chan
->dcid
= L2CAP_CID_A2MP
;
472 chan
->omtu
= L2CAP_A2MP_DEFAULT_MTU
;
473 chan
->imtu
= L2CAP_A2MP_DEFAULT_MTU
;
477 /* Raw socket can send/recv signalling messages only */
478 chan
->scid
= L2CAP_CID_SIGNALING
;
479 chan
->dcid
= L2CAP_CID_SIGNALING
;
480 chan
->omtu
= L2CAP_DEFAULT_MTU
;
483 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
484 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
485 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
486 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
487 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
488 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
490 l2cap_chan_hold(chan
);
492 list_add(&chan
->list
, &conn
->chan_l
);
495 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
497 mutex_lock(&conn
->chan_lock
);
498 __l2cap_chan_add(conn
, chan
);
499 mutex_unlock(&conn
->chan_lock
);
502 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
504 struct l2cap_conn
*conn
= chan
->conn
;
506 __clear_chan_timer(chan
);
508 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
511 /* Delete from channel list */
512 list_del(&chan
->list
);
514 l2cap_chan_put(chan
);
518 if (chan
->chan_type
!= L2CAP_CHAN_CONN_FIX_A2MP
)
519 hci_conn_put(conn
->hcon
);
522 if (chan
->ops
->teardown
)
523 chan
->ops
->teardown(chan
, err
);
525 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
529 case L2CAP_MODE_BASIC
:
532 case L2CAP_MODE_ERTM
:
533 __clear_retrans_timer(chan
);
534 __clear_monitor_timer(chan
);
535 __clear_ack_timer(chan
);
537 skb_queue_purge(&chan
->srej_q
);
539 l2cap_seq_list_free(&chan
->srej_list
);
540 l2cap_seq_list_free(&chan
->retrans_list
);
544 case L2CAP_MODE_STREAMING
:
545 skb_queue_purge(&chan
->tx_q
);
552 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
554 struct l2cap_conn
*conn
= chan
->conn
;
555 struct sock
*sk
= chan
->sk
;
557 BT_DBG("chan %p state %s sk %p", chan
,
558 state_to_string(chan
->state
), sk
);
560 switch (chan
->state
) {
562 if (chan
->ops
->teardown
)
563 chan
->ops
->teardown(chan
, 0);
568 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
569 conn
->hcon
->type
== ACL_LINK
) {
570 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
571 l2cap_send_disconn_req(conn
, chan
, reason
);
573 l2cap_chan_del(chan
, reason
);
577 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
578 conn
->hcon
->type
== ACL_LINK
) {
579 struct l2cap_conn_rsp rsp
;
582 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
))
583 result
= L2CAP_CR_SEC_BLOCK
;
585 result
= L2CAP_CR_BAD_PSM
;
586 l2cap_state_change(chan
, BT_DISCONN
);
588 rsp
.scid
= cpu_to_le16(chan
->dcid
);
589 rsp
.dcid
= cpu_to_le16(chan
->scid
);
590 rsp
.result
= cpu_to_le16(result
);
591 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
592 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
596 l2cap_chan_del(chan
, reason
);
601 l2cap_chan_del(chan
, reason
);
605 if (chan
->ops
->teardown
)
606 chan
->ops
->teardown(chan
, 0);
611 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
613 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
614 switch (chan
->sec_level
) {
615 case BT_SECURITY_HIGH
:
616 return HCI_AT_DEDICATED_BONDING_MITM
;
617 case BT_SECURITY_MEDIUM
:
618 return HCI_AT_DEDICATED_BONDING
;
620 return HCI_AT_NO_BONDING
;
622 } else if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_SDP
)) {
623 if (chan
->sec_level
== BT_SECURITY_LOW
)
624 chan
->sec_level
= BT_SECURITY_SDP
;
626 if (chan
->sec_level
== BT_SECURITY_HIGH
)
627 return HCI_AT_NO_BONDING_MITM
;
629 return HCI_AT_NO_BONDING
;
631 switch (chan
->sec_level
) {
632 case BT_SECURITY_HIGH
:
633 return HCI_AT_GENERAL_BONDING_MITM
;
634 case BT_SECURITY_MEDIUM
:
635 return HCI_AT_GENERAL_BONDING
;
637 return HCI_AT_NO_BONDING
;
642 /* Service level security */
643 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
645 struct l2cap_conn
*conn
= chan
->conn
;
648 auth_type
= l2cap_get_auth_type(chan
);
650 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
653 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
657 /* Get next available identificator.
658 * 1 - 128 are used by kernel.
659 * 129 - 199 are reserved.
660 * 200 - 254 are used by utilities like l2ping, etc.
663 spin_lock(&conn
->lock
);
665 if (++conn
->tx_ident
> 128)
670 spin_unlock(&conn
->lock
);
675 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
677 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
680 BT_DBG("code 0x%2.2x", code
);
685 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
686 flags
= ACL_START_NO_FLUSH
;
690 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
691 skb
->priority
= HCI_PRIO_MAX
;
693 hci_send_acl(conn
->hchan
, skb
, flags
);
696 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
698 struct hci_conn
*hcon
= chan
->conn
->hcon
;
701 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
704 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
705 lmp_no_flush_capable(hcon
->hdev
))
706 flags
= ACL_START_NO_FLUSH
;
710 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
711 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
714 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
716 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
717 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
719 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
722 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
723 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
730 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
731 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
738 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
740 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
741 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
743 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
746 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
747 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
754 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
755 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
762 static inline void __unpack_control(struct l2cap_chan
*chan
,
765 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
766 __unpack_extended_control(get_unaligned_le32(skb
->data
),
767 &bt_cb(skb
)->control
);
768 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
770 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
771 &bt_cb(skb
)->control
);
772 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
776 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
780 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
781 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
783 if (control
->sframe
) {
784 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
785 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
786 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
788 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
789 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
795 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
799 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
800 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
802 if (control
->sframe
) {
803 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
804 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
805 packed
|= L2CAP_CTRL_FRAME_TYPE
;
807 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
808 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
814 static inline void __pack_control(struct l2cap_chan
*chan
,
815 struct l2cap_ctrl
*control
,
818 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
819 put_unaligned_le32(__pack_extended_control(control
),
820 skb
->data
+ L2CAP_HDR_SIZE
);
822 put_unaligned_le16(__pack_enhanced_control(control
),
823 skb
->data
+ L2CAP_HDR_SIZE
);
827 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
829 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
830 return L2CAP_EXT_HDR_SIZE
;
832 return L2CAP_ENH_HDR_SIZE
;
835 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
839 struct l2cap_hdr
*lh
;
840 int hlen
= __ertm_hdr_size(chan
);
842 if (chan
->fcs
== L2CAP_FCS_CRC16
)
843 hlen
+= L2CAP_FCS_SIZE
;
845 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
848 return ERR_PTR(-ENOMEM
);
850 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
851 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
852 lh
->cid
= cpu_to_le16(chan
->dcid
);
854 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
855 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
857 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
859 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
860 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
861 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
864 skb
->priority
= HCI_PRIO_MAX
;
868 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
869 struct l2cap_ctrl
*control
)
874 BT_DBG("chan %p, control %p", chan
, control
);
876 if (!control
->sframe
)
879 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
883 if (control
->super
== L2CAP_SUPER_RR
)
884 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
885 else if (control
->super
== L2CAP_SUPER_RNR
)
886 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
888 if (control
->super
!= L2CAP_SUPER_SREJ
) {
889 chan
->last_acked_seq
= control
->reqseq
;
890 __clear_ack_timer(chan
);
893 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
894 control
->final
, control
->poll
, control
->super
);
896 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
897 control_field
= __pack_extended_control(control
);
899 control_field
= __pack_enhanced_control(control
);
901 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
903 l2cap_do_send(chan
, skb
);
906 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
908 struct l2cap_ctrl control
;
910 BT_DBG("chan %p, poll %d", chan
, poll
);
912 memset(&control
, 0, sizeof(control
));
916 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
917 control
.super
= L2CAP_SUPER_RNR
;
919 control
.super
= L2CAP_SUPER_RR
;
921 control
.reqseq
= chan
->buffer_seq
;
922 l2cap_send_sframe(chan
, &control
);
925 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
927 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
930 static void l2cap_send_conn_req(struct l2cap_chan
*chan
)
932 struct l2cap_conn
*conn
= chan
->conn
;
933 struct l2cap_conn_req req
;
935 req
.scid
= cpu_to_le16(chan
->scid
);
938 chan
->ident
= l2cap_get_ident(conn
);
940 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
942 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
945 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
947 /* This clears all conf flags, including CONF_NOT_COMPLETE */
948 chan
->conf_state
= 0;
949 __clear_chan_timer(chan
);
951 chan
->state
= BT_CONNECTED
;
953 chan
->ops
->ready(chan
);
956 static void l2cap_do_start(struct l2cap_chan
*chan
)
958 struct l2cap_conn
*conn
= chan
->conn
;
960 if (conn
->hcon
->type
== LE_LINK
) {
961 l2cap_chan_ready(chan
);
965 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
966 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
969 if (l2cap_chan_check_security(chan
) &&
970 __l2cap_no_conn_pending(chan
))
971 l2cap_send_conn_req(chan
);
973 struct l2cap_info_req req
;
974 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
976 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
977 conn
->info_ident
= l2cap_get_ident(conn
);
979 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
981 l2cap_send_cmd(conn
, conn
->info_ident
,
982 L2CAP_INFO_REQ
, sizeof(req
), &req
);
986 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
988 u32 local_feat_mask
= l2cap_feat_mask
;
990 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
993 case L2CAP_MODE_ERTM
:
994 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
995 case L2CAP_MODE_STREAMING
:
996 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1002 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
1004 struct sock
*sk
= chan
->sk
;
1005 struct l2cap_disconn_req req
;
1010 if (chan
->mode
== L2CAP_MODE_ERTM
) {
1011 __clear_retrans_timer(chan
);
1012 __clear_monitor_timer(chan
);
1013 __clear_ack_timer(chan
);
1016 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1017 __l2cap_state_change(chan
, BT_DISCONN
);
1021 req
.dcid
= cpu_to_le16(chan
->dcid
);
1022 req
.scid
= cpu_to_le16(chan
->scid
);
1023 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
1024 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
1027 __l2cap_state_change(chan
, BT_DISCONN
);
1028 __l2cap_chan_set_err(chan
, err
);
1032 /* ---- L2CAP connections ---- */
1033 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1035 struct l2cap_chan
*chan
, *tmp
;
1037 BT_DBG("conn %p", conn
);
1039 mutex_lock(&conn
->chan_lock
);
1041 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1042 struct sock
*sk
= chan
->sk
;
1044 l2cap_chan_lock(chan
);
1046 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1047 l2cap_chan_unlock(chan
);
1051 if (chan
->state
== BT_CONNECT
) {
1052 if (!l2cap_chan_check_security(chan
) ||
1053 !__l2cap_no_conn_pending(chan
)) {
1054 l2cap_chan_unlock(chan
);
1058 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1059 && test_bit(CONF_STATE2_DEVICE
,
1060 &chan
->conf_state
)) {
1061 l2cap_chan_close(chan
, ECONNRESET
);
1062 l2cap_chan_unlock(chan
);
1066 l2cap_send_conn_req(chan
);
1068 } else if (chan
->state
== BT_CONNECT2
) {
1069 struct l2cap_conn_rsp rsp
;
1071 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1072 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1074 if (l2cap_chan_check_security(chan
)) {
1076 if (test_bit(BT_SK_DEFER_SETUP
,
1077 &bt_sk(sk
)->flags
)) {
1078 struct sock
*parent
= bt_sk(sk
)->parent
;
1079 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1080 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1082 parent
->sk_data_ready(parent
, 0);
1085 __l2cap_state_change(chan
, BT_CONFIG
);
1086 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
1087 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
1091 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1092 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1095 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1098 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1099 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1100 l2cap_chan_unlock(chan
);
1104 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1105 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1106 l2cap_build_conf_req(chan
, buf
), buf
);
1107 chan
->num_conf_req
++;
1110 l2cap_chan_unlock(chan
);
1113 mutex_unlock(&conn
->chan_lock
);
1116 /* Find socket with cid and source/destination bdaddr.
1117 * Returns closest match, locked.
1119 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1123 struct l2cap_chan
*c
, *c1
= NULL
;
1125 read_lock(&chan_list_lock
);
1127 list_for_each_entry(c
, &chan_list
, global_l
) {
1128 struct sock
*sk
= c
->sk
;
1130 if (state
&& c
->state
!= state
)
1133 if (c
->scid
== cid
) {
1134 int src_match
, dst_match
;
1135 int src_any
, dst_any
;
1138 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1139 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1140 if (src_match
&& dst_match
) {
1141 read_unlock(&chan_list_lock
);
1146 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1147 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1148 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1149 (src_any
&& dst_any
))
1154 read_unlock(&chan_list_lock
);
1159 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1161 struct sock
*parent
, *sk
;
1162 struct l2cap_chan
*chan
, *pchan
;
1166 /* Check if we have socket listening on cid */
1167 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1168 conn
->src
, conn
->dst
);
1176 chan
= pchan
->ops
->new_connection(pchan
);
1182 hci_conn_hold(conn
->hcon
);
1184 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1185 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1187 bt_accept_enqueue(parent
, sk
);
1189 l2cap_chan_add(conn
, chan
);
1191 l2cap_chan_ready(chan
);
1194 release_sock(parent
);
1197 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1199 struct l2cap_chan
*chan
;
1201 BT_DBG("conn %p", conn
);
1203 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1204 l2cap_le_conn_ready(conn
);
1206 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1207 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
1209 mutex_lock(&conn
->chan_lock
);
1211 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1213 l2cap_chan_lock(chan
);
1215 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1216 l2cap_chan_unlock(chan
);
1220 if (conn
->hcon
->type
== LE_LINK
) {
1221 if (smp_conn_security(conn
, chan
->sec_level
))
1222 l2cap_chan_ready(chan
);
1224 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1225 struct sock
*sk
= chan
->sk
;
1226 __clear_chan_timer(chan
);
1228 __l2cap_state_change(chan
, BT_CONNECTED
);
1229 sk
->sk_state_change(sk
);
1232 } else if (chan
->state
== BT_CONNECT
)
1233 l2cap_do_start(chan
);
1235 l2cap_chan_unlock(chan
);
1238 mutex_unlock(&conn
->chan_lock
);
1241 /* Notify sockets that we cannot guaranty reliability anymore */
1242 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1244 struct l2cap_chan
*chan
;
1246 BT_DBG("conn %p", conn
);
1248 mutex_lock(&conn
->chan_lock
);
1250 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1251 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1252 __l2cap_chan_set_err(chan
, err
);
1255 mutex_unlock(&conn
->chan_lock
);
1258 static void l2cap_info_timeout(struct work_struct
*work
)
1260 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1263 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1264 conn
->info_ident
= 0;
1266 l2cap_conn_start(conn
);
1269 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1271 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1272 struct l2cap_chan
*chan
, *l
;
1277 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1279 kfree_skb(conn
->rx_skb
);
1281 mutex_lock(&conn
->chan_lock
);
1284 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1285 l2cap_chan_hold(chan
);
1286 l2cap_chan_lock(chan
);
1288 l2cap_chan_del(chan
, err
);
1290 l2cap_chan_unlock(chan
);
1292 chan
->ops
->close(chan
);
1293 l2cap_chan_put(chan
);
1296 mutex_unlock(&conn
->chan_lock
);
1298 hci_chan_del(conn
->hchan
);
1300 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1301 cancel_delayed_work_sync(&conn
->info_timer
);
1303 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1304 cancel_delayed_work_sync(&conn
->security_timer
);
1305 smp_chan_destroy(conn
);
1308 hcon
->l2cap_data
= NULL
;
1312 static void security_timeout(struct work_struct
*work
)
1314 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1315 security_timer
.work
);
1317 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1320 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1322 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1323 struct hci_chan
*hchan
;
1328 hchan
= hci_chan_create(hcon
);
1332 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
1334 hci_chan_del(hchan
);
1338 hcon
->l2cap_data
= conn
;
1340 conn
->hchan
= hchan
;
1342 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1344 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
1345 conn
->mtu
= hcon
->hdev
->le_mtu
;
1347 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1349 conn
->src
= &hcon
->hdev
->bdaddr
;
1350 conn
->dst
= &hcon
->dst
;
1352 conn
->feat_mask
= 0;
1354 spin_lock_init(&conn
->lock
);
1355 mutex_init(&conn
->chan_lock
);
1357 INIT_LIST_HEAD(&conn
->chan_l
);
1359 if (hcon
->type
== LE_LINK
)
1360 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1362 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1364 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1369 /* ---- Socket interface ---- */
1371 /* Find socket with psm and source / destination bdaddr.
1372 * Returns closest match.
1374 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1378 struct l2cap_chan
*c
, *c1
= NULL
;
1380 read_lock(&chan_list_lock
);
1382 list_for_each_entry(c
, &chan_list
, global_l
) {
1383 struct sock
*sk
= c
->sk
;
1385 if (state
&& c
->state
!= state
)
1388 if (c
->psm
== psm
) {
1389 int src_match
, dst_match
;
1390 int src_any
, dst_any
;
1393 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1394 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1395 if (src_match
&& dst_match
) {
1396 read_unlock(&chan_list_lock
);
1401 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1402 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1403 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1404 (src_any
&& dst_any
))
1409 read_unlock(&chan_list_lock
);
1414 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1415 bdaddr_t
*dst
, u8 dst_type
)
1417 struct sock
*sk
= chan
->sk
;
1418 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1419 struct l2cap_conn
*conn
;
1420 struct hci_conn
*hcon
;
1421 struct hci_dev
*hdev
;
1425 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src
), batostr(dst
),
1426 dst_type
, __le16_to_cpu(chan
->psm
));
1428 hdev
= hci_get_route(dst
, src
);
1430 return -EHOSTUNREACH
;
1434 l2cap_chan_lock(chan
);
1436 /* PSM must be odd and lsb of upper byte must be 0 */
1437 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1438 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1443 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1448 switch (chan
->mode
) {
1449 case L2CAP_MODE_BASIC
:
1451 case L2CAP_MODE_ERTM
:
1452 case L2CAP_MODE_STREAMING
:
1461 switch (chan
->state
) {
1465 /* Already connecting */
1470 /* Already connected */
1484 /* Set destination address and psm */
1486 bacpy(&bt_sk(sk
)->dst
, dst
);
1492 auth_type
= l2cap_get_auth_type(chan
);
1494 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1495 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1496 chan
->sec_level
, auth_type
);
1498 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1499 chan
->sec_level
, auth_type
);
1502 err
= PTR_ERR(hcon
);
1506 conn
= l2cap_conn_add(hcon
, 0);
1513 if (hcon
->type
== LE_LINK
) {
1516 if (!list_empty(&conn
->chan_l
)) {
1525 /* Update source addr of the socket */
1526 bacpy(src
, conn
->src
);
1528 l2cap_chan_unlock(chan
);
1529 l2cap_chan_add(conn
, chan
);
1530 l2cap_chan_lock(chan
);
1532 l2cap_state_change(chan
, BT_CONNECT
);
1533 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1535 if (hcon
->state
== BT_CONNECTED
) {
1536 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1537 __clear_chan_timer(chan
);
1538 if (l2cap_chan_check_security(chan
))
1539 l2cap_state_change(chan
, BT_CONNECTED
);
1541 l2cap_do_start(chan
);
1547 l2cap_chan_unlock(chan
);
1548 hci_dev_unlock(hdev
);
1553 int __l2cap_wait_ack(struct sock
*sk
)
1555 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1556 DECLARE_WAITQUEUE(wait
, current
);
1560 add_wait_queue(sk_sleep(sk
), &wait
);
1561 set_current_state(TASK_INTERRUPTIBLE
);
1562 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1566 if (signal_pending(current
)) {
1567 err
= sock_intr_errno(timeo
);
1572 timeo
= schedule_timeout(timeo
);
1574 set_current_state(TASK_INTERRUPTIBLE
);
1576 err
= sock_error(sk
);
1580 set_current_state(TASK_RUNNING
);
1581 remove_wait_queue(sk_sleep(sk
), &wait
);
1585 static void l2cap_monitor_timeout(struct work_struct
*work
)
1587 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1588 monitor_timer
.work
);
1590 BT_DBG("chan %p", chan
);
1592 l2cap_chan_lock(chan
);
1595 l2cap_chan_unlock(chan
);
1596 l2cap_chan_put(chan
);
1600 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1602 l2cap_chan_unlock(chan
);
1603 l2cap_chan_put(chan
);
1606 static void l2cap_retrans_timeout(struct work_struct
*work
)
1608 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1609 retrans_timer
.work
);
1611 BT_DBG("chan %p", chan
);
1613 l2cap_chan_lock(chan
);
1616 l2cap_chan_unlock(chan
);
1617 l2cap_chan_put(chan
);
1621 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1622 l2cap_chan_unlock(chan
);
1623 l2cap_chan_put(chan
);
1626 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1627 struct sk_buff_head
*skbs
)
1629 struct sk_buff
*skb
;
1630 struct l2cap_ctrl
*control
;
1632 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1634 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1636 while (!skb_queue_empty(&chan
->tx_q
)) {
1638 skb
= skb_dequeue(&chan
->tx_q
);
1640 bt_cb(skb
)->control
.retries
= 1;
1641 control
= &bt_cb(skb
)->control
;
1643 control
->reqseq
= 0;
1644 control
->txseq
= chan
->next_tx_seq
;
1646 __pack_control(chan
, control
, skb
);
1648 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1649 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1650 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1653 l2cap_do_send(chan
, skb
);
1655 BT_DBG("Sent txseq %d", (int)control
->txseq
);
1657 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1658 chan
->frames_sent
++;
1662 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1664 struct sk_buff
*skb
, *tx_skb
;
1665 struct l2cap_ctrl
*control
;
1668 BT_DBG("chan %p", chan
);
1670 if (chan
->state
!= BT_CONNECTED
)
1673 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1676 while (chan
->tx_send_head
&&
1677 chan
->unacked_frames
< chan
->remote_tx_win
&&
1678 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1680 skb
= chan
->tx_send_head
;
1682 bt_cb(skb
)->control
.retries
= 1;
1683 control
= &bt_cb(skb
)->control
;
1685 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1688 control
->reqseq
= chan
->buffer_seq
;
1689 chan
->last_acked_seq
= chan
->buffer_seq
;
1690 control
->txseq
= chan
->next_tx_seq
;
1692 __pack_control(chan
, control
, skb
);
1694 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1695 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1696 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1699 /* Clone after data has been modified. Data is assumed to be
1700 read-only (for locking purposes) on cloned sk_buffs.
1702 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1707 __set_retrans_timer(chan
);
1709 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1710 chan
->unacked_frames
++;
1711 chan
->frames_sent
++;
1714 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1715 chan
->tx_send_head
= NULL
;
1717 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1719 l2cap_do_send(chan
, tx_skb
);
1720 BT_DBG("Sent txseq %d", (int)control
->txseq
);
1723 BT_DBG("Sent %d, %d unacked, %d in ERTM queue", sent
,
1724 (int) chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1729 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1731 struct l2cap_ctrl control
;
1732 struct sk_buff
*skb
;
1733 struct sk_buff
*tx_skb
;
1736 BT_DBG("chan %p", chan
);
1738 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1741 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1742 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1744 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1746 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1751 bt_cb(skb
)->control
.retries
++;
1752 control
= bt_cb(skb
)->control
;
1754 if (chan
->max_tx
!= 0 &&
1755 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1756 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1757 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
1758 l2cap_seq_list_clear(&chan
->retrans_list
);
1762 control
.reqseq
= chan
->buffer_seq
;
1763 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1768 if (skb_cloned(skb
)) {
1769 /* Cloned sk_buffs are read-only, so we need a
1772 tx_skb
= skb_copy(skb
, GFP_ATOMIC
);
1774 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1778 l2cap_seq_list_clear(&chan
->retrans_list
);
1782 /* Update skb contents */
1783 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1784 put_unaligned_le32(__pack_extended_control(&control
),
1785 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1787 put_unaligned_le16(__pack_enhanced_control(&control
),
1788 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1791 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1792 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
1793 put_unaligned_le16(fcs
, skb_put(tx_skb
,
1797 l2cap_do_send(chan
, tx_skb
);
1799 BT_DBG("Resent txseq %d", control
.txseq
);
1801 chan
->last_acked_seq
= chan
->buffer_seq
;
1805 static void l2cap_retransmit(struct l2cap_chan
*chan
,
1806 struct l2cap_ctrl
*control
)
1808 BT_DBG("chan %p, control %p", chan
, control
);
1810 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
1811 l2cap_ertm_resend(chan
);
1814 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
1815 struct l2cap_ctrl
*control
)
1817 struct sk_buff
*skb
;
1819 BT_DBG("chan %p, control %p", chan
, control
);
1822 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
1824 l2cap_seq_list_clear(&chan
->retrans_list
);
1826 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1829 if (chan
->unacked_frames
) {
1830 skb_queue_walk(&chan
->tx_q
, skb
) {
1831 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
1832 skb
== chan
->tx_send_head
)
1836 skb_queue_walk_from(&chan
->tx_q
, skb
) {
1837 if (skb
== chan
->tx_send_head
)
1840 l2cap_seq_list_append(&chan
->retrans_list
,
1841 bt_cb(skb
)->control
.txseq
);
1844 l2cap_ertm_resend(chan
);
1848 static void l2cap_send_ack(struct l2cap_chan
*chan
)
1850 struct l2cap_ctrl control
;
1851 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
1852 chan
->last_acked_seq
);
1855 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
1856 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
1858 memset(&control
, 0, sizeof(control
));
1861 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
1862 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
1863 __clear_ack_timer(chan
);
1864 control
.super
= L2CAP_SUPER_RNR
;
1865 control
.reqseq
= chan
->buffer_seq
;
1866 l2cap_send_sframe(chan
, &control
);
1868 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
1869 l2cap_ertm_send(chan
);
1870 /* If any i-frames were sent, they included an ack */
1871 if (chan
->buffer_seq
== chan
->last_acked_seq
)
1875 /* Ack now if the tx window is 3/4ths full.
1876 * Calculate without mul or div
1878 threshold
= chan
->tx_win
;
1879 threshold
+= threshold
<< 1;
1882 BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack
,
1885 if (frames_to_ack
>= threshold
) {
1886 __clear_ack_timer(chan
);
1887 control
.super
= L2CAP_SUPER_RR
;
1888 control
.reqseq
= chan
->buffer_seq
;
1889 l2cap_send_sframe(chan
, &control
);
1894 __set_ack_timer(chan
);
1898 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
1899 struct msghdr
*msg
, int len
,
1900 int count
, struct sk_buff
*skb
)
1902 struct l2cap_conn
*conn
= chan
->conn
;
1903 struct sk_buff
**frag
;
1906 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1912 /* Continuation fragments (no L2CAP header) */
1913 frag
= &skb_shinfo(skb
)->frag_list
;
1915 struct sk_buff
*tmp
;
1917 count
= min_t(unsigned int, conn
->mtu
, len
);
1919 tmp
= chan
->ops
->alloc_skb(chan
, count
,
1920 msg
->msg_flags
& MSG_DONTWAIT
);
1922 return PTR_ERR(tmp
);
1926 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1929 (*frag
)->priority
= skb
->priority
;
1934 skb
->len
+= (*frag
)->len
;
1935 skb
->data_len
+= (*frag
)->len
;
1937 frag
= &(*frag
)->next
;
1943 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
1944 struct msghdr
*msg
, size_t len
,
1947 struct l2cap_conn
*conn
= chan
->conn
;
1948 struct sk_buff
*skb
;
1949 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
1950 struct l2cap_hdr
*lh
;
1952 BT_DBG("chan %p len %d priority %u", chan
, (int)len
, priority
);
1954 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1956 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
1957 msg
->msg_flags
& MSG_DONTWAIT
);
1961 skb
->priority
= priority
;
1963 /* Create L2CAP header */
1964 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1965 lh
->cid
= cpu_to_le16(chan
->dcid
);
1966 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
1967 put_unaligned(chan
->psm
, skb_put(skb
, L2CAP_PSMLEN_SIZE
));
1969 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1970 if (unlikely(err
< 0)) {
1972 return ERR_PTR(err
);
1977 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
1978 struct msghdr
*msg
, size_t len
,
1981 struct l2cap_conn
*conn
= chan
->conn
;
1982 struct sk_buff
*skb
;
1984 struct l2cap_hdr
*lh
;
1986 BT_DBG("chan %p len %d", chan
, (int)len
);
1988 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
1990 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
1991 msg
->msg_flags
& MSG_DONTWAIT
);
1995 skb
->priority
= priority
;
1997 /* Create L2CAP header */
1998 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1999 lh
->cid
= cpu_to_le16(chan
->dcid
);
2000 lh
->len
= cpu_to_le16(len
);
2002 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2003 if (unlikely(err
< 0)) {
2005 return ERR_PTR(err
);
2010 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2011 struct msghdr
*msg
, size_t len
,
2014 struct l2cap_conn
*conn
= chan
->conn
;
2015 struct sk_buff
*skb
;
2016 int err
, count
, hlen
;
2017 struct l2cap_hdr
*lh
;
2019 BT_DBG("chan %p len %d", chan
, (int)len
);
2022 return ERR_PTR(-ENOTCONN
);
2024 hlen
= __ertm_hdr_size(chan
);
2027 hlen
+= L2CAP_SDULEN_SIZE
;
2029 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2030 hlen
+= L2CAP_FCS_SIZE
;
2032 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2034 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2035 msg
->msg_flags
& MSG_DONTWAIT
);
2039 /* Create L2CAP header */
2040 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2041 lh
->cid
= cpu_to_le16(chan
->dcid
);
2042 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2044 /* Control header is populated later */
2045 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2046 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2048 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2051 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2053 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2054 if (unlikely(err
< 0)) {
2056 return ERR_PTR(err
);
2059 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2060 bt_cb(skb
)->control
.retries
= 0;
2064 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2065 struct sk_buff_head
*seg_queue
,
2066 struct msghdr
*msg
, size_t len
)
2068 struct sk_buff
*skb
;
2074 BT_DBG("chan %p, msg %p, len %d", chan
, msg
, (int)len
);
2076 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2077 * so fragmented skbs are not used. The HCI layer's handling
2078 * of fragmented skbs is not compatible with ERTM's queueing.
2081 /* PDU size is derived from the HCI MTU */
2082 pdu_len
= chan
->conn
->mtu
;
2084 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2086 /* Adjust for largest possible L2CAP overhead. */
2088 pdu_len
-= L2CAP_FCS_SIZE
;
2090 pdu_len
-= __ertm_hdr_size(chan
);
2092 /* Remote device may have requested smaller PDUs */
2093 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2095 if (len
<= pdu_len
) {
2096 sar
= L2CAP_SAR_UNSEGMENTED
;
2100 sar
= L2CAP_SAR_START
;
2102 pdu_len
-= L2CAP_SDULEN_SIZE
;
2106 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2109 __skb_queue_purge(seg_queue
);
2110 return PTR_ERR(skb
);
2113 bt_cb(skb
)->control
.sar
= sar
;
2114 __skb_queue_tail(seg_queue
, skb
);
2119 pdu_len
+= L2CAP_SDULEN_SIZE
;
2122 if (len
<= pdu_len
) {
2123 sar
= L2CAP_SAR_END
;
2126 sar
= L2CAP_SAR_CONTINUE
;
2133 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2136 struct sk_buff
*skb
;
2138 struct sk_buff_head seg_queue
;
2140 /* Connectionless channel */
2141 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2142 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2144 return PTR_ERR(skb
);
2146 l2cap_do_send(chan
, skb
);
2150 switch (chan
->mode
) {
2151 case L2CAP_MODE_BASIC
:
2152 /* Check outgoing MTU */
2153 if (len
> chan
->omtu
)
2156 /* Create a basic PDU */
2157 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2159 return PTR_ERR(skb
);
2161 l2cap_do_send(chan
, skb
);
2165 case L2CAP_MODE_ERTM
:
2166 case L2CAP_MODE_STREAMING
:
2167 /* Check outgoing MTU */
2168 if (len
> chan
->omtu
) {
2173 __skb_queue_head_init(&seg_queue
);
2175 /* Do segmentation before calling in to the state machine,
2176 * since it's possible to block while waiting for memory
2179 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2181 /* The channel could have been closed while segmenting,
2182 * check that it is still connected.
2184 if (chan
->state
!= BT_CONNECTED
) {
2185 __skb_queue_purge(&seg_queue
);
2192 if (chan
->mode
== L2CAP_MODE_ERTM
)
2193 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2195 l2cap_streaming_send(chan
, &seg_queue
);
2199 /* If the skbs were not queued for sending, they'll still be in
2200 * seg_queue and need to be purged.
2202 __skb_queue_purge(&seg_queue
);
2206 BT_DBG("bad state %1.1x", chan
->mode
);
2213 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2215 struct l2cap_ctrl control
;
2218 BT_DBG("chan %p, txseq %d", chan
, txseq
);
2220 memset(&control
, 0, sizeof(control
));
2222 control
.super
= L2CAP_SUPER_SREJ
;
2224 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2225 seq
= __next_seq(chan
, seq
)) {
2226 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2227 control
.reqseq
= seq
;
2228 l2cap_send_sframe(chan
, &control
);
2229 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2233 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2236 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2238 struct l2cap_ctrl control
;
2240 BT_DBG("chan %p", chan
);
2242 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2245 memset(&control
, 0, sizeof(control
));
2247 control
.super
= L2CAP_SUPER_SREJ
;
2248 control
.reqseq
= chan
->srej_list
.tail
;
2249 l2cap_send_sframe(chan
, &control
);
2252 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2254 struct l2cap_ctrl control
;
2258 BT_DBG("chan %p, txseq %d", chan
, txseq
);
2260 memset(&control
, 0, sizeof(control
));
2262 control
.super
= L2CAP_SUPER_SREJ
;
2264 /* Capture initial list head to allow only one pass through the list. */
2265 initial_head
= chan
->srej_list
.head
;
2268 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2269 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2272 control
.reqseq
= seq
;
2273 l2cap_send_sframe(chan
, &control
);
2274 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2275 } while (chan
->srej_list
.head
!= initial_head
);
2278 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2280 struct sk_buff
*acked_skb
;
2283 BT_DBG("chan %p, reqseq %d", chan
, reqseq
);
2285 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2288 BT_DBG("expected_ack_seq %d, unacked_frames %d",
2289 chan
->expected_ack_seq
, chan
->unacked_frames
);
2291 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2292 ackseq
= __next_seq(chan
, ackseq
)) {
2294 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2296 skb_unlink(acked_skb
, &chan
->tx_q
);
2297 kfree_skb(acked_skb
);
2298 chan
->unacked_frames
--;
2302 chan
->expected_ack_seq
= reqseq
;
2304 if (chan
->unacked_frames
== 0)
2305 __clear_retrans_timer(chan
);
2307 BT_DBG("unacked_frames %d", (int) chan
->unacked_frames
);
2310 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2312 BT_DBG("chan %p", chan
);
2314 chan
->expected_tx_seq
= chan
->buffer_seq
;
2315 l2cap_seq_list_clear(&chan
->srej_list
);
2316 skb_queue_purge(&chan
->srej_q
);
2317 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2320 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2321 struct l2cap_ctrl
*control
,
2322 struct sk_buff_head
*skbs
, u8 event
)
2324 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2328 case L2CAP_EV_DATA_REQUEST
:
2329 if (chan
->tx_send_head
== NULL
)
2330 chan
->tx_send_head
= skb_peek(skbs
);
2332 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2333 l2cap_ertm_send(chan
);
2335 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2336 BT_DBG("Enter LOCAL_BUSY");
2337 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2339 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2340 /* The SREJ_SENT state must be aborted if we are to
2341 * enter the LOCAL_BUSY state.
2343 l2cap_abort_rx_srej_sent(chan
);
2346 l2cap_send_ack(chan
);
2349 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2350 BT_DBG("Exit LOCAL_BUSY");
2351 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2353 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2354 struct l2cap_ctrl local_control
;
2356 memset(&local_control
, 0, sizeof(local_control
));
2357 local_control
.sframe
= 1;
2358 local_control
.super
= L2CAP_SUPER_RR
;
2359 local_control
.poll
= 1;
2360 local_control
.reqseq
= chan
->buffer_seq
;
2361 l2cap_send_sframe(chan
, &local_control
);
2363 chan
->retry_count
= 1;
2364 __set_monitor_timer(chan
);
2365 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2368 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2369 l2cap_process_reqseq(chan
, control
->reqseq
);
2371 case L2CAP_EV_EXPLICIT_POLL
:
2372 l2cap_send_rr_or_rnr(chan
, 1);
2373 chan
->retry_count
= 1;
2374 __set_monitor_timer(chan
);
2375 __clear_ack_timer(chan
);
2376 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2378 case L2CAP_EV_RETRANS_TO
:
2379 l2cap_send_rr_or_rnr(chan
, 1);
2380 chan
->retry_count
= 1;
2381 __set_monitor_timer(chan
);
2382 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2384 case L2CAP_EV_RECV_FBIT
:
2385 /* Nothing to process */
2392 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2393 struct l2cap_ctrl
*control
,
2394 struct sk_buff_head
*skbs
, u8 event
)
2396 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2400 case L2CAP_EV_DATA_REQUEST
:
2401 if (chan
->tx_send_head
== NULL
)
2402 chan
->tx_send_head
= skb_peek(skbs
);
2403 /* Queue data, but don't send. */
2404 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2406 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2407 BT_DBG("Enter LOCAL_BUSY");
2408 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2410 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2411 /* The SREJ_SENT state must be aborted if we are to
2412 * enter the LOCAL_BUSY state.
2414 l2cap_abort_rx_srej_sent(chan
);
2417 l2cap_send_ack(chan
);
2420 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2421 BT_DBG("Exit LOCAL_BUSY");
2422 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2424 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2425 struct l2cap_ctrl local_control
;
2426 memset(&local_control
, 0, sizeof(local_control
));
2427 local_control
.sframe
= 1;
2428 local_control
.super
= L2CAP_SUPER_RR
;
2429 local_control
.poll
= 1;
2430 local_control
.reqseq
= chan
->buffer_seq
;
2431 l2cap_send_sframe(chan
, &local_control
);
2433 chan
->retry_count
= 1;
2434 __set_monitor_timer(chan
);
2435 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2438 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2439 l2cap_process_reqseq(chan
, control
->reqseq
);
2443 case L2CAP_EV_RECV_FBIT
:
2444 if (control
&& control
->final
) {
2445 __clear_monitor_timer(chan
);
2446 if (chan
->unacked_frames
> 0)
2447 __set_retrans_timer(chan
);
2448 chan
->retry_count
= 0;
2449 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2450 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2453 case L2CAP_EV_EXPLICIT_POLL
:
2456 case L2CAP_EV_MONITOR_TO
:
2457 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2458 l2cap_send_rr_or_rnr(chan
, 1);
2459 __set_monitor_timer(chan
);
2460 chan
->retry_count
++;
2462 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
2470 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2471 struct sk_buff_head
*skbs
, u8 event
)
2473 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2474 chan
, control
, skbs
, event
, chan
->tx_state
);
2476 switch (chan
->tx_state
) {
2477 case L2CAP_TX_STATE_XMIT
:
2478 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2480 case L2CAP_TX_STATE_WAIT_F
:
2481 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2489 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2490 struct l2cap_ctrl
*control
)
2492 BT_DBG("chan %p, control %p", chan
, control
);
2493 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2496 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2497 struct l2cap_ctrl
*control
)
2499 BT_DBG("chan %p, control %p", chan
, control
);
2500 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2503 /* Copy frame to all raw sockets on that connection */
2504 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2506 struct sk_buff
*nskb
;
2507 struct l2cap_chan
*chan
;
2509 BT_DBG("conn %p", conn
);
2511 mutex_lock(&conn
->chan_lock
);
2513 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2514 struct sock
*sk
= chan
->sk
;
2515 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2518 /* Don't send frame to the socket it came from */
2521 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2525 if (chan
->ops
->recv(chan
, nskb
))
2529 mutex_unlock(&conn
->chan_lock
);
2532 /* ---- L2CAP signalling commands ---- */
2533 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2534 u8 code
, u8 ident
, u16 dlen
, void *data
)
2536 struct sk_buff
*skb
, **frag
;
2537 struct l2cap_cmd_hdr
*cmd
;
2538 struct l2cap_hdr
*lh
;
2541 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2542 conn
, code
, ident
, dlen
);
2544 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2545 count
= min_t(unsigned int, conn
->mtu
, len
);
2547 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2551 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2552 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2554 if (conn
->hcon
->type
== LE_LINK
)
2555 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2557 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_SIGNALING
);
2559 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2562 cmd
->len
= cpu_to_le16(dlen
);
2565 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2566 memcpy(skb_put(skb
, count
), data
, count
);
2572 /* Continuation fragments (no L2CAP header) */
2573 frag
= &skb_shinfo(skb
)->frag_list
;
2575 count
= min_t(unsigned int, conn
->mtu
, len
);
2577 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2581 memcpy(skb_put(*frag
, count
), data
, count
);
2586 frag
= &(*frag
)->next
;
2596 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2598 struct l2cap_conf_opt
*opt
= *ptr
;
2601 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2609 *val
= *((u8
*) opt
->val
);
2613 *val
= get_unaligned_le16(opt
->val
);
2617 *val
= get_unaligned_le32(opt
->val
);
2621 *val
= (unsigned long) opt
->val
;
2625 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2629 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2631 struct l2cap_conf_opt
*opt
= *ptr
;
2633 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2640 *((u8
*) opt
->val
) = val
;
2644 put_unaligned_le16(val
, opt
->val
);
2648 put_unaligned_le32(val
, opt
->val
);
2652 memcpy(opt
->val
, (void *) val
, len
);
2656 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2659 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2661 struct l2cap_conf_efs efs
;
2663 switch (chan
->mode
) {
2664 case L2CAP_MODE_ERTM
:
2665 efs
.id
= chan
->local_id
;
2666 efs
.stype
= chan
->local_stype
;
2667 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2668 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2669 efs
.acc_lat
= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2670 efs
.flush_to
= __constant_cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO
);
2673 case L2CAP_MODE_STREAMING
:
2675 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2676 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2677 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2686 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
2687 (unsigned long) &efs
);
2690 static void l2cap_ack_timeout(struct work_struct
*work
)
2692 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2696 BT_DBG("chan %p", chan
);
2698 l2cap_chan_lock(chan
);
2700 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2701 chan
->last_acked_seq
);
2704 l2cap_send_rr_or_rnr(chan
, 0);
2706 l2cap_chan_unlock(chan
);
2707 l2cap_chan_put(chan
);
2710 int l2cap_ertm_init(struct l2cap_chan
*chan
)
2714 chan
->next_tx_seq
= 0;
2715 chan
->expected_tx_seq
= 0;
2716 chan
->expected_ack_seq
= 0;
2717 chan
->unacked_frames
= 0;
2718 chan
->buffer_seq
= 0;
2719 chan
->frames_sent
= 0;
2720 chan
->last_acked_seq
= 0;
2722 chan
->sdu_last_frag
= NULL
;
2725 skb_queue_head_init(&chan
->tx_q
);
2727 if (chan
->mode
!= L2CAP_MODE_ERTM
)
2730 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2731 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2733 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
2734 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
2735 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2737 skb_queue_head_init(&chan
->srej_q
);
2739 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
2743 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
2745 l2cap_seq_list_free(&chan
->srej_list
);
2750 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2753 case L2CAP_MODE_STREAMING
:
2754 case L2CAP_MODE_ERTM
:
2755 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2759 return L2CAP_MODE_BASIC
;
2763 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
2765 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
2768 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
2770 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
2773 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
2775 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
2776 __l2cap_ews_supported(chan
)) {
2777 /* use extended control field */
2778 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2779 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2781 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
2782 L2CAP_DEFAULT_TX_WINDOW
);
2783 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2787 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2789 struct l2cap_conf_req
*req
= data
;
2790 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2791 void *ptr
= req
->data
;
2794 BT_DBG("chan %p", chan
);
2796 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2799 switch (chan
->mode
) {
2800 case L2CAP_MODE_STREAMING
:
2801 case L2CAP_MODE_ERTM
:
2802 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2805 if (__l2cap_efs_supported(chan
))
2806 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2810 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2815 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2816 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2818 switch (chan
->mode
) {
2819 case L2CAP_MODE_BASIC
:
2820 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2821 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2824 rfc
.mode
= L2CAP_MODE_BASIC
;
2826 rfc
.max_transmit
= 0;
2827 rfc
.retrans_timeout
= 0;
2828 rfc
.monitor_timeout
= 0;
2829 rfc
.max_pdu_size
= 0;
2831 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2832 (unsigned long) &rfc
);
2835 case L2CAP_MODE_ERTM
:
2836 rfc
.mode
= L2CAP_MODE_ERTM
;
2837 rfc
.max_transmit
= chan
->max_tx
;
2838 rfc
.retrans_timeout
= 0;
2839 rfc
.monitor_timeout
= 0;
2841 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2842 L2CAP_EXT_HDR_SIZE
-
2845 rfc
.max_pdu_size
= cpu_to_le16(size
);
2847 l2cap_txwin_setup(chan
);
2849 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2850 L2CAP_DEFAULT_TX_WINDOW
);
2852 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2853 (unsigned long) &rfc
);
2855 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2856 l2cap_add_opt_efs(&ptr
, chan
);
2858 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2861 if (chan
->fcs
== L2CAP_FCS_NONE
||
2862 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2863 chan
->fcs
= L2CAP_FCS_NONE
;
2864 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2867 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2868 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2872 case L2CAP_MODE_STREAMING
:
2873 l2cap_txwin_setup(chan
);
2874 rfc
.mode
= L2CAP_MODE_STREAMING
;
2876 rfc
.max_transmit
= 0;
2877 rfc
.retrans_timeout
= 0;
2878 rfc
.monitor_timeout
= 0;
2880 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2881 L2CAP_EXT_HDR_SIZE
-
2884 rfc
.max_pdu_size
= cpu_to_le16(size
);
2886 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2887 (unsigned long) &rfc
);
2889 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2890 l2cap_add_opt_efs(&ptr
, chan
);
2892 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2895 if (chan
->fcs
== L2CAP_FCS_NONE
||
2896 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2897 chan
->fcs
= L2CAP_FCS_NONE
;
2898 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2903 req
->dcid
= cpu_to_le16(chan
->dcid
);
2904 req
->flags
= __constant_cpu_to_le16(0);
/* Parse a peer's accumulated Configuration Request (chan->conf_req) and
 * build the Configuration Response into @data.  Returns the response
 * length, or -ECONNREFUSED when the requested options are unacceptable.
 * NOTE(review): reconstructed from extraction-mangled source; the
 * dropped option-loop bodies ('mtu = val', the remote_efs flag, the
 * enable_hs guard for EWS) were filled in per the implied control flow
 * — verify against upstream.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	/* First pass: decode every option the peer sent. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Unknown non-hint option: echo its type back. */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
				   sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
						le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
						le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
						le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu -
				     L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE -
				     L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);
			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3129 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
3131 struct l2cap_conf_req
*req
= data
;
3132 void *ptr
= req
->data
;
3135 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3136 struct l2cap_conf_efs efs
;
3138 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3140 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3141 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3144 case L2CAP_CONF_MTU
:
3145 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3146 *result
= L2CAP_CONF_UNACCEPT
;
3147 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3150 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3153 case L2CAP_CONF_FLUSH_TO
:
3154 chan
->flush_to
= val
;
3155 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3159 case L2CAP_CONF_RFC
:
3160 if (olen
== sizeof(rfc
))
3161 memcpy(&rfc
, (void *)val
, olen
);
3163 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3164 rfc
.mode
!= chan
->mode
)
3165 return -ECONNREFUSED
;
3169 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3170 sizeof(rfc
), (unsigned long) &rfc
);
3173 case L2CAP_CONF_EWS
:
3174 chan
->tx_win
= min_t(u16
, val
,
3175 L2CAP_DEFAULT_EXT_WINDOW
);
3176 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3180 case L2CAP_CONF_EFS
:
3181 if (olen
== sizeof(efs
))
3182 memcpy(&efs
, (void *)val
, olen
);
3184 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3185 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3186 efs
.stype
!= chan
->local_stype
)
3187 return -ECONNREFUSED
;
3189 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
3190 sizeof(efs
), (unsigned long) &efs
);
3195 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3196 return -ECONNREFUSED
;
3198 chan
->mode
= rfc
.mode
;
3200 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3202 case L2CAP_MODE_ERTM
:
3203 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3204 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3205 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3207 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3208 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3209 chan
->local_sdu_itime
=
3210 le32_to_cpu(efs
.sdu_itime
);
3211 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3212 chan
->local_flush_to
=
3213 le32_to_cpu(efs
.flush_to
);
3217 case L2CAP_MODE_STREAMING
:
3218 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3222 req
->dcid
= cpu_to_le16(chan
->dcid
);
3223 req
->flags
= __constant_cpu_to_le16(0);
3228 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
3230 struct l2cap_conf_rsp
*rsp
= data
;
3231 void *ptr
= rsp
->data
;
3233 BT_DBG("chan %p", chan
);
3235 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3236 rsp
->result
= cpu_to_le16(result
);
3237 rsp
->flags
= cpu_to_le16(flags
);
3242 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3244 struct l2cap_conn_rsp rsp
;
3245 struct l2cap_conn
*conn
= chan
->conn
;
3248 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3249 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3250 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3251 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3252 l2cap_send_cmd(conn
, chan
->ident
,
3253 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3255 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3258 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3259 l2cap_build_conf_req(chan
, buf
), buf
);
3260 chan
->num_conf_req
++;
3263 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3267 struct l2cap_conf_rfc rfc
;
3269 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3271 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3274 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3275 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3278 case L2CAP_CONF_RFC
:
3279 if (olen
== sizeof(rfc
))
3280 memcpy(&rfc
, (void *)val
, olen
);
3285 /* Use sane default values in case a misbehaving remote device
3286 * did not send an RFC option.
3288 rfc
.mode
= chan
->mode
;
3289 rfc
.retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3290 rfc
.monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3291 rfc
.max_pdu_size
= cpu_to_le16(chan
->imtu
);
3293 BT_ERR("Expected RFC option was not found, using defaults");
3297 case L2CAP_MODE_ERTM
:
3298 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3299 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3300 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3302 case L2CAP_MODE_STREAMING
:
3303 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3307 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3309 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3311 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3314 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3315 cmd
->ident
== conn
->info_ident
) {
3316 cancel_delayed_work(&conn
->info_timer
);
3318 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3319 conn
->info_ident
= 0;
3321 l2cap_conn_start(conn
);
3327 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3329 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3330 struct l2cap_conn_rsp rsp
;
3331 struct l2cap_chan
*chan
= NULL
, *pchan
;
3332 struct sock
*parent
, *sk
= NULL
;
3333 int result
, status
= L2CAP_CS_NO_INFO
;
3335 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3336 __le16 psm
= req
->psm
;
3338 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3340 /* Check if we have socket listening on psm */
3341 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
3343 result
= L2CAP_CR_BAD_PSM
;
3349 mutex_lock(&conn
->chan_lock
);
3352 /* Check if the ACL is secure enough (if not SDP) */
3353 if (psm
!= __constant_cpu_to_le16(L2CAP_PSM_SDP
) &&
3354 !hci_conn_check_link_mode(conn
->hcon
)) {
3355 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3356 result
= L2CAP_CR_SEC_BLOCK
;
3360 result
= L2CAP_CR_NO_MEM
;
3362 /* Check if we already have channel with that dcid */
3363 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3366 chan
= pchan
->ops
->new_connection(pchan
);
3372 hci_conn_hold(conn
->hcon
);
3374 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3375 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3379 bt_accept_enqueue(parent
, sk
);
3381 __l2cap_chan_add(conn
, chan
);
3385 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3387 chan
->ident
= cmd
->ident
;
3389 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3390 if (l2cap_chan_check_security(chan
)) {
3391 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
)) {
3392 __l2cap_state_change(chan
, BT_CONNECT2
);
3393 result
= L2CAP_CR_PEND
;
3394 status
= L2CAP_CS_AUTHOR_PEND
;
3395 parent
->sk_data_ready(parent
, 0);
3397 __l2cap_state_change(chan
, BT_CONFIG
);
3398 result
= L2CAP_CR_SUCCESS
;
3399 status
= L2CAP_CS_NO_INFO
;
3402 __l2cap_state_change(chan
, BT_CONNECT2
);
3403 result
= L2CAP_CR_PEND
;
3404 status
= L2CAP_CS_AUTHEN_PEND
;
3407 __l2cap_state_change(chan
, BT_CONNECT2
);
3408 result
= L2CAP_CR_PEND
;
3409 status
= L2CAP_CS_NO_INFO
;
3413 release_sock(parent
);
3414 mutex_unlock(&conn
->chan_lock
);
3417 rsp
.scid
= cpu_to_le16(scid
);
3418 rsp
.dcid
= cpu_to_le16(dcid
);
3419 rsp
.result
= cpu_to_le16(result
);
3420 rsp
.status
= cpu_to_le16(status
);
3421 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3423 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3424 struct l2cap_info_req info
;
3425 info
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3427 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3428 conn
->info_ident
= l2cap_get_ident(conn
);
3430 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3432 l2cap_send_cmd(conn
, conn
->info_ident
,
3433 L2CAP_INFO_REQ
, sizeof(info
), &info
);
3436 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3437 result
== L2CAP_CR_SUCCESS
) {
3439 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3440 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3441 l2cap_build_conf_req(chan
, buf
), buf
);
3442 chan
->num_conf_req
++;
3448 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3450 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3451 u16 scid
, dcid
, result
, status
;
3452 struct l2cap_chan
*chan
;
3456 scid
= __le16_to_cpu(rsp
->scid
);
3457 dcid
= __le16_to_cpu(rsp
->dcid
);
3458 result
= __le16_to_cpu(rsp
->result
);
3459 status
= __le16_to_cpu(rsp
->status
);
3461 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3462 dcid
, scid
, result
, status
);
3464 mutex_lock(&conn
->chan_lock
);
3467 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3473 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3482 l2cap_chan_lock(chan
);
3485 case L2CAP_CR_SUCCESS
:
3486 l2cap_state_change(chan
, BT_CONFIG
);
3489 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3491 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3494 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3495 l2cap_build_conf_req(chan
, req
), req
);
3496 chan
->num_conf_req
++;
3500 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3504 l2cap_chan_del(chan
, ECONNREFUSED
);
3508 l2cap_chan_unlock(chan
);
3511 mutex_unlock(&conn
->chan_lock
);
3516 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3518 /* FCS is enabled only in ERTM or streaming mode, if one or both
3521 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3522 chan
->fcs
= L2CAP_FCS_NONE
;
3523 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3524 chan
->fcs
= L2CAP_FCS_CRC16
;
3527 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3529 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3532 struct l2cap_chan
*chan
;
3535 dcid
= __le16_to_cpu(req
->dcid
);
3536 flags
= __le16_to_cpu(req
->flags
);
3538 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3540 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3544 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3545 struct l2cap_cmd_rej_cid rej
;
3547 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3548 rej
.scid
= cpu_to_le16(chan
->scid
);
3549 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3551 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3556 /* Reject if config buffer is too small. */
3557 len
= cmd_len
- sizeof(*req
);
3558 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3559 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3560 l2cap_build_conf_rsp(chan
, rsp
,
3561 L2CAP_CONF_REJECT
, flags
), rsp
);
3566 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3567 chan
->conf_len
+= len
;
3569 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
3570 /* Incomplete config. Send empty response. */
3571 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3572 l2cap_build_conf_rsp(chan
, rsp
,
3573 L2CAP_CONF_SUCCESS
, flags
), rsp
);
3577 /* Complete config. */
3578 len
= l2cap_parse_conf_req(chan
, rsp
);
3580 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3584 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3585 chan
->num_conf_rsp
++;
3587 /* Reset config buffer. */
3590 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3593 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3594 set_default_fcs(chan
);
3596 if (chan
->mode
== L2CAP_MODE_ERTM
||
3597 chan
->mode
== L2CAP_MODE_STREAMING
)
3598 err
= l2cap_ertm_init(chan
);
3601 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3603 l2cap_chan_ready(chan
);
3608 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3610 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3611 l2cap_build_conf_req(chan
, buf
), buf
);
3612 chan
->num_conf_req
++;
3615 /* Got Conf Rsp PENDING from remote side and asume we sent
3616 Conf Rsp PENDING in the code above */
3617 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3618 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3620 /* check compatibility */
3622 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3623 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3625 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3626 l2cap_build_conf_rsp(chan
, rsp
,
3627 L2CAP_CONF_SUCCESS
, flags
), rsp
);
3631 l2cap_chan_unlock(chan
);
3635 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3637 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3638 u16 scid
, flags
, result
;
3639 struct l2cap_chan
*chan
;
3640 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3643 scid
= __le16_to_cpu(rsp
->scid
);
3644 flags
= __le16_to_cpu(rsp
->flags
);
3645 result
= __le16_to_cpu(rsp
->result
);
3647 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3650 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3655 case L2CAP_CONF_SUCCESS
:
3656 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3657 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3660 case L2CAP_CONF_PENDING
:
3661 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3663 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3666 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3669 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3673 /* check compatibility */
3675 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3676 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3678 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3679 l2cap_build_conf_rsp(chan
, buf
,
3680 L2CAP_CONF_SUCCESS
, 0x0000), buf
);
3684 case L2CAP_CONF_UNACCEPT
:
3685 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3688 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3689 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3693 /* throw out any old stored conf requests */
3694 result
= L2CAP_CONF_SUCCESS
;
3695 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3698 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3702 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3703 L2CAP_CONF_REQ
, len
, req
);
3704 chan
->num_conf_req
++;
3705 if (result
!= L2CAP_CONF_SUCCESS
)
3711 l2cap_chan_set_err(chan
, ECONNRESET
);
3713 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
3714 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3718 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
3721 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3723 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3724 set_default_fcs(chan
);
3726 if (chan
->mode
== L2CAP_MODE_ERTM
||
3727 chan
->mode
== L2CAP_MODE_STREAMING
)
3728 err
= l2cap_ertm_init(chan
);
3731 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3733 l2cap_chan_ready(chan
);
3737 l2cap_chan_unlock(chan
);
3741 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3743 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3744 struct l2cap_disconn_rsp rsp
;
3746 struct l2cap_chan
*chan
;
3749 scid
= __le16_to_cpu(req
->scid
);
3750 dcid
= __le16_to_cpu(req
->dcid
);
3752 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3754 mutex_lock(&conn
->chan_lock
);
3756 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
3758 mutex_unlock(&conn
->chan_lock
);
3762 l2cap_chan_lock(chan
);
3766 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3767 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3768 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3771 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3774 l2cap_chan_hold(chan
);
3775 l2cap_chan_del(chan
, ECONNRESET
);
3777 l2cap_chan_unlock(chan
);
3779 chan
->ops
->close(chan
);
3780 l2cap_chan_put(chan
);
3782 mutex_unlock(&conn
->chan_lock
);
3787 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3789 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3791 struct l2cap_chan
*chan
;
3793 scid
= __le16_to_cpu(rsp
->scid
);
3794 dcid
= __le16_to_cpu(rsp
->dcid
);
3796 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3798 mutex_lock(&conn
->chan_lock
);
3800 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3802 mutex_unlock(&conn
->chan_lock
);
3806 l2cap_chan_lock(chan
);
3808 l2cap_chan_hold(chan
);
3809 l2cap_chan_del(chan
, 0);
3811 l2cap_chan_unlock(chan
);
3813 chan
->ops
->close(chan
);
3814 l2cap_chan_put(chan
);
3816 mutex_unlock(&conn
->chan_lock
);
3821 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3823 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3826 type
= __le16_to_cpu(req
->type
);
3828 BT_DBG("type 0x%4.4x", type
);
3830 if (type
== L2CAP_IT_FEAT_MASK
) {
3832 u32 feat_mask
= l2cap_feat_mask
;
3833 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3834 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3835 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
3837 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3840 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3841 | L2CAP_FEAT_EXT_WINDOW
;
3843 put_unaligned_le32(feat_mask
, rsp
->data
);
3844 l2cap_send_cmd(conn
, cmd
->ident
,
3845 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3846 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3848 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3851 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3853 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3855 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3856 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
3857 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3858 l2cap_send_cmd(conn
, cmd
->ident
,
3859 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3861 struct l2cap_info_rsp rsp
;
3862 rsp
.type
= cpu_to_le16(type
);
3863 rsp
.result
= __constant_cpu_to_le16(L2CAP_IR_NOTSUPP
);
3864 l2cap_send_cmd(conn
, cmd
->ident
,
3865 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3871 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3873 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3876 type
= __le16_to_cpu(rsp
->type
);
3877 result
= __le16_to_cpu(rsp
->result
);
3879 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3881 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3882 if (cmd
->ident
!= conn
->info_ident
||
3883 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3886 cancel_delayed_work(&conn
->info_timer
);
3888 if (result
!= L2CAP_IR_SUCCESS
) {
3889 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3890 conn
->info_ident
= 0;
3892 l2cap_conn_start(conn
);
3898 case L2CAP_IT_FEAT_MASK
:
3899 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3901 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3902 struct l2cap_info_req req
;
3903 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3905 conn
->info_ident
= l2cap_get_ident(conn
);
3907 l2cap_send_cmd(conn
, conn
->info_ident
,
3908 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3910 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3911 conn
->info_ident
= 0;
3913 l2cap_conn_start(conn
);
3917 case L2CAP_IT_FIXED_CHAN
:
3918 conn
->fixed_chan_mask
= rsp
->data
[0];
3919 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3920 conn
->info_ident
= 0;
3922 l2cap_conn_start(conn
);
3929 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
3930 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3933 struct l2cap_create_chan_req
*req
= data
;
3934 struct l2cap_create_chan_rsp rsp
;
3937 if (cmd_len
!= sizeof(*req
))
3943 psm
= le16_to_cpu(req
->psm
);
3944 scid
= le16_to_cpu(req
->scid
);
3946 BT_DBG("psm %d, scid %d, amp_id %d", psm
, scid
, req
->amp_id
);
3948 /* Placeholder: Always reject */
3950 rsp
.scid
= cpu_to_le16(scid
);
3951 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
3952 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3954 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
/* Handle an AMP Create Channel Response — identical processing to a
 * plain Connection Response, so delegate to it.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					   struct l2cap_cmd_hdr *cmd,
					   void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3968 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
3969 u16 icid
, u16 result
)
3971 struct l2cap_move_chan_rsp rsp
;
3973 BT_DBG("icid %d, result %d", icid
, result
);
3975 rsp
.icid
= cpu_to_le16(icid
);
3976 rsp
.result
= cpu_to_le16(result
);
3978 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
3981 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
3982 struct l2cap_chan
*chan
, u16 icid
, u16 result
)
3984 struct l2cap_move_chan_cfm cfm
;
3987 BT_DBG("icid %d, result %d", icid
, result
);
3989 ident
= l2cap_get_ident(conn
);
3991 chan
->ident
= ident
;
3993 cfm
.icid
= cpu_to_le16(icid
);
3994 cfm
.result
= cpu_to_le16(result
);
3996 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
3999 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4002 struct l2cap_move_chan_cfm_rsp rsp
;
4004 BT_DBG("icid %d", icid
);
4006 rsp
.icid
= cpu_to_le16(icid
);
4007 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4010 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4011 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4013 struct l2cap_move_chan_req
*req
= data
;
4015 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4017 if (cmd_len
!= sizeof(*req
))
4020 icid
= le16_to_cpu(req
->icid
);
4022 BT_DBG("icid %d, dest_amp_id %d", icid
, req
->dest_amp_id
);
4027 /* Placeholder: Always refuse */
4028 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
4033 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
4034 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4036 struct l2cap_move_chan_rsp
*rsp
= data
;
4039 if (cmd_len
!= sizeof(*rsp
))
4042 icid
= le16_to_cpu(rsp
->icid
);
4043 result
= le16_to_cpu(rsp
->result
);
4045 BT_DBG("icid %d, result %d", icid
, result
);
4047 /* Placeholder: Always unconfirmed */
4048 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
4053 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
4054 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4056 struct l2cap_move_chan_cfm
*cfm
= data
;
4059 if (cmd_len
!= sizeof(*cfm
))
4062 icid
= le16_to_cpu(cfm
->icid
);
4063 result
= le16_to_cpu(cfm
->result
);
4065 BT_DBG("icid %d, result %d", icid
, result
);
4067 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
4072 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
4073 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
4075 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
4078 if (cmd_len
!= sizeof(*rsp
))
4081 icid
= le16_to_cpu(rsp
->icid
);
4083 BT_DBG("icid %d", icid
);
4088 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
4093 if (min
> max
|| min
< 6 || max
> 3200)
4096 if (to_multiplier
< 10 || to_multiplier
> 3200)
4099 if (max
>= to_multiplier
* 8)
4102 max_latency
= (to_multiplier
* 8 / max
) - 1;
4103 if (latency
> 499 || latency
> max_latency
)
4109 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
4110 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4112 struct hci_conn
*hcon
= conn
->hcon
;
4113 struct l2cap_conn_param_update_req
*req
;
4114 struct l2cap_conn_param_update_rsp rsp
;
4115 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
4118 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
4121 cmd_len
= __le16_to_cpu(cmd
->len
);
4122 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
4125 req
= (struct l2cap_conn_param_update_req
*) data
;
4126 min
= __le16_to_cpu(req
->min
);
4127 max
= __le16_to_cpu(req
->max
);
4128 latency
= __le16_to_cpu(req
->latency
);
4129 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
4131 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4132 min
, max
, latency
, to_multiplier
);
4134 memset(&rsp
, 0, sizeof(rsp
));
4136 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
4138 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
4140 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
4142 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
4146 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
4151 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
4152 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
4156 switch (cmd
->code
) {
4157 case L2CAP_COMMAND_REJ
:
4158 l2cap_command_rej(conn
, cmd
, data
);
4161 case L2CAP_CONN_REQ
:
4162 err
= l2cap_connect_req(conn
, cmd
, data
);
4165 case L2CAP_CONN_RSP
:
4166 err
= l2cap_connect_rsp(conn
, cmd
, data
);
4169 case L2CAP_CONF_REQ
:
4170 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
4173 case L2CAP_CONF_RSP
:
4174 err
= l2cap_config_rsp(conn
, cmd
, data
);
4177 case L2CAP_DISCONN_REQ
:
4178 err
= l2cap_disconnect_req(conn
, cmd
, data
);
4181 case L2CAP_DISCONN_RSP
:
4182 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
4185 case L2CAP_ECHO_REQ
:
4186 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
4189 case L2CAP_ECHO_RSP
:
4192 case L2CAP_INFO_REQ
:
4193 err
= l2cap_information_req(conn
, cmd
, data
);
4196 case L2CAP_INFO_RSP
:
4197 err
= l2cap_information_rsp(conn
, cmd
, data
);
4200 case L2CAP_CREATE_CHAN_REQ
:
4201 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
4204 case L2CAP_CREATE_CHAN_RSP
:
4205 err
= l2cap_create_channel_rsp(conn
, cmd
, data
);
4208 case L2CAP_MOVE_CHAN_REQ
:
4209 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
4212 case L2CAP_MOVE_CHAN_RSP
:
4213 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
4216 case L2CAP_MOVE_CHAN_CFM
:
4217 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
4220 case L2CAP_MOVE_CHAN_CFM_RSP
:
4221 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
4225 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
4233 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
4234 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4236 switch (cmd
->code
) {
4237 case L2CAP_COMMAND_REJ
:
4240 case L2CAP_CONN_PARAM_UPDATE_REQ
:
4241 return l2cap_conn_param_update_req(conn
, cmd
, data
);
4243 case L2CAP_CONN_PARAM_UPDATE_RSP
:
4247 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
4252 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
4253 struct sk_buff
*skb
)
4255 u8
*data
= skb
->data
;
4257 struct l2cap_cmd_hdr cmd
;
4260 l2cap_raw_recv(conn
, skb
);
4262 while (len
>= L2CAP_CMD_HDR_SIZE
) {
4264 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
4265 data
+= L2CAP_CMD_HDR_SIZE
;
4266 len
-= L2CAP_CMD_HDR_SIZE
;
4268 cmd_len
= le16_to_cpu(cmd
.len
);
4270 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
4272 if (cmd_len
> len
|| !cmd
.ident
) {
4273 BT_DBG("corrupted command");
4277 if (conn
->hcon
->type
== LE_LINK
)
4278 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
4280 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
4283 struct l2cap_cmd_rej_unk rej
;
4285 BT_ERR("Wrong link type (%d)", err
);
4287 /* FIXME: Map err to a valid reason */
4288 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
4289 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4299 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4301 u16 our_fcs
, rcv_fcs
;
4304 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
4305 hdr_size
= L2CAP_EXT_HDR_SIZE
;
4307 hdr_size
= L2CAP_ENH_HDR_SIZE
;
4309 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
4310 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
4311 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
4312 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
4314 if (our_fcs
!= rcv_fcs
)
4320 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
4322 struct l2cap_ctrl control
;
4324 BT_DBG("chan %p", chan
);
4326 memset(&control
, 0, sizeof(control
));
4329 control
.reqseq
= chan
->buffer_seq
;
4330 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4332 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4333 control
.super
= L2CAP_SUPER_RNR
;
4334 l2cap_send_sframe(chan
, &control
);
4337 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4338 chan
->unacked_frames
> 0)
4339 __set_retrans_timer(chan
);
4341 /* Send pending iframes */
4342 l2cap_ertm_send(chan
);
4344 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
4345 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
4346 /* F-bit wasn't sent in an s-frame or i-frame yet, so
4349 control
.super
= L2CAP_SUPER_RR
;
4350 l2cap_send_sframe(chan
, &control
);
4354 static void append_skb_frag(struct sk_buff
*skb
,
4355 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
4357 /* skb->len reflects data in skb as well as all fragments
4358 * skb->data_len reflects only data in fragments
4360 if (!skb_has_frag_list(skb
))
4361 skb_shinfo(skb
)->frag_list
= new_frag
;
4363 new_frag
->next
= NULL
;
4365 (*last_frag
)->next
= new_frag
;
4366 *last_frag
= new_frag
;
4368 skb
->len
+= new_frag
->len
;
4369 skb
->data_len
+= new_frag
->len
;
4370 skb
->truesize
+= new_frag
->truesize
;
4373 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
4374 struct l2cap_ctrl
*control
)
4378 switch (control
->sar
) {
4379 case L2CAP_SAR_UNSEGMENTED
:
4383 err
= chan
->ops
->recv(chan
, skb
);
4386 case L2CAP_SAR_START
:
4390 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
4391 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
4393 if (chan
->sdu_len
> chan
->imtu
) {
4398 if (skb
->len
>= chan
->sdu_len
)
4402 chan
->sdu_last_frag
= skb
;
4408 case L2CAP_SAR_CONTINUE
:
4412 append_skb_frag(chan
->sdu
, skb
,
4413 &chan
->sdu_last_frag
);
4416 if (chan
->sdu
->len
>= chan
->sdu_len
)
4426 append_skb_frag(chan
->sdu
, skb
,
4427 &chan
->sdu_last_frag
);
4430 if (chan
->sdu
->len
!= chan
->sdu_len
)
4433 err
= chan
->ops
->recv(chan
, chan
->sdu
);
4436 /* Reassembly complete */
4438 chan
->sdu_last_frag
= NULL
;
4446 kfree_skb(chan
->sdu
);
4448 chan
->sdu_last_frag
= NULL
;
4455 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
4459 if (chan
->mode
!= L2CAP_MODE_ERTM
)
4462 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
4463 l2cap_tx(chan
, NULL
, NULL
, event
);
4466 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
4469 /* Pass sequential frames to l2cap_reassemble_sdu()
4470 * until a gap is encountered.
4473 BT_DBG("chan %p", chan
);
4475 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4476 struct sk_buff
*skb
;
4477 BT_DBG("Searching for skb with txseq %d (queue len %d)",
4478 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
4480 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
4485 skb_unlink(skb
, &chan
->srej_q
);
4486 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
4487 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
4492 if (skb_queue_empty(&chan
->srej_q
)) {
4493 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
4494 l2cap_send_ack(chan
);
4500 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
4501 struct l2cap_ctrl
*control
)
4503 struct sk_buff
*skb
;
4505 BT_DBG("chan %p, control %p", chan
, control
);
4507 if (control
->reqseq
== chan
->next_tx_seq
) {
4508 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4509 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4513 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4516 BT_DBG("Seq %d not available for retransmission",
4521 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4522 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4523 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4527 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4529 if (control
->poll
) {
4530 l2cap_pass_to_tx(chan
, control
);
4532 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4533 l2cap_retransmit(chan
, control
);
4534 l2cap_ertm_send(chan
);
4536 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4537 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4538 chan
->srej_save_reqseq
= control
->reqseq
;
4541 l2cap_pass_to_tx_fbit(chan
, control
);
4543 if (control
->final
) {
4544 if (chan
->srej_save_reqseq
!= control
->reqseq
||
4545 !test_and_clear_bit(CONN_SREJ_ACT
,
4547 l2cap_retransmit(chan
, control
);
4549 l2cap_retransmit(chan
, control
);
4550 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
4551 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4552 chan
->srej_save_reqseq
= control
->reqseq
;
4558 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
4559 struct l2cap_ctrl
*control
)
4561 struct sk_buff
*skb
;
4563 BT_DBG("chan %p, control %p", chan
, control
);
4565 if (control
->reqseq
== chan
->next_tx_seq
) {
4566 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
4567 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4571 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
4573 if (chan
->max_tx
&& skb
&&
4574 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
4575 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
4576 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4580 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4582 l2cap_pass_to_tx(chan
, control
);
4584 if (control
->final
) {
4585 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4586 l2cap_retransmit_all(chan
, control
);
4588 l2cap_retransmit_all(chan
, control
);
4589 l2cap_ertm_send(chan
);
4590 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
4591 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4595 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
4597 BT_DBG("chan %p, txseq %d", chan
, txseq
);
4599 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
4600 chan
->expected_tx_seq
);
4602 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
4603 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4605 /* See notes below regarding "double poll" and
4608 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4609 BT_DBG("Invalid/Ignore - after SREJ");
4610 return L2CAP_TXSEQ_INVALID_IGNORE
;
4612 BT_DBG("Invalid - in window after SREJ sent");
4613 return L2CAP_TXSEQ_INVALID
;
4617 if (chan
->srej_list
.head
== txseq
) {
4618 BT_DBG("Expected SREJ");
4619 return L2CAP_TXSEQ_EXPECTED_SREJ
;
4622 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
4623 BT_DBG("Duplicate SREJ - txseq already stored");
4624 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
4627 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
4628 BT_DBG("Unexpected SREJ - not requested");
4629 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
4633 if (chan
->expected_tx_seq
== txseq
) {
4634 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
4636 BT_DBG("Invalid - txseq outside tx window");
4637 return L2CAP_TXSEQ_INVALID
;
4640 return L2CAP_TXSEQ_EXPECTED
;
4644 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
4645 __seq_offset(chan
, chan
->expected_tx_seq
,
4646 chan
->last_acked_seq
)){
4647 BT_DBG("Duplicate - expected_tx_seq later than txseq");
4648 return L2CAP_TXSEQ_DUPLICATE
;
4651 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
4652 /* A source of invalid packets is a "double poll" condition,
4653 * where delays cause us to send multiple poll packets. If
4654 * the remote stack receives and processes both polls,
4655 * sequence numbers can wrap around in such a way that a
4656 * resent frame has a sequence number that looks like new data
4657 * with a sequence gap. This would trigger an erroneous SREJ
4660 * Fortunately, this is impossible with a tx window that's
4661 * less than half of the maximum sequence number, which allows
4662 * invalid frames to be safely ignored.
4664 * With tx window sizes greater than half of the tx window
4665 * maximum, the frame is invalid and cannot be ignored. This
4666 * causes a disconnect.
4669 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
4670 BT_DBG("Invalid/Ignore - txseq outside tx window");
4671 return L2CAP_TXSEQ_INVALID_IGNORE
;
4673 BT_DBG("Invalid - txseq outside tx window");
4674 return L2CAP_TXSEQ_INVALID
;
4677 BT_DBG("Unexpected - txseq indicates missing frames");
4678 return L2CAP_TXSEQ_UNEXPECTED
;
4682 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
4683 struct l2cap_ctrl
*control
,
4684 struct sk_buff
*skb
, u8 event
)
4687 bool skb_in_use
= 0;
4689 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4693 case L2CAP_EV_RECV_IFRAME
:
4694 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
4695 case L2CAP_TXSEQ_EXPECTED
:
4696 l2cap_pass_to_tx(chan
, control
);
4698 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4699 BT_DBG("Busy, discarding expected seq %d",
4704 chan
->expected_tx_seq
= __next_seq(chan
,
4707 chan
->buffer_seq
= chan
->expected_tx_seq
;
4710 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
4714 if (control
->final
) {
4715 if (!test_and_clear_bit(CONN_REJ_ACT
,
4716 &chan
->conn_state
)) {
4718 l2cap_retransmit_all(chan
, control
);
4719 l2cap_ertm_send(chan
);
4723 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
4724 l2cap_send_ack(chan
);
4726 case L2CAP_TXSEQ_UNEXPECTED
:
4727 l2cap_pass_to_tx(chan
, control
);
4729 /* Can't issue SREJ frames in the local busy state.
4730 * Drop this frame, it will be seen as missing
4731 * when local busy is exited.
4733 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4734 BT_DBG("Busy, discarding unexpected seq %d",
4739 /* There was a gap in the sequence, so an SREJ
4740 * must be sent for each missing frame. The
4741 * current frame is stored for later use.
4743 skb_queue_tail(&chan
->srej_q
, skb
);
4745 BT_DBG("Queued %p (queue len %d)", skb
,
4746 skb_queue_len(&chan
->srej_q
));
4748 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4749 l2cap_seq_list_clear(&chan
->srej_list
);
4750 l2cap_send_srej(chan
, control
->txseq
);
4752 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
4754 case L2CAP_TXSEQ_DUPLICATE
:
4755 l2cap_pass_to_tx(chan
, control
);
4757 case L2CAP_TXSEQ_INVALID_IGNORE
:
4759 case L2CAP_TXSEQ_INVALID
:
4761 l2cap_send_disconn_req(chan
->conn
, chan
,
4766 case L2CAP_EV_RECV_RR
:
4767 l2cap_pass_to_tx(chan
, control
);
4768 if (control
->final
) {
4769 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4771 if (!test_and_clear_bit(CONN_REJ_ACT
,
4772 &chan
->conn_state
)) {
4774 l2cap_retransmit_all(chan
, control
);
4777 l2cap_ertm_send(chan
);
4778 } else if (control
->poll
) {
4779 l2cap_send_i_or_rr_or_rnr(chan
);
4781 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4782 &chan
->conn_state
) &&
4783 chan
->unacked_frames
)
4784 __set_retrans_timer(chan
);
4786 l2cap_ertm_send(chan
);
4789 case L2CAP_EV_RECV_RNR
:
4790 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4791 l2cap_pass_to_tx(chan
, control
);
4792 if (control
&& control
->poll
) {
4793 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4794 l2cap_send_rr_or_rnr(chan
, 0);
4796 __clear_retrans_timer(chan
);
4797 l2cap_seq_list_clear(&chan
->retrans_list
);
4799 case L2CAP_EV_RECV_REJ
:
4800 l2cap_handle_rej(chan
, control
);
4802 case L2CAP_EV_RECV_SREJ
:
4803 l2cap_handle_srej(chan
, control
);
4809 if (skb
&& !skb_in_use
) {
4810 BT_DBG("Freeing %p", skb
);
4817 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
4818 struct l2cap_ctrl
*control
,
4819 struct sk_buff
*skb
, u8 event
)
4822 u16 txseq
= control
->txseq
;
4823 bool skb_in_use
= 0;
4825 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
4829 case L2CAP_EV_RECV_IFRAME
:
4830 switch (l2cap_classify_txseq(chan
, txseq
)) {
4831 case L2CAP_TXSEQ_EXPECTED
:
4832 /* Keep frame for reassembly later */
4833 l2cap_pass_to_tx(chan
, control
);
4834 skb_queue_tail(&chan
->srej_q
, skb
);
4836 BT_DBG("Queued %p (queue len %d)", skb
,
4837 skb_queue_len(&chan
->srej_q
));
4839 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
4841 case L2CAP_TXSEQ_EXPECTED_SREJ
:
4842 l2cap_seq_list_pop(&chan
->srej_list
);
4844 l2cap_pass_to_tx(chan
, control
);
4845 skb_queue_tail(&chan
->srej_q
, skb
);
4847 BT_DBG("Queued %p (queue len %d)", skb
,
4848 skb_queue_len(&chan
->srej_q
));
4850 err
= l2cap_rx_queued_iframes(chan
);
4855 case L2CAP_TXSEQ_UNEXPECTED
:
4856 /* Got a frame that can't be reassembled yet.
4857 * Save it for later, and send SREJs to cover
4858 * the missing frames.
4860 skb_queue_tail(&chan
->srej_q
, skb
);
4862 BT_DBG("Queued %p (queue len %d)", skb
,
4863 skb_queue_len(&chan
->srej_q
));
4865 l2cap_pass_to_tx(chan
, control
);
4866 l2cap_send_srej(chan
, control
->txseq
);
4868 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
4869 /* This frame was requested with an SREJ, but
4870 * some expected retransmitted frames are
4871 * missing. Request retransmission of missing
4874 skb_queue_tail(&chan
->srej_q
, skb
);
4876 BT_DBG("Queued %p (queue len %d)", skb
,
4877 skb_queue_len(&chan
->srej_q
));
4879 l2cap_pass_to_tx(chan
, control
);
4880 l2cap_send_srej_list(chan
, control
->txseq
);
4882 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
4883 /* We've already queued this frame. Drop this copy. */
4884 l2cap_pass_to_tx(chan
, control
);
4886 case L2CAP_TXSEQ_DUPLICATE
:
4887 /* Expecting a later sequence number, so this frame
4888 * was already received. Ignore it completely.
4891 case L2CAP_TXSEQ_INVALID_IGNORE
:
4893 case L2CAP_TXSEQ_INVALID
:
4895 l2cap_send_disconn_req(chan
->conn
, chan
,
4900 case L2CAP_EV_RECV_RR
:
4901 l2cap_pass_to_tx(chan
, control
);
4902 if (control
->final
) {
4903 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4905 if (!test_and_clear_bit(CONN_REJ_ACT
,
4906 &chan
->conn_state
)) {
4908 l2cap_retransmit_all(chan
, control
);
4911 l2cap_ertm_send(chan
);
4912 } else if (control
->poll
) {
4913 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4914 &chan
->conn_state
) &&
4915 chan
->unacked_frames
) {
4916 __set_retrans_timer(chan
);
4919 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4920 l2cap_send_srej_tail(chan
);
4922 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
4923 &chan
->conn_state
) &&
4924 chan
->unacked_frames
)
4925 __set_retrans_timer(chan
);
4927 l2cap_send_ack(chan
);
4930 case L2CAP_EV_RECV_RNR
:
4931 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4932 l2cap_pass_to_tx(chan
, control
);
4933 if (control
->poll
) {
4934 l2cap_send_srej_tail(chan
);
4936 struct l2cap_ctrl rr_control
;
4937 memset(&rr_control
, 0, sizeof(rr_control
));
4938 rr_control
.sframe
= 1;
4939 rr_control
.super
= L2CAP_SUPER_RR
;
4940 rr_control
.reqseq
= chan
->buffer_seq
;
4941 l2cap_send_sframe(chan
, &rr_control
);
4945 case L2CAP_EV_RECV_REJ
:
4946 l2cap_handle_rej(chan
, control
);
4948 case L2CAP_EV_RECV_SREJ
:
4949 l2cap_handle_srej(chan
, control
);
4953 if (skb
&& !skb_in_use
) {
4954 BT_DBG("Freeing %p", skb
);
4961 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
4963 /* Make sure reqseq is for a packet that has been sent but not acked */
4966 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
4967 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
4970 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
4971 struct sk_buff
*skb
, u8 event
)
4975 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
4976 control
, skb
, event
, chan
->rx_state
);
4978 if (__valid_reqseq(chan
, control
->reqseq
)) {
4979 switch (chan
->rx_state
) {
4980 case L2CAP_RX_STATE_RECV
:
4981 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
4983 case L2CAP_RX_STATE_SREJ_SENT
:
4984 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
4992 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
4993 control
->reqseq
, chan
->next_tx_seq
,
4994 chan
->expected_ack_seq
);
4995 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5001 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5002 struct sk_buff
*skb
)
5006 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
5009 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
5010 L2CAP_TXSEQ_EXPECTED
) {
5011 l2cap_pass_to_tx(chan
, control
);
5013 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
5014 __next_seq(chan
, chan
->buffer_seq
));
5016 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5018 l2cap_reassemble_sdu(chan
, skb
, control
);
5021 kfree_skb(chan
->sdu
);
5024 chan
->sdu_last_frag
= NULL
;
5028 BT_DBG("Freeing %p", skb
);
5033 chan
->last_acked_seq
= control
->txseq
;
5034 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
5039 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5041 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
5045 __unpack_control(chan
, skb
);
5050 * We can just drop the corrupted I-frame here.
5051 * Receiver will miss it and start proper recovery
5052 * procedures and ask for retransmission.
5054 if (l2cap_check_fcs(chan
, skb
))
5057 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
5058 len
-= L2CAP_SDULEN_SIZE
;
5060 if (chan
->fcs
== L2CAP_FCS_CRC16
)
5061 len
-= L2CAP_FCS_SIZE
;
5063 if (len
> chan
->mps
) {
5064 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5068 if (!control
->sframe
) {
5071 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5072 control
->sar
, control
->reqseq
, control
->final
,
5075 /* Validate F-bit - F=0 always valid, F=1 only
5076 * valid in TX WAIT_F
5078 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
5081 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
5082 event
= L2CAP_EV_RECV_IFRAME
;
5083 err
= l2cap_rx(chan
, control
, skb
, event
);
5085 err
= l2cap_stream_rx(chan
, control
, skb
);
5089 l2cap_send_disconn_req(chan
->conn
, chan
,
5092 const u8 rx_func_to_event
[4] = {
5093 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
5094 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
5097 /* Only I-frames are expected in streaming mode */
5098 if (chan
->mode
== L2CAP_MODE_STREAMING
)
5101 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5102 control
->reqseq
, control
->final
, control
->poll
,
5107 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5111 /* Validate F and P bits */
5112 if (control
->final
&& (control
->poll
||
5113 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
5116 event
= rx_func_to_event
[control
->super
];
5117 if (l2cap_rx(chan
, control
, skb
, event
))
5118 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5128 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
5129 struct sk_buff
*skb
)
5131 struct l2cap_chan
*chan
;
5133 chan
= l2cap_get_chan_by_scid(conn
, cid
);
5135 if (cid
== L2CAP_CID_A2MP
) {
5136 chan
= a2mp_channel_create(conn
, skb
);
5142 l2cap_chan_lock(chan
);
5144 BT_DBG("unknown cid 0x%4.4x", cid
);
5145 /* Drop packet and return */
5151 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5153 if (chan
->state
!= BT_CONNECTED
)
5156 switch (chan
->mode
) {
5157 case L2CAP_MODE_BASIC
:
5158 /* If socket recv buffers overflows we drop data here
5159 * which is *bad* because L2CAP has to be reliable.
5160 * But we don't have any other choice. L2CAP doesn't
5161 * provide flow control mechanism. */
5163 if (chan
->imtu
< skb
->len
)
5166 if (!chan
->ops
->recv(chan
, skb
))
5170 case L2CAP_MODE_ERTM
:
5171 case L2CAP_MODE_STREAMING
:
5172 l2cap_data_rcv(chan
, skb
);
5176 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
5184 l2cap_chan_unlock(chan
);
5187 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
5188 struct sk_buff
*skb
)
5190 struct l2cap_chan
*chan
;
5192 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
5196 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5198 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5201 if (chan
->imtu
< skb
->len
)
5204 if (!chan
->ops
->recv(chan
, skb
))
5211 static void l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
5212 struct sk_buff
*skb
)
5214 struct l2cap_chan
*chan
;
5216 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
5220 BT_DBG("chan %p, len %d", chan
, skb
->len
);
5222 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
5225 if (chan
->imtu
< skb
->len
)
5228 if (!chan
->ops
->recv(chan
, skb
))
5235 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
5237 struct l2cap_hdr
*lh
= (void *) skb
->data
;
5241 skb_pull(skb
, L2CAP_HDR_SIZE
);
5242 cid
= __le16_to_cpu(lh
->cid
);
5243 len
= __le16_to_cpu(lh
->len
);
5245 if (len
!= skb
->len
) {
5250 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
5253 case L2CAP_CID_LE_SIGNALING
:
5254 case L2CAP_CID_SIGNALING
:
5255 l2cap_sig_channel(conn
, skb
);
5258 case L2CAP_CID_CONN_LESS
:
5259 psm
= get_unaligned((__le16
*) skb
->data
);
5260 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
5261 l2cap_conless_channel(conn
, psm
, skb
);
5264 case L2CAP_CID_LE_DATA
:
5265 l2cap_att_channel(conn
, cid
, skb
);
5269 if (smp_sig_channel(conn
, skb
))
5270 l2cap_conn_del(conn
->hcon
, EACCES
);
5274 l2cap_data_channel(conn
, cid
, skb
);
5279 /* ---- L2CAP interface with lower layer (HCI) ---- */
5281 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
5283 int exact
= 0, lm1
= 0, lm2
= 0;
5284 struct l2cap_chan
*c
;
5286 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
5288 /* Find listening sockets and check their link_mode */
5289 read_lock(&chan_list_lock
);
5290 list_for_each_entry(c
, &chan_list
, global_l
) {
5291 struct sock
*sk
= c
->sk
;
5293 if (c
->state
!= BT_LISTEN
)
5296 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
5297 lm1
|= HCI_LM_ACCEPT
;
5298 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5299 lm1
|= HCI_LM_MASTER
;
5301 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
5302 lm2
|= HCI_LM_ACCEPT
;
5303 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
5304 lm2
|= HCI_LM_MASTER
;
5307 read_unlock(&chan_list_lock
);
5309 return exact
? lm1
: lm2
;
5312 int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
5314 struct l2cap_conn
*conn
;
5316 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
5319 conn
= l2cap_conn_add(hcon
, status
);
5321 l2cap_conn_ready(conn
);
5323 l2cap_conn_del(hcon
, bt_to_errno(status
));
5328 int l2cap_disconn_ind(struct hci_conn
*hcon
)
5330 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5332 BT_DBG("hcon %p", hcon
);
5335 return HCI_ERROR_REMOTE_USER_TERM
;
5336 return conn
->disc_reason
;
5339 int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
5341 BT_DBG("hcon %p reason %d", hcon
, reason
);
5343 l2cap_conn_del(hcon
, bt_to_errno(reason
));
5347 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
5349 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
5352 if (encrypt
== 0x00) {
5353 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
5354 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
5355 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
5356 l2cap_chan_close(chan
, ECONNREFUSED
);
5358 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
5359 __clear_chan_timer(chan
);
5363 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
5365 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5366 struct l2cap_chan
*chan
;
5371 BT_DBG("conn %p", conn
);
5373 if (hcon
->type
== LE_LINK
) {
5374 if (!status
&& encrypt
)
5375 smp_distribute_keys(conn
, 0);
5376 cancel_delayed_work(&conn
->security_timer
);
5379 mutex_lock(&conn
->chan_lock
);
5381 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
5382 l2cap_chan_lock(chan
);
5384 BT_DBG("chan->scid %d", chan
->scid
);
5386 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
5387 if (!status
&& encrypt
) {
5388 chan
->sec_level
= hcon
->sec_level
;
5389 l2cap_chan_ready(chan
);
5392 l2cap_chan_unlock(chan
);
5396 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
5397 l2cap_chan_unlock(chan
);
5401 if (!status
&& (chan
->state
== BT_CONNECTED
||
5402 chan
->state
== BT_CONFIG
)) {
5403 struct sock
*sk
= chan
->sk
;
5405 clear_bit(BT_SK_SUSPEND
, &bt_sk(sk
)->flags
);
5406 sk
->sk_state_change(sk
);
5408 l2cap_check_encryption(chan
, encrypt
);
5409 l2cap_chan_unlock(chan
);
5413 if (chan
->state
== BT_CONNECT
) {
5415 l2cap_send_conn_req(chan
);
5417 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5419 } else if (chan
->state
== BT_CONNECT2
) {
5420 struct sock
*sk
= chan
->sk
;
5421 struct l2cap_conn_rsp rsp
;
5427 if (test_bit(BT_SK_DEFER_SETUP
,
5428 &bt_sk(sk
)->flags
)) {
5429 struct sock
*parent
= bt_sk(sk
)->parent
;
5430 res
= L2CAP_CR_PEND
;
5431 stat
= L2CAP_CS_AUTHOR_PEND
;
5433 parent
->sk_data_ready(parent
, 0);
5435 __l2cap_state_change(chan
, BT_CONFIG
);
5436 res
= L2CAP_CR_SUCCESS
;
5437 stat
= L2CAP_CS_NO_INFO
;
5440 __l2cap_state_change(chan
, BT_DISCONN
);
5441 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
5442 res
= L2CAP_CR_SEC_BLOCK
;
5443 stat
= L2CAP_CS_NO_INFO
;
5448 rsp
.scid
= cpu_to_le16(chan
->dcid
);
5449 rsp
.dcid
= cpu_to_le16(chan
->scid
);
5450 rsp
.result
= cpu_to_le16(res
);
5451 rsp
.status
= cpu_to_le16(stat
);
5452 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
5455 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
5456 res
== L2CAP_CR_SUCCESS
) {
5458 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
5459 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
5461 l2cap_build_conf_req(chan
, buf
),
5463 chan
->num_conf_req
++;
5467 l2cap_chan_unlock(chan
);
5470 mutex_unlock(&conn
->chan_lock
);
5475 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
5477 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
5480 conn
= l2cap_conn_add(hcon
, 0);
5485 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
5487 if (!(flags
& ACL_CONT
)) {
5488 struct l2cap_hdr
*hdr
;
5492 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
5493 kfree_skb(conn
->rx_skb
);
5494 conn
->rx_skb
= NULL
;
5496 l2cap_conn_unreliable(conn
, ECOMM
);
5499 /* Start fragment always begin with Basic L2CAP header */
5500 if (skb
->len
< L2CAP_HDR_SIZE
) {
5501 BT_ERR("Frame is too short (len %d)", skb
->len
);
5502 l2cap_conn_unreliable(conn
, ECOMM
);
5506 hdr
= (struct l2cap_hdr
*) skb
->data
;
5507 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
5509 if (len
== skb
->len
) {
5510 /* Complete frame received */
5511 l2cap_recv_frame(conn
, skb
);
5515 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
5517 if (skb
->len
> len
) {
5518 BT_ERR("Frame is too long (len %d, expected len %d)",
5520 l2cap_conn_unreliable(conn
, ECOMM
);
5524 /* Allocate skb for the complete frame (with header) */
5525 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
5529 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5531 conn
->rx_len
= len
- skb
->len
;
5533 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
5535 if (!conn
->rx_len
) {
5536 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
5537 l2cap_conn_unreliable(conn
, ECOMM
);
5541 if (skb
->len
> conn
->rx_len
) {
5542 BT_ERR("Fragment is too long (len %d, expected %d)",
5543 skb
->len
, conn
->rx_len
);
5544 kfree_skb(conn
->rx_skb
);
5545 conn
->rx_skb
= NULL
;
5547 l2cap_conn_unreliable(conn
, ECOMM
);
5551 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5553 conn
->rx_len
-= skb
->len
;
5555 if (!conn
->rx_len
) {
5556 /* Complete frame received */
5557 l2cap_recv_frame(conn
, conn
->rx_skb
);
5558 conn
->rx_skb
= NULL
;
5567 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
5569 struct l2cap_chan
*c
;
5571 read_lock(&chan_list_lock
);
5573 list_for_each_entry(c
, &chan_list
, global_l
) {
5574 struct sock
*sk
= c
->sk
;
5576 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5577 batostr(&bt_sk(sk
)->src
),
5578 batostr(&bt_sk(sk
)->dst
),
5579 c
->state
, __le16_to_cpu(c
->psm
),
5580 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
5581 c
->sec_level
, c
->mode
);
5584 read_unlock(&chan_list_lock
);
5589 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
5591 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
5594 static const struct file_operations l2cap_debugfs_fops
= {
5595 .open
= l2cap_debugfs_open
,
5597 .llseek
= seq_lseek
,
5598 .release
= single_release
,
5601 static struct dentry
*l2cap_debugfs
;
5603 int __init
l2cap_init(void)
5607 err
= l2cap_init_sockets();
5612 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
5613 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
5615 BT_ERR("Failed to create L2CAP debug file");
5621 void l2cap_exit(void)
5623 debugfs_remove(l2cap_debugfs
);
5624 l2cap_cleanup_sockets();
5627 module_param(disable_ertm
, bool, 0644);
5628 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");