2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
39 #include <net/bluetooth/smp.h>
40 #include <net/bluetooth/a2mp.h>
44 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
45 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
47 static LIST_HEAD(chan_list
);
48 static DEFINE_RWLOCK(chan_list_lock
);
50 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
51 u8 code
, u8 ident
, u16 dlen
, void *data
);
52 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
54 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
55 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
56 struct l2cap_chan
*chan
, int err
);
58 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
59 struct sk_buff_head
*skbs
, u8 event
);
61 /* ---- L2CAP channels ---- */
63 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
68 list_for_each_entry(c
, &conn
->chan_l
, list
) {
75 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
80 list_for_each_entry(c
, &conn
->chan_l
, list
) {
87 /* Find channel with given SCID.
88 * Returns locked channel. */
89 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
94 mutex_lock(&conn
->chan_lock
);
95 c
= __l2cap_get_chan_by_scid(conn
, cid
);
98 mutex_unlock(&conn
->chan_lock
);
103 /* Find channel with given DCID.
104 * Returns locked channel.
106 static struct l2cap_chan
*l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
109 struct l2cap_chan
*c
;
111 mutex_lock(&conn
->chan_lock
);
112 c
= __l2cap_get_chan_by_dcid(conn
, cid
);
115 mutex_unlock(&conn
->chan_lock
);
120 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
123 struct l2cap_chan
*c
;
125 list_for_each_entry(c
, &conn
->chan_l
, list
) {
126 if (c
->ident
== ident
)
132 static struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
135 struct l2cap_chan
*c
;
137 mutex_lock(&conn
->chan_lock
);
138 c
= __l2cap_get_chan_by_ident(conn
, ident
);
141 mutex_unlock(&conn
->chan_lock
);
146 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
148 struct l2cap_chan
*c
;
150 list_for_each_entry(c
, &chan_list
, global_l
) {
151 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
157 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
161 write_lock(&chan_list_lock
);
163 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
176 for (p
= 0x1001; p
< 0x1100; p
+= 2)
177 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
178 chan
->psm
= cpu_to_le16(p
);
179 chan
->sport
= cpu_to_le16(p
);
186 write_unlock(&chan_list_lock
);
190 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
192 write_lock(&chan_list_lock
);
196 write_unlock(&chan_list_lock
);
201 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
203 u16 cid
= L2CAP_CID_DYN_START
;
205 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
206 if (!__l2cap_get_chan_by_scid(conn
, cid
))
213 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
215 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
216 state_to_string(state
));
219 chan
->ops
->state_change(chan
, state
);
222 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
224 struct sock
*sk
= chan
->sk
;
227 __l2cap_state_change(chan
, state
);
231 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
233 struct sock
*sk
= chan
->sk
;
238 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
240 struct sock
*sk
= chan
->sk
;
243 __l2cap_chan_set_err(chan
, err
);
247 static void __set_retrans_timer(struct l2cap_chan
*chan
)
249 if (!delayed_work_pending(&chan
->monitor_timer
) &&
250 chan
->retrans_timeout
) {
251 l2cap_set_timer(chan
, &chan
->retrans_timer
,
252 msecs_to_jiffies(chan
->retrans_timeout
));
256 static void __set_monitor_timer(struct l2cap_chan
*chan
)
258 __clear_retrans_timer(chan
);
259 if (chan
->monitor_timeout
) {
260 l2cap_set_timer(chan
, &chan
->monitor_timer
,
261 msecs_to_jiffies(chan
->monitor_timeout
));
265 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
270 skb_queue_walk(head
, skb
) {
271 if (bt_cb(skb
)->control
.txseq
== seq
)
278 /* ---- L2CAP sequence number lists ---- */
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
289 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
291 size_t alloc_size
, i
;
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
297 alloc_size
= roundup_pow_of_two(size
);
299 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
303 seq_list
->mask
= alloc_size
- 1;
304 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
305 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
306 for (i
= 0; i
< alloc_size
; i
++)
307 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
314 kfree(seq_list
->list
);
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
320 /* Constant-time check for list membership */
321 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
324 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
326 u16 mask
= seq_list
->mask
;
328 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR
;
331 } else if (seq_list
->head
== seq
) {
332 /* Head can be removed in constant time */
333 seq_list
->head
= seq_list
->list
[seq
& mask
];
334 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
336 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
337 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
338 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
341 /* Walk the list to find the sequence number */
342 u16 prev
= seq_list
->head
;
343 while (seq_list
->list
[prev
& mask
] != seq
) {
344 prev
= seq_list
->list
[prev
& mask
];
345 if (prev
== L2CAP_SEQ_LIST_TAIL
)
346 return L2CAP_SEQ_LIST_CLEAR
;
349 /* Unlink the number from the list and clear it */
350 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
351 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
352 if (seq_list
->tail
== seq
)
353 seq_list
->tail
= prev
;
358 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
364 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
368 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
371 for (i
= 0; i
<= seq_list
->mask
; i
++)
372 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
374 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
375 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
378 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
380 u16 mask
= seq_list
->mask
;
382 /* All appends happen in constant time */
384 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
387 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
388 seq_list
->head
= seq
;
390 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
392 seq_list
->tail
= seq
;
393 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
396 static void l2cap_chan_timeout(struct work_struct
*work
)
398 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
400 struct l2cap_conn
*conn
= chan
->conn
;
403 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
405 mutex_lock(&conn
->chan_lock
);
406 l2cap_chan_lock(chan
);
408 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
409 reason
= ECONNREFUSED
;
410 else if (chan
->state
== BT_CONNECT
&&
411 chan
->sec_level
!= BT_SECURITY_SDP
)
412 reason
= ECONNREFUSED
;
416 l2cap_chan_close(chan
, reason
);
418 l2cap_chan_unlock(chan
);
420 chan
->ops
->close(chan
);
421 mutex_unlock(&conn
->chan_lock
);
423 l2cap_chan_put(chan
);
426 struct l2cap_chan
*l2cap_chan_create(void)
428 struct l2cap_chan
*chan
;
430 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
434 mutex_init(&chan
->lock
);
436 write_lock(&chan_list_lock
);
437 list_add(&chan
->global_l
, &chan_list
);
438 write_unlock(&chan_list_lock
);
440 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
442 chan
->state
= BT_OPEN
;
444 kref_init(&chan
->kref
);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
449 BT_DBG("chan %p", chan
);
454 static void l2cap_chan_destroy(struct kref
*kref
)
456 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
458 BT_DBG("chan %p", chan
);
460 write_lock(&chan_list_lock
);
461 list_del(&chan
->global_l
);
462 write_unlock(&chan_list_lock
);
467 void l2cap_chan_hold(struct l2cap_chan
*c
)
469 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
474 void l2cap_chan_put(struct l2cap_chan
*c
)
476 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
478 kref_put(&c
->kref
, l2cap_chan_destroy
);
481 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
483 chan
->fcs
= L2CAP_FCS_CRC16
;
484 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
485 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
486 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
487 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
488 chan
->sec_level
= BT_SECURITY_LOW
;
490 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
493 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
496 __le16_to_cpu(chan
->psm
), chan
->dcid
);
498 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
502 switch (chan
->chan_type
) {
503 case L2CAP_CHAN_CONN_ORIENTED
:
504 if (conn
->hcon
->type
== LE_LINK
) {
506 chan
->omtu
= L2CAP_DEFAULT_MTU
;
507 chan
->scid
= L2CAP_CID_LE_DATA
;
508 chan
->dcid
= L2CAP_CID_LE_DATA
;
510 /* Alloc CID for connection-oriented socket */
511 chan
->scid
= l2cap_alloc_cid(conn
);
512 chan
->omtu
= L2CAP_DEFAULT_MTU
;
516 case L2CAP_CHAN_CONN_LESS
:
517 /* Connectionless socket */
518 chan
->scid
= L2CAP_CID_CONN_LESS
;
519 chan
->dcid
= L2CAP_CID_CONN_LESS
;
520 chan
->omtu
= L2CAP_DEFAULT_MTU
;
523 case L2CAP_CHAN_CONN_FIX_A2MP
:
524 chan
->scid
= L2CAP_CID_A2MP
;
525 chan
->dcid
= L2CAP_CID_A2MP
;
526 chan
->omtu
= L2CAP_A2MP_DEFAULT_MTU
;
527 chan
->imtu
= L2CAP_A2MP_DEFAULT_MTU
;
531 /* Raw socket can send/recv signalling messages only */
532 chan
->scid
= L2CAP_CID_SIGNALING
;
533 chan
->dcid
= L2CAP_CID_SIGNALING
;
534 chan
->omtu
= L2CAP_DEFAULT_MTU
;
537 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
538 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
539 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
540 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
541 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
542 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
544 l2cap_chan_hold(chan
);
546 list_add(&chan
->list
, &conn
->chan_l
);
549 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
551 mutex_lock(&conn
->chan_lock
);
552 __l2cap_chan_add(conn
, chan
);
553 mutex_unlock(&conn
->chan_lock
);
556 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
558 struct l2cap_conn
*conn
= chan
->conn
;
560 __clear_chan_timer(chan
);
562 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
565 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
566 /* Delete from channel list */
567 list_del(&chan
->list
);
569 l2cap_chan_put(chan
);
573 if (chan
->chan_type
!= L2CAP_CHAN_CONN_FIX_A2MP
)
574 hci_conn_put(conn
->hcon
);
576 if (mgr
&& mgr
->bredr_chan
== chan
)
577 mgr
->bredr_chan
= NULL
;
580 chan
->ops
->teardown(chan
, err
);
582 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
586 case L2CAP_MODE_BASIC
:
589 case L2CAP_MODE_ERTM
:
590 __clear_retrans_timer(chan
);
591 __clear_monitor_timer(chan
);
592 __clear_ack_timer(chan
);
594 skb_queue_purge(&chan
->srej_q
);
596 l2cap_seq_list_free(&chan
->srej_list
);
597 l2cap_seq_list_free(&chan
->retrans_list
);
601 case L2CAP_MODE_STREAMING
:
602 skb_queue_purge(&chan
->tx_q
);
609 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
611 struct l2cap_conn
*conn
= chan
->conn
;
612 struct sock
*sk
= chan
->sk
;
614 BT_DBG("chan %p state %s sk %p", chan
, state_to_string(chan
->state
),
617 switch (chan
->state
) {
619 chan
->ops
->teardown(chan
, 0);
624 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
625 conn
->hcon
->type
== ACL_LINK
) {
626 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
627 l2cap_send_disconn_req(conn
, chan
, reason
);
629 l2cap_chan_del(chan
, reason
);
633 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
634 conn
->hcon
->type
== ACL_LINK
) {
635 struct l2cap_conn_rsp rsp
;
638 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
))
639 result
= L2CAP_CR_SEC_BLOCK
;
641 result
= L2CAP_CR_BAD_PSM
;
642 l2cap_state_change(chan
, BT_DISCONN
);
644 rsp
.scid
= cpu_to_le16(chan
->dcid
);
645 rsp
.dcid
= cpu_to_le16(chan
->scid
);
646 rsp
.result
= cpu_to_le16(result
);
647 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
648 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
652 l2cap_chan_del(chan
, reason
);
657 l2cap_chan_del(chan
, reason
);
661 chan
->ops
->teardown(chan
, 0);
666 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
668 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
669 switch (chan
->sec_level
) {
670 case BT_SECURITY_HIGH
:
671 return HCI_AT_DEDICATED_BONDING_MITM
;
672 case BT_SECURITY_MEDIUM
:
673 return HCI_AT_DEDICATED_BONDING
;
675 return HCI_AT_NO_BONDING
;
677 } else if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_SDP
)) {
678 if (chan
->sec_level
== BT_SECURITY_LOW
)
679 chan
->sec_level
= BT_SECURITY_SDP
;
681 if (chan
->sec_level
== BT_SECURITY_HIGH
)
682 return HCI_AT_NO_BONDING_MITM
;
684 return HCI_AT_NO_BONDING
;
686 switch (chan
->sec_level
) {
687 case BT_SECURITY_HIGH
:
688 return HCI_AT_GENERAL_BONDING_MITM
;
689 case BT_SECURITY_MEDIUM
:
690 return HCI_AT_GENERAL_BONDING
;
692 return HCI_AT_NO_BONDING
;
697 /* Service level security */
698 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
700 struct l2cap_conn
*conn
= chan
->conn
;
703 auth_type
= l2cap_get_auth_type(chan
);
705 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
708 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
712 /* Get next available identificator.
713 * 1 - 128 are used by kernel.
714 * 129 - 199 are reserved.
715 * 200 - 254 are used by utilities like l2ping, etc.
718 spin_lock(&conn
->lock
);
720 if (++conn
->tx_ident
> 128)
725 spin_unlock(&conn
->lock
);
730 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
733 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
736 BT_DBG("code 0x%2.2x", code
);
741 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
742 flags
= ACL_START_NO_FLUSH
;
746 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
747 skb
->priority
= HCI_PRIO_MAX
;
749 hci_send_acl(conn
->hchan
, skb
, flags
);
752 static bool __chan_is_moving(struct l2cap_chan
*chan
)
754 return chan
->move_state
!= L2CAP_MOVE_STABLE
&&
755 chan
->move_state
!= L2CAP_MOVE_WAIT_PREPARE
;
758 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
760 struct hci_conn
*hcon
= chan
->conn
->hcon
;
763 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
766 if (chan
->hs_hcon
&& !__chan_is_moving(chan
)) {
768 hci_send_acl(chan
->hs_hchan
, skb
, ACL_COMPLETE
);
775 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
776 lmp_no_flush_capable(hcon
->hdev
))
777 flags
= ACL_START_NO_FLUSH
;
781 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
782 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
785 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
787 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
788 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
790 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
793 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
794 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
801 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
802 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
809 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
811 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
812 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
814 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
817 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
818 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
825 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
826 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
833 static inline void __unpack_control(struct l2cap_chan
*chan
,
836 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
837 __unpack_extended_control(get_unaligned_le32(skb
->data
),
838 &bt_cb(skb
)->control
);
839 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
841 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
842 &bt_cb(skb
)->control
);
843 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
847 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
851 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
852 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
854 if (control
->sframe
) {
855 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
856 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
857 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
859 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
860 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
866 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
870 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
871 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
873 if (control
->sframe
) {
874 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
875 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
876 packed
|= L2CAP_CTRL_FRAME_TYPE
;
878 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
879 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
885 static inline void __pack_control(struct l2cap_chan
*chan
,
886 struct l2cap_ctrl
*control
,
889 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
890 put_unaligned_le32(__pack_extended_control(control
),
891 skb
->data
+ L2CAP_HDR_SIZE
);
893 put_unaligned_le16(__pack_enhanced_control(control
),
894 skb
->data
+ L2CAP_HDR_SIZE
);
898 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
900 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
901 return L2CAP_EXT_HDR_SIZE
;
903 return L2CAP_ENH_HDR_SIZE
;
906 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
910 struct l2cap_hdr
*lh
;
911 int hlen
= __ertm_hdr_size(chan
);
913 if (chan
->fcs
== L2CAP_FCS_CRC16
)
914 hlen
+= L2CAP_FCS_SIZE
;
916 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
919 return ERR_PTR(-ENOMEM
);
921 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
922 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
923 lh
->cid
= cpu_to_le16(chan
->dcid
);
925 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
926 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
928 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
930 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
931 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
932 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
935 skb
->priority
= HCI_PRIO_MAX
;
939 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
940 struct l2cap_ctrl
*control
)
945 BT_DBG("chan %p, control %p", chan
, control
);
947 if (!control
->sframe
)
950 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
954 if (control
->super
== L2CAP_SUPER_RR
)
955 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
956 else if (control
->super
== L2CAP_SUPER_RNR
)
957 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
959 if (control
->super
!= L2CAP_SUPER_SREJ
) {
960 chan
->last_acked_seq
= control
->reqseq
;
961 __clear_ack_timer(chan
);
964 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
965 control
->final
, control
->poll
, control
->super
);
967 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
968 control_field
= __pack_extended_control(control
);
970 control_field
= __pack_enhanced_control(control
);
972 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
974 l2cap_do_send(chan
, skb
);
977 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
979 struct l2cap_ctrl control
;
981 BT_DBG("chan %p, poll %d", chan
, poll
);
983 memset(&control
, 0, sizeof(control
));
987 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
988 control
.super
= L2CAP_SUPER_RNR
;
990 control
.super
= L2CAP_SUPER_RR
;
992 control
.reqseq
= chan
->buffer_seq
;
993 l2cap_send_sframe(chan
, &control
);
996 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
998 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1001 static bool __amp_capable(struct l2cap_chan
*chan
)
1003 struct l2cap_conn
*conn
= chan
->conn
;
1006 chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
&&
1007 conn
->fixed_chan_mask
& L2CAP_FC_A2MP
)
1013 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
1015 struct l2cap_conn
*conn
= chan
->conn
;
1016 struct l2cap_conn_req req
;
1018 req
.scid
= cpu_to_le16(chan
->scid
);
1019 req
.psm
= chan
->psm
;
1021 chan
->ident
= l2cap_get_ident(conn
);
1023 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1025 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1028 static void l2cap_send_create_chan_req(struct l2cap_chan
*chan
, u8 amp_id
)
1030 struct l2cap_create_chan_req req
;
1031 req
.scid
= cpu_to_le16(chan
->scid
);
1032 req
.psm
= chan
->psm
;
1033 req
.amp_id
= amp_id
;
1035 chan
->ident
= l2cap_get_ident(chan
->conn
);
1037 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_REQ
,
1041 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1043 struct sk_buff
*skb
;
1045 BT_DBG("chan %p", chan
);
1047 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1050 __clear_retrans_timer(chan
);
1051 __clear_monitor_timer(chan
);
1052 __clear_ack_timer(chan
);
1054 chan
->retry_count
= 0;
1055 skb_queue_walk(&chan
->tx_q
, skb
) {
1056 if (bt_cb(skb
)->control
.retries
)
1057 bt_cb(skb
)->control
.retries
= 1;
1062 chan
->expected_tx_seq
= chan
->buffer_seq
;
1064 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1065 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1066 l2cap_seq_list_clear(&chan
->retrans_list
);
1067 l2cap_seq_list_clear(&chan
->srej_list
);
1068 skb_queue_purge(&chan
->srej_q
);
1070 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1071 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1073 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1076 static void l2cap_move_done(struct l2cap_chan
*chan
)
1078 u8 move_role
= chan
->move_role
;
1079 BT_DBG("chan %p", chan
);
1081 chan
->move_state
= L2CAP_MOVE_STABLE
;
1082 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
1084 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1087 switch (move_role
) {
1088 case L2CAP_MOVE_ROLE_INITIATOR
:
1089 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_EXPLICIT_POLL
);
1090 chan
->rx_state
= L2CAP_RX_STATE_WAIT_F
;
1092 case L2CAP_MOVE_ROLE_RESPONDER
:
1093 chan
->rx_state
= L2CAP_RX_STATE_WAIT_P
;
1098 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1100 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1101 chan
->conf_state
= 0;
1102 __clear_chan_timer(chan
);
1104 chan
->state
= BT_CONNECTED
;
1106 chan
->ops
->ready(chan
);
1109 static void l2cap_start_connection(struct l2cap_chan
*chan
)
1111 if (__amp_capable(chan
)) {
1112 BT_DBG("chan %p AMP capable: discover AMPs", chan
);
1113 a2mp_discover_amp(chan
);
1115 l2cap_send_conn_req(chan
);
1119 static void l2cap_do_start(struct l2cap_chan
*chan
)
1121 struct l2cap_conn
*conn
= chan
->conn
;
1123 if (conn
->hcon
->type
== LE_LINK
) {
1124 l2cap_chan_ready(chan
);
1128 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1129 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1132 if (l2cap_chan_check_security(chan
) &&
1133 __l2cap_no_conn_pending(chan
)) {
1134 l2cap_start_connection(chan
);
1137 struct l2cap_info_req req
;
1138 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1140 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1141 conn
->info_ident
= l2cap_get_ident(conn
);
1143 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1145 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1150 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1152 u32 local_feat_mask
= l2cap_feat_mask
;
1154 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1157 case L2CAP_MODE_ERTM
:
1158 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1159 case L2CAP_MODE_STREAMING
:
1160 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1166 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
,
1167 struct l2cap_chan
*chan
, int err
)
1169 struct sock
*sk
= chan
->sk
;
1170 struct l2cap_disconn_req req
;
1175 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1176 __clear_retrans_timer(chan
);
1177 __clear_monitor_timer(chan
);
1178 __clear_ack_timer(chan
);
1181 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1182 l2cap_state_change(chan
, BT_DISCONN
);
1186 req
.dcid
= cpu_to_le16(chan
->dcid
);
1187 req
.scid
= cpu_to_le16(chan
->scid
);
1188 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1192 __l2cap_state_change(chan
, BT_DISCONN
);
1193 __l2cap_chan_set_err(chan
, err
);
1197 /* ---- L2CAP connections ---- */
1198 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1200 struct l2cap_chan
*chan
, *tmp
;
1202 BT_DBG("conn %p", conn
);
1204 mutex_lock(&conn
->chan_lock
);
1206 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1207 struct sock
*sk
= chan
->sk
;
1209 l2cap_chan_lock(chan
);
1211 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1212 l2cap_chan_unlock(chan
);
1216 if (chan
->state
== BT_CONNECT
) {
1217 if (!l2cap_chan_check_security(chan
) ||
1218 !__l2cap_no_conn_pending(chan
)) {
1219 l2cap_chan_unlock(chan
);
1223 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1224 && test_bit(CONF_STATE2_DEVICE
,
1225 &chan
->conf_state
)) {
1226 l2cap_chan_close(chan
, ECONNRESET
);
1227 l2cap_chan_unlock(chan
);
1231 l2cap_start_connection(chan
);
1233 } else if (chan
->state
== BT_CONNECT2
) {
1234 struct l2cap_conn_rsp rsp
;
1236 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1237 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1239 if (l2cap_chan_check_security(chan
)) {
1241 if (test_bit(BT_SK_DEFER_SETUP
,
1242 &bt_sk(sk
)->flags
)) {
1243 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1244 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1245 chan
->ops
->defer(chan
);
1248 __l2cap_state_change(chan
, BT_CONFIG
);
1249 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
1250 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
1254 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1255 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1258 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1261 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1262 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1263 l2cap_chan_unlock(chan
);
1267 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1268 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1269 l2cap_build_conf_req(chan
, buf
), buf
);
1270 chan
->num_conf_req
++;
1273 l2cap_chan_unlock(chan
);
1276 mutex_unlock(&conn
->chan_lock
);
1279 /* Find socket with cid and source/destination bdaddr.
1280 * Returns closest match, locked.
1282 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1286 struct l2cap_chan
*c
, *c1
= NULL
;
1288 read_lock(&chan_list_lock
);
1290 list_for_each_entry(c
, &chan_list
, global_l
) {
1291 struct sock
*sk
= c
->sk
;
1293 if (state
&& c
->state
!= state
)
1296 if (c
->scid
== cid
) {
1297 int src_match
, dst_match
;
1298 int src_any
, dst_any
;
1301 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1302 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1303 if (src_match
&& dst_match
) {
1304 read_unlock(&chan_list_lock
);
1309 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1310 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1311 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1312 (src_any
&& dst_any
))
1317 read_unlock(&chan_list_lock
);
1322 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1324 struct sock
*parent
, *sk
;
1325 struct l2cap_chan
*chan
, *pchan
;
1329 /* Check if we have socket listening on cid */
1330 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1331 conn
->src
, conn
->dst
);
1339 chan
= pchan
->ops
->new_connection(pchan
);
1345 hci_conn_hold(conn
->hcon
);
1346 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
1348 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1349 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1351 l2cap_chan_add(conn
, chan
);
1353 l2cap_chan_ready(chan
);
1356 release_sock(parent
);
1359 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1361 struct l2cap_chan
*chan
;
1362 struct hci_conn
*hcon
= conn
->hcon
;
1364 BT_DBG("conn %p", conn
);
1366 if (!hcon
->out
&& hcon
->type
== LE_LINK
)
1367 l2cap_le_conn_ready(conn
);
1369 if (hcon
->out
&& hcon
->type
== LE_LINK
)
1370 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1372 mutex_lock(&conn
->chan_lock
);
1374 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1376 l2cap_chan_lock(chan
);
1378 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1379 l2cap_chan_unlock(chan
);
1383 if (hcon
->type
== LE_LINK
) {
1384 if (smp_conn_security(hcon
, chan
->sec_level
))
1385 l2cap_chan_ready(chan
);
1387 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1388 struct sock
*sk
= chan
->sk
;
1389 __clear_chan_timer(chan
);
1391 __l2cap_state_change(chan
, BT_CONNECTED
);
1392 sk
->sk_state_change(sk
);
1395 } else if (chan
->state
== BT_CONNECT
)
1396 l2cap_do_start(chan
);
1398 l2cap_chan_unlock(chan
);
1401 mutex_unlock(&conn
->chan_lock
);
1404 /* Notify sockets that we cannot guaranty reliability anymore */
1405 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1407 struct l2cap_chan
*chan
;
1409 BT_DBG("conn %p", conn
);
1411 mutex_lock(&conn
->chan_lock
);
1413 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1414 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1415 l2cap_chan_set_err(chan
, err
);
1418 mutex_unlock(&conn
->chan_lock
);
1421 static void l2cap_info_timeout(struct work_struct
*work
)
1423 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1426 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1427 conn
->info_ident
= 0;
1429 l2cap_conn_start(conn
);
1432 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1434 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1435 struct l2cap_chan
*chan
, *l
;
1440 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1442 kfree_skb(conn
->rx_skb
);
1444 mutex_lock(&conn
->chan_lock
);
1447 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1448 l2cap_chan_hold(chan
);
1449 l2cap_chan_lock(chan
);
1451 l2cap_chan_del(chan
, err
);
1453 l2cap_chan_unlock(chan
);
1455 chan
->ops
->close(chan
);
1456 l2cap_chan_put(chan
);
1459 mutex_unlock(&conn
->chan_lock
);
1461 hci_chan_del(conn
->hchan
);
1463 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1464 cancel_delayed_work_sync(&conn
->info_timer
);
1466 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1467 cancel_delayed_work_sync(&conn
->security_timer
);
1468 smp_chan_destroy(conn
);
1471 hcon
->l2cap_data
= NULL
;
1475 static void security_timeout(struct work_struct
*work
)
1477 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1478 security_timer
.work
);
1480 BT_DBG("conn %p", conn
);
1482 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &conn
->hcon
->flags
)) {
1483 smp_chan_destroy(conn
);
1484 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1488 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1490 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1491 struct hci_chan
*hchan
;
1496 hchan
= hci_chan_create(hcon
);
1500 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_KERNEL
);
1502 hci_chan_del(hchan
);
1506 hcon
->l2cap_data
= conn
;
1508 conn
->hchan
= hchan
;
1510 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1512 switch (hcon
->type
) {
1514 conn
->mtu
= hcon
->hdev
->block_mtu
;
1518 if (hcon
->hdev
->le_mtu
) {
1519 conn
->mtu
= hcon
->hdev
->le_mtu
;
1525 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1529 conn
->src
= &hcon
->hdev
->bdaddr
;
1530 conn
->dst
= &hcon
->dst
;
1532 conn
->feat_mask
= 0;
1534 spin_lock_init(&conn
->lock
);
1535 mutex_init(&conn
->chan_lock
);
1537 INIT_LIST_HEAD(&conn
->chan_l
);
1539 if (hcon
->type
== LE_LINK
)
1540 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1542 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1544 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1549 /* ---- Socket interface ---- */
1551 /* Find socket with psm and source / destination bdaddr.
1552 * Returns closest match.
1554 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1558 struct l2cap_chan
*c
, *c1
= NULL
;
1560 read_lock(&chan_list_lock
);
1562 list_for_each_entry(c
, &chan_list
, global_l
) {
1563 struct sock
*sk
= c
->sk
;
1565 if (state
&& c
->state
!= state
)
1568 if (c
->psm
== psm
) {
1569 int src_match
, dst_match
;
1570 int src_any
, dst_any
;
1573 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1574 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1575 if (src_match
&& dst_match
) {
1576 read_unlock(&chan_list_lock
);
1581 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1582 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1583 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1584 (src_any
&& dst_any
))
1589 read_unlock(&chan_list_lock
);
1594 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1595 bdaddr_t
*dst
, u8 dst_type
)
1597 struct sock
*sk
= chan
->sk
;
1598 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1599 struct l2cap_conn
*conn
;
1600 struct hci_conn
*hcon
;
1601 struct hci_dev
*hdev
;
1605 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", src
, dst
,
1606 dst_type
, __le16_to_cpu(psm
));
1608 hdev
= hci_get_route(dst
, src
);
1610 return -EHOSTUNREACH
;
1614 l2cap_chan_lock(chan
);
1616 /* PSM must be odd and lsb of upper byte must be 0 */
1617 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1618 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1623 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1628 switch (chan
->mode
) {
1629 case L2CAP_MODE_BASIC
:
1631 case L2CAP_MODE_ERTM
:
1632 case L2CAP_MODE_STREAMING
:
1641 switch (chan
->state
) {
1645 /* Already connecting */
1650 /* Already connected */
1664 /* Set destination address and psm */
1666 bacpy(&bt_sk(sk
)->dst
, dst
);
1672 auth_type
= l2cap_get_auth_type(chan
);
1674 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1675 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1676 chan
->sec_level
, auth_type
);
1678 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1679 chan
->sec_level
, auth_type
);
1682 err
= PTR_ERR(hcon
);
1686 conn
= l2cap_conn_add(hcon
, 0);
1693 if (hcon
->type
== LE_LINK
) {
1696 if (!list_empty(&conn
->chan_l
)) {
1705 /* Update source addr of the socket */
1706 bacpy(src
, conn
->src
);
1708 l2cap_chan_unlock(chan
);
1709 l2cap_chan_add(conn
, chan
);
1710 l2cap_chan_lock(chan
);
1712 l2cap_state_change(chan
, BT_CONNECT
);
1713 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1715 if (hcon
->state
== BT_CONNECTED
) {
1716 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1717 __clear_chan_timer(chan
);
1718 if (l2cap_chan_check_security(chan
))
1719 l2cap_state_change(chan
, BT_CONNECTED
);
1721 l2cap_do_start(chan
);
1727 l2cap_chan_unlock(chan
);
1728 hci_dev_unlock(hdev
);
1733 int __l2cap_wait_ack(struct sock
*sk
)
1735 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1736 DECLARE_WAITQUEUE(wait
, current
);
1740 add_wait_queue(sk_sleep(sk
), &wait
);
1741 set_current_state(TASK_INTERRUPTIBLE
);
1742 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1746 if (signal_pending(current
)) {
1747 err
= sock_intr_errno(timeo
);
1752 timeo
= schedule_timeout(timeo
);
1754 set_current_state(TASK_INTERRUPTIBLE
);
1756 err
= sock_error(sk
);
1760 set_current_state(TASK_RUNNING
);
1761 remove_wait_queue(sk_sleep(sk
), &wait
);
1765 static void l2cap_monitor_timeout(struct work_struct
*work
)
1767 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1768 monitor_timer
.work
);
1770 BT_DBG("chan %p", chan
);
1772 l2cap_chan_lock(chan
);
1775 l2cap_chan_unlock(chan
);
1776 l2cap_chan_put(chan
);
1780 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1782 l2cap_chan_unlock(chan
);
1783 l2cap_chan_put(chan
);
1786 static void l2cap_retrans_timeout(struct work_struct
*work
)
1788 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1789 retrans_timer
.work
);
1791 BT_DBG("chan %p", chan
);
1793 l2cap_chan_lock(chan
);
1796 l2cap_chan_unlock(chan
);
1797 l2cap_chan_put(chan
);
1801 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1802 l2cap_chan_unlock(chan
);
1803 l2cap_chan_put(chan
);
1806 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1807 struct sk_buff_head
*skbs
)
1809 struct sk_buff
*skb
;
1810 struct l2cap_ctrl
*control
;
1812 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1814 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1816 while (!skb_queue_empty(&chan
->tx_q
)) {
1818 skb
= skb_dequeue(&chan
->tx_q
);
1820 bt_cb(skb
)->control
.retries
= 1;
1821 control
= &bt_cb(skb
)->control
;
1823 control
->reqseq
= 0;
1824 control
->txseq
= chan
->next_tx_seq
;
1826 __pack_control(chan
, control
, skb
);
1828 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1829 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1830 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1833 l2cap_do_send(chan
, skb
);
1835 BT_DBG("Sent txseq %u", control
->txseq
);
1837 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1838 chan
->frames_sent
++;
1842 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1844 struct sk_buff
*skb
, *tx_skb
;
1845 struct l2cap_ctrl
*control
;
1848 BT_DBG("chan %p", chan
);
1850 if (chan
->state
!= BT_CONNECTED
)
1853 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1856 while (chan
->tx_send_head
&&
1857 chan
->unacked_frames
< chan
->remote_tx_win
&&
1858 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
1860 skb
= chan
->tx_send_head
;
1862 bt_cb(skb
)->control
.retries
= 1;
1863 control
= &bt_cb(skb
)->control
;
1865 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1868 control
->reqseq
= chan
->buffer_seq
;
1869 chan
->last_acked_seq
= chan
->buffer_seq
;
1870 control
->txseq
= chan
->next_tx_seq
;
1872 __pack_control(chan
, control
, skb
);
1874 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1875 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1876 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1879 /* Clone after data has been modified. Data is assumed to be
1880 read-only (for locking purposes) on cloned sk_buffs.
1882 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1887 __set_retrans_timer(chan
);
1889 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1890 chan
->unacked_frames
++;
1891 chan
->frames_sent
++;
1894 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1895 chan
->tx_send_head
= NULL
;
1897 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1899 l2cap_do_send(chan
, tx_skb
);
1900 BT_DBG("Sent txseq %u", control
->txseq
);
1903 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
1904 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
1909 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
1911 struct l2cap_ctrl control
;
1912 struct sk_buff
*skb
;
1913 struct sk_buff
*tx_skb
;
1916 BT_DBG("chan %p", chan
);
1918 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1921 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
1922 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
1924 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
1926 BT_DBG("Error: Can't retransmit seq %d, frame missing",
1931 bt_cb(skb
)->control
.retries
++;
1932 control
= bt_cb(skb
)->control
;
1934 if (chan
->max_tx
!= 0 &&
1935 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
1936 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
1937 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
1938 l2cap_seq_list_clear(&chan
->retrans_list
);
1942 control
.reqseq
= chan
->buffer_seq
;
1943 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1948 if (skb_cloned(skb
)) {
1949 /* Cloned sk_buffs are read-only, so we need a
1952 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
1954 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
1958 l2cap_seq_list_clear(&chan
->retrans_list
);
1962 /* Update skb contents */
1963 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
1964 put_unaligned_le32(__pack_extended_control(&control
),
1965 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1967 put_unaligned_le16(__pack_enhanced_control(&control
),
1968 tx_skb
->data
+ L2CAP_HDR_SIZE
);
1971 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1972 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
1973 put_unaligned_le16(fcs
, skb_put(tx_skb
,
1977 l2cap_do_send(chan
, tx_skb
);
1979 BT_DBG("Resent txseq %d", control
.txseq
);
1981 chan
->last_acked_seq
= chan
->buffer_seq
;
1985 static void l2cap_retransmit(struct l2cap_chan
*chan
,
1986 struct l2cap_ctrl
*control
)
1988 BT_DBG("chan %p, control %p", chan
, control
);
1990 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
1991 l2cap_ertm_resend(chan
);
1994 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
1995 struct l2cap_ctrl
*control
)
1997 struct sk_buff
*skb
;
1999 BT_DBG("chan %p, control %p", chan
, control
);
2002 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
2004 l2cap_seq_list_clear(&chan
->retrans_list
);
2006 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2009 if (chan
->unacked_frames
) {
2010 skb_queue_walk(&chan
->tx_q
, skb
) {
2011 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
2012 skb
== chan
->tx_send_head
)
2016 skb_queue_walk_from(&chan
->tx_q
, skb
) {
2017 if (skb
== chan
->tx_send_head
)
2020 l2cap_seq_list_append(&chan
->retrans_list
,
2021 bt_cb(skb
)->control
.txseq
);
2024 l2cap_ertm_resend(chan
);
2028 static void l2cap_send_ack(struct l2cap_chan
*chan
)
2030 struct l2cap_ctrl control
;
2031 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2032 chan
->last_acked_seq
);
2035 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2036 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
2038 memset(&control
, 0, sizeof(control
));
2041 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
2042 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
2043 __clear_ack_timer(chan
);
2044 control
.super
= L2CAP_SUPER_RNR
;
2045 control
.reqseq
= chan
->buffer_seq
;
2046 l2cap_send_sframe(chan
, &control
);
2048 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
2049 l2cap_ertm_send(chan
);
2050 /* If any i-frames were sent, they included an ack */
2051 if (chan
->buffer_seq
== chan
->last_acked_seq
)
2055 /* Ack now if the window is 3/4ths full.
2056 * Calculate without mul or div
2058 threshold
= chan
->ack_win
;
2059 threshold
+= threshold
<< 1;
2062 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2065 if (frames_to_ack
>= threshold
) {
2066 __clear_ack_timer(chan
);
2067 control
.super
= L2CAP_SUPER_RR
;
2068 control
.reqseq
= chan
->buffer_seq
;
2069 l2cap_send_sframe(chan
, &control
);
2074 __set_ack_timer(chan
);
2078 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2079 struct msghdr
*msg
, int len
,
2080 int count
, struct sk_buff
*skb
)
2082 struct l2cap_conn
*conn
= chan
->conn
;
2083 struct sk_buff
**frag
;
2086 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
2092 /* Continuation fragments (no L2CAP header) */
2093 frag
= &skb_shinfo(skb
)->frag_list
;
2095 struct sk_buff
*tmp
;
2097 count
= min_t(unsigned int, conn
->mtu
, len
);
2099 tmp
= chan
->ops
->alloc_skb(chan
, count
,
2100 msg
->msg_flags
& MSG_DONTWAIT
);
2102 return PTR_ERR(tmp
);
2106 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
2109 (*frag
)->priority
= skb
->priority
;
2114 skb
->len
+= (*frag
)->len
;
2115 skb
->data_len
+= (*frag
)->len
;
2117 frag
= &(*frag
)->next
;
2123 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2124 struct msghdr
*msg
, size_t len
,
2127 struct l2cap_conn
*conn
= chan
->conn
;
2128 struct sk_buff
*skb
;
2129 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2130 struct l2cap_hdr
*lh
;
2132 BT_DBG("chan %p len %zu priority %u", chan
, len
, priority
);
2134 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2136 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2137 msg
->msg_flags
& MSG_DONTWAIT
);
2141 skb
->priority
= priority
;
2143 /* Create L2CAP header */
2144 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2145 lh
->cid
= cpu_to_le16(chan
->dcid
);
2146 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2147 put_unaligned(chan
->psm
, skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2149 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2150 if (unlikely(err
< 0)) {
2152 return ERR_PTR(err
);
2157 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2158 struct msghdr
*msg
, size_t len
,
2161 struct l2cap_conn
*conn
= chan
->conn
;
2162 struct sk_buff
*skb
;
2164 struct l2cap_hdr
*lh
;
2166 BT_DBG("chan %p len %zu", chan
, len
);
2168 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2170 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
2171 msg
->msg_flags
& MSG_DONTWAIT
);
2175 skb
->priority
= priority
;
2177 /* Create L2CAP header */
2178 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2179 lh
->cid
= cpu_to_le16(chan
->dcid
);
2180 lh
->len
= cpu_to_le16(len
);
2182 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2183 if (unlikely(err
< 0)) {
2185 return ERR_PTR(err
);
2190 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2191 struct msghdr
*msg
, size_t len
,
2194 struct l2cap_conn
*conn
= chan
->conn
;
2195 struct sk_buff
*skb
;
2196 int err
, count
, hlen
;
2197 struct l2cap_hdr
*lh
;
2199 BT_DBG("chan %p len %zu", chan
, len
);
2202 return ERR_PTR(-ENOTCONN
);
2204 hlen
= __ertm_hdr_size(chan
);
2207 hlen
+= L2CAP_SDULEN_SIZE
;
2209 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2210 hlen
+= L2CAP_FCS_SIZE
;
2212 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2214 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2215 msg
->msg_flags
& MSG_DONTWAIT
);
2219 /* Create L2CAP header */
2220 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2221 lh
->cid
= cpu_to_le16(chan
->dcid
);
2222 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2224 /* Control header is populated later */
2225 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2226 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2228 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2231 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2233 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2234 if (unlikely(err
< 0)) {
2236 return ERR_PTR(err
);
2239 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2240 bt_cb(skb
)->control
.retries
= 0;
2244 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2245 struct sk_buff_head
*seg_queue
,
2246 struct msghdr
*msg
, size_t len
)
2248 struct sk_buff
*skb
;
2253 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2255 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2256 * so fragmented skbs are not used. The HCI layer's handling
2257 * of fragmented skbs is not compatible with ERTM's queueing.
2260 /* PDU size is derived from the HCI MTU */
2261 pdu_len
= chan
->conn
->mtu
;
2263 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2265 /* Adjust for largest possible L2CAP overhead. */
2267 pdu_len
-= L2CAP_FCS_SIZE
;
2269 pdu_len
-= __ertm_hdr_size(chan
);
2271 /* Remote device may have requested smaller PDUs */
2272 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2274 if (len
<= pdu_len
) {
2275 sar
= L2CAP_SAR_UNSEGMENTED
;
2279 sar
= L2CAP_SAR_START
;
2281 pdu_len
-= L2CAP_SDULEN_SIZE
;
2285 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2288 __skb_queue_purge(seg_queue
);
2289 return PTR_ERR(skb
);
2292 bt_cb(skb
)->control
.sar
= sar
;
2293 __skb_queue_tail(seg_queue
, skb
);
2298 pdu_len
+= L2CAP_SDULEN_SIZE
;
2301 if (len
<= pdu_len
) {
2302 sar
= L2CAP_SAR_END
;
2305 sar
= L2CAP_SAR_CONTINUE
;
2312 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2315 struct sk_buff
*skb
;
2317 struct sk_buff_head seg_queue
;
2319 /* Connectionless channel */
2320 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2321 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2323 return PTR_ERR(skb
);
2325 l2cap_do_send(chan
, skb
);
2329 switch (chan
->mode
) {
2330 case L2CAP_MODE_BASIC
:
2331 /* Check outgoing MTU */
2332 if (len
> chan
->omtu
)
2335 /* Create a basic PDU */
2336 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2338 return PTR_ERR(skb
);
2340 l2cap_do_send(chan
, skb
);
2344 case L2CAP_MODE_ERTM
:
2345 case L2CAP_MODE_STREAMING
:
2346 /* Check outgoing MTU */
2347 if (len
> chan
->omtu
) {
2352 __skb_queue_head_init(&seg_queue
);
2354 /* Do segmentation before calling in to the state machine,
2355 * since it's possible to block while waiting for memory
2358 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2360 /* The channel could have been closed while segmenting,
2361 * check that it is still connected.
2363 if (chan
->state
!= BT_CONNECTED
) {
2364 __skb_queue_purge(&seg_queue
);
2371 if (chan
->mode
== L2CAP_MODE_ERTM
)
2372 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2374 l2cap_streaming_send(chan
, &seg_queue
);
2378 /* If the skbs were not queued for sending, they'll still be in
2379 * seg_queue and need to be purged.
2381 __skb_queue_purge(&seg_queue
);
2385 BT_DBG("bad state %1.1x", chan
->mode
);
2392 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2394 struct l2cap_ctrl control
;
2397 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2399 memset(&control
, 0, sizeof(control
));
2401 control
.super
= L2CAP_SUPER_SREJ
;
2403 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2404 seq
= __next_seq(chan
, seq
)) {
2405 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2406 control
.reqseq
= seq
;
2407 l2cap_send_sframe(chan
, &control
);
2408 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2412 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2415 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2417 struct l2cap_ctrl control
;
2419 BT_DBG("chan %p", chan
);
2421 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2424 memset(&control
, 0, sizeof(control
));
2426 control
.super
= L2CAP_SUPER_SREJ
;
2427 control
.reqseq
= chan
->srej_list
.tail
;
2428 l2cap_send_sframe(chan
, &control
);
2431 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2433 struct l2cap_ctrl control
;
2437 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2439 memset(&control
, 0, sizeof(control
));
2441 control
.super
= L2CAP_SUPER_SREJ
;
2443 /* Capture initial list head to allow only one pass through the list. */
2444 initial_head
= chan
->srej_list
.head
;
2447 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2448 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2451 control
.reqseq
= seq
;
2452 l2cap_send_sframe(chan
, &control
);
2453 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2454 } while (chan
->srej_list
.head
!= initial_head
);
2457 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2459 struct sk_buff
*acked_skb
;
2462 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2464 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2467 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2468 chan
->expected_ack_seq
, chan
->unacked_frames
);
2470 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2471 ackseq
= __next_seq(chan
, ackseq
)) {
2473 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2475 skb_unlink(acked_skb
, &chan
->tx_q
);
2476 kfree_skb(acked_skb
);
2477 chan
->unacked_frames
--;
2481 chan
->expected_ack_seq
= reqseq
;
2483 if (chan
->unacked_frames
== 0)
2484 __clear_retrans_timer(chan
);
2486 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2489 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2491 BT_DBG("chan %p", chan
);
2493 chan
->expected_tx_seq
= chan
->buffer_seq
;
2494 l2cap_seq_list_clear(&chan
->srej_list
);
2495 skb_queue_purge(&chan
->srej_q
);
2496 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2499 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2500 struct l2cap_ctrl
*control
,
2501 struct sk_buff_head
*skbs
, u8 event
)
2503 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2507 case L2CAP_EV_DATA_REQUEST
:
2508 if (chan
->tx_send_head
== NULL
)
2509 chan
->tx_send_head
= skb_peek(skbs
);
2511 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2512 l2cap_ertm_send(chan
);
2514 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2515 BT_DBG("Enter LOCAL_BUSY");
2516 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2518 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2519 /* The SREJ_SENT state must be aborted if we are to
2520 * enter the LOCAL_BUSY state.
2522 l2cap_abort_rx_srej_sent(chan
);
2525 l2cap_send_ack(chan
);
2528 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2529 BT_DBG("Exit LOCAL_BUSY");
2530 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2532 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2533 struct l2cap_ctrl local_control
;
2535 memset(&local_control
, 0, sizeof(local_control
));
2536 local_control
.sframe
= 1;
2537 local_control
.super
= L2CAP_SUPER_RR
;
2538 local_control
.poll
= 1;
2539 local_control
.reqseq
= chan
->buffer_seq
;
2540 l2cap_send_sframe(chan
, &local_control
);
2542 chan
->retry_count
= 1;
2543 __set_monitor_timer(chan
);
2544 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2547 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2548 l2cap_process_reqseq(chan
, control
->reqseq
);
2550 case L2CAP_EV_EXPLICIT_POLL
:
2551 l2cap_send_rr_or_rnr(chan
, 1);
2552 chan
->retry_count
= 1;
2553 __set_monitor_timer(chan
);
2554 __clear_ack_timer(chan
);
2555 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2557 case L2CAP_EV_RETRANS_TO
:
2558 l2cap_send_rr_or_rnr(chan
, 1);
2559 chan
->retry_count
= 1;
2560 __set_monitor_timer(chan
);
2561 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2563 case L2CAP_EV_RECV_FBIT
:
2564 /* Nothing to process */
2571 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2572 struct l2cap_ctrl
*control
,
2573 struct sk_buff_head
*skbs
, u8 event
)
2575 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2579 case L2CAP_EV_DATA_REQUEST
:
2580 if (chan
->tx_send_head
== NULL
)
2581 chan
->tx_send_head
= skb_peek(skbs
);
2582 /* Queue data, but don't send. */
2583 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2585 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2586 BT_DBG("Enter LOCAL_BUSY");
2587 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2589 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2590 /* The SREJ_SENT state must be aborted if we are to
2591 * enter the LOCAL_BUSY state.
2593 l2cap_abort_rx_srej_sent(chan
);
2596 l2cap_send_ack(chan
);
2599 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2600 BT_DBG("Exit LOCAL_BUSY");
2601 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2603 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2604 struct l2cap_ctrl local_control
;
2605 memset(&local_control
, 0, sizeof(local_control
));
2606 local_control
.sframe
= 1;
2607 local_control
.super
= L2CAP_SUPER_RR
;
2608 local_control
.poll
= 1;
2609 local_control
.reqseq
= chan
->buffer_seq
;
2610 l2cap_send_sframe(chan
, &local_control
);
2612 chan
->retry_count
= 1;
2613 __set_monitor_timer(chan
);
2614 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2617 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2618 l2cap_process_reqseq(chan
, control
->reqseq
);
2622 case L2CAP_EV_RECV_FBIT
:
2623 if (control
&& control
->final
) {
2624 __clear_monitor_timer(chan
);
2625 if (chan
->unacked_frames
> 0)
2626 __set_retrans_timer(chan
);
2627 chan
->retry_count
= 0;
2628 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2629 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2632 case L2CAP_EV_EXPLICIT_POLL
:
2635 case L2CAP_EV_MONITOR_TO
:
2636 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2637 l2cap_send_rr_or_rnr(chan
, 1);
2638 __set_monitor_timer(chan
);
2639 chan
->retry_count
++;
2641 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
2649 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2650 struct sk_buff_head
*skbs
, u8 event
)
2652 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2653 chan
, control
, skbs
, event
, chan
->tx_state
);
2655 switch (chan
->tx_state
) {
2656 case L2CAP_TX_STATE_XMIT
:
2657 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2659 case L2CAP_TX_STATE_WAIT_F
:
2660 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2668 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2669 struct l2cap_ctrl
*control
)
2671 BT_DBG("chan %p, control %p", chan
, control
);
2672 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2675 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2676 struct l2cap_ctrl
*control
)
2678 BT_DBG("chan %p, control %p", chan
, control
);
2679 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2682 /* Copy frame to all raw sockets on that connection */
2683 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2685 struct sk_buff
*nskb
;
2686 struct l2cap_chan
*chan
;
2688 BT_DBG("conn %p", conn
);
2690 mutex_lock(&conn
->chan_lock
);
2692 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2693 struct sock
*sk
= chan
->sk
;
2694 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2697 /* Don't send frame to the socket it came from */
2700 nskb
= skb_clone(skb
, GFP_KERNEL
);
2704 if (chan
->ops
->recv(chan
, nskb
))
2708 mutex_unlock(&conn
->chan_lock
);
2711 /* ---- L2CAP signalling commands ---- */
2712 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
2713 u8 ident
, u16 dlen
, void *data
)
2715 struct sk_buff
*skb
, **frag
;
2716 struct l2cap_cmd_hdr
*cmd
;
2717 struct l2cap_hdr
*lh
;
2720 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2721 conn
, code
, ident
, dlen
);
2723 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2724 count
= min_t(unsigned int, conn
->mtu
, len
);
2726 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
2730 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2731 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2733 if (conn
->hcon
->type
== LE_LINK
)
2734 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2736 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_SIGNALING
);
2738 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2741 cmd
->len
= cpu_to_le16(dlen
);
2744 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2745 memcpy(skb_put(skb
, count
), data
, count
);
2751 /* Continuation fragments (no L2CAP header) */
2752 frag
= &skb_shinfo(skb
)->frag_list
;
2754 count
= min_t(unsigned int, conn
->mtu
, len
);
2756 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
2760 memcpy(skb_put(*frag
, count
), data
, count
);
2765 frag
= &(*frag
)->next
;
2775 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
,
2778 struct l2cap_conf_opt
*opt
= *ptr
;
2781 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2789 *val
= *((u8
*) opt
->val
);
2793 *val
= get_unaligned_le16(opt
->val
);
2797 *val
= get_unaligned_le32(opt
->val
);
2801 *val
= (unsigned long) opt
->val
;
2805 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
2809 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2811 struct l2cap_conf_opt
*opt
= *ptr
;
2813 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
2820 *((u8
*) opt
->val
) = val
;
2824 put_unaligned_le16(val
, opt
->val
);
2828 put_unaligned_le32(val
, opt
->val
);
2832 memcpy(opt
->val
, (void *) val
, len
);
2836 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2839 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2841 struct l2cap_conf_efs efs
;
2843 switch (chan
->mode
) {
2844 case L2CAP_MODE_ERTM
:
2845 efs
.id
= chan
->local_id
;
2846 efs
.stype
= chan
->local_stype
;
2847 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2848 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2849 efs
.acc_lat
= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2850 efs
.flush_to
= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
2853 case L2CAP_MODE_STREAMING
:
2855 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2856 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2857 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2866 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
2867 (unsigned long) &efs
);
2870 static void l2cap_ack_timeout(struct work_struct
*work
)
2872 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2876 BT_DBG("chan %p", chan
);
2878 l2cap_chan_lock(chan
);
2880 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2881 chan
->last_acked_seq
);
2884 l2cap_send_rr_or_rnr(chan
, 0);
2886 l2cap_chan_unlock(chan
);
2887 l2cap_chan_put(chan
);
2890 int l2cap_ertm_init(struct l2cap_chan
*chan
)
2894 chan
->next_tx_seq
= 0;
2895 chan
->expected_tx_seq
= 0;
2896 chan
->expected_ack_seq
= 0;
2897 chan
->unacked_frames
= 0;
2898 chan
->buffer_seq
= 0;
2899 chan
->frames_sent
= 0;
2900 chan
->last_acked_seq
= 0;
2902 chan
->sdu_last_frag
= NULL
;
2905 skb_queue_head_init(&chan
->tx_q
);
2907 chan
->local_amp_id
= 0;
2909 chan
->move_state
= L2CAP_MOVE_STABLE
;
2910 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
2912 if (chan
->mode
!= L2CAP_MODE_ERTM
)
2915 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2916 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2918 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
2919 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
2920 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2922 skb_queue_head_init(&chan
->srej_q
);
2924 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
2928 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
2930 l2cap_seq_list_free(&chan
->srej_list
);
2935 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2938 case L2CAP_MODE_STREAMING
:
2939 case L2CAP_MODE_ERTM
:
2940 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2944 return L2CAP_MODE_BASIC
;
2948 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
2950 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
2953 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
2955 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
2958 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
2960 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
2961 __l2cap_ews_supported(chan
)) {
2962 /* use extended control field */
2963 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2964 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2966 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
2967 L2CAP_DEFAULT_TX_WINDOW
);
2968 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2970 chan
->ack_win
= chan
->tx_win
;
2973 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2975 struct l2cap_conf_req
*req
= data
;
2976 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2977 void *ptr
= req
->data
;
2980 BT_DBG("chan %p", chan
);
2982 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2985 switch (chan
->mode
) {
2986 case L2CAP_MODE_STREAMING
:
2987 case L2CAP_MODE_ERTM
:
2988 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2991 if (__l2cap_efs_supported(chan
))
2992 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2996 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
3001 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
3002 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3004 switch (chan
->mode
) {
3005 case L2CAP_MODE_BASIC
:
3006 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
3007 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
3010 rfc
.mode
= L2CAP_MODE_BASIC
;
3012 rfc
.max_transmit
= 0;
3013 rfc
.retrans_timeout
= 0;
3014 rfc
.monitor_timeout
= 0;
3015 rfc
.max_pdu_size
= 0;
3017 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3018 (unsigned long) &rfc
);
3021 case L2CAP_MODE_ERTM
:
3022 rfc
.mode
= L2CAP_MODE_ERTM
;
3023 rfc
.max_transmit
= chan
->max_tx
;
3024 rfc
.retrans_timeout
= 0;
3025 rfc
.monitor_timeout
= 0;
3027 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3028 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3030 rfc
.max_pdu_size
= cpu_to_le16(size
);
3032 l2cap_txwin_setup(chan
);
3034 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
3035 L2CAP_DEFAULT_TX_WINDOW
);
3037 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3038 (unsigned long) &rfc
);
3040 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3041 l2cap_add_opt_efs(&ptr
, chan
);
3043 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
3046 if (chan
->fcs
== L2CAP_FCS_NONE
||
3047 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
3048 chan
->fcs
= L2CAP_FCS_NONE
;
3049 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
3052 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3053 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3057 case L2CAP_MODE_STREAMING
:
3058 l2cap_txwin_setup(chan
);
3059 rfc
.mode
= L2CAP_MODE_STREAMING
;
3061 rfc
.max_transmit
= 0;
3062 rfc
.retrans_timeout
= 0;
3063 rfc
.monitor_timeout
= 0;
3065 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3066 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3068 rfc
.max_pdu_size
= cpu_to_le16(size
);
3070 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3071 (unsigned long) &rfc
);
3073 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3074 l2cap_add_opt_efs(&ptr
, chan
);
3076 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
3079 if (chan
->fcs
== L2CAP_FCS_NONE
||
3080 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
3081 chan
->fcs
= L2CAP_FCS_NONE
;
3082 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
3087 req
->dcid
= cpu_to_le16(chan
->dcid
);
3088 req
->flags
= __constant_cpu_to_le16(0);
/* Parse the accumulated remote Configuration Request (chan->conf_req)
 * and build the Configuration Response into data.  Returns the number
 * of response bytes written, or -ECONNREFUSED when the negotiation
 * cannot proceed (unsupported EWS/EFS, irreconcilable mode).
 * NOTE(review): reconstructed from fragmented source.  Also note the
 * response buffer is not bounds-checked while options are appended —
 * callers size 'data' generously; confirm against upstream hardening.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Unknown non-hint option: echo its type back */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);
			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}

	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3310 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3311 void *data
, u16
*result
)
3313 struct l2cap_conf_req
*req
= data
;
3314 void *ptr
= req
->data
;
3317 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3318 struct l2cap_conf_efs efs
;
3320 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3322 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3323 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3326 case L2CAP_CONF_MTU
:
3327 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3328 *result
= L2CAP_CONF_UNACCEPT
;
3329 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3332 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3335 case L2CAP_CONF_FLUSH_TO
:
3336 chan
->flush_to
= val
;
3337 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3341 case L2CAP_CONF_RFC
:
3342 if (olen
== sizeof(rfc
))
3343 memcpy(&rfc
, (void *)val
, olen
);
3345 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3346 rfc
.mode
!= chan
->mode
)
3347 return -ECONNREFUSED
;
3351 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3352 sizeof(rfc
), (unsigned long) &rfc
);
3355 case L2CAP_CONF_EWS
:
3356 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3357 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3361 case L2CAP_CONF_EFS
:
3362 if (olen
== sizeof(efs
))
3363 memcpy(&efs
, (void *)val
, olen
);
3365 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3366 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3367 efs
.stype
!= chan
->local_stype
)
3368 return -ECONNREFUSED
;
3370 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3371 (unsigned long) &efs
);
3376 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3377 return -ECONNREFUSED
;
3379 chan
->mode
= rfc
.mode
;
3381 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3383 case L2CAP_MODE_ERTM
:
3384 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3385 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3386 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3387 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3388 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3391 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3392 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3393 chan
->local_sdu_itime
=
3394 le32_to_cpu(efs
.sdu_itime
);
3395 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3396 chan
->local_flush_to
=
3397 le32_to_cpu(efs
.flush_to
);
3401 case L2CAP_MODE_STREAMING
:
3402 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3406 req
->dcid
= cpu_to_le16(chan
->dcid
);
3407 req
->flags
= __constant_cpu_to_le16(0);
3412 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3413 u16 result
, u16 flags
)
3415 struct l2cap_conf_rsp
*rsp
= data
;
3416 void *ptr
= rsp
->data
;
3418 BT_DBG("chan %p", chan
);
3420 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3421 rsp
->result
= cpu_to_le16(result
);
3422 rsp
->flags
= cpu_to_le16(flags
);
3427 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3429 struct l2cap_conn_rsp rsp
;
3430 struct l2cap_conn
*conn
= chan
->conn
;
3433 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3434 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3435 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3436 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3437 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3439 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3442 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3443 l2cap_build_conf_req(chan
, buf
), buf
);
3444 chan
->num_conf_req
++;
3447 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3451 /* Use sane default values in case a misbehaving remote device
3452 * did not send an RFC or extended window size option.
3454 u16 txwin_ext
= chan
->ack_win
;
3455 struct l2cap_conf_rfc rfc
= {
3457 .retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3458 .monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3459 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3460 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3463 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3465 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3468 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3469 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3472 case L2CAP_CONF_RFC
:
3473 if (olen
== sizeof(rfc
))
3474 memcpy(&rfc
, (void *)val
, olen
);
3476 case L2CAP_CONF_EWS
:
3483 case L2CAP_MODE_ERTM
:
3484 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3485 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3486 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3487 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3488 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3490 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3493 case L2CAP_MODE_STREAMING
:
3494 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3498 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3499 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3501 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3503 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3506 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3507 cmd
->ident
== conn
->info_ident
) {
3508 cancel_delayed_work(&conn
->info_timer
);
3510 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3511 conn
->info_ident
= 0;
3513 l2cap_conn_start(conn
);
3519 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3520 struct l2cap_cmd_hdr
*cmd
,
3521 u8
*data
, u8 rsp_code
, u8 amp_id
)
3523 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3524 struct l2cap_conn_rsp rsp
;
3525 struct l2cap_chan
*chan
= NULL
, *pchan
;
3526 struct sock
*parent
, *sk
= NULL
;
3527 int result
, status
= L2CAP_CS_NO_INFO
;
3529 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3530 __le16 psm
= req
->psm
;
3532 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3534 /* Check if we have socket listening on psm */
3535 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
3537 result
= L2CAP_CR_BAD_PSM
;
3543 mutex_lock(&conn
->chan_lock
);
3546 /* Check if the ACL is secure enough (if not SDP) */
3547 if (psm
!= __constant_cpu_to_le16(L2CAP_PSM_SDP
) &&
3548 !hci_conn_check_link_mode(conn
->hcon
)) {
3549 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3550 result
= L2CAP_CR_SEC_BLOCK
;
3554 result
= L2CAP_CR_NO_MEM
;
3556 /* Check if we already have channel with that dcid */
3557 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3560 chan
= pchan
->ops
->new_connection(pchan
);
3566 hci_conn_hold(conn
->hcon
);
3568 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3569 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3572 chan
->local_amp_id
= amp_id
;
3574 __l2cap_chan_add(conn
, chan
);
3578 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3580 chan
->ident
= cmd
->ident
;
3582 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3583 if (l2cap_chan_check_security(chan
)) {
3584 if (test_bit(BT_SK_DEFER_SETUP
, &bt_sk(sk
)->flags
)) {
3585 __l2cap_state_change(chan
, BT_CONNECT2
);
3586 result
= L2CAP_CR_PEND
;
3587 status
= L2CAP_CS_AUTHOR_PEND
;
3588 chan
->ops
->defer(chan
);
3590 /* Force pending result for AMP controllers.
3591 * The connection will succeed after the
3592 * physical link is up.
3595 __l2cap_state_change(chan
, BT_CONNECT2
);
3596 result
= L2CAP_CR_PEND
;
3598 __l2cap_state_change(chan
, BT_CONFIG
);
3599 result
= L2CAP_CR_SUCCESS
;
3601 status
= L2CAP_CS_NO_INFO
;
3604 __l2cap_state_change(chan
, BT_CONNECT2
);
3605 result
= L2CAP_CR_PEND
;
3606 status
= L2CAP_CS_AUTHEN_PEND
;
3609 __l2cap_state_change(chan
, BT_CONNECT2
);
3610 result
= L2CAP_CR_PEND
;
3611 status
= L2CAP_CS_NO_INFO
;
3615 release_sock(parent
);
3616 mutex_unlock(&conn
->chan_lock
);
3619 rsp
.scid
= cpu_to_le16(scid
);
3620 rsp
.dcid
= cpu_to_le16(dcid
);
3621 rsp
.result
= cpu_to_le16(result
);
3622 rsp
.status
= cpu_to_le16(status
);
3623 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3625 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3626 struct l2cap_info_req info
;
3627 info
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3629 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3630 conn
->info_ident
= l2cap_get_ident(conn
);
3632 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3634 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3635 sizeof(info
), &info
);
3638 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3639 result
== L2CAP_CR_SUCCESS
) {
3641 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3642 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3643 l2cap_build_conf_req(chan
, buf
), buf
);
3644 chan
->num_conf_req
++;
3650 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3651 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3653 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3657 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
3658 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3660 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3661 u16 scid
, dcid
, result
, status
;
3662 struct l2cap_chan
*chan
;
3666 scid
= __le16_to_cpu(rsp
->scid
);
3667 dcid
= __le16_to_cpu(rsp
->dcid
);
3668 result
= __le16_to_cpu(rsp
->result
);
3669 status
= __le16_to_cpu(rsp
->status
);
3671 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3672 dcid
, scid
, result
, status
);
3674 mutex_lock(&conn
->chan_lock
);
3677 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3683 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3692 l2cap_chan_lock(chan
);
3695 case L2CAP_CR_SUCCESS
:
3696 l2cap_state_change(chan
, BT_CONFIG
);
3699 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3701 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3704 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3705 l2cap_build_conf_req(chan
, req
), req
);
3706 chan
->num_conf_req
++;
3710 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3714 l2cap_chan_del(chan
, ECONNREFUSED
);
3718 l2cap_chan_unlock(chan
);
3721 mutex_unlock(&conn
->chan_lock
);
3726 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3728 /* FCS is enabled only in ERTM or streaming mode, if one or both
3731 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3732 chan
->fcs
= L2CAP_FCS_NONE
;
3733 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3734 chan
->fcs
= L2CAP_FCS_CRC16
;
3737 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3738 u8 ident
, u16 flags
)
3740 struct l2cap_conn
*conn
= chan
->conn
;
3742 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
3745 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3746 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3748 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
3749 l2cap_build_conf_rsp(chan
, data
,
3750 L2CAP_CONF_SUCCESS
, flags
), data
);
3753 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
3754 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3757 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3760 struct l2cap_chan
*chan
;
3763 dcid
= __le16_to_cpu(req
->dcid
);
3764 flags
= __le16_to_cpu(req
->flags
);
3766 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3768 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3772 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3773 struct l2cap_cmd_rej_cid rej
;
3775 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3776 rej
.scid
= cpu_to_le16(chan
->scid
);
3777 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3779 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3784 /* Reject if config buffer is too small. */
3785 len
= cmd_len
- sizeof(*req
);
3786 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3787 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3788 l2cap_build_conf_rsp(chan
, rsp
,
3789 L2CAP_CONF_REJECT
, flags
), rsp
);
3794 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3795 chan
->conf_len
+= len
;
3797 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
3798 /* Incomplete config. Send empty response. */
3799 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3800 l2cap_build_conf_rsp(chan
, rsp
,
3801 L2CAP_CONF_SUCCESS
, flags
), rsp
);
3805 /* Complete config. */
3806 len
= l2cap_parse_conf_req(chan
, rsp
);
3808 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3812 chan
->ident
= cmd
->ident
;
3813 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3814 chan
->num_conf_rsp
++;
3816 /* Reset config buffer. */
3819 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3822 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3823 set_default_fcs(chan
);
3825 if (chan
->mode
== L2CAP_MODE_ERTM
||
3826 chan
->mode
== L2CAP_MODE_STREAMING
)
3827 err
= l2cap_ertm_init(chan
);
3830 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3832 l2cap_chan_ready(chan
);
3837 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3839 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3840 l2cap_build_conf_req(chan
, buf
), buf
);
3841 chan
->num_conf_req
++;
3844 /* Got Conf Rsp PENDING from remote side and asume we sent
3845 Conf Rsp PENDING in the code above */
3846 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3847 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3849 /* check compatibility */
3851 /* Send rsp for BR/EDR channel */
3853 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
3855 chan
->ident
= cmd
->ident
;
3859 l2cap_chan_unlock(chan
);
3863 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
3864 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3866 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3867 u16 scid
, flags
, result
;
3868 struct l2cap_chan
*chan
;
3869 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3872 scid
= __le16_to_cpu(rsp
->scid
);
3873 flags
= __le16_to_cpu(rsp
->flags
);
3874 result
= __le16_to_cpu(rsp
->result
);
3876 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3879 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3884 case L2CAP_CONF_SUCCESS
:
3885 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3886 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3889 case L2CAP_CONF_PENDING
:
3890 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3892 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3895 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3898 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3902 /* check compatibility */
3905 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
3908 chan
->ident
= cmd
->ident
;
3912 case L2CAP_CONF_UNACCEPT
:
3913 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3916 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3917 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3921 /* throw out any old stored conf requests */
3922 result
= L2CAP_CONF_SUCCESS
;
3923 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3926 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3930 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3931 L2CAP_CONF_REQ
, len
, req
);
3932 chan
->num_conf_req
++;
3933 if (result
!= L2CAP_CONF_SUCCESS
)
3939 l2cap_chan_set_err(chan
, ECONNRESET
);
3941 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
3942 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3946 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
3949 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3951 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3952 set_default_fcs(chan
);
3954 if (chan
->mode
== L2CAP_MODE_ERTM
||
3955 chan
->mode
== L2CAP_MODE_STREAMING
)
3956 err
= l2cap_ertm_init(chan
);
3959 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3961 l2cap_chan_ready(chan
);
3965 l2cap_chan_unlock(chan
);
3969 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
3970 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3972 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3973 struct l2cap_disconn_rsp rsp
;
3975 struct l2cap_chan
*chan
;
3978 scid
= __le16_to_cpu(req
->scid
);
3979 dcid
= __le16_to_cpu(req
->dcid
);
3981 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3983 mutex_lock(&conn
->chan_lock
);
3985 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
3987 mutex_unlock(&conn
->chan_lock
);
3991 l2cap_chan_lock(chan
);
3995 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3996 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3997 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
4000 sk
->sk_shutdown
= SHUTDOWN_MASK
;
4003 l2cap_chan_hold(chan
);
4004 l2cap_chan_del(chan
, ECONNRESET
);
4006 l2cap_chan_unlock(chan
);
4008 chan
->ops
->close(chan
);
4009 l2cap_chan_put(chan
);
4011 mutex_unlock(&conn
->chan_lock
);
4016 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
4017 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4019 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
4021 struct l2cap_chan
*chan
;
4023 scid
= __le16_to_cpu(rsp
->scid
);
4024 dcid
= __le16_to_cpu(rsp
->dcid
);
4026 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
4028 mutex_lock(&conn
->chan_lock
);
4030 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4032 mutex_unlock(&conn
->chan_lock
);
4036 l2cap_chan_lock(chan
);
4038 l2cap_chan_hold(chan
);
4039 l2cap_chan_del(chan
, 0);
4041 l2cap_chan_unlock(chan
);
4043 chan
->ops
->close(chan
);
4044 l2cap_chan_put(chan
);
4046 mutex_unlock(&conn
->chan_lock
);
4051 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4052 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4054 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4057 type
= __le16_to_cpu(req
->type
);
4059 BT_DBG("type 0x%4.4x", type
);
4061 if (type
== L2CAP_IT_FEAT_MASK
) {
4063 u32 feat_mask
= l2cap_feat_mask
;
4064 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4065 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4066 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4068 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4071 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4072 | L2CAP_FEAT_EXT_WINDOW
;
4074 put_unaligned_le32(feat_mask
, rsp
->data
);
4075 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4077 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4079 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4082 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
4084 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
4086 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4087 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4088 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
4089 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4092 struct l2cap_info_rsp rsp
;
4093 rsp
.type
= cpu_to_le16(type
);
4094 rsp
.result
= __constant_cpu_to_le16(L2CAP_IR_NOTSUPP
);
4095 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4102 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4103 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
4105 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4108 type
= __le16_to_cpu(rsp
->type
);
4109 result
= __le16_to_cpu(rsp
->result
);
4111 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4113 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4114 if (cmd
->ident
!= conn
->info_ident
||
4115 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4118 cancel_delayed_work(&conn
->info_timer
);
4120 if (result
!= L2CAP_IR_SUCCESS
) {
4121 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4122 conn
->info_ident
= 0;
4124 l2cap_conn_start(conn
);
4130 case L2CAP_IT_FEAT_MASK
:
4131 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4133 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4134 struct l2cap_info_req req
;
4135 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4137 conn
->info_ident
= l2cap_get_ident(conn
);
4139 l2cap_send_cmd(conn
, conn
->info_ident
,
4140 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4142 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4143 conn
->info_ident
= 0;
4145 l2cap_conn_start(conn
);
4149 case L2CAP_IT_FIXED_CHAN
:
4150 conn
->fixed_chan_mask
= rsp
->data
[0];
4151 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4152 conn
->info_ident
= 0;
4154 l2cap_conn_start(conn
);
4161 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4162 struct l2cap_cmd_hdr
*cmd
,
4163 u16 cmd_len
, void *data
)
4165 struct l2cap_create_chan_req
*req
= data
;
4166 struct l2cap_chan
*chan
;
4169 if (cmd_len
!= sizeof(*req
))
4175 psm
= le16_to_cpu(req
->psm
);
4176 scid
= le16_to_cpu(req
->scid
);
4178 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4181 struct hci_dev
*hdev
;
4183 /* Validate AMP controller id */
4184 hdev
= hci_dev_get(req
->amp_id
);
4185 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4186 !test_bit(HCI_UP
, &hdev
->flags
)) {
4187 struct l2cap_create_chan_rsp rsp
;
4190 rsp
.scid
= cpu_to_le16(scid
);
4191 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_BAD_AMP
);
4192 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4194 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
4206 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4212 static void l2cap_send_move_chan_req(struct l2cap_chan
*chan
, u8 dest_amp_id
)
4214 struct l2cap_move_chan_req req
;
4217 BT_DBG("chan %p, dest_amp_id %d", chan
, dest_amp_id
);
4219 ident
= l2cap_get_ident(chan
->conn
);
4220 chan
->ident
= ident
;
4222 req
.icid
= cpu_to_le16(chan
->scid
);
4223 req
.dest_amp_id
= dest_amp_id
;
4225 l2cap_send_cmd(chan
->conn
, ident
, L2CAP_MOVE_CHAN_REQ
, sizeof(req
),
4228 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4231 static void l2cap_send_move_chan_rsp(struct l2cap_chan
*chan
, u16 result
)
4233 struct l2cap_move_chan_rsp rsp
;
4235 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4237 rsp
.icid
= cpu_to_le16(chan
->dcid
);
4238 rsp
.result
= cpu_to_le16(result
);
4240 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_RSP
,
4244 static void l2cap_send_move_chan_cfm(struct l2cap_chan
*chan
, u16 result
)
4246 struct l2cap_move_chan_cfm cfm
;
4248 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4250 chan
->ident
= l2cap_get_ident(chan
->conn
);
4252 cfm
.icid
= cpu_to_le16(chan
->scid
);
4253 cfm
.result
= cpu_to_le16(result
);
4255 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_CFM
,
4258 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4261 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn
*conn
, u16 icid
)
4263 struct l2cap_move_chan_cfm cfm
;
4265 BT_DBG("conn %p, icid 0x%4.4x", conn
, icid
);
4267 cfm
.icid
= cpu_to_le16(icid
);
4268 cfm
.result
= __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED
);
4270 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_MOVE_CHAN_CFM
,
4274 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4277 struct l2cap_move_chan_cfm_rsp rsp
;
4279 BT_DBG("icid 0x%4.4x", icid
);
4281 rsp
.icid
= cpu_to_le16(icid
);
4282 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4285 static void __release_logical_link(struct l2cap_chan
*chan
)
4287 chan
->hs_hchan
= NULL
;
4288 chan
->hs_hcon
= NULL
;
4290 /* Placeholder - release the logical link */
4293 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4295 /* Logical link setup failed */
4296 if (chan
->state
!= BT_CONNECTED
) {
4297 /* Create channel failure, disconnect */
4298 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4302 switch (chan
->move_role
) {
4303 case L2CAP_MOVE_ROLE_RESPONDER
:
4304 l2cap_move_done(chan
);
4305 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4307 case L2CAP_MOVE_ROLE_INITIATOR
:
4308 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4309 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4310 /* Remote has only sent pending or
4311 * success responses, clean up
4313 l2cap_move_done(chan
);
4316 /* Other amp move states imply that the move
4317 * has already aborted
4319 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4324 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4325 struct hci_chan
*hchan
)
4327 struct l2cap_conf_rsp rsp
;
4330 chan
->hs_hcon
= hchan
->conn
;
4331 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4333 code
= l2cap_build_conf_rsp(chan
, &rsp
,
4334 L2CAP_CONF_SUCCESS
, 0);
4335 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CONF_RSP
, code
,
4337 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
4339 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4342 set_default_fcs(chan
);
4344 err
= l2cap_ertm_init(chan
);
4346 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
4348 l2cap_chan_ready(chan
);
4352 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4353 struct hci_chan
*hchan
)
4355 chan
->hs_hcon
= hchan
->conn
;
4356 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4358 BT_DBG("move_state %d", chan
->move_state
);
4360 switch (chan
->move_state
) {
4361 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4362 /* Move confirm will be sent after a success
4363 * response is received
4365 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4367 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4368 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4369 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4370 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4371 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4372 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4373 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4374 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4375 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4379 /* Move was not in expected state, free the channel */
4380 __release_logical_link(chan
);
4382 chan
->move_state
= L2CAP_MOVE_STABLE
;
4386 /* Call with chan locked */
4387 static void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4390 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4393 l2cap_logical_fail(chan
);
4394 __release_logical_link(chan
);
4398 if (chan
->state
!= BT_CONNECTED
) {
4399 /* Ignore logical link if channel is on BR/EDR */
4400 if (chan
->local_amp_id
)
4401 l2cap_logical_finish_create(chan
, hchan
);
4403 l2cap_logical_finish_move(chan
, hchan
);
4407 static void l2cap_do_create(struct l2cap_chan
*chan
, int result
,
4408 u8 local_amp_id
, u8 remote_amp_id
)
4410 if (!test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
4411 struct l2cap_conn_rsp rsp
;
4413 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4414 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4416 /* Incoming channel on AMP */
4417 if (result
== L2CAP_CR_SUCCESS
) {
4418 /* Send successful response */
4419 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
4420 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4422 /* Send negative response */
4423 rsp
.result
= cpu_to_le16(L2CAP_CR_NO_MEM
);
4424 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4427 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_RSP
,
4430 if (result
== L2CAP_CR_SUCCESS
) {
4431 __l2cap_state_change(chan
, BT_CONFIG
);
4432 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4433 l2cap_send_cmd(chan
->conn
, l2cap_get_ident(chan
->conn
),
4435 l2cap_build_conf_req(chan
, buf
), buf
);
4436 chan
->num_conf_req
++;
4439 /* Outgoing channel on AMP */
4440 if (result
== L2CAP_CR_SUCCESS
) {
4441 chan
->local_amp_id
= local_amp_id
;
4442 l2cap_send_create_chan_req(chan
, remote_amp_id
);
4444 /* Revert to BR/EDR connect */
4445 l2cap_send_conn_req(chan
);
4450 static void l2cap_do_move_initiate(struct l2cap_chan
*chan
, u8 local_amp_id
,
4453 l2cap_move_setup(chan
);
4454 chan
->move_id
= local_amp_id
;
4455 chan
->move_state
= L2CAP_MOVE_WAIT_RSP
;
4457 l2cap_send_move_chan_req(chan
, remote_amp_id
);
4460 static void l2cap_do_move_respond(struct l2cap_chan
*chan
, int result
)
4462 struct hci_chan
*hchan
= NULL
;
4464 /* Placeholder - get hci_chan for logical link */
4467 if (hchan
->state
== BT_CONNECTED
) {
4468 /* Logical link is ready to go */
4469 chan
->hs_hcon
= hchan
->conn
;
4470 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4471 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4472 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4474 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4476 /* Wait for logical link to be ready */
4477 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4480 /* Logical link not available */
4481 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_ALLOWED
);
4485 static void l2cap_do_move_cancel(struct l2cap_chan
*chan
, int result
)
4487 if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4489 if (result
== -EINVAL
)
4490 rsp_result
= L2CAP_MR_BAD_ID
;
4492 rsp_result
= L2CAP_MR_NOT_ALLOWED
;
4494 l2cap_send_move_chan_rsp(chan
, rsp_result
);
4497 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
4498 chan
->move_state
= L2CAP_MOVE_STABLE
;
4500 /* Restart data transmission */
4501 l2cap_ertm_send(chan
);
4504 void l2cap_physical_cfm(struct l2cap_chan
*chan
, int result
, u8 local_amp_id
,
4507 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4508 chan
, result
, local_amp_id
, remote_amp_id
);
4510 l2cap_chan_lock(chan
);
4512 if (chan
->state
== BT_DISCONN
|| chan
->state
== BT_CLOSED
) {
4513 l2cap_chan_unlock(chan
);
4517 if (chan
->state
!= BT_CONNECTED
) {
4518 l2cap_do_create(chan
, result
, local_amp_id
, remote_amp_id
);
4519 } else if (result
!= L2CAP_MR_SUCCESS
) {
4520 l2cap_do_move_cancel(chan
, result
);
4522 switch (chan
->move_role
) {
4523 case L2CAP_MOVE_ROLE_INITIATOR
:
4524 l2cap_do_move_initiate(chan
, local_amp_id
,
4527 case L2CAP_MOVE_ROLE_RESPONDER
:
4528 l2cap_do_move_respond(chan
, result
);
4531 l2cap_do_move_cancel(chan
, result
);
4536 l2cap_chan_unlock(chan
);
4539 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4540 struct l2cap_cmd_hdr
*cmd
,
4541 u16 cmd_len
, void *data
)
4543 struct l2cap_move_chan_req
*req
= data
;
4544 struct l2cap_move_chan_rsp rsp
;
4545 struct l2cap_chan
*chan
;
4547 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4549 if (cmd_len
!= sizeof(*req
))
4552 icid
= le16_to_cpu(req
->icid
);
4554 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4559 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4561 rsp
.icid
= cpu_to_le16(icid
);
4562 rsp
.result
= __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
4563 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
4568 chan
->ident
= cmd
->ident
;
4570 if (chan
->scid
< L2CAP_CID_DYN_START
||
4571 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
4572 (chan
->mode
!= L2CAP_MODE_ERTM
&&
4573 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
4574 result
= L2CAP_MR_NOT_ALLOWED
;
4575 goto send_move_response
;
4578 if (chan
->local_amp_id
== req
->dest_amp_id
) {
4579 result
= L2CAP_MR_SAME_ID
;
4580 goto send_move_response
;
4583 if (req
->dest_amp_id
) {
4584 struct hci_dev
*hdev
;
4585 hdev
= hci_dev_get(req
->dest_amp_id
);
4586 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4587 !test_bit(HCI_UP
, &hdev
->flags
)) {
4591 result
= L2CAP_MR_BAD_ID
;
4592 goto send_move_response
;
4597 /* Detect a move collision. Only send a collision response
4598 * if this side has "lost", otherwise proceed with the move.
4599 * The winner has the larger bd_addr.
4601 if ((__chan_is_moving(chan
) ||
4602 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
4603 bacmp(conn
->src
, conn
->dst
) > 0) {
4604 result
= L2CAP_MR_COLLISION
;
4605 goto send_move_response
;
4608 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4609 l2cap_move_setup(chan
);
4610 chan
->move_id
= req
->dest_amp_id
;
4613 if (!req
->dest_amp_id
) {
4614 /* Moving to BR/EDR */
4615 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4616 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4617 result
= L2CAP_MR_PEND
;
4619 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4620 result
= L2CAP_MR_SUCCESS
;
4623 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4624 /* Placeholder - uncomment when amp functions are available */
4625 /*amp_accept_physical(chan, req->dest_amp_id);*/
4626 result
= L2CAP_MR_PEND
;
4630 l2cap_send_move_chan_rsp(chan
, result
);
4632 l2cap_chan_unlock(chan
);
4637 static void l2cap_move_continue(struct l2cap_conn
*conn
, u16 icid
, u16 result
)
4639 struct l2cap_chan
*chan
;
4640 struct hci_chan
*hchan
= NULL
;
4642 chan
= l2cap_get_chan_by_scid(conn
, icid
);
4644 l2cap_send_move_chan_cfm_icid(conn
, icid
);
4648 __clear_chan_timer(chan
);
4649 if (result
== L2CAP_MR_PEND
)
4650 __set_chan_timer(chan
, L2CAP_MOVE_ERTX_TIMEOUT
);
4652 switch (chan
->move_state
) {
4653 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4654 /* Move confirm will be sent when logical link
4657 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4659 case L2CAP_MOVE_WAIT_RSP_SUCCESS
:
4660 if (result
== L2CAP_MR_PEND
) {
4662 } else if (test_bit(CONN_LOCAL_BUSY
,
4663 &chan
->conn_state
)) {
4664 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4666 /* Logical link is up or moving to BR/EDR,
4669 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4670 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4673 case L2CAP_MOVE_WAIT_RSP
:
4675 if (result
== L2CAP_MR_SUCCESS
) {
4676 /* Remote is ready, send confirm immediately
4677 * after logical link is ready
4679 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4681 /* Both logical link and move success
4682 * are required to confirm
4684 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_COMP
;
4687 /* Placeholder - get hci_chan for logical link */
4689 /* Logical link not available */
4690 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4694 /* If the logical link is not yet connected, do not
4695 * send confirmation.
4697 if (hchan
->state
!= BT_CONNECTED
)
4700 /* Logical link is already ready to go */
4702 chan
->hs_hcon
= hchan
->conn
;
4703 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4705 if (result
== L2CAP_MR_SUCCESS
) {
4706 /* Can confirm now */
4707 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4709 /* Now only need move success
4712 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4715 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4718 /* Any other amp move state means the move failed. */
4719 chan
->move_id
= chan
->local_amp_id
;
4720 l2cap_move_done(chan
);
4721 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4724 l2cap_chan_unlock(chan
);
4727 static void l2cap_move_fail(struct l2cap_conn
*conn
, u8 ident
, u16 icid
,
4730 struct l2cap_chan
*chan
;
4732 chan
= l2cap_get_chan_by_ident(conn
, ident
);
4734 /* Could not locate channel, icid is best guess */
4735 l2cap_send_move_chan_cfm_icid(conn
, icid
);
4739 __clear_chan_timer(chan
);
4741 if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4742 if (result
== L2CAP_MR_COLLISION
) {
4743 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4745 /* Cleanup - cancel move */
4746 chan
->move_id
= chan
->local_amp_id
;
4747 l2cap_move_done(chan
);
4751 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4753 l2cap_chan_unlock(chan
);
4756 static int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
4757 struct l2cap_cmd_hdr
*cmd
,
4758 u16 cmd_len
, void *data
)
4760 struct l2cap_move_chan_rsp
*rsp
= data
;
4763 if (cmd_len
!= sizeof(*rsp
))
4766 icid
= le16_to_cpu(rsp
->icid
);
4767 result
= le16_to_cpu(rsp
->result
);
4769 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4771 if (result
== L2CAP_MR_SUCCESS
|| result
== L2CAP_MR_PEND
)
4772 l2cap_move_continue(conn
, icid
, result
);
4774 l2cap_move_fail(conn
, cmd
->ident
, icid
, result
);
4779 static int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
4780 struct l2cap_cmd_hdr
*cmd
,
4781 u16 cmd_len
, void *data
)
4783 struct l2cap_move_chan_cfm
*cfm
= data
;
4784 struct l2cap_chan
*chan
;
4787 if (cmd_len
!= sizeof(*cfm
))
4790 icid
= le16_to_cpu(cfm
->icid
);
4791 result
= le16_to_cpu(cfm
->result
);
4793 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
4795 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4797 /* Spec requires a response even if the icid was not found */
4798 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
4802 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM
) {
4803 if (result
== L2CAP_MC_CONFIRMED
) {
4804 chan
->local_amp_id
= chan
->move_id
;
4805 if (!chan
->local_amp_id
)
4806 __release_logical_link(chan
);
4808 chan
->move_id
= chan
->local_amp_id
;
4811 l2cap_move_done(chan
);
4814 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
4816 l2cap_chan_unlock(chan
);
4821 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
4822 struct l2cap_cmd_hdr
*cmd
,
4823 u16 cmd_len
, void *data
)
4825 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
4826 struct l2cap_chan
*chan
;
4829 if (cmd_len
!= sizeof(*rsp
))
4832 icid
= le16_to_cpu(rsp
->icid
);
4834 BT_DBG("icid 0x%4.4x", icid
);
4836 chan
= l2cap_get_chan_by_scid(conn
, icid
);
4840 __clear_chan_timer(chan
);
4842 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM_RSP
) {
4843 chan
->local_amp_id
= chan
->move_id
;
4845 if (!chan
->local_amp_id
&& chan
->hs_hchan
)
4846 __release_logical_link(chan
);
4848 l2cap_move_done(chan
);
4851 l2cap_chan_unlock(chan
);
4856 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
4861 if (min
> max
|| min
< 6 || max
> 3200)
4864 if (to_multiplier
< 10 || to_multiplier
> 3200)
4867 if (max
>= to_multiplier
* 8)
4870 max_latency
= (to_multiplier
* 8 / max
) - 1;
4871 if (latency
> 499 || latency
> max_latency
)
4877 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
4878 struct l2cap_cmd_hdr
*cmd
,
4881 struct hci_conn
*hcon
= conn
->hcon
;
4882 struct l2cap_conn_param_update_req
*req
;
4883 struct l2cap_conn_param_update_rsp rsp
;
4884 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
4887 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
4890 cmd_len
= __le16_to_cpu(cmd
->len
);
4891 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
4894 req
= (struct l2cap_conn_param_update_req
*) data
;
4895 min
= __le16_to_cpu(req
->min
);
4896 max
= __le16_to_cpu(req
->max
);
4897 latency
= __le16_to_cpu(req
->latency
);
4898 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
4900 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
4901 min
, max
, latency
, to_multiplier
);
4903 memset(&rsp
, 0, sizeof(rsp
));
4905 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
4907 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
4909 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
4911 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
4915 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
4920 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
4921 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4926 switch (cmd
->code
) {
4927 case L2CAP_COMMAND_REJ
:
4928 l2cap_command_rej(conn
, cmd
, data
);
4931 case L2CAP_CONN_REQ
:
4932 err
= l2cap_connect_req(conn
, cmd
, data
);
4935 case L2CAP_CONN_RSP
:
4936 case L2CAP_CREATE_CHAN_RSP
:
4937 err
= l2cap_connect_create_rsp(conn
, cmd
, data
);
4940 case L2CAP_CONF_REQ
:
4941 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
4944 case L2CAP_CONF_RSP
:
4945 err
= l2cap_config_rsp(conn
, cmd
, data
);
4948 case L2CAP_DISCONN_REQ
:
4949 err
= l2cap_disconnect_req(conn
, cmd
, data
);
4952 case L2CAP_DISCONN_RSP
:
4953 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
4956 case L2CAP_ECHO_REQ
:
4957 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
4960 case L2CAP_ECHO_RSP
:
4963 case L2CAP_INFO_REQ
:
4964 err
= l2cap_information_req(conn
, cmd
, data
);
4967 case L2CAP_INFO_RSP
:
4968 err
= l2cap_information_rsp(conn
, cmd
, data
);
4971 case L2CAP_CREATE_CHAN_REQ
:
4972 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
4975 case L2CAP_MOVE_CHAN_REQ
:
4976 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
4979 case L2CAP_MOVE_CHAN_RSP
:
4980 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
4983 case L2CAP_MOVE_CHAN_CFM
:
4984 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
4987 case L2CAP_MOVE_CHAN_CFM_RSP
:
4988 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
4992 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
5000 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
5001 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
5003 switch (cmd
->code
) {
5004 case L2CAP_COMMAND_REJ
:
5007 case L2CAP_CONN_PARAM_UPDATE_REQ
:
5008 return l2cap_conn_param_update_req(conn
, cmd
, data
);
5010 case L2CAP_CONN_PARAM_UPDATE_RSP
:
5014 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
5019 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
5020 struct sk_buff
*skb
)
5022 u8
*data
= skb
->data
;
5024 struct l2cap_cmd_hdr cmd
;
5027 l2cap_raw_recv(conn
, skb
);
5029 while (len
>= L2CAP_CMD_HDR_SIZE
) {
5031 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
5032 data
+= L2CAP_CMD_HDR_SIZE
;
5033 len
-= L2CAP_CMD_HDR_SIZE
;
5035 cmd_len
= le16_to_cpu(cmd
.len
);
5037 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
5040 if (cmd_len
> len
|| !cmd
.ident
) {
5041 BT_DBG("corrupted command");
5045 if (conn
->hcon
->type
== LE_LINK
)
5046 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
5048 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
5051 struct l2cap_cmd_rej_unk rej
;
5053 BT_ERR("Wrong link type (%d)", err
);
5055 /* FIXME: Map err to a valid reason */
5056 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5057 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
5068 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5070 u16 our_fcs
, rcv_fcs
;
5073 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
5074 hdr_size
= L2CAP_EXT_HDR_SIZE
;
5076 hdr_size
= L2CAP_ENH_HDR_SIZE
;
5078 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
5079 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
5080 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
5081 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
5083 if (our_fcs
!= rcv_fcs
)
5089 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
5091 struct l2cap_ctrl control
;
5093 BT_DBG("chan %p", chan
);
5095 memset(&control
, 0, sizeof(control
));
5098 control
.reqseq
= chan
->buffer_seq
;
5099 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5101 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5102 control
.super
= L2CAP_SUPER_RNR
;
5103 l2cap_send_sframe(chan
, &control
);
5106 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
5107 chan
->unacked_frames
> 0)
5108 __set_retrans_timer(chan
);
5110 /* Send pending iframes */
5111 l2cap_ertm_send(chan
);
5113 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
5114 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
5115 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5118 control
.super
= L2CAP_SUPER_RR
;
5119 l2cap_send_sframe(chan
, &control
);
5123 static void append_skb_frag(struct sk_buff
*skb
, struct sk_buff
*new_frag
,
5124 struct sk_buff
**last_frag
)
5126 /* skb->len reflects data in skb as well as all fragments
5127 * skb->data_len reflects only data in fragments
5129 if (!skb_has_frag_list(skb
))
5130 skb_shinfo(skb
)->frag_list
= new_frag
;
5132 new_frag
->next
= NULL
;
5134 (*last_frag
)->next
= new_frag
;
5135 *last_frag
= new_frag
;
5137 skb
->len
+= new_frag
->len
;
5138 skb
->data_len
+= new_frag
->len
;
5139 skb
->truesize
+= new_frag
->truesize
;
5142 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
5143 struct l2cap_ctrl
*control
)
5147 switch (control
->sar
) {
5148 case L2CAP_SAR_UNSEGMENTED
:
5152 err
= chan
->ops
->recv(chan
, skb
);
5155 case L2CAP_SAR_START
:
5159 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
5160 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
5162 if (chan
->sdu_len
> chan
->imtu
) {
5167 if (skb
->len
>= chan
->sdu_len
)
5171 chan
->sdu_last_frag
= skb
;
5177 case L2CAP_SAR_CONTINUE
:
5181 append_skb_frag(chan
->sdu
, skb
,
5182 &chan
->sdu_last_frag
);
5185 if (chan
->sdu
->len
>= chan
->sdu_len
)
5195 append_skb_frag(chan
->sdu
, skb
,
5196 &chan
->sdu_last_frag
);
5199 if (chan
->sdu
->len
!= chan
->sdu_len
)
5202 err
= chan
->ops
->recv(chan
, chan
->sdu
);
5205 /* Reassembly complete */
5207 chan
->sdu_last_frag
= NULL
;
5215 kfree_skb(chan
->sdu
);
5217 chan
->sdu_last_frag
= NULL
;
5224 static int l2cap_resegment(struct l2cap_chan
*chan
)
5230 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
5234 if (chan
->mode
!= L2CAP_MODE_ERTM
)
5237 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
5238 l2cap_tx(chan
, NULL
, NULL
, event
);
5241 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
5244 /* Pass sequential frames to l2cap_reassemble_sdu()
5245 * until a gap is encountered.
5248 BT_DBG("chan %p", chan
);
5250 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5251 struct sk_buff
*skb
;
5252 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5253 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
5255 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
5260 skb_unlink(skb
, &chan
->srej_q
);
5261 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5262 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
5267 if (skb_queue_empty(&chan
->srej_q
)) {
5268 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
5269 l2cap_send_ack(chan
);
5275 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
5276 struct l2cap_ctrl
*control
)
5278 struct sk_buff
*skb
;
5280 BT_DBG("chan %p, control %p", chan
, control
);
5282 if (control
->reqseq
== chan
->next_tx_seq
) {
5283 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5284 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5288 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5291 BT_DBG("Seq %d not available for retransmission",
5296 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5297 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5298 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5302 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5304 if (control
->poll
) {
5305 l2cap_pass_to_tx(chan
, control
);
5307 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5308 l2cap_retransmit(chan
, control
);
5309 l2cap_ertm_send(chan
);
5311 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5312 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5313 chan
->srej_save_reqseq
= control
->reqseq
;
5316 l2cap_pass_to_tx_fbit(chan
, control
);
5318 if (control
->final
) {
5319 if (chan
->srej_save_reqseq
!= control
->reqseq
||
5320 !test_and_clear_bit(CONN_SREJ_ACT
,
5322 l2cap_retransmit(chan
, control
);
5324 l2cap_retransmit(chan
, control
);
5325 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5326 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5327 chan
->srej_save_reqseq
= control
->reqseq
;
5333 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
5334 struct l2cap_ctrl
*control
)
5336 struct sk_buff
*skb
;
5338 BT_DBG("chan %p, control %p", chan
, control
);
5340 if (control
->reqseq
== chan
->next_tx_seq
) {
5341 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5342 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5346 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5348 if (chan
->max_tx
&& skb
&&
5349 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5350 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5351 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5355 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5357 l2cap_pass_to_tx(chan
, control
);
5359 if (control
->final
) {
5360 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
5361 l2cap_retransmit_all(chan
, control
);
5363 l2cap_retransmit_all(chan
, control
);
5364 l2cap_ertm_send(chan
);
5365 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
5366 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
5370 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
5372 BT_DBG("chan %p, txseq %d", chan
, txseq
);
5374 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
5375 chan
->expected_tx_seq
);
5377 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
5378 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
5380 /* See notes below regarding "double poll" and
5383 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
5384 BT_DBG("Invalid/Ignore - after SREJ");
5385 return L2CAP_TXSEQ_INVALID_IGNORE
;
5387 BT_DBG("Invalid - in window after SREJ sent");
5388 return L2CAP_TXSEQ_INVALID
;
5392 if (chan
->srej_list
.head
== txseq
) {
5393 BT_DBG("Expected SREJ");
5394 return L2CAP_TXSEQ_EXPECTED_SREJ
;
5397 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
5398 BT_DBG("Duplicate SREJ - txseq already stored");
5399 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
5402 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
5403 BT_DBG("Unexpected SREJ - not requested");
5404 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
5408 if (chan
->expected_tx_seq
== txseq
) {
5409 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
5411 BT_DBG("Invalid - txseq outside tx window");
5412 return L2CAP_TXSEQ_INVALID
;
5415 return L2CAP_TXSEQ_EXPECTED
;
5419 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
5420 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
5421 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5422 return L2CAP_TXSEQ_DUPLICATE
;
5425 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
5426 /* A source of invalid packets is a "double poll" condition,
5427 * where delays cause us to send multiple poll packets. If
5428 * the remote stack receives and processes both polls,
5429 * sequence numbers can wrap around in such a way that a
5430 * resent frame has a sequence number that looks like new data
5431 * with a sequence gap. This would trigger an erroneous SREJ
5434 * Fortunately, this is impossible with a tx window that's
5435 * less than half of the maximum sequence number, which allows
5436 * invalid frames to be safely ignored.
5438 * With tx window sizes greater than half of the tx window
5439 * maximum, the frame is invalid and cannot be ignored. This
5440 * causes a disconnect.
5443 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
5444 BT_DBG("Invalid/Ignore - txseq outside tx window");
5445 return L2CAP_TXSEQ_INVALID_IGNORE
;
5447 BT_DBG("Invalid - txseq outside tx window");
5448 return L2CAP_TXSEQ_INVALID
;
5451 BT_DBG("Unexpected - txseq indicates missing frames");
5452 return L2CAP_TXSEQ_UNEXPECTED
;
5456 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
5457 struct l2cap_ctrl
*control
,
5458 struct sk_buff
*skb
, u8 event
)
5461 bool skb_in_use
= 0;
5463 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
5467 case L2CAP_EV_RECV_IFRAME
:
5468 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
5469 case L2CAP_TXSEQ_EXPECTED
:
5470 l2cap_pass_to_tx(chan
, control
);
5472 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5473 BT_DBG("Busy, discarding expected seq %d",
5478 chan
->expected_tx_seq
= __next_seq(chan
,
5481 chan
->buffer_seq
= chan
->expected_tx_seq
;
5484 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
5488 if (control
->final
) {
5489 if (!test_and_clear_bit(CONN_REJ_ACT
,
5490 &chan
->conn_state
)) {
5492 l2cap_retransmit_all(chan
, control
);
5493 l2cap_ertm_send(chan
);
5497 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
5498 l2cap_send_ack(chan
);
5500 case L2CAP_TXSEQ_UNEXPECTED
:
5501 l2cap_pass_to_tx(chan
, control
);
5503 /* Can't issue SREJ frames in the local busy state.
5504 * Drop this frame, it will be seen as missing
5505 * when local busy is exited.
5507 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5508 BT_DBG("Busy, discarding unexpected seq %d",
5513 /* There was a gap in the sequence, so an SREJ
5514 * must be sent for each missing frame. The
5515 * current frame is stored for later use.
5517 skb_queue_tail(&chan
->srej_q
, skb
);
5519 BT_DBG("Queued %p (queue len %d)", skb
,
5520 skb_queue_len(&chan
->srej_q
));
5522 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5523 l2cap_seq_list_clear(&chan
->srej_list
);
5524 l2cap_send_srej(chan
, control
->txseq
);
5526 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
5528 case L2CAP_TXSEQ_DUPLICATE
:
5529 l2cap_pass_to_tx(chan
, control
);
5531 case L2CAP_TXSEQ_INVALID_IGNORE
:
5533 case L2CAP_TXSEQ_INVALID
:
5535 l2cap_send_disconn_req(chan
->conn
, chan
,
5540 case L2CAP_EV_RECV_RR
:
5541 l2cap_pass_to_tx(chan
, control
);
5542 if (control
->final
) {
5543 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5545 if (!test_and_clear_bit(CONN_REJ_ACT
,
5546 &chan
->conn_state
)) {
5548 l2cap_retransmit_all(chan
, control
);
5551 l2cap_ertm_send(chan
);
5552 } else if (control
->poll
) {
5553 l2cap_send_i_or_rr_or_rnr(chan
);
5555 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5556 &chan
->conn_state
) &&
5557 chan
->unacked_frames
)
5558 __set_retrans_timer(chan
);
5560 l2cap_ertm_send(chan
);
5563 case L2CAP_EV_RECV_RNR
:
5564 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5565 l2cap_pass_to_tx(chan
, control
);
5566 if (control
&& control
->poll
) {
5567 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5568 l2cap_send_rr_or_rnr(chan
, 0);
5570 __clear_retrans_timer(chan
);
5571 l2cap_seq_list_clear(&chan
->retrans_list
);
5573 case L2CAP_EV_RECV_REJ
:
5574 l2cap_handle_rej(chan
, control
);
5576 case L2CAP_EV_RECV_SREJ
:
5577 l2cap_handle_srej(chan
, control
);
5583 if (skb
&& !skb_in_use
) {
5584 BT_DBG("Freeing %p", skb
);
5591 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
5592 struct l2cap_ctrl
*control
,
5593 struct sk_buff
*skb
, u8 event
)
5596 u16 txseq
= control
->txseq
;
5597 bool skb_in_use
= 0;
5599 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
5603 case L2CAP_EV_RECV_IFRAME
:
5604 switch (l2cap_classify_txseq(chan
, txseq
)) {
5605 case L2CAP_TXSEQ_EXPECTED
:
5606 /* Keep frame for reassembly later */
5607 l2cap_pass_to_tx(chan
, control
);
5608 skb_queue_tail(&chan
->srej_q
, skb
);
5610 BT_DBG("Queued %p (queue len %d)", skb
,
5611 skb_queue_len(&chan
->srej_q
));
5613 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
5615 case L2CAP_TXSEQ_EXPECTED_SREJ
:
5616 l2cap_seq_list_pop(&chan
->srej_list
);
5618 l2cap_pass_to_tx(chan
, control
);
5619 skb_queue_tail(&chan
->srej_q
, skb
);
5621 BT_DBG("Queued %p (queue len %d)", skb
,
5622 skb_queue_len(&chan
->srej_q
));
5624 err
= l2cap_rx_queued_iframes(chan
);
5629 case L2CAP_TXSEQ_UNEXPECTED
:
5630 /* Got a frame that can't be reassembled yet.
5631 * Save it for later, and send SREJs to cover
5632 * the missing frames.
5634 skb_queue_tail(&chan
->srej_q
, skb
);
5636 BT_DBG("Queued %p (queue len %d)", skb
,
5637 skb_queue_len(&chan
->srej_q
));
5639 l2cap_pass_to_tx(chan
, control
);
5640 l2cap_send_srej(chan
, control
->txseq
);
5642 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
5643 /* This frame was requested with an SREJ, but
5644 * some expected retransmitted frames are
5645 * missing. Request retransmission of missing
5648 skb_queue_tail(&chan
->srej_q
, skb
);
5650 BT_DBG("Queued %p (queue len %d)", skb
,
5651 skb_queue_len(&chan
->srej_q
));
5653 l2cap_pass_to_tx(chan
, control
);
5654 l2cap_send_srej_list(chan
, control
->txseq
);
5656 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
5657 /* We've already queued this frame. Drop this copy. */
5658 l2cap_pass_to_tx(chan
, control
);
5660 case L2CAP_TXSEQ_DUPLICATE
:
5661 /* Expecting a later sequence number, so this frame
5662 * was already received. Ignore it completely.
5665 case L2CAP_TXSEQ_INVALID_IGNORE
:
5667 case L2CAP_TXSEQ_INVALID
:
5669 l2cap_send_disconn_req(chan
->conn
, chan
,
5674 case L2CAP_EV_RECV_RR
:
5675 l2cap_pass_to_tx(chan
, control
);
5676 if (control
->final
) {
5677 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5679 if (!test_and_clear_bit(CONN_REJ_ACT
,
5680 &chan
->conn_state
)) {
5682 l2cap_retransmit_all(chan
, control
);
5685 l2cap_ertm_send(chan
);
5686 } else if (control
->poll
) {
5687 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5688 &chan
->conn_state
) &&
5689 chan
->unacked_frames
) {
5690 __set_retrans_timer(chan
);
5693 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5694 l2cap_send_srej_tail(chan
);
5696 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5697 &chan
->conn_state
) &&
5698 chan
->unacked_frames
)
5699 __set_retrans_timer(chan
);
5701 l2cap_send_ack(chan
);
5704 case L2CAP_EV_RECV_RNR
:
5705 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5706 l2cap_pass_to_tx(chan
, control
);
5707 if (control
->poll
) {
5708 l2cap_send_srej_tail(chan
);
5710 struct l2cap_ctrl rr_control
;
5711 memset(&rr_control
, 0, sizeof(rr_control
));
5712 rr_control
.sframe
= 1;
5713 rr_control
.super
= L2CAP_SUPER_RR
;
5714 rr_control
.reqseq
= chan
->buffer_seq
;
5715 l2cap_send_sframe(chan
, &rr_control
);
5719 case L2CAP_EV_RECV_REJ
:
5720 l2cap_handle_rej(chan
, control
);
5722 case L2CAP_EV_RECV_SREJ
:
5723 l2cap_handle_srej(chan
, control
);
5727 if (skb
&& !skb_in_use
) {
5728 BT_DBG("Freeing %p", skb
);
5735 static int l2cap_finish_move(struct l2cap_chan
*chan
)
5737 BT_DBG("chan %p", chan
);
5739 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
5742 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
5744 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
5746 return l2cap_resegment(chan
);
5749 static int l2cap_rx_state_wait_p(struct l2cap_chan
*chan
,
5750 struct l2cap_ctrl
*control
,
5751 struct sk_buff
*skb
, u8 event
)
5755 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
5761 l2cap_process_reqseq(chan
, control
->reqseq
);
5763 if (!skb_queue_empty(&chan
->tx_q
))
5764 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
5766 chan
->tx_send_head
= NULL
;
5768 /* Rewind next_tx_seq to the point expected
5771 chan
->next_tx_seq
= control
->reqseq
;
5772 chan
->unacked_frames
= 0;
5774 err
= l2cap_finish_move(chan
);
5778 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5779 l2cap_send_i_or_rr_or_rnr(chan
);
5781 if (event
== L2CAP_EV_RECV_IFRAME
)
5784 return l2cap_rx_state_recv(chan
, control
, NULL
, event
);
5787 static int l2cap_rx_state_wait_f(struct l2cap_chan
*chan
,
5788 struct l2cap_ctrl
*control
,
5789 struct sk_buff
*skb
, u8 event
)
5793 if (!control
->final
)
5796 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5798 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
5799 l2cap_process_reqseq(chan
, control
->reqseq
);
5801 if (!skb_queue_empty(&chan
->tx_q
))
5802 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
5804 chan
->tx_send_head
= NULL
;
5806 /* Rewind next_tx_seq to the point expected
5809 chan
->next_tx_seq
= control
->reqseq
;
5810 chan
->unacked_frames
= 0;
5813 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
5815 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
5817 err
= l2cap_resegment(chan
);
5820 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
5825 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
5827 /* Make sure reqseq is for a packet that has been sent but not acked */
5830 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
5831 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
5834 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5835 struct sk_buff
*skb
, u8 event
)
5839 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
5840 control
, skb
, event
, chan
->rx_state
);
5842 if (__valid_reqseq(chan
, control
->reqseq
)) {
5843 switch (chan
->rx_state
) {
5844 case L2CAP_RX_STATE_RECV
:
5845 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
5847 case L2CAP_RX_STATE_SREJ_SENT
:
5848 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
5851 case L2CAP_RX_STATE_WAIT_P
:
5852 err
= l2cap_rx_state_wait_p(chan
, control
, skb
, event
);
5854 case L2CAP_RX_STATE_WAIT_F
:
5855 err
= l2cap_rx_state_wait_f(chan
, control
, skb
, event
);
5862 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
5863 control
->reqseq
, chan
->next_tx_seq
,
5864 chan
->expected_ack_seq
);
5865 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5871 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
5872 struct sk_buff
*skb
)
5876 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
5879 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
5880 L2CAP_TXSEQ_EXPECTED
) {
5881 l2cap_pass_to_tx(chan
, control
);
5883 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
5884 __next_seq(chan
, chan
->buffer_seq
));
5886 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5888 l2cap_reassemble_sdu(chan
, skb
, control
);
5891 kfree_skb(chan
->sdu
);
5894 chan
->sdu_last_frag
= NULL
;
5898 BT_DBG("Freeing %p", skb
);
5903 chan
->last_acked_seq
= control
->txseq
;
5904 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
5909 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5911 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
5915 __unpack_control(chan
, skb
);
5920 * We can just drop the corrupted I-frame here.
5921 * Receiver will miss it and start proper recovery
5922 * procedures and ask for retransmission.
5924 if (l2cap_check_fcs(chan
, skb
))
5927 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
5928 len
-= L2CAP_SDULEN_SIZE
;
5930 if (chan
->fcs
== L2CAP_FCS_CRC16
)
5931 len
-= L2CAP_FCS_SIZE
;
5933 if (len
> chan
->mps
) {
5934 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5938 if (!control
->sframe
) {
5941 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
5942 control
->sar
, control
->reqseq
, control
->final
,
5945 /* Validate F-bit - F=0 always valid, F=1 only
5946 * valid in TX WAIT_F
5948 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
5951 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
5952 event
= L2CAP_EV_RECV_IFRAME
;
5953 err
= l2cap_rx(chan
, control
, skb
, event
);
5955 err
= l2cap_stream_rx(chan
, control
, skb
);
5959 l2cap_send_disconn_req(chan
->conn
, chan
,
5962 const u8 rx_func_to_event
[4] = {
5963 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
5964 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
5967 /* Only I-frames are expected in streaming mode */
5968 if (chan
->mode
== L2CAP_MODE_STREAMING
)
5971 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
5972 control
->reqseq
, control
->final
, control
->poll
,
5977 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5981 /* Validate F and P bits */
5982 if (control
->final
&& (control
->poll
||
5983 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
5986 event
= rx_func_to_event
[control
->super
];
5987 if (l2cap_rx(chan
, control
, skb
, event
))
5988 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
5998 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
5999 struct sk_buff
*skb
)
6001 struct l2cap_chan
*chan
;
6003 chan
= l2cap_get_chan_by_scid(conn
, cid
);
6005 if (cid
== L2CAP_CID_A2MP
) {
6006 chan
= a2mp_channel_create(conn
, skb
);
6012 l2cap_chan_lock(chan
);
6014 BT_DBG("unknown cid 0x%4.4x", cid
);
6015 /* Drop packet and return */
6021 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6023 if (chan
->state
!= BT_CONNECTED
)
6026 switch (chan
->mode
) {
6027 case L2CAP_MODE_BASIC
:
6028 /* If socket recv buffers overflows we drop data here
6029 * which is *bad* because L2CAP has to be reliable.
6030 * But we don't have any other choice. L2CAP doesn't
6031 * provide flow control mechanism. */
6033 if (chan
->imtu
< skb
->len
)
6036 if (!chan
->ops
->recv(chan
, skb
))
6040 case L2CAP_MODE_ERTM
:
6041 case L2CAP_MODE_STREAMING
:
6042 l2cap_data_rcv(chan
, skb
);
6046 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
6054 l2cap_chan_unlock(chan
);
6057 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
6058 struct sk_buff
*skb
)
6060 struct l2cap_chan
*chan
;
6062 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
6066 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6068 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
6071 if (chan
->imtu
< skb
->len
)
6074 if (!chan
->ops
->recv(chan
, skb
))
6081 static void l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
6082 struct sk_buff
*skb
)
6084 struct l2cap_chan
*chan
;
6086 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
6090 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6092 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
6095 if (chan
->imtu
< skb
->len
)
6098 if (!chan
->ops
->recv(chan
, skb
))
6105 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
6107 struct l2cap_hdr
*lh
= (void *) skb
->data
;
6111 skb_pull(skb
, L2CAP_HDR_SIZE
);
6112 cid
= __le16_to_cpu(lh
->cid
);
6113 len
= __le16_to_cpu(lh
->len
);
6115 if (len
!= skb
->len
) {
6120 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
6123 case L2CAP_CID_LE_SIGNALING
:
6124 case L2CAP_CID_SIGNALING
:
6125 l2cap_sig_channel(conn
, skb
);
6128 case L2CAP_CID_CONN_LESS
:
6129 psm
= get_unaligned((__le16
*) skb
->data
);
6130 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
6131 l2cap_conless_channel(conn
, psm
, skb
);
6134 case L2CAP_CID_LE_DATA
:
6135 l2cap_att_channel(conn
, cid
, skb
);
6139 if (smp_sig_channel(conn
, skb
))
6140 l2cap_conn_del(conn
->hcon
, EACCES
);
6144 l2cap_data_channel(conn
, cid
, skb
);
6149 /* ---- L2CAP interface with lower layer (HCI) ---- */
6151 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
6153 int exact
= 0, lm1
= 0, lm2
= 0;
6154 struct l2cap_chan
*c
;
6156 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
6158 /* Find listening sockets and check their link_mode */
6159 read_lock(&chan_list_lock
);
6160 list_for_each_entry(c
, &chan_list
, global_l
) {
6161 struct sock
*sk
= c
->sk
;
6163 if (c
->state
!= BT_LISTEN
)
6166 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
6167 lm1
|= HCI_LM_ACCEPT
;
6168 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
6169 lm1
|= HCI_LM_MASTER
;
6171 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
6172 lm2
|= HCI_LM_ACCEPT
;
6173 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
6174 lm2
|= HCI_LM_MASTER
;
6177 read_unlock(&chan_list_lock
);
6179 return exact
? lm1
: lm2
;
6182 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
6184 struct l2cap_conn
*conn
;
6186 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
6189 conn
= l2cap_conn_add(hcon
, status
);
6191 l2cap_conn_ready(conn
);
6193 l2cap_conn_del(hcon
, bt_to_errno(status
));
6197 int l2cap_disconn_ind(struct hci_conn
*hcon
)
6199 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6201 BT_DBG("hcon %p", hcon
);
6204 return HCI_ERROR_REMOTE_USER_TERM
;
6205 return conn
->disc_reason
;
6208 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
6210 BT_DBG("hcon %p reason %d", hcon
, reason
);
6212 l2cap_conn_del(hcon
, bt_to_errno(reason
));
6215 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
6217 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
6220 if (encrypt
== 0x00) {
6221 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
6222 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
6223 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
6224 l2cap_chan_close(chan
, ECONNREFUSED
);
6226 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
6227 __clear_chan_timer(chan
);
6231 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
6233 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6234 struct l2cap_chan
*chan
;
6239 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
6241 if (hcon
->type
== LE_LINK
) {
6242 if (!status
&& encrypt
)
6243 smp_distribute_keys(conn
, 0);
6244 cancel_delayed_work(&conn
->security_timer
);
6247 mutex_lock(&conn
->chan_lock
);
6249 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
6250 l2cap_chan_lock(chan
);
6252 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
6253 state_to_string(chan
->state
));
6255 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
6256 l2cap_chan_unlock(chan
);
6260 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
6261 if (!status
&& encrypt
) {
6262 chan
->sec_level
= hcon
->sec_level
;
6263 l2cap_chan_ready(chan
);
6266 l2cap_chan_unlock(chan
);
6270 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
6271 l2cap_chan_unlock(chan
);
6275 if (!status
&& (chan
->state
== BT_CONNECTED
||
6276 chan
->state
== BT_CONFIG
)) {
6277 struct sock
*sk
= chan
->sk
;
6279 clear_bit(BT_SK_SUSPEND
, &bt_sk(sk
)->flags
);
6280 sk
->sk_state_change(sk
);
6282 l2cap_check_encryption(chan
, encrypt
);
6283 l2cap_chan_unlock(chan
);
6287 if (chan
->state
== BT_CONNECT
) {
6289 l2cap_start_connection(chan
);
6291 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
6293 } else if (chan
->state
== BT_CONNECT2
) {
6294 struct sock
*sk
= chan
->sk
;
6295 struct l2cap_conn_rsp rsp
;
6301 if (test_bit(BT_SK_DEFER_SETUP
,
6302 &bt_sk(sk
)->flags
)) {
6303 res
= L2CAP_CR_PEND
;
6304 stat
= L2CAP_CS_AUTHOR_PEND
;
6305 chan
->ops
->defer(chan
);
6307 __l2cap_state_change(chan
, BT_CONFIG
);
6308 res
= L2CAP_CR_SUCCESS
;
6309 stat
= L2CAP_CS_NO_INFO
;
6312 __l2cap_state_change(chan
, BT_DISCONN
);
6313 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
6314 res
= L2CAP_CR_SEC_BLOCK
;
6315 stat
= L2CAP_CS_NO_INFO
;
6320 rsp
.scid
= cpu_to_le16(chan
->dcid
);
6321 rsp
.dcid
= cpu_to_le16(chan
->scid
);
6322 rsp
.result
= cpu_to_le16(res
);
6323 rsp
.status
= cpu_to_le16(stat
);
6324 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
6327 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
6328 res
== L2CAP_CR_SUCCESS
) {
6330 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
6331 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
6333 l2cap_build_conf_req(chan
, buf
),
6335 chan
->num_conf_req
++;
6339 l2cap_chan_unlock(chan
);
6342 mutex_unlock(&conn
->chan_lock
);
6347 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
6349 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6350 struct l2cap_hdr
*hdr
;
6353 /* For AMP controller do not create l2cap conn */
6354 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_BREDR
)
6358 conn
= l2cap_conn_add(hcon
, 0);
6363 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
6367 case ACL_START_NO_FLUSH
:
6370 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
6371 kfree_skb(conn
->rx_skb
);
6372 conn
->rx_skb
= NULL
;
6374 l2cap_conn_unreliable(conn
, ECOMM
);
6377 /* Start fragment always begin with Basic L2CAP header */
6378 if (skb
->len
< L2CAP_HDR_SIZE
) {
6379 BT_ERR("Frame is too short (len %d)", skb
->len
);
6380 l2cap_conn_unreliable(conn
, ECOMM
);
6384 hdr
= (struct l2cap_hdr
*) skb
->data
;
6385 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
6387 if (len
== skb
->len
) {
6388 /* Complete frame received */
6389 l2cap_recv_frame(conn
, skb
);
6393 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
6395 if (skb
->len
> len
) {
6396 BT_ERR("Frame is too long (len %d, expected len %d)",
6398 l2cap_conn_unreliable(conn
, ECOMM
);
6402 /* Allocate skb for the complete frame (with header) */
6403 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
6407 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
6409 conn
->rx_len
= len
- skb
->len
;
6413 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
6415 if (!conn
->rx_len
) {
6416 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
6417 l2cap_conn_unreliable(conn
, ECOMM
);
6421 if (skb
->len
> conn
->rx_len
) {
6422 BT_ERR("Fragment is too long (len %d, expected %d)",
6423 skb
->len
, conn
->rx_len
);
6424 kfree_skb(conn
->rx_skb
);
6425 conn
->rx_skb
= NULL
;
6427 l2cap_conn_unreliable(conn
, ECOMM
);
6431 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
6433 conn
->rx_len
-= skb
->len
;
6435 if (!conn
->rx_len
) {
6436 /* Complete frame received */
6437 l2cap_recv_frame(conn
, conn
->rx_skb
);
6438 conn
->rx_skb
= NULL
;
6448 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
6450 struct l2cap_chan
*c
;
6452 read_lock(&chan_list_lock
);
6454 list_for_each_entry(c
, &chan_list
, global_l
) {
6455 struct sock
*sk
= c
->sk
;
6457 seq_printf(f
, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6458 &bt_sk(sk
)->src
, &bt_sk(sk
)->dst
,
6459 c
->state
, __le16_to_cpu(c
->psm
),
6460 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
6461 c
->sec_level
, c
->mode
);
6464 read_unlock(&chan_list_lock
);
6469 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
6471 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
6474 static const struct file_operations l2cap_debugfs_fops
= {
6475 .open
= l2cap_debugfs_open
,
6477 .llseek
= seq_lseek
,
6478 .release
= single_release
,
6481 static struct dentry
*l2cap_debugfs
;
6483 int __init
l2cap_init(void)
6487 err
= l2cap_init_sockets();
6492 l2cap_debugfs
= debugfs_create_file("l2cap", 0444, bt_debugfs
,
6493 NULL
, &l2cap_debugfs_fops
);
6495 BT_ERR("Failed to create L2CAP debug file");
6501 void l2cap_exit(void)
6503 debugfs_remove(l2cap_debugfs
);
6504 l2cap_cleanup_sockets();
6507 module_param(disable_ertm
, bool, 0644);
6508 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");