2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/debugfs.h>
34 #include <linux/crc16.h>
36 #include <net/bluetooth/bluetooth.h>
37 #include <net/bluetooth/hci_core.h>
38 #include <net/bluetooth/l2cap.h>
46 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
| L2CAP_FEAT_UCD
;
47 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
| L2CAP_FC_CONNLESS
, };
49 static LIST_HEAD(chan_list
);
50 static DEFINE_RWLOCK(chan_list_lock
);
52 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
53 u8 code
, u8 ident
, u16 dlen
, void *data
);
54 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
56 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
);
57 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
);
59 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
60 struct sk_buff_head
*skbs
, u8 event
);
62 static inline __u8
bdaddr_type(struct hci_conn
*hcon
, __u8 type
)
64 if (hcon
->type
== LE_LINK
) {
65 if (type
== ADDR_LE_DEV_PUBLIC
)
66 return BDADDR_LE_PUBLIC
;
68 return BDADDR_LE_RANDOM
;
74 /* ---- L2CAP channels ---- */
76 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
81 list_for_each_entry(c
, &conn
->chan_l
, list
) {
88 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
93 list_for_each_entry(c
, &conn
->chan_l
, list
) {
100 /* Find channel with given SCID.
101 * Returns locked channel. */
102 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
,
105 struct l2cap_chan
*c
;
107 mutex_lock(&conn
->chan_lock
);
108 c
= __l2cap_get_chan_by_scid(conn
, cid
);
111 mutex_unlock(&conn
->chan_lock
);
116 /* Find channel with given DCID.
117 * Returns locked channel.
119 static struct l2cap_chan
*l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
,
122 struct l2cap_chan
*c
;
124 mutex_lock(&conn
->chan_lock
);
125 c
= __l2cap_get_chan_by_dcid(conn
, cid
);
128 mutex_unlock(&conn
->chan_lock
);
133 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
136 struct l2cap_chan
*c
;
138 list_for_each_entry(c
, &conn
->chan_l
, list
) {
139 if (c
->ident
== ident
)
145 static struct l2cap_chan
*l2cap_get_chan_by_ident(struct l2cap_conn
*conn
,
148 struct l2cap_chan
*c
;
150 mutex_lock(&conn
->chan_lock
);
151 c
= __l2cap_get_chan_by_ident(conn
, ident
);
154 mutex_unlock(&conn
->chan_lock
);
159 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
161 struct l2cap_chan
*c
;
163 list_for_each_entry(c
, &chan_list
, global_l
) {
164 if (c
->sport
== psm
&& !bacmp(&c
->src
, src
))
170 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
174 write_lock(&chan_list_lock
);
176 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
189 for (p
= 0x1001; p
< 0x1100; p
+= 2)
190 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
191 chan
->psm
= cpu_to_le16(p
);
192 chan
->sport
= cpu_to_le16(p
);
199 write_unlock(&chan_list_lock
);
203 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
205 write_lock(&chan_list_lock
);
209 write_unlock(&chan_list_lock
);
214 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
216 u16 cid
= L2CAP_CID_DYN_START
;
218 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
219 if (!__l2cap_get_chan_by_scid(conn
, cid
))
226 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
228 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
229 state_to_string(state
));
232 chan
->ops
->state_change(chan
, state
, 0);
235 static inline void l2cap_state_change_and_error(struct l2cap_chan
*chan
,
239 chan
->ops
->state_change(chan
, chan
->state
, err
);
242 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
244 chan
->ops
->state_change(chan
, chan
->state
, err
);
247 static void __set_retrans_timer(struct l2cap_chan
*chan
)
249 if (!delayed_work_pending(&chan
->monitor_timer
) &&
250 chan
->retrans_timeout
) {
251 l2cap_set_timer(chan
, &chan
->retrans_timer
,
252 msecs_to_jiffies(chan
->retrans_timeout
));
256 static void __set_monitor_timer(struct l2cap_chan
*chan
)
258 __clear_retrans_timer(chan
);
259 if (chan
->monitor_timeout
) {
260 l2cap_set_timer(chan
, &chan
->monitor_timer
,
261 msecs_to_jiffies(chan
->monitor_timeout
));
265 static struct sk_buff
*l2cap_ertm_seq_in_queue(struct sk_buff_head
*head
,
270 skb_queue_walk(head
, skb
) {
271 if (bt_cb(skb
)->control
.txseq
== seq
)
278 /* ---- L2CAP sequence number lists ---- */
280 /* For ERTM, ordered lists of sequence numbers must be tracked for
281 * SREJ requests that are received and for frames that are to be
282 * retransmitted. These seq_list functions implement a singly-linked
283 * list in an array, where membership in the list can also be checked
284 * in constant time. Items can also be added to the tail of the list
285 * and removed from the head in constant time, without further memory
289 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
291 size_t alloc_size
, i
;
293 /* Allocated size is a power of 2 to map sequence numbers
294 * (which may be up to 14 bits) in to a smaller array that is
295 * sized for the negotiated ERTM transmit windows.
297 alloc_size
= roundup_pow_of_two(size
);
299 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
303 seq_list
->mask
= alloc_size
- 1;
304 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
305 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
306 for (i
= 0; i
< alloc_size
; i
++)
307 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
312 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
314 kfree(seq_list
->list
);
317 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
320 /* Constant-time check for list membership */
321 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
324 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
326 u16 mask
= seq_list
->mask
;
328 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
329 /* In case someone tries to pop the head of an empty list */
330 return L2CAP_SEQ_LIST_CLEAR
;
331 } else if (seq_list
->head
== seq
) {
332 /* Head can be removed in constant time */
333 seq_list
->head
= seq_list
->list
[seq
& mask
];
334 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
336 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
337 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
338 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
341 /* Walk the list to find the sequence number */
342 u16 prev
= seq_list
->head
;
343 while (seq_list
->list
[prev
& mask
] != seq
) {
344 prev
= seq_list
->list
[prev
& mask
];
345 if (prev
== L2CAP_SEQ_LIST_TAIL
)
346 return L2CAP_SEQ_LIST_CLEAR
;
349 /* Unlink the number from the list and clear it */
350 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
351 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
352 if (seq_list
->tail
== seq
)
353 seq_list
->tail
= prev
;
358 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
360 /* Remove the head in constant time */
361 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
364 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
368 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
)
371 for (i
= 0; i
<= seq_list
->mask
; i
++)
372 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
374 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
375 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
378 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
380 u16 mask
= seq_list
->mask
;
382 /* All appends happen in constant time */
384 if (seq_list
->list
[seq
& mask
] != L2CAP_SEQ_LIST_CLEAR
)
387 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
388 seq_list
->head
= seq
;
390 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
392 seq_list
->tail
= seq
;
393 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
/* Channel timer expiry: close the channel with a reason derived from its
 * current state, then drop the timer's reference.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
426 struct l2cap_chan
*l2cap_chan_create(void)
428 struct l2cap_chan
*chan
;
430 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
434 mutex_init(&chan
->lock
);
436 write_lock(&chan_list_lock
);
437 list_add(&chan
->global_l
, &chan_list
);
438 write_unlock(&chan_list_lock
);
440 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
442 chan
->state
= BT_OPEN
;
444 kref_init(&chan
->kref
);
446 /* This flag is cleared in l2cap_chan_ready() */
447 set_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
);
449 BT_DBG("chan %p", chan
);
454 static void l2cap_chan_destroy(struct kref
*kref
)
456 struct l2cap_chan
*chan
= container_of(kref
, struct l2cap_chan
, kref
);
458 BT_DBG("chan %p", chan
);
460 write_lock(&chan_list_lock
);
461 list_del(&chan
->global_l
);
462 write_unlock(&chan_list_lock
);
467 void l2cap_chan_hold(struct l2cap_chan
*c
)
469 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
474 void l2cap_chan_put(struct l2cap_chan
*c
)
476 BT_DBG("chan %p orig refcnt %d", c
, atomic_read(&c
->kref
.refcount
));
478 kref_put(&c
->kref
, l2cap_chan_destroy
);
481 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
483 chan
->fcs
= L2CAP_FCS_CRC16
;
484 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
485 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
486 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
487 chan
->ack_win
= L2CAP_DEFAULT_TX_WINDOW
;
488 chan
->sec_level
= BT_SECURITY_LOW
;
490 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
493 void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
495 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
496 __le16_to_cpu(chan
->psm
), chan
->dcid
);
498 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
502 switch (chan
->chan_type
) {
503 case L2CAP_CHAN_CONN_ORIENTED
:
504 if (conn
->hcon
->type
== LE_LINK
) {
506 chan
->omtu
= L2CAP_DEFAULT_MTU
;
507 if (chan
->dcid
== L2CAP_CID_ATT
)
508 chan
->scid
= L2CAP_CID_ATT
;
510 chan
->scid
= l2cap_alloc_cid(conn
);
512 /* Alloc CID for connection-oriented socket */
513 chan
->scid
= l2cap_alloc_cid(conn
);
514 chan
->omtu
= L2CAP_DEFAULT_MTU
;
518 case L2CAP_CHAN_CONN_LESS
:
519 /* Connectionless socket */
520 chan
->scid
= L2CAP_CID_CONN_LESS
;
521 chan
->dcid
= L2CAP_CID_CONN_LESS
;
522 chan
->omtu
= L2CAP_DEFAULT_MTU
;
525 case L2CAP_CHAN_CONN_FIX_A2MP
:
526 chan
->scid
= L2CAP_CID_A2MP
;
527 chan
->dcid
= L2CAP_CID_A2MP
;
528 chan
->omtu
= L2CAP_A2MP_DEFAULT_MTU
;
529 chan
->imtu
= L2CAP_A2MP_DEFAULT_MTU
;
533 /* Raw socket can send/recv signalling messages only */
534 chan
->scid
= L2CAP_CID_SIGNALING
;
535 chan
->dcid
= L2CAP_CID_SIGNALING
;
536 chan
->omtu
= L2CAP_DEFAULT_MTU
;
539 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
540 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
541 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
542 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
543 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
544 chan
->local_flush_to
= L2CAP_EFS_DEFAULT_FLUSH_TO
;
546 l2cap_chan_hold(chan
);
548 hci_conn_hold(conn
->hcon
);
550 list_add(&chan
->list
, &conn
->chan_l
);
553 void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
555 mutex_lock(&conn
->chan_lock
);
556 __l2cap_chan_add(conn
, chan
);
557 mutex_unlock(&conn
->chan_lock
);
560 void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
562 struct l2cap_conn
*conn
= chan
->conn
;
564 __clear_chan_timer(chan
);
566 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
569 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
570 /* Delete from channel list */
571 list_del(&chan
->list
);
573 l2cap_chan_put(chan
);
577 if (chan
->chan_type
!= L2CAP_CHAN_CONN_FIX_A2MP
)
578 hci_conn_drop(conn
->hcon
);
580 if (mgr
&& mgr
->bredr_chan
== chan
)
581 mgr
->bredr_chan
= NULL
;
584 if (chan
->hs_hchan
) {
585 struct hci_chan
*hs_hchan
= chan
->hs_hchan
;
587 BT_DBG("chan %p disconnect hs_hchan %p", chan
, hs_hchan
);
588 amp_disconnect_logical_link(hs_hchan
);
591 chan
->ops
->teardown(chan
, err
);
593 if (test_bit(CONF_NOT_COMPLETE
, &chan
->conf_state
))
597 case L2CAP_MODE_BASIC
:
600 case L2CAP_MODE_ERTM
:
601 __clear_retrans_timer(chan
);
602 __clear_monitor_timer(chan
);
603 __clear_ack_timer(chan
);
605 skb_queue_purge(&chan
->srej_q
);
607 l2cap_seq_list_free(&chan
->srej_list
);
608 l2cap_seq_list_free(&chan
->retrans_list
);
612 case L2CAP_MODE_STREAMING
:
613 skb_queue_purge(&chan
->tx_q
);
620 static void l2cap_chan_connect_reject(struct l2cap_chan
*chan
)
622 struct l2cap_conn
*conn
= chan
->conn
;
623 struct l2cap_conn_rsp rsp
;
626 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
))
627 result
= L2CAP_CR_SEC_BLOCK
;
629 result
= L2CAP_CR_BAD_PSM
;
631 l2cap_state_change(chan
, BT_DISCONN
);
633 rsp
.scid
= cpu_to_le16(chan
->dcid
);
634 rsp
.dcid
= cpu_to_le16(chan
->scid
);
635 rsp
.result
= cpu_to_le16(result
);
636 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
638 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
641 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
643 struct l2cap_conn
*conn
= chan
->conn
;
645 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
647 switch (chan
->state
) {
649 chan
->ops
->teardown(chan
, 0);
654 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
655 conn
->hcon
->type
== ACL_LINK
) {
656 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
657 l2cap_send_disconn_req(chan
, reason
);
659 l2cap_chan_del(chan
, reason
);
663 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
) {
664 if (conn
->hcon
->type
== ACL_LINK
)
665 l2cap_chan_connect_reject(chan
);
668 l2cap_chan_del(chan
, reason
);
673 l2cap_chan_del(chan
, reason
);
677 chan
->ops
->teardown(chan
, 0);
682 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
684 switch (chan
->chan_type
) {
686 switch (chan
->sec_level
) {
687 case BT_SECURITY_HIGH
:
688 return HCI_AT_DEDICATED_BONDING_MITM
;
689 case BT_SECURITY_MEDIUM
:
690 return HCI_AT_DEDICATED_BONDING
;
692 return HCI_AT_NO_BONDING
;
695 case L2CAP_CHAN_CONN_LESS
:
696 if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_3DSP
)) {
697 if (chan
->sec_level
== BT_SECURITY_LOW
)
698 chan
->sec_level
= BT_SECURITY_SDP
;
700 if (chan
->sec_level
== BT_SECURITY_HIGH
)
701 return HCI_AT_NO_BONDING_MITM
;
703 return HCI_AT_NO_BONDING
;
705 case L2CAP_CHAN_CONN_ORIENTED
:
706 if (chan
->psm
== __constant_cpu_to_le16(L2CAP_PSM_SDP
)) {
707 if (chan
->sec_level
== BT_SECURITY_LOW
)
708 chan
->sec_level
= BT_SECURITY_SDP
;
710 if (chan
->sec_level
== BT_SECURITY_HIGH
)
711 return HCI_AT_NO_BONDING_MITM
;
713 return HCI_AT_NO_BONDING
;
717 switch (chan
->sec_level
) {
718 case BT_SECURITY_HIGH
:
719 return HCI_AT_GENERAL_BONDING_MITM
;
720 case BT_SECURITY_MEDIUM
:
721 return HCI_AT_GENERAL_BONDING
;
723 return HCI_AT_NO_BONDING
;
729 /* Service level security */
730 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
732 struct l2cap_conn
*conn
= chan
->conn
;
735 if (conn
->hcon
->type
== LE_LINK
)
736 return smp_conn_security(conn
->hcon
, chan
->sec_level
);
738 auth_type
= l2cap_get_auth_type(chan
);
740 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
743 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
747 /* Get next available identificator.
748 * 1 - 128 are used by kernel.
749 * 129 - 199 are reserved.
750 * 200 - 254 are used by utilities like l2ping, etc.
753 spin_lock(&conn
->lock
);
755 if (++conn
->tx_ident
> 128)
760 spin_unlock(&conn
->lock
);
765 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
768 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
771 BT_DBG("code 0x%2.2x", code
);
776 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
777 flags
= ACL_START_NO_FLUSH
;
781 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
782 skb
->priority
= HCI_PRIO_MAX
;
784 hci_send_acl(conn
->hchan
, skb
, flags
);
787 static bool __chan_is_moving(struct l2cap_chan
*chan
)
789 return chan
->move_state
!= L2CAP_MOVE_STABLE
&&
790 chan
->move_state
!= L2CAP_MOVE_WAIT_PREPARE
;
793 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
795 struct hci_conn
*hcon
= chan
->conn
->hcon
;
798 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
801 if (chan
->hs_hcon
&& !__chan_is_moving(chan
)) {
803 hci_send_acl(chan
->hs_hchan
, skb
, ACL_COMPLETE
);
810 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
811 lmp_no_flush_capable(hcon
->hdev
))
812 flags
= ACL_START_NO_FLUSH
;
816 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
817 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
820 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
822 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
823 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
825 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
828 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
829 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
836 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
837 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
844 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
846 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
847 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
849 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
852 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
853 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
860 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
861 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
868 static inline void __unpack_control(struct l2cap_chan
*chan
,
871 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
872 __unpack_extended_control(get_unaligned_le32(skb
->data
),
873 &bt_cb(skb
)->control
);
874 skb_pull(skb
, L2CAP_EXT_CTRL_SIZE
);
876 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
877 &bt_cb(skb
)->control
);
878 skb_pull(skb
, L2CAP_ENH_CTRL_SIZE
);
882 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
886 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
887 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
889 if (control
->sframe
) {
890 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
891 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
892 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
894 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
895 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
901 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
905 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
906 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
908 if (control
->sframe
) {
909 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
910 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
911 packed
|= L2CAP_CTRL_FRAME_TYPE
;
913 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
914 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
920 static inline void __pack_control(struct l2cap_chan
*chan
,
921 struct l2cap_ctrl
*control
,
924 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
925 put_unaligned_le32(__pack_extended_control(control
),
926 skb
->data
+ L2CAP_HDR_SIZE
);
928 put_unaligned_le16(__pack_enhanced_control(control
),
929 skb
->data
+ L2CAP_HDR_SIZE
);
933 static inline unsigned int __ertm_hdr_size(struct l2cap_chan
*chan
)
935 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
936 return L2CAP_EXT_HDR_SIZE
;
938 return L2CAP_ENH_HDR_SIZE
;
941 static struct sk_buff
*l2cap_create_sframe_pdu(struct l2cap_chan
*chan
,
945 struct l2cap_hdr
*lh
;
946 int hlen
= __ertm_hdr_size(chan
);
948 if (chan
->fcs
== L2CAP_FCS_CRC16
)
949 hlen
+= L2CAP_FCS_SIZE
;
951 skb
= bt_skb_alloc(hlen
, GFP_KERNEL
);
954 return ERR_PTR(-ENOMEM
);
956 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
957 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
958 lh
->cid
= cpu_to_le16(chan
->dcid
);
960 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
961 put_unaligned_le32(control
, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
963 put_unaligned_le16(control
, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
965 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
966 u16 fcs
= crc16(0, (u8
*)skb
->data
, skb
->len
);
967 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
970 skb
->priority
= HCI_PRIO_MAX
;
974 static void l2cap_send_sframe(struct l2cap_chan
*chan
,
975 struct l2cap_ctrl
*control
)
980 BT_DBG("chan %p, control %p", chan
, control
);
982 if (!control
->sframe
)
985 if (__chan_is_moving(chan
))
988 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
) &&
992 if (control
->super
== L2CAP_SUPER_RR
)
993 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
994 else if (control
->super
== L2CAP_SUPER_RNR
)
995 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
997 if (control
->super
!= L2CAP_SUPER_SREJ
) {
998 chan
->last_acked_seq
= control
->reqseq
;
999 __clear_ack_timer(chan
);
1002 BT_DBG("reqseq %d, final %d, poll %d, super %d", control
->reqseq
,
1003 control
->final
, control
->poll
, control
->super
);
1005 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1006 control_field
= __pack_extended_control(control
);
1008 control_field
= __pack_enhanced_control(control
);
1010 skb
= l2cap_create_sframe_pdu(chan
, control_field
);
1012 l2cap_do_send(chan
, skb
);
1015 static void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, bool poll
)
1017 struct l2cap_ctrl control
;
1019 BT_DBG("chan %p, poll %d", chan
, poll
);
1021 memset(&control
, 0, sizeof(control
));
1023 control
.poll
= poll
;
1025 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
1026 control
.super
= L2CAP_SUPER_RNR
;
1028 control
.super
= L2CAP_SUPER_RR
;
1030 control
.reqseq
= chan
->buffer_seq
;
1031 l2cap_send_sframe(chan
, &control
);
1034 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
1036 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1039 static bool __amp_capable(struct l2cap_chan
*chan
)
1041 struct l2cap_conn
*conn
= chan
->conn
;
1042 struct hci_dev
*hdev
;
1043 bool amp_available
= false;
1045 if (!conn
->hs_enabled
)
1048 if (!(conn
->fixed_chan_mask
& L2CAP_FC_A2MP
))
1051 read_lock(&hci_dev_list_lock
);
1052 list_for_each_entry(hdev
, &hci_dev_list
, list
) {
1053 if (hdev
->amp_type
!= AMP_TYPE_BREDR
&&
1054 test_bit(HCI_UP
, &hdev
->flags
)) {
1055 amp_available
= true;
1059 read_unlock(&hci_dev_list_lock
);
1061 if (chan
->chan_policy
== BT_CHANNEL_POLICY_AMP_PREFERRED
)
1062 return amp_available
;
1067 static bool l2cap_check_efs(struct l2cap_chan
*chan
)
1069 /* Check EFS parameters */
1073 void l2cap_send_conn_req(struct l2cap_chan
*chan
)
1075 struct l2cap_conn
*conn
= chan
->conn
;
1076 struct l2cap_conn_req req
;
1078 req
.scid
= cpu_to_le16(chan
->scid
);
1079 req
.psm
= chan
->psm
;
1081 chan
->ident
= l2cap_get_ident(conn
);
1083 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
1085 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
1088 static void l2cap_send_create_chan_req(struct l2cap_chan
*chan
, u8 amp_id
)
1090 struct l2cap_create_chan_req req
;
1091 req
.scid
= cpu_to_le16(chan
->scid
);
1092 req
.psm
= chan
->psm
;
1093 req
.amp_id
= amp_id
;
1095 chan
->ident
= l2cap_get_ident(chan
->conn
);
1097 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_REQ
,
1101 static void l2cap_move_setup(struct l2cap_chan
*chan
)
1103 struct sk_buff
*skb
;
1105 BT_DBG("chan %p", chan
);
1107 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1110 __clear_retrans_timer(chan
);
1111 __clear_monitor_timer(chan
);
1112 __clear_ack_timer(chan
);
1114 chan
->retry_count
= 0;
1115 skb_queue_walk(&chan
->tx_q
, skb
) {
1116 if (bt_cb(skb
)->control
.retries
)
1117 bt_cb(skb
)->control
.retries
= 1;
1122 chan
->expected_tx_seq
= chan
->buffer_seq
;
1124 clear_bit(CONN_REJ_ACT
, &chan
->conn_state
);
1125 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
1126 l2cap_seq_list_clear(&chan
->retrans_list
);
1127 l2cap_seq_list_clear(&chan
->srej_list
);
1128 skb_queue_purge(&chan
->srej_q
);
1130 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
1131 chan
->rx_state
= L2CAP_RX_STATE_MOVE
;
1133 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
1136 static void l2cap_move_done(struct l2cap_chan
*chan
)
1138 u8 move_role
= chan
->move_role
;
1139 BT_DBG("chan %p", chan
);
1141 chan
->move_state
= L2CAP_MOVE_STABLE
;
1142 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
1144 if (chan
->mode
!= L2CAP_MODE_ERTM
)
1147 switch (move_role
) {
1148 case L2CAP_MOVE_ROLE_INITIATOR
:
1149 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_EXPLICIT_POLL
);
1150 chan
->rx_state
= L2CAP_RX_STATE_WAIT_F
;
1152 case L2CAP_MOVE_ROLE_RESPONDER
:
1153 chan
->rx_state
= L2CAP_RX_STATE_WAIT_P
;
1158 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
1160 /* This clears all conf flags, including CONF_NOT_COMPLETE */
1161 chan
->conf_state
= 0;
1162 __clear_chan_timer(chan
);
1164 chan
->state
= BT_CONNECTED
;
1166 chan
->ops
->ready(chan
);
1169 static void l2cap_le_connect(struct l2cap_chan
*chan
)
1171 struct l2cap_conn
*conn
= chan
->conn
;
1172 struct l2cap_le_conn_req req
;
1174 req
.psm
= chan
->psm
;
1175 req
.scid
= cpu_to_le16(chan
->scid
);
1176 req
.mtu
= cpu_to_le16(chan
->imtu
);
1177 req
.mps
= __constant_cpu_to_le16(L2CAP_LE_DEFAULT_MPS
);
1178 req
.credits
= __constant_cpu_to_le16(L2CAP_LE_MAX_CREDITS
);
1180 chan
->ident
= l2cap_get_ident(conn
);
1182 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_LE_CONN_REQ
,
1186 static void l2cap_le_start(struct l2cap_chan
*chan
)
1188 struct l2cap_conn
*conn
= chan
->conn
;
1190 if (!smp_conn_security(conn
->hcon
, chan
->sec_level
))
1194 l2cap_chan_ready(chan
);
1198 if (chan
->state
== BT_CONNECT
)
1199 l2cap_le_connect(chan
);
1202 static void l2cap_start_connection(struct l2cap_chan
*chan
)
1204 if (__amp_capable(chan
)) {
1205 BT_DBG("chan %p AMP capable: discover AMPs", chan
);
1206 a2mp_discover_amp(chan
);
1207 } else if (chan
->conn
->hcon
->type
== LE_LINK
) {
1208 l2cap_le_start(chan
);
1210 l2cap_send_conn_req(chan
);
1214 static void l2cap_do_start(struct l2cap_chan
*chan
)
1216 struct l2cap_conn
*conn
= chan
->conn
;
1218 if (conn
->hcon
->type
== LE_LINK
) {
1219 l2cap_le_start(chan
);
1223 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
1224 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
1227 if (l2cap_chan_check_security(chan
) &&
1228 __l2cap_no_conn_pending(chan
)) {
1229 l2cap_start_connection(chan
);
1232 struct l2cap_info_req req
;
1233 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
1235 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
1236 conn
->info_ident
= l2cap_get_ident(conn
);
1238 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
1240 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
1245 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
1247 u32 local_feat_mask
= l2cap_feat_mask
;
1249 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
1252 case L2CAP_MODE_ERTM
:
1253 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
1254 case L2CAP_MODE_STREAMING
:
1255 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
1261 static void l2cap_send_disconn_req(struct l2cap_chan
*chan
, int err
)
1263 struct l2cap_conn
*conn
= chan
->conn
;
1264 struct l2cap_disconn_req req
;
1269 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->state
== BT_CONNECTED
) {
1270 __clear_retrans_timer(chan
);
1271 __clear_monitor_timer(chan
);
1272 __clear_ack_timer(chan
);
1275 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1276 l2cap_state_change(chan
, BT_DISCONN
);
1280 req
.dcid
= cpu_to_le16(chan
->dcid
);
1281 req
.scid
= cpu_to_le16(chan
->scid
);
1282 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_DISCONN_REQ
,
1285 l2cap_state_change_and_error(chan
, BT_DISCONN
, err
);
1288 /* ---- L2CAP connections ---- */
1289 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1291 struct l2cap_chan
*chan
, *tmp
;
1293 BT_DBG("conn %p", conn
);
1295 mutex_lock(&conn
->chan_lock
);
1297 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1298 l2cap_chan_lock(chan
);
1300 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1301 l2cap_chan_unlock(chan
);
1305 if (chan
->state
== BT_CONNECT
) {
1306 if (!l2cap_chan_check_security(chan
) ||
1307 !__l2cap_no_conn_pending(chan
)) {
1308 l2cap_chan_unlock(chan
);
1312 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1313 && test_bit(CONF_STATE2_DEVICE
,
1314 &chan
->conf_state
)) {
1315 l2cap_chan_close(chan
, ECONNRESET
);
1316 l2cap_chan_unlock(chan
);
1320 l2cap_start_connection(chan
);
1322 } else if (chan
->state
== BT_CONNECT2
) {
1323 struct l2cap_conn_rsp rsp
;
1325 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1326 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1328 if (l2cap_chan_check_security(chan
)) {
1329 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
1330 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1331 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1332 chan
->ops
->defer(chan
);
1335 l2cap_state_change(chan
, BT_CONFIG
);
1336 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
1337 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
1340 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_PEND
);
1341 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1344 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1347 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1348 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1349 l2cap_chan_unlock(chan
);
1353 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1354 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1355 l2cap_build_conf_req(chan
, buf
), buf
);
1356 chan
->num_conf_req
++;
1359 l2cap_chan_unlock(chan
);
1362 mutex_unlock(&conn
->chan_lock
);
1365 /* Find socket with cid and source/destination bdaddr.
1366 * Returns closest match, locked.
1368 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1372 struct l2cap_chan
*c
, *c1
= NULL
;
1374 read_lock(&chan_list_lock
);
1376 list_for_each_entry(c
, &chan_list
, global_l
) {
1377 if (state
&& c
->state
!= state
)
1380 if (c
->scid
== cid
) {
1381 int src_match
, dst_match
;
1382 int src_any
, dst_any
;
1385 src_match
= !bacmp(&c
->src
, src
);
1386 dst_match
= !bacmp(&c
->dst
, dst
);
1387 if (src_match
&& dst_match
) {
1388 read_unlock(&chan_list_lock
);
1393 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1394 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1395 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1396 (src_any
&& dst_any
))
1401 read_unlock(&chan_list_lock
);
1406 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1408 struct hci_conn
*hcon
= conn
->hcon
;
1409 struct l2cap_chan
*chan
, *pchan
;
1414 /* Check if we have socket listening on cid */
1415 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_ATT
,
1416 &hcon
->src
, &hcon
->dst
);
1420 /* Client ATT sockets should override the server one */
1421 if (__l2cap_get_chan_by_dcid(conn
, L2CAP_CID_ATT
))
1424 dst_type
= bdaddr_type(hcon
, hcon
->dst_type
);
1426 /* If device is blocked, do not create a channel for it */
1427 if (hci_blacklist_lookup(hcon
->hdev
, &hcon
->dst
, dst_type
))
1430 l2cap_chan_lock(pchan
);
1432 chan
= pchan
->ops
->new_connection(pchan
);
1436 chan
->dcid
= L2CAP_CID_ATT
;
1438 bacpy(&chan
->src
, &hcon
->src
);
1439 bacpy(&chan
->dst
, &hcon
->dst
);
1440 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
1441 chan
->dst_type
= dst_type
;
1443 __l2cap_chan_add(conn
, chan
);
1446 l2cap_chan_unlock(pchan
);
1449 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1451 struct l2cap_chan
*chan
;
1452 struct hci_conn
*hcon
= conn
->hcon
;
1454 BT_DBG("conn %p", conn
);
1456 /* For outgoing pairing which doesn't necessarily have an
1457 * associated socket (e.g. mgmt_pair_device).
1459 if (hcon
->out
&& hcon
->type
== LE_LINK
)
1460 smp_conn_security(hcon
, hcon
->pending_sec_level
);
1462 mutex_lock(&conn
->chan_lock
);
1464 if (hcon
->type
== LE_LINK
)
1465 l2cap_le_conn_ready(conn
);
1467 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1469 l2cap_chan_lock(chan
);
1471 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
1472 l2cap_chan_unlock(chan
);
1476 if (hcon
->type
== LE_LINK
) {
1477 l2cap_le_start(chan
);
1478 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1479 l2cap_chan_ready(chan
);
1481 } else if (chan
->state
== BT_CONNECT
) {
1482 l2cap_do_start(chan
);
1485 l2cap_chan_unlock(chan
);
1488 mutex_unlock(&conn
->chan_lock
);
1491 /* Notify sockets that we cannot guaranty reliability anymore */
1492 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1494 struct l2cap_chan
*chan
;
1496 BT_DBG("conn %p", conn
);
1498 mutex_lock(&conn
->chan_lock
);
1500 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1501 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1502 l2cap_chan_set_err(chan
, err
);
1505 mutex_unlock(&conn
->chan_lock
);
1508 static void l2cap_info_timeout(struct work_struct
*work
)
1510 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1513 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1514 conn
->info_ident
= 0;
1516 l2cap_conn_start(conn
);
1521 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1522 * callback is called during registration. The ->remove callback is called
1523 * during unregistration.
1524 * An l2cap_user object can either be explicitly unregistered or when the
1525 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1526 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1527 * External modules must own a reference to the l2cap_conn object if they intend
1528 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1529 * any time if they don't.
1532 int l2cap_register_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1534 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1537 /* We need to check whether l2cap_conn is registered. If it is not, we
1538 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1539 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1540 * relies on the parent hci_conn object to be locked. This itself relies
1541 * on the hci_dev object to be locked. So we must lock the hci device
1546 if (user
->list
.next
|| user
->list
.prev
) {
1551 /* conn->hchan is NULL after l2cap_conn_del() was called */
1557 ret
= user
->probe(conn
, user
);
1561 list_add(&user
->list
, &conn
->users
);
1565 hci_dev_unlock(hdev
);
1568 EXPORT_SYMBOL(l2cap_register_user
);
1570 void l2cap_unregister_user(struct l2cap_conn
*conn
, struct l2cap_user
*user
)
1572 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
1576 if (!user
->list
.next
|| !user
->list
.prev
)
1579 list_del(&user
->list
);
1580 user
->list
.next
= NULL
;
1581 user
->list
.prev
= NULL
;
1582 user
->remove(conn
, user
);
1585 hci_dev_unlock(hdev
);
1587 EXPORT_SYMBOL(l2cap_unregister_user
);
1589 static void l2cap_unregister_all_users(struct l2cap_conn
*conn
)
1591 struct l2cap_user
*user
;
1593 while (!list_empty(&conn
->users
)) {
1594 user
= list_first_entry(&conn
->users
, struct l2cap_user
, list
);
1595 list_del(&user
->list
);
1596 user
->list
.next
= NULL
;
1597 user
->list
.prev
= NULL
;
1598 user
->remove(conn
, user
);
1602 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1604 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1605 struct l2cap_chan
*chan
, *l
;
1610 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1612 kfree_skb(conn
->rx_skb
);
1614 l2cap_unregister_all_users(conn
);
1616 mutex_lock(&conn
->chan_lock
);
1619 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1620 l2cap_chan_hold(chan
);
1621 l2cap_chan_lock(chan
);
1623 l2cap_chan_del(chan
, err
);
1625 l2cap_chan_unlock(chan
);
1627 chan
->ops
->close(chan
);
1628 l2cap_chan_put(chan
);
1631 mutex_unlock(&conn
->chan_lock
);
1633 hci_chan_del(conn
->hchan
);
1635 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1636 cancel_delayed_work_sync(&conn
->info_timer
);
1638 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1639 cancel_delayed_work_sync(&conn
->security_timer
);
1640 smp_chan_destroy(conn
);
1643 hcon
->l2cap_data
= NULL
;
1645 l2cap_conn_put(conn
);
1648 static void security_timeout(struct work_struct
*work
)
1650 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1651 security_timer
.work
);
1653 BT_DBG("conn %p", conn
);
1655 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &conn
->hcon
->flags
)) {
1656 smp_chan_destroy(conn
);
1657 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1661 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
)
1663 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1664 struct hci_chan
*hchan
;
1669 hchan
= hci_chan_create(hcon
);
1673 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_KERNEL
);
1675 hci_chan_del(hchan
);
1679 kref_init(&conn
->ref
);
1680 hcon
->l2cap_data
= conn
;
1682 hci_conn_get(conn
->hcon
);
1683 conn
->hchan
= hchan
;
1685 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1687 switch (hcon
->type
) {
1689 if (hcon
->hdev
->le_mtu
) {
1690 conn
->mtu
= hcon
->hdev
->le_mtu
;
1695 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1699 conn
->feat_mask
= 0;
1701 if (hcon
->type
== ACL_LINK
)
1702 conn
->hs_enabled
= test_bit(HCI_HS_ENABLED
,
1703 &hcon
->hdev
->dev_flags
);
1705 spin_lock_init(&conn
->lock
);
1706 mutex_init(&conn
->chan_lock
);
1708 INIT_LIST_HEAD(&conn
->chan_l
);
1709 INIT_LIST_HEAD(&conn
->users
);
1711 if (hcon
->type
== LE_LINK
)
1712 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1714 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1716 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1721 static void l2cap_conn_free(struct kref
*ref
)
1723 struct l2cap_conn
*conn
= container_of(ref
, struct l2cap_conn
, ref
);
1725 hci_conn_put(conn
->hcon
);
1729 void l2cap_conn_get(struct l2cap_conn
*conn
)
1731 kref_get(&conn
->ref
);
1733 EXPORT_SYMBOL(l2cap_conn_get
);
1735 void l2cap_conn_put(struct l2cap_conn
*conn
)
1737 kref_put(&conn
->ref
, l2cap_conn_free
);
1739 EXPORT_SYMBOL(l2cap_conn_put
);
1741 /* ---- Socket interface ---- */
1743 /* Find socket with psm and source / destination bdaddr.
1744 * Returns closest match.
1746 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1751 struct l2cap_chan
*c
, *c1
= NULL
;
1753 read_lock(&chan_list_lock
);
1755 list_for_each_entry(c
, &chan_list
, global_l
) {
1756 if (state
&& c
->state
!= state
)
1759 if (link_type
== ACL_LINK
&& c
->src_type
!= BDADDR_BREDR
)
1762 if (link_type
== LE_LINK
&& c
->src_type
== BDADDR_BREDR
)
1765 if (c
->psm
== psm
) {
1766 int src_match
, dst_match
;
1767 int src_any
, dst_any
;
1770 src_match
= !bacmp(&c
->src
, src
);
1771 dst_match
= !bacmp(&c
->dst
, dst
);
1772 if (src_match
&& dst_match
) {
1773 read_unlock(&chan_list_lock
);
1778 src_any
= !bacmp(&c
->src
, BDADDR_ANY
);
1779 dst_any
= !bacmp(&c
->dst
, BDADDR_ANY
);
1780 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1781 (src_any
&& dst_any
))
1786 read_unlock(&chan_list_lock
);
1791 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1792 bdaddr_t
*dst
, u8 dst_type
)
1794 struct l2cap_conn
*conn
;
1795 struct hci_conn
*hcon
;
1796 struct hci_dev
*hdev
;
1800 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan
->src
, dst
,
1801 dst_type
, __le16_to_cpu(psm
));
1803 hdev
= hci_get_route(dst
, &chan
->src
);
1805 return -EHOSTUNREACH
;
1809 l2cap_chan_lock(chan
);
1811 /* PSM must be odd and lsb of upper byte must be 0 */
1812 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1813 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1818 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1823 switch (chan
->mode
) {
1824 case L2CAP_MODE_BASIC
:
1826 case L2CAP_MODE_ERTM
:
1827 case L2CAP_MODE_STREAMING
:
1836 switch (chan
->state
) {
1840 /* Already connecting */
1845 /* Already connected */
1859 /* Set destination address and psm */
1860 bacpy(&chan
->dst
, dst
);
1861 chan
->dst_type
= dst_type
;
1866 auth_type
= l2cap_get_auth_type(chan
);
1868 if (bdaddr_type_is_le(dst_type
))
1869 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1870 chan
->sec_level
, auth_type
);
1872 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1873 chan
->sec_level
, auth_type
);
1876 err
= PTR_ERR(hcon
);
1880 conn
= l2cap_conn_add(hcon
);
1882 hci_conn_drop(hcon
);
1887 if (cid
&& __l2cap_get_chan_by_dcid(conn
, cid
)) {
1888 hci_conn_drop(hcon
);
1893 /* Update source addr of the socket */
1894 bacpy(&chan
->src
, &hcon
->src
);
1895 chan
->src_type
= bdaddr_type(hcon
, hcon
->src_type
);
1897 l2cap_chan_unlock(chan
);
1898 l2cap_chan_add(conn
, chan
);
1899 l2cap_chan_lock(chan
);
1901 /* l2cap_chan_add takes its own ref so we can drop this one */
1902 hci_conn_drop(hcon
);
1904 l2cap_state_change(chan
, BT_CONNECT
);
1905 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
1907 if (hcon
->state
== BT_CONNECTED
) {
1908 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1909 __clear_chan_timer(chan
);
1910 if (l2cap_chan_check_security(chan
))
1911 l2cap_state_change(chan
, BT_CONNECTED
);
1913 l2cap_do_start(chan
);
1919 l2cap_chan_unlock(chan
);
1920 hci_dev_unlock(hdev
);
1925 static void l2cap_monitor_timeout(struct work_struct
*work
)
1927 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1928 monitor_timer
.work
);
1930 BT_DBG("chan %p", chan
);
1932 l2cap_chan_lock(chan
);
1935 l2cap_chan_unlock(chan
);
1936 l2cap_chan_put(chan
);
1940 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_MONITOR_TO
);
1942 l2cap_chan_unlock(chan
);
1943 l2cap_chan_put(chan
);
1946 static void l2cap_retrans_timeout(struct work_struct
*work
)
1948 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1949 retrans_timer
.work
);
1951 BT_DBG("chan %p", chan
);
1953 l2cap_chan_lock(chan
);
1956 l2cap_chan_unlock(chan
);
1957 l2cap_chan_put(chan
);
1961 l2cap_tx(chan
, NULL
, NULL
, L2CAP_EV_RETRANS_TO
);
1962 l2cap_chan_unlock(chan
);
1963 l2cap_chan_put(chan
);
1966 static void l2cap_streaming_send(struct l2cap_chan
*chan
,
1967 struct sk_buff_head
*skbs
)
1969 struct sk_buff
*skb
;
1970 struct l2cap_ctrl
*control
;
1972 BT_DBG("chan %p, skbs %p", chan
, skbs
);
1974 if (__chan_is_moving(chan
))
1977 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
1979 while (!skb_queue_empty(&chan
->tx_q
)) {
1981 skb
= skb_dequeue(&chan
->tx_q
);
1983 bt_cb(skb
)->control
.retries
= 1;
1984 control
= &bt_cb(skb
)->control
;
1986 control
->reqseq
= 0;
1987 control
->txseq
= chan
->next_tx_seq
;
1989 __pack_control(chan
, control
, skb
);
1991 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1992 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
1993 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
1996 l2cap_do_send(chan
, skb
);
1998 BT_DBG("Sent txseq %u", control
->txseq
);
2000 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
2001 chan
->frames_sent
++;
2005 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
2007 struct sk_buff
*skb
, *tx_skb
;
2008 struct l2cap_ctrl
*control
;
2011 BT_DBG("chan %p", chan
);
2013 if (chan
->state
!= BT_CONNECTED
)
2016 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2019 if (__chan_is_moving(chan
))
2022 while (chan
->tx_send_head
&&
2023 chan
->unacked_frames
< chan
->remote_tx_win
&&
2024 chan
->tx_state
== L2CAP_TX_STATE_XMIT
) {
2026 skb
= chan
->tx_send_head
;
2028 bt_cb(skb
)->control
.retries
= 1;
2029 control
= &bt_cb(skb
)->control
;
2031 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
2034 control
->reqseq
= chan
->buffer_seq
;
2035 chan
->last_acked_seq
= chan
->buffer_seq
;
2036 control
->txseq
= chan
->next_tx_seq
;
2038 __pack_control(chan
, control
, skb
);
2040 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2041 u16 fcs
= crc16(0, (u8
*) skb
->data
, skb
->len
);
2042 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
2045 /* Clone after data has been modified. Data is assumed to be
2046 read-only (for locking purposes) on cloned sk_buffs.
2048 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
2053 __set_retrans_timer(chan
);
2055 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
2056 chan
->unacked_frames
++;
2057 chan
->frames_sent
++;
2060 if (skb_queue_is_last(&chan
->tx_q
, skb
))
2061 chan
->tx_send_head
= NULL
;
2063 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
2065 l2cap_do_send(chan
, tx_skb
);
2066 BT_DBG("Sent txseq %u", control
->txseq
);
2069 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent
,
2070 chan
->unacked_frames
, skb_queue_len(&chan
->tx_q
));
2075 static void l2cap_ertm_resend(struct l2cap_chan
*chan
)
2077 struct l2cap_ctrl control
;
2078 struct sk_buff
*skb
;
2079 struct sk_buff
*tx_skb
;
2082 BT_DBG("chan %p", chan
);
2084 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2087 if (__chan_is_moving(chan
))
2090 while (chan
->retrans_list
.head
!= L2CAP_SEQ_LIST_CLEAR
) {
2091 seq
= l2cap_seq_list_pop(&chan
->retrans_list
);
2093 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, seq
);
2095 BT_DBG("Error: Can't retransmit seq %d, frame missing",
2100 bt_cb(skb
)->control
.retries
++;
2101 control
= bt_cb(skb
)->control
;
2103 if (chan
->max_tx
!= 0 &&
2104 bt_cb(skb
)->control
.retries
> chan
->max_tx
) {
2105 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
2106 l2cap_send_disconn_req(chan
, ECONNRESET
);
2107 l2cap_seq_list_clear(&chan
->retrans_list
);
2111 control
.reqseq
= chan
->buffer_seq
;
2112 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
2117 if (skb_cloned(skb
)) {
2118 /* Cloned sk_buffs are read-only, so we need a
2121 tx_skb
= skb_copy(skb
, GFP_KERNEL
);
2123 tx_skb
= skb_clone(skb
, GFP_KERNEL
);
2127 l2cap_seq_list_clear(&chan
->retrans_list
);
2131 /* Update skb contents */
2132 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
2133 put_unaligned_le32(__pack_extended_control(&control
),
2134 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2136 put_unaligned_le16(__pack_enhanced_control(&control
),
2137 tx_skb
->data
+ L2CAP_HDR_SIZE
);
2140 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
2141 u16 fcs
= crc16(0, (u8
*) tx_skb
->data
, tx_skb
->len
);
2142 put_unaligned_le16(fcs
, skb_put(tx_skb
,
2146 l2cap_do_send(chan
, tx_skb
);
2148 BT_DBG("Resent txseq %d", control
.txseq
);
2150 chan
->last_acked_seq
= chan
->buffer_seq
;
2154 static void l2cap_retransmit(struct l2cap_chan
*chan
,
2155 struct l2cap_ctrl
*control
)
2157 BT_DBG("chan %p, control %p", chan
, control
);
2159 l2cap_seq_list_append(&chan
->retrans_list
, control
->reqseq
);
2160 l2cap_ertm_resend(chan
);
2163 static void l2cap_retransmit_all(struct l2cap_chan
*chan
,
2164 struct l2cap_ctrl
*control
)
2166 struct sk_buff
*skb
;
2168 BT_DBG("chan %p, control %p", chan
, control
);
2171 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
2173 l2cap_seq_list_clear(&chan
->retrans_list
);
2175 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
2178 if (chan
->unacked_frames
) {
2179 skb_queue_walk(&chan
->tx_q
, skb
) {
2180 if (bt_cb(skb
)->control
.txseq
== control
->reqseq
||
2181 skb
== chan
->tx_send_head
)
2185 skb_queue_walk_from(&chan
->tx_q
, skb
) {
2186 if (skb
== chan
->tx_send_head
)
2189 l2cap_seq_list_append(&chan
->retrans_list
,
2190 bt_cb(skb
)->control
.txseq
);
2193 l2cap_ertm_resend(chan
);
2197 static void l2cap_send_ack(struct l2cap_chan
*chan
)
2199 struct l2cap_ctrl control
;
2200 u16 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
2201 chan
->last_acked_seq
);
2204 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2205 chan
, chan
->last_acked_seq
, chan
->buffer_seq
);
2207 memset(&control
, 0, sizeof(control
));
2210 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
2211 chan
->rx_state
== L2CAP_RX_STATE_RECV
) {
2212 __clear_ack_timer(chan
);
2213 control
.super
= L2CAP_SUPER_RNR
;
2214 control
.reqseq
= chan
->buffer_seq
;
2215 l2cap_send_sframe(chan
, &control
);
2217 if (!test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
)) {
2218 l2cap_ertm_send(chan
);
2219 /* If any i-frames were sent, they included an ack */
2220 if (chan
->buffer_seq
== chan
->last_acked_seq
)
2224 /* Ack now if the window is 3/4ths full.
2225 * Calculate without mul or div
2227 threshold
= chan
->ack_win
;
2228 threshold
+= threshold
<< 1;
2231 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack
,
2234 if (frames_to_ack
>= threshold
) {
2235 __clear_ack_timer(chan
);
2236 control
.super
= L2CAP_SUPER_RR
;
2237 control
.reqseq
= chan
->buffer_seq
;
2238 l2cap_send_sframe(chan
, &control
);
2243 __set_ack_timer(chan
);
2247 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
2248 struct msghdr
*msg
, int len
,
2249 int count
, struct sk_buff
*skb
)
2251 struct l2cap_conn
*conn
= chan
->conn
;
2252 struct sk_buff
**frag
;
2255 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
2261 /* Continuation fragments (no L2CAP header) */
2262 frag
= &skb_shinfo(skb
)->frag_list
;
2264 struct sk_buff
*tmp
;
2266 count
= min_t(unsigned int, conn
->mtu
, len
);
2268 tmp
= chan
->ops
->alloc_skb(chan
, count
,
2269 msg
->msg_flags
& MSG_DONTWAIT
);
2271 return PTR_ERR(tmp
);
2275 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
2278 (*frag
)->priority
= skb
->priority
;
2283 skb
->len
+= (*frag
)->len
;
2284 skb
->data_len
+= (*frag
)->len
;
2286 frag
= &(*frag
)->next
;
2292 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
2293 struct msghdr
*msg
, size_t len
,
2296 struct l2cap_conn
*conn
= chan
->conn
;
2297 struct sk_buff
*skb
;
2298 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
2299 struct l2cap_hdr
*lh
;
2301 BT_DBG("chan %p psm 0x%2.2x len %zu priority %u", chan
,
2302 __le16_to_cpu(chan
->psm
), len
, priority
);
2304 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2306 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2307 msg
->msg_flags
& MSG_DONTWAIT
);
2311 skb
->priority
= priority
;
2313 /* Create L2CAP header */
2314 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2315 lh
->cid
= cpu_to_le16(chan
->dcid
);
2316 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
2317 put_unaligned(chan
->psm
, (__le16
*) skb_put(skb
, L2CAP_PSMLEN_SIZE
));
2319 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2320 if (unlikely(err
< 0)) {
2322 return ERR_PTR(err
);
2327 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
2328 struct msghdr
*msg
, size_t len
,
2331 struct l2cap_conn
*conn
= chan
->conn
;
2332 struct sk_buff
*skb
;
2334 struct l2cap_hdr
*lh
;
2336 BT_DBG("chan %p len %zu", chan
, len
);
2338 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
2340 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
2341 msg
->msg_flags
& MSG_DONTWAIT
);
2345 skb
->priority
= priority
;
2347 /* Create L2CAP header */
2348 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2349 lh
->cid
= cpu_to_le16(chan
->dcid
);
2350 lh
->len
= cpu_to_le16(len
);
2352 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2353 if (unlikely(err
< 0)) {
2355 return ERR_PTR(err
);
2360 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
2361 struct msghdr
*msg
, size_t len
,
2364 struct l2cap_conn
*conn
= chan
->conn
;
2365 struct sk_buff
*skb
;
2366 int err
, count
, hlen
;
2367 struct l2cap_hdr
*lh
;
2369 BT_DBG("chan %p len %zu", chan
, len
);
2372 return ERR_PTR(-ENOTCONN
);
2374 hlen
= __ertm_hdr_size(chan
);
2377 hlen
+= L2CAP_SDULEN_SIZE
;
2379 if (chan
->fcs
== L2CAP_FCS_CRC16
)
2380 hlen
+= L2CAP_FCS_SIZE
;
2382 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
2384 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
2385 msg
->msg_flags
& MSG_DONTWAIT
);
2389 /* Create L2CAP header */
2390 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2391 lh
->cid
= cpu_to_le16(chan
->dcid
);
2392 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
2394 /* Control header is populated later */
2395 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2396 put_unaligned_le32(0, skb_put(skb
, L2CAP_EXT_CTRL_SIZE
));
2398 put_unaligned_le16(0, skb_put(skb
, L2CAP_ENH_CTRL_SIZE
));
2401 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
2403 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
2404 if (unlikely(err
< 0)) {
2406 return ERR_PTR(err
);
2409 bt_cb(skb
)->control
.fcs
= chan
->fcs
;
2410 bt_cb(skb
)->control
.retries
= 0;
2414 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
2415 struct sk_buff_head
*seg_queue
,
2416 struct msghdr
*msg
, size_t len
)
2418 struct sk_buff
*skb
;
2423 BT_DBG("chan %p, msg %p, len %zu", chan
, msg
, len
);
2425 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2426 * so fragmented skbs are not used. The HCI layer's handling
2427 * of fragmented skbs is not compatible with ERTM's queueing.
2430 /* PDU size is derived from the HCI MTU */
2431 pdu_len
= chan
->conn
->mtu
;
2433 /* Constrain PDU size for BR/EDR connections */
2435 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2437 /* Adjust for largest possible L2CAP overhead. */
2439 pdu_len
-= L2CAP_FCS_SIZE
;
2441 pdu_len
-= __ertm_hdr_size(chan
);
2443 /* Remote device may have requested smaller PDUs */
2444 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2446 if (len
<= pdu_len
) {
2447 sar
= L2CAP_SAR_UNSEGMENTED
;
2451 sar
= L2CAP_SAR_START
;
2453 pdu_len
-= L2CAP_SDULEN_SIZE
;
2457 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2460 __skb_queue_purge(seg_queue
);
2461 return PTR_ERR(skb
);
2464 bt_cb(skb
)->control
.sar
= sar
;
2465 __skb_queue_tail(seg_queue
, skb
);
2470 pdu_len
+= L2CAP_SDULEN_SIZE
;
2473 if (len
<= pdu_len
) {
2474 sar
= L2CAP_SAR_END
;
2477 sar
= L2CAP_SAR_CONTINUE
;
2484 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2487 struct sk_buff
*skb
;
2489 struct sk_buff_head seg_queue
;
2494 /* Connectionless channel */
2495 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2496 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2498 return PTR_ERR(skb
);
2500 l2cap_do_send(chan
, skb
);
2504 switch (chan
->mode
) {
2505 case L2CAP_MODE_BASIC
:
2506 /* Check outgoing MTU */
2507 if (len
> chan
->omtu
)
2510 /* Create a basic PDU */
2511 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2513 return PTR_ERR(skb
);
2515 l2cap_do_send(chan
, skb
);
2519 case L2CAP_MODE_ERTM
:
2520 case L2CAP_MODE_STREAMING
:
2521 /* Check outgoing MTU */
2522 if (len
> chan
->omtu
) {
2527 __skb_queue_head_init(&seg_queue
);
2529 /* Do segmentation before calling in to the state machine,
2530 * since it's possible to block while waiting for memory
2533 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2535 /* The channel could have been closed while segmenting,
2536 * check that it is still connected.
2538 if (chan
->state
!= BT_CONNECTED
) {
2539 __skb_queue_purge(&seg_queue
);
2546 if (chan
->mode
== L2CAP_MODE_ERTM
)
2547 l2cap_tx(chan
, NULL
, &seg_queue
, L2CAP_EV_DATA_REQUEST
);
2549 l2cap_streaming_send(chan
, &seg_queue
);
2553 /* If the skbs were not queued for sending, they'll still be in
2554 * seg_queue and need to be purged.
2556 __skb_queue_purge(&seg_queue
);
2560 BT_DBG("bad state %1.1x", chan
->mode
);
2567 static void l2cap_send_srej(struct l2cap_chan
*chan
, u16 txseq
)
2569 struct l2cap_ctrl control
;
2572 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2574 memset(&control
, 0, sizeof(control
));
2576 control
.super
= L2CAP_SUPER_SREJ
;
2578 for (seq
= chan
->expected_tx_seq
; seq
!= txseq
;
2579 seq
= __next_seq(chan
, seq
)) {
2580 if (!l2cap_ertm_seq_in_queue(&chan
->srej_q
, seq
)) {
2581 control
.reqseq
= seq
;
2582 l2cap_send_sframe(chan
, &control
);
2583 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2587 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
2590 static void l2cap_send_srej_tail(struct l2cap_chan
*chan
)
2592 struct l2cap_ctrl control
;
2594 BT_DBG("chan %p", chan
);
2596 if (chan
->srej_list
.tail
== L2CAP_SEQ_LIST_CLEAR
)
2599 memset(&control
, 0, sizeof(control
));
2601 control
.super
= L2CAP_SUPER_SREJ
;
2602 control
.reqseq
= chan
->srej_list
.tail
;
2603 l2cap_send_sframe(chan
, &control
);
2606 static void l2cap_send_srej_list(struct l2cap_chan
*chan
, u16 txseq
)
2608 struct l2cap_ctrl control
;
2612 BT_DBG("chan %p, txseq %u", chan
, txseq
);
2614 memset(&control
, 0, sizeof(control
));
2616 control
.super
= L2CAP_SUPER_SREJ
;
2618 /* Capture initial list head to allow only one pass through the list. */
2619 initial_head
= chan
->srej_list
.head
;
2622 seq
= l2cap_seq_list_pop(&chan
->srej_list
);
2623 if (seq
== txseq
|| seq
== L2CAP_SEQ_LIST_CLEAR
)
2626 control
.reqseq
= seq
;
2627 l2cap_send_sframe(chan
, &control
);
2628 l2cap_seq_list_append(&chan
->srej_list
, seq
);
2629 } while (chan
->srej_list
.head
!= initial_head
);
2632 static void l2cap_process_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
2634 struct sk_buff
*acked_skb
;
2637 BT_DBG("chan %p, reqseq %u", chan
, reqseq
);
2639 if (chan
->unacked_frames
== 0 || reqseq
== chan
->expected_ack_seq
)
2642 BT_DBG("expected_ack_seq %u, unacked_frames %u",
2643 chan
->expected_ack_seq
, chan
->unacked_frames
);
2645 for (ackseq
= chan
->expected_ack_seq
; ackseq
!= reqseq
;
2646 ackseq
= __next_seq(chan
, ackseq
)) {
2648 acked_skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, ackseq
);
2650 skb_unlink(acked_skb
, &chan
->tx_q
);
2651 kfree_skb(acked_skb
);
2652 chan
->unacked_frames
--;
2656 chan
->expected_ack_seq
= reqseq
;
2658 if (chan
->unacked_frames
== 0)
2659 __clear_retrans_timer(chan
);
2661 BT_DBG("unacked_frames %u", chan
->unacked_frames
);
2664 static void l2cap_abort_rx_srej_sent(struct l2cap_chan
*chan
)
2666 BT_DBG("chan %p", chan
);
2668 chan
->expected_tx_seq
= chan
->buffer_seq
;
2669 l2cap_seq_list_clear(&chan
->srej_list
);
2670 skb_queue_purge(&chan
->srej_q
);
2671 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2674 static void l2cap_tx_state_xmit(struct l2cap_chan
*chan
,
2675 struct l2cap_ctrl
*control
,
2676 struct sk_buff_head
*skbs
, u8 event
)
2678 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2682 case L2CAP_EV_DATA_REQUEST
:
2683 if (chan
->tx_send_head
== NULL
)
2684 chan
->tx_send_head
= skb_peek(skbs
);
2686 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2687 l2cap_ertm_send(chan
);
2689 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2690 BT_DBG("Enter LOCAL_BUSY");
2691 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2693 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2694 /* The SREJ_SENT state must be aborted if we are to
2695 * enter the LOCAL_BUSY state.
2697 l2cap_abort_rx_srej_sent(chan
);
2700 l2cap_send_ack(chan
);
2703 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2704 BT_DBG("Exit LOCAL_BUSY");
2705 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2707 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2708 struct l2cap_ctrl local_control
;
2710 memset(&local_control
, 0, sizeof(local_control
));
2711 local_control
.sframe
= 1;
2712 local_control
.super
= L2CAP_SUPER_RR
;
2713 local_control
.poll
= 1;
2714 local_control
.reqseq
= chan
->buffer_seq
;
2715 l2cap_send_sframe(chan
, &local_control
);
2717 chan
->retry_count
= 1;
2718 __set_monitor_timer(chan
);
2719 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2722 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2723 l2cap_process_reqseq(chan
, control
->reqseq
);
2725 case L2CAP_EV_EXPLICIT_POLL
:
2726 l2cap_send_rr_or_rnr(chan
, 1);
2727 chan
->retry_count
= 1;
2728 __set_monitor_timer(chan
);
2729 __clear_ack_timer(chan
);
2730 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2732 case L2CAP_EV_RETRANS_TO
:
2733 l2cap_send_rr_or_rnr(chan
, 1);
2734 chan
->retry_count
= 1;
2735 __set_monitor_timer(chan
);
2736 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2738 case L2CAP_EV_RECV_FBIT
:
2739 /* Nothing to process */
2746 static void l2cap_tx_state_wait_f(struct l2cap_chan
*chan
,
2747 struct l2cap_ctrl
*control
,
2748 struct sk_buff_head
*skbs
, u8 event
)
2750 BT_DBG("chan %p, control %p, skbs %p, event %d", chan
, control
, skbs
,
2754 case L2CAP_EV_DATA_REQUEST
:
2755 if (chan
->tx_send_head
== NULL
)
2756 chan
->tx_send_head
= skb_peek(skbs
);
2757 /* Queue data, but don't send. */
2758 skb_queue_splice_tail_init(skbs
, &chan
->tx_q
);
2760 case L2CAP_EV_LOCAL_BUSY_DETECTED
:
2761 BT_DBG("Enter LOCAL_BUSY");
2762 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2764 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
2765 /* The SREJ_SENT state must be aborted if we are to
2766 * enter the LOCAL_BUSY state.
2768 l2cap_abort_rx_srej_sent(chan
);
2771 l2cap_send_ack(chan
);
2774 case L2CAP_EV_LOCAL_BUSY_CLEAR
:
2775 BT_DBG("Exit LOCAL_BUSY");
2776 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
2778 if (test_bit(CONN_RNR_SENT
, &chan
->conn_state
)) {
2779 struct l2cap_ctrl local_control
;
2780 memset(&local_control
, 0, sizeof(local_control
));
2781 local_control
.sframe
= 1;
2782 local_control
.super
= L2CAP_SUPER_RR
;
2783 local_control
.poll
= 1;
2784 local_control
.reqseq
= chan
->buffer_seq
;
2785 l2cap_send_sframe(chan
, &local_control
);
2787 chan
->retry_count
= 1;
2788 __set_monitor_timer(chan
);
2789 chan
->tx_state
= L2CAP_TX_STATE_WAIT_F
;
2792 case L2CAP_EV_RECV_REQSEQ_AND_FBIT
:
2793 l2cap_process_reqseq(chan
, control
->reqseq
);
2797 case L2CAP_EV_RECV_FBIT
:
2798 if (control
&& control
->final
) {
2799 __clear_monitor_timer(chan
);
2800 if (chan
->unacked_frames
> 0)
2801 __set_retrans_timer(chan
);
2802 chan
->retry_count
= 0;
2803 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2804 BT_DBG("recv fbit tx_state 0x2.2%x", chan
->tx_state
);
2807 case L2CAP_EV_EXPLICIT_POLL
:
2810 case L2CAP_EV_MONITOR_TO
:
2811 if (chan
->max_tx
== 0 || chan
->retry_count
< chan
->max_tx
) {
2812 l2cap_send_rr_or_rnr(chan
, 1);
2813 __set_monitor_timer(chan
);
2814 chan
->retry_count
++;
2816 l2cap_send_disconn_req(chan
, ECONNABORTED
);
2824 static void l2cap_tx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
2825 struct sk_buff_head
*skbs
, u8 event
)
2827 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2828 chan
, control
, skbs
, event
, chan
->tx_state
);
2830 switch (chan
->tx_state
) {
2831 case L2CAP_TX_STATE_XMIT
:
2832 l2cap_tx_state_xmit(chan
, control
, skbs
, event
);
2834 case L2CAP_TX_STATE_WAIT_F
:
2835 l2cap_tx_state_wait_f(chan
, control
, skbs
, event
);
2843 static void l2cap_pass_to_tx(struct l2cap_chan
*chan
,
2844 struct l2cap_ctrl
*control
)
2846 BT_DBG("chan %p, control %p", chan
, control
);
2847 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_REQSEQ_AND_FBIT
);
2850 static void l2cap_pass_to_tx_fbit(struct l2cap_chan
*chan
,
2851 struct l2cap_ctrl
*control
)
2853 BT_DBG("chan %p, control %p", chan
, control
);
2854 l2cap_tx(chan
, control
, NULL
, L2CAP_EV_RECV_FBIT
);
2857 /* Copy frame to all raw sockets on that connection */
2858 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2860 struct sk_buff
*nskb
;
2861 struct l2cap_chan
*chan
;
2863 BT_DBG("conn %p", conn
);
2865 mutex_lock(&conn
->chan_lock
);
2867 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2868 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2871 /* Don't send frame to the channel it came from */
2872 if (bt_cb(skb
)->chan
== chan
)
2875 nskb
= skb_clone(skb
, GFP_KERNEL
);
2878 if (chan
->ops
->recv(chan
, nskb
))
2882 mutex_unlock(&conn
->chan_lock
);
2885 /* ---- L2CAP signalling commands ---- */
2886 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
, u8 code
,
2887 u8 ident
, u16 dlen
, void *data
)
2889 struct sk_buff
*skb
, **frag
;
2890 struct l2cap_cmd_hdr
*cmd
;
2891 struct l2cap_hdr
*lh
;
2894 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2895 conn
, code
, ident
, dlen
);
2897 if (conn
->mtu
< L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
)
2900 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2901 count
= min_t(unsigned int, conn
->mtu
, len
);
2903 skb
= bt_skb_alloc(count
, GFP_KERNEL
);
2907 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2908 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2910 if (conn
->hcon
->type
== LE_LINK
)
2911 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2913 lh
->cid
= __constant_cpu_to_le16(L2CAP_CID_SIGNALING
);
2915 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2918 cmd
->len
= cpu_to_le16(dlen
);
2921 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2922 memcpy(skb_put(skb
, count
), data
, count
);
2928 /* Continuation fragments (no L2CAP header) */
2929 frag
= &skb_shinfo(skb
)->frag_list
;
2931 count
= min_t(unsigned int, conn
->mtu
, len
);
2933 *frag
= bt_skb_alloc(count
, GFP_KERNEL
);
2937 memcpy(skb_put(*frag
, count
), data
, count
);
2942 frag
= &(*frag
)->next
;
2952 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
,
2955 struct l2cap_conf_opt
*opt
= *ptr
;
2958 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2966 *val
= *((u8
*) opt
->val
);
2970 *val
= get_unaligned_le16(opt
->val
);
2974 *val
= get_unaligned_le32(opt
->val
);
2978 *val
= (unsigned long) opt
->val
;
2982 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type
, opt
->len
, *val
);
2986 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2988 struct l2cap_conf_opt
*opt
= *ptr
;
2990 BT_DBG("type 0x%2.2x len %u val 0x%lx", type
, len
, val
);
2997 *((u8
*) opt
->val
) = val
;
3001 put_unaligned_le16(val
, opt
->val
);
3005 put_unaligned_le32(val
, opt
->val
);
3009 memcpy(opt
->val
, (void *) val
, len
);
3013 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
3016 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
3018 struct l2cap_conf_efs efs
;
3020 switch (chan
->mode
) {
3021 case L2CAP_MODE_ERTM
:
3022 efs
.id
= chan
->local_id
;
3023 efs
.stype
= chan
->local_stype
;
3024 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3025 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3026 efs
.acc_lat
= __constant_cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
3027 efs
.flush_to
= __constant_cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO
);
3030 case L2CAP_MODE_STREAMING
:
3032 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
3033 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
3034 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
3043 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3044 (unsigned long) &efs
);
3047 static void l2cap_ack_timeout(struct work_struct
*work
)
3049 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
3053 BT_DBG("chan %p", chan
);
3055 l2cap_chan_lock(chan
);
3057 frames_to_ack
= __seq_offset(chan
, chan
->buffer_seq
,
3058 chan
->last_acked_seq
);
3061 l2cap_send_rr_or_rnr(chan
, 0);
3063 l2cap_chan_unlock(chan
);
3064 l2cap_chan_put(chan
);
3067 int l2cap_ertm_init(struct l2cap_chan
*chan
)
3071 chan
->next_tx_seq
= 0;
3072 chan
->expected_tx_seq
= 0;
3073 chan
->expected_ack_seq
= 0;
3074 chan
->unacked_frames
= 0;
3075 chan
->buffer_seq
= 0;
3076 chan
->frames_sent
= 0;
3077 chan
->last_acked_seq
= 0;
3079 chan
->sdu_last_frag
= NULL
;
3082 skb_queue_head_init(&chan
->tx_q
);
3084 chan
->local_amp_id
= AMP_ID_BREDR
;
3085 chan
->move_id
= AMP_ID_BREDR
;
3086 chan
->move_state
= L2CAP_MOVE_STABLE
;
3087 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
3089 if (chan
->mode
!= L2CAP_MODE_ERTM
)
3092 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
3093 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
3095 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
3096 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
3097 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
3099 skb_queue_head_init(&chan
->srej_q
);
3101 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
3105 err
= l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
3107 l2cap_seq_list_free(&chan
->srej_list
);
3112 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
3115 case L2CAP_MODE_STREAMING
:
3116 case L2CAP_MODE_ERTM
:
3117 if (l2cap_mode_supported(mode
, remote_feat_mask
))
3121 return L2CAP_MODE_BASIC
;
3125 static inline bool __l2cap_ews_supported(struct l2cap_conn
*conn
)
3127 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
3130 static inline bool __l2cap_efs_supported(struct l2cap_conn
*conn
)
3132 return conn
->hs_enabled
&& conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
3135 static void __l2cap_set_ertm_timeouts(struct l2cap_chan
*chan
,
3136 struct l2cap_conf_rfc
*rfc
)
3138 if (chan
->local_amp_id
!= AMP_ID_BREDR
&& chan
->hs_hcon
) {
3139 u64 ertm_to
= chan
->hs_hcon
->hdev
->amp_be_flush_to
;
3141 /* Class 1 devices have must have ERTM timeouts
3142 * exceeding the Link Supervision Timeout. The
3143 * default Link Supervision Timeout for AMP
3144 * controllers is 10 seconds.
3146 * Class 1 devices use 0xffffffff for their
3147 * best-effort flush timeout, so the clamping logic
3148 * will result in a timeout that meets the above
3149 * requirement. ERTM timeouts are 16-bit values, so
3150 * the maximum timeout is 65.535 seconds.
3153 /* Convert timeout to milliseconds and round */
3154 ertm_to
= DIV_ROUND_UP_ULL(ertm_to
, 1000);
3156 /* This is the recommended formula for class 2 devices
3157 * that start ERTM timers when packets are sent to the
3160 ertm_to
= 3 * ertm_to
+ 500;
3162 if (ertm_to
> 0xffff)
3165 rfc
->retrans_timeout
= cpu_to_le16((u16
) ertm_to
);
3166 rfc
->monitor_timeout
= rfc
->retrans_timeout
;
3168 rfc
->retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
3169 rfc
->monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
3173 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
3175 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
3176 __l2cap_ews_supported(chan
->conn
)) {
3177 /* use extended control field */
3178 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
3179 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
3181 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
3182 L2CAP_DEFAULT_TX_WINDOW
);
3183 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
3185 chan
->ack_win
= chan
->tx_win
;
3188 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
3190 struct l2cap_conf_req
*req
= data
;
3191 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
3192 void *ptr
= req
->data
;
3195 BT_DBG("chan %p", chan
);
3197 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
3200 switch (chan
->mode
) {
3201 case L2CAP_MODE_STREAMING
:
3202 case L2CAP_MODE_ERTM
:
3203 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
3206 if (__l2cap_efs_supported(chan
->conn
))
3207 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
3211 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
3216 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
3217 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3219 switch (chan
->mode
) {
3220 case L2CAP_MODE_BASIC
:
3221 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
3222 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
3225 rfc
.mode
= L2CAP_MODE_BASIC
;
3227 rfc
.max_transmit
= 0;
3228 rfc
.retrans_timeout
= 0;
3229 rfc
.monitor_timeout
= 0;
3230 rfc
.max_pdu_size
= 0;
3232 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3233 (unsigned long) &rfc
);
3236 case L2CAP_MODE_ERTM
:
3237 rfc
.mode
= L2CAP_MODE_ERTM
;
3238 rfc
.max_transmit
= chan
->max_tx
;
3240 __l2cap_set_ertm_timeouts(chan
, &rfc
);
3242 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3243 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3245 rfc
.max_pdu_size
= cpu_to_le16(size
);
3247 l2cap_txwin_setup(chan
);
3249 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
3250 L2CAP_DEFAULT_TX_WINDOW
);
3252 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3253 (unsigned long) &rfc
);
3255 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3256 l2cap_add_opt_efs(&ptr
, chan
);
3258 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3259 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3262 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3263 if (chan
->fcs
== L2CAP_FCS_NONE
||
3264 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3265 chan
->fcs
= L2CAP_FCS_NONE
;
3266 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3271 case L2CAP_MODE_STREAMING
:
3272 l2cap_txwin_setup(chan
);
3273 rfc
.mode
= L2CAP_MODE_STREAMING
;
3275 rfc
.max_transmit
= 0;
3276 rfc
.retrans_timeout
= 0;
3277 rfc
.monitor_timeout
= 0;
3279 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
3280 L2CAP_EXT_HDR_SIZE
- L2CAP_SDULEN_SIZE
-
3282 rfc
.max_pdu_size
= cpu_to_le16(size
);
3284 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
3285 (unsigned long) &rfc
);
3287 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
3288 l2cap_add_opt_efs(&ptr
, chan
);
3290 if (chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
)
3291 if (chan
->fcs
== L2CAP_FCS_NONE
||
3292 test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
)) {
3293 chan
->fcs
= L2CAP_FCS_NONE
;
3294 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1,
3300 req
->dcid
= cpu_to_le16(chan
->dcid
);
3301 req
->flags
= __constant_cpu_to_le16(0);
/* Parse the peer's buffered Configure Request (chan->conf_req) and build
 * our Configure Response into 'data'.  Returns the response length, or
 * -ECONNREFUSED when the requested configuration cannot be negotiated.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (!chan->conn->hs_enabled)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo unknown non-hint options back as rejected */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = __constant_cpu_to_le16(0);

	return ptr - data;
}
3520 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
,
3521 void *data
, u16
*result
)
3523 struct l2cap_conf_req
*req
= data
;
3524 void *ptr
= req
->data
;
3527 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
3528 struct l2cap_conf_efs efs
;
3530 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
3532 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3533 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3536 case L2CAP_CONF_MTU
:
3537 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
3538 *result
= L2CAP_CONF_UNACCEPT
;
3539 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
3542 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
3545 case L2CAP_CONF_FLUSH_TO
:
3546 chan
->flush_to
= val
;
3547 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
3551 case L2CAP_CONF_RFC
:
3552 if (olen
== sizeof(rfc
))
3553 memcpy(&rfc
, (void *)val
, olen
);
3555 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
3556 rfc
.mode
!= chan
->mode
)
3557 return -ECONNREFUSED
;
3561 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
3562 sizeof(rfc
), (unsigned long) &rfc
);
3565 case L2CAP_CONF_EWS
:
3566 chan
->ack_win
= min_t(u16
, val
, chan
->ack_win
);
3567 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
3571 case L2CAP_CONF_EFS
:
3572 if (olen
== sizeof(efs
))
3573 memcpy(&efs
, (void *)val
, olen
);
3575 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
3576 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
3577 efs
.stype
!= chan
->local_stype
)
3578 return -ECONNREFUSED
;
3580 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
3581 (unsigned long) &efs
);
3584 case L2CAP_CONF_FCS
:
3585 if (*result
== L2CAP_CONF_PENDING
)
3586 if (val
== L2CAP_FCS_NONE
)
3587 set_bit(CONF_RECV_NO_FCS
,
3593 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
3594 return -ECONNREFUSED
;
3596 chan
->mode
= rfc
.mode
;
3598 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
3600 case L2CAP_MODE_ERTM
:
3601 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3602 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3603 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3604 if (!test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3605 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3608 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
3609 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
3610 chan
->local_sdu_itime
=
3611 le32_to_cpu(efs
.sdu_itime
);
3612 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
3613 chan
->local_flush_to
=
3614 le32_to_cpu(efs
.flush_to
);
3618 case L2CAP_MODE_STREAMING
:
3619 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3623 req
->dcid
= cpu_to_le16(chan
->dcid
);
3624 req
->flags
= __constant_cpu_to_le16(0);
3629 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3630 u16 result
, u16 flags
)
3632 struct l2cap_conf_rsp
*rsp
= data
;
3633 void *ptr
= rsp
->data
;
3635 BT_DBG("chan %p", chan
);
3637 rsp
->scid
= cpu_to_le16(chan
->dcid
);
3638 rsp
->result
= cpu_to_le16(result
);
3639 rsp
->flags
= cpu_to_le16(flags
);
3644 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
3646 struct l2cap_conn_rsp rsp
;
3647 struct l2cap_conn
*conn
= chan
->conn
;
3651 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3652 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3653 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
3654 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3657 rsp_code
= L2CAP_CREATE_CHAN_RSP
;
3659 rsp_code
= L2CAP_CONN_RSP
;
3661 BT_DBG("chan %p rsp_code %u", chan
, rsp_code
);
3663 l2cap_send_cmd(conn
, chan
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3665 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3668 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3669 l2cap_build_conf_req(chan
, buf
), buf
);
3670 chan
->num_conf_req
++;
3673 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
3677 /* Use sane default values in case a misbehaving remote device
3678 * did not send an RFC or extended window size option.
3680 u16 txwin_ext
= chan
->ack_win
;
3681 struct l2cap_conf_rfc rfc
= {
3683 .retrans_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
),
3684 .monitor_timeout
= __constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
),
3685 .max_pdu_size
= cpu_to_le16(chan
->imtu
),
3686 .txwin_size
= min_t(u16
, chan
->ack_win
, L2CAP_DEFAULT_TX_WINDOW
),
3689 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
3691 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
3694 while (len
>= L2CAP_CONF_OPT_SIZE
) {
3695 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
3698 case L2CAP_CONF_RFC
:
3699 if (olen
== sizeof(rfc
))
3700 memcpy(&rfc
, (void *)val
, olen
);
3702 case L2CAP_CONF_EWS
:
3709 case L2CAP_MODE_ERTM
:
3710 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
3711 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
3712 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3713 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3714 chan
->ack_win
= min_t(u16
, chan
->ack_win
, txwin_ext
);
3716 chan
->ack_win
= min_t(u16
, chan
->ack_win
,
3719 case L2CAP_MODE_STREAMING
:
3720 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
3724 static inline int l2cap_command_rej(struct l2cap_conn
*conn
,
3725 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3728 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
3730 if (cmd_len
< sizeof(*rej
))
3733 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
3736 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
3737 cmd
->ident
== conn
->info_ident
) {
3738 cancel_delayed_work(&conn
->info_timer
);
3740 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3741 conn
->info_ident
= 0;
3743 l2cap_conn_start(conn
);
3749 static struct l2cap_chan
*l2cap_connect(struct l2cap_conn
*conn
,
3750 struct l2cap_cmd_hdr
*cmd
,
3751 u8
*data
, u8 rsp_code
, u8 amp_id
)
3753 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
3754 struct l2cap_conn_rsp rsp
;
3755 struct l2cap_chan
*chan
= NULL
, *pchan
;
3756 int result
, status
= L2CAP_CS_NO_INFO
;
3758 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
3759 __le16 psm
= req
->psm
;
3761 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
3763 /* Check if we have socket listening on psm */
3764 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, &conn
->hcon
->src
,
3765 &conn
->hcon
->dst
, ACL_LINK
);
3767 result
= L2CAP_CR_BAD_PSM
;
3771 mutex_lock(&conn
->chan_lock
);
3772 l2cap_chan_lock(pchan
);
3774 /* Check if the ACL is secure enough (if not SDP) */
3775 if (psm
!= __constant_cpu_to_le16(L2CAP_PSM_SDP
) &&
3776 !hci_conn_check_link_mode(conn
->hcon
)) {
3777 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
3778 result
= L2CAP_CR_SEC_BLOCK
;
3782 result
= L2CAP_CR_NO_MEM
;
3784 /* Check if we already have channel with that dcid */
3785 if (__l2cap_get_chan_by_dcid(conn
, scid
))
3788 chan
= pchan
->ops
->new_connection(pchan
);
3792 /* For certain devices (ex: HID mouse), support for authentication,
3793 * pairing and bonding is optional. For such devices, inorder to avoid
3794 * the ACL alive for too long after L2CAP disconnection, reset the ACL
3795 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3797 conn
->hcon
->disc_timeout
= HCI_DISCONN_TIMEOUT
;
3799 bacpy(&chan
->src
, &conn
->hcon
->src
);
3800 bacpy(&chan
->dst
, &conn
->hcon
->dst
);
3801 chan
->src_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->src_type
);
3802 chan
->dst_type
= bdaddr_type(conn
->hcon
, conn
->hcon
->dst_type
);
3805 chan
->local_amp_id
= amp_id
;
3807 __l2cap_chan_add(conn
, chan
);
3811 __set_chan_timer(chan
, chan
->ops
->get_sndtimeo(chan
));
3813 chan
->ident
= cmd
->ident
;
3815 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3816 if (l2cap_chan_check_security(chan
)) {
3817 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
3818 l2cap_state_change(chan
, BT_CONNECT2
);
3819 result
= L2CAP_CR_PEND
;
3820 status
= L2CAP_CS_AUTHOR_PEND
;
3821 chan
->ops
->defer(chan
);
3823 /* Force pending result for AMP controllers.
3824 * The connection will succeed after the
3825 * physical link is up.
3827 if (amp_id
== AMP_ID_BREDR
) {
3828 l2cap_state_change(chan
, BT_CONFIG
);
3829 result
= L2CAP_CR_SUCCESS
;
3831 l2cap_state_change(chan
, BT_CONNECT2
);
3832 result
= L2CAP_CR_PEND
;
3834 status
= L2CAP_CS_NO_INFO
;
3837 l2cap_state_change(chan
, BT_CONNECT2
);
3838 result
= L2CAP_CR_PEND
;
3839 status
= L2CAP_CS_AUTHEN_PEND
;
3842 l2cap_state_change(chan
, BT_CONNECT2
);
3843 result
= L2CAP_CR_PEND
;
3844 status
= L2CAP_CS_NO_INFO
;
3848 l2cap_chan_unlock(pchan
);
3849 mutex_unlock(&conn
->chan_lock
);
3852 rsp
.scid
= cpu_to_le16(scid
);
3853 rsp
.dcid
= cpu_to_le16(dcid
);
3854 rsp
.result
= cpu_to_le16(result
);
3855 rsp
.status
= cpu_to_le16(status
);
3856 l2cap_send_cmd(conn
, cmd
->ident
, rsp_code
, sizeof(rsp
), &rsp
);
3858 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3859 struct l2cap_info_req info
;
3860 info
.type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3862 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3863 conn
->info_ident
= l2cap_get_ident(conn
);
3865 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3867 l2cap_send_cmd(conn
, conn
->info_ident
, L2CAP_INFO_REQ
,
3868 sizeof(info
), &info
);
3871 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3872 result
== L2CAP_CR_SUCCESS
) {
3874 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3875 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3876 l2cap_build_conf_req(chan
, buf
), buf
);
3877 chan
->num_conf_req
++;
3883 static int l2cap_connect_req(struct l2cap_conn
*conn
,
3884 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3886 struct hci_dev
*hdev
= conn
->hcon
->hdev
;
3887 struct hci_conn
*hcon
= conn
->hcon
;
3889 if (cmd_len
< sizeof(struct l2cap_conn_req
))
3893 if (test_bit(HCI_MGMT
, &hdev
->dev_flags
) &&
3894 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED
, &hcon
->flags
))
3895 mgmt_device_connected(hdev
, &hcon
->dst
, hcon
->type
,
3896 hcon
->dst_type
, 0, NULL
, 0,
3898 hci_dev_unlock(hdev
);
3900 l2cap_connect(conn
, cmd
, data
, L2CAP_CONN_RSP
, 0);
3904 static int l2cap_connect_create_rsp(struct l2cap_conn
*conn
,
3905 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3908 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3909 u16 scid
, dcid
, result
, status
;
3910 struct l2cap_chan
*chan
;
3914 if (cmd_len
< sizeof(*rsp
))
3917 scid
= __le16_to_cpu(rsp
->scid
);
3918 dcid
= __le16_to_cpu(rsp
->dcid
);
3919 result
= __le16_to_cpu(rsp
->result
);
3920 status
= __le16_to_cpu(rsp
->status
);
3922 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3923 dcid
, scid
, result
, status
);
3925 mutex_lock(&conn
->chan_lock
);
3928 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3934 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3943 l2cap_chan_lock(chan
);
3946 case L2CAP_CR_SUCCESS
:
3947 l2cap_state_change(chan
, BT_CONFIG
);
3950 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3952 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3955 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3956 l2cap_build_conf_req(chan
, req
), req
);
3957 chan
->num_conf_req
++;
3961 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3965 l2cap_chan_del(chan
, ECONNREFUSED
);
3969 l2cap_chan_unlock(chan
);
3972 mutex_unlock(&conn
->chan_lock
);
3977 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3979 /* FCS is enabled only in ERTM or streaming mode, if one or both
3982 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3983 chan
->fcs
= L2CAP_FCS_NONE
;
3984 else if (!test_bit(CONF_RECV_NO_FCS
, &chan
->conf_state
))
3985 chan
->fcs
= L2CAP_FCS_CRC16
;
3988 static void l2cap_send_efs_conf_rsp(struct l2cap_chan
*chan
, void *data
,
3989 u8 ident
, u16 flags
)
3991 struct l2cap_conn
*conn
= chan
->conn
;
3993 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn
, chan
, ident
,
3996 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3997 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3999 l2cap_send_cmd(conn
, ident
, L2CAP_CONF_RSP
,
4000 l2cap_build_conf_rsp(chan
, data
,
4001 L2CAP_CONF_SUCCESS
, flags
), data
);
4004 static void cmd_reject_invalid_cid(struct l2cap_conn
*conn
, u8 ident
,
4007 struct l2cap_cmd_rej_cid rej
;
4009 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_INVALID_CID
);
4010 rej
.scid
= __cpu_to_le16(scid
);
4011 rej
.dcid
= __cpu_to_le16(dcid
);
4013 l2cap_send_cmd(conn
, ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
4016 static inline int l2cap_config_req(struct l2cap_conn
*conn
,
4017 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4020 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
4023 struct l2cap_chan
*chan
;
4026 if (cmd_len
< sizeof(*req
))
4029 dcid
= __le16_to_cpu(req
->dcid
);
4030 flags
= __le16_to_cpu(req
->flags
);
4032 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
4034 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
4036 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, 0);
4040 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
4041 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4046 /* Reject if config buffer is too small. */
4047 len
= cmd_len
- sizeof(*req
);
4048 if (chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
4049 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4050 l2cap_build_conf_rsp(chan
, rsp
,
4051 L2CAP_CONF_REJECT
, flags
), rsp
);
4056 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
4057 chan
->conf_len
+= len
;
4059 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
) {
4060 /* Incomplete config. Send empty response. */
4061 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
4062 l2cap_build_conf_rsp(chan
, rsp
,
4063 L2CAP_CONF_SUCCESS
, flags
), rsp
);
4067 /* Complete config. */
4068 len
= l2cap_parse_conf_req(chan
, rsp
);
4070 l2cap_send_disconn_req(chan
, ECONNRESET
);
4074 chan
->ident
= cmd
->ident
;
4075 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
4076 chan
->num_conf_rsp
++;
4078 /* Reset config buffer. */
4081 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
4084 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4085 set_default_fcs(chan
);
4087 if (chan
->mode
== L2CAP_MODE_ERTM
||
4088 chan
->mode
== L2CAP_MODE_STREAMING
)
4089 err
= l2cap_ertm_init(chan
);
4092 l2cap_send_disconn_req(chan
, -err
);
4094 l2cap_chan_ready(chan
);
4099 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
4101 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
4102 l2cap_build_conf_req(chan
, buf
), buf
);
4103 chan
->num_conf_req
++;
4106 /* Got Conf Rsp PENDING from remote side and asume we sent
4107 Conf Rsp PENDING in the code above */
4108 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
4109 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4111 /* check compatibility */
4113 /* Send rsp for BR/EDR channel */
4115 l2cap_send_efs_conf_rsp(chan
, rsp
, cmd
->ident
, flags
);
4117 chan
->ident
= cmd
->ident
;
4121 l2cap_chan_unlock(chan
);
4125 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
,
4126 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4129 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
4130 u16 scid
, flags
, result
;
4131 struct l2cap_chan
*chan
;
4132 int len
= cmd_len
- sizeof(*rsp
);
4135 if (cmd_len
< sizeof(*rsp
))
4138 scid
= __le16_to_cpu(rsp
->scid
);
4139 flags
= __le16_to_cpu(rsp
->flags
);
4140 result
= __le16_to_cpu(rsp
->result
);
4142 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
4145 chan
= l2cap_get_chan_by_scid(conn
, scid
);
4150 case L2CAP_CONF_SUCCESS
:
4151 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
4152 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4155 case L2CAP_CONF_PENDING
:
4156 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
4158 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
4161 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4164 l2cap_send_disconn_req(chan
, ECONNRESET
);
4168 if (!chan
->hs_hcon
) {
4169 l2cap_send_efs_conf_rsp(chan
, buf
, cmd
->ident
,
4172 if (l2cap_check_efs(chan
)) {
4173 amp_create_logical_link(chan
);
4174 chan
->ident
= cmd
->ident
;
4180 case L2CAP_CONF_UNACCEPT
:
4181 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
4184 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
4185 l2cap_send_disconn_req(chan
, ECONNRESET
);
4189 /* throw out any old stored conf requests */
4190 result
= L2CAP_CONF_SUCCESS
;
4191 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
4194 l2cap_send_disconn_req(chan
, ECONNRESET
);
4198 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
4199 L2CAP_CONF_REQ
, len
, req
);
4200 chan
->num_conf_req
++;
4201 if (result
!= L2CAP_CONF_SUCCESS
)
4207 l2cap_chan_set_err(chan
, ECONNRESET
);
4209 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
4210 l2cap_send_disconn_req(chan
, ECONNRESET
);
4214 if (flags
& L2CAP_CONF_FLAG_CONTINUATION
)
4217 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
4219 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
4220 set_default_fcs(chan
);
4222 if (chan
->mode
== L2CAP_MODE_ERTM
||
4223 chan
->mode
== L2CAP_MODE_STREAMING
)
4224 err
= l2cap_ertm_init(chan
);
4227 l2cap_send_disconn_req(chan
, -err
);
4229 l2cap_chan_ready(chan
);
4233 l2cap_chan_unlock(chan
);
4237 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
,
4238 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4241 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
4242 struct l2cap_disconn_rsp rsp
;
4244 struct l2cap_chan
*chan
;
4246 if (cmd_len
!= sizeof(*req
))
4249 scid
= __le16_to_cpu(req
->scid
);
4250 dcid
= __le16_to_cpu(req
->dcid
);
4252 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
4254 mutex_lock(&conn
->chan_lock
);
4256 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
4258 mutex_unlock(&conn
->chan_lock
);
4259 cmd_reject_invalid_cid(conn
, cmd
->ident
, dcid
, scid
);
4263 l2cap_chan_lock(chan
);
4265 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4266 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4267 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
4269 chan
->ops
->set_shutdown(chan
);
4271 l2cap_chan_hold(chan
);
4272 l2cap_chan_del(chan
, ECONNRESET
);
4274 l2cap_chan_unlock(chan
);
4276 chan
->ops
->close(chan
);
4277 l2cap_chan_put(chan
);
4279 mutex_unlock(&conn
->chan_lock
);
4284 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
,
4285 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4288 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
4290 struct l2cap_chan
*chan
;
4292 if (cmd_len
!= sizeof(*rsp
))
4295 scid
= __le16_to_cpu(rsp
->scid
);
4296 dcid
= __le16_to_cpu(rsp
->dcid
);
4298 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
4300 mutex_lock(&conn
->chan_lock
);
4302 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
4304 mutex_unlock(&conn
->chan_lock
);
4308 l2cap_chan_lock(chan
);
4310 l2cap_chan_hold(chan
);
4311 l2cap_chan_del(chan
, 0);
4313 l2cap_chan_unlock(chan
);
4315 chan
->ops
->close(chan
);
4316 l2cap_chan_put(chan
);
4318 mutex_unlock(&conn
->chan_lock
);
4323 static inline int l2cap_information_req(struct l2cap_conn
*conn
,
4324 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4327 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
4330 if (cmd_len
!= sizeof(*req
))
4333 type
= __le16_to_cpu(req
->type
);
4335 BT_DBG("type 0x%4.4x", type
);
4337 if (type
== L2CAP_IT_FEAT_MASK
) {
4339 u32 feat_mask
= l2cap_feat_mask
;
4340 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4341 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FEAT_MASK
);
4342 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4344 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
4346 if (conn
->hs_enabled
)
4347 feat_mask
|= L2CAP_FEAT_EXT_FLOW
4348 | L2CAP_FEAT_EXT_WINDOW
;
4350 put_unaligned_le32(feat_mask
, rsp
->data
);
4351 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4353 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
4355 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
4357 if (conn
->hs_enabled
)
4358 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
4360 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
4362 rsp
->type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4363 rsp
->result
= __constant_cpu_to_le16(L2CAP_IR_SUCCESS
);
4364 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
4365 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(buf
),
4368 struct l2cap_info_rsp rsp
;
4369 rsp
.type
= cpu_to_le16(type
);
4370 rsp
.result
= __constant_cpu_to_le16(L2CAP_IR_NOTSUPP
);
4371 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_INFO_RSP
, sizeof(rsp
),
4378 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
,
4379 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
4382 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
4385 if (cmd_len
< sizeof(*rsp
))
4388 type
= __le16_to_cpu(rsp
->type
);
4389 result
= __le16_to_cpu(rsp
->result
);
4391 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
4393 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4394 if (cmd
->ident
!= conn
->info_ident
||
4395 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
4398 cancel_delayed_work(&conn
->info_timer
);
4400 if (result
!= L2CAP_IR_SUCCESS
) {
4401 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4402 conn
->info_ident
= 0;
4404 l2cap_conn_start(conn
);
4410 case L2CAP_IT_FEAT_MASK
:
4411 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
4413 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
4414 struct l2cap_info_req req
;
4415 req
.type
= __constant_cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
4417 conn
->info_ident
= l2cap_get_ident(conn
);
4419 l2cap_send_cmd(conn
, conn
->info_ident
,
4420 L2CAP_INFO_REQ
, sizeof(req
), &req
);
4422 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4423 conn
->info_ident
= 0;
4425 l2cap_conn_start(conn
);
4429 case L2CAP_IT_FIXED_CHAN
:
4430 conn
->fixed_chan_mask
= rsp
->data
[0];
4431 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
4432 conn
->info_ident
= 0;
4434 l2cap_conn_start(conn
);
4441 static int l2cap_create_channel_req(struct l2cap_conn
*conn
,
4442 struct l2cap_cmd_hdr
*cmd
,
4443 u16 cmd_len
, void *data
)
4445 struct l2cap_create_chan_req
*req
= data
;
4446 struct l2cap_create_chan_rsp rsp
;
4447 struct l2cap_chan
*chan
;
4448 struct hci_dev
*hdev
;
4451 if (cmd_len
!= sizeof(*req
))
4454 if (!conn
->hs_enabled
)
4457 psm
= le16_to_cpu(req
->psm
);
4458 scid
= le16_to_cpu(req
->scid
);
4460 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm
, scid
, req
->amp_id
);
4462 /* For controller id 0 make BR/EDR connection */
4463 if (req
->amp_id
== AMP_ID_BREDR
) {
4464 l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4469 /* Validate AMP controller id */
4470 hdev
= hci_dev_get(req
->amp_id
);
4474 if (hdev
->dev_type
!= HCI_AMP
|| !test_bit(HCI_UP
, &hdev
->flags
)) {
4479 chan
= l2cap_connect(conn
, cmd
, data
, L2CAP_CREATE_CHAN_RSP
,
4482 struct amp_mgr
*mgr
= conn
->hcon
->amp_mgr
;
4483 struct hci_conn
*hs_hcon
;
4485 hs_hcon
= hci_conn_hash_lookup_ba(hdev
, AMP_LINK
,
4489 cmd_reject_invalid_cid(conn
, cmd
->ident
, chan
->scid
,
4494 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr
, chan
, hs_hcon
);
4496 mgr
->bredr_chan
= chan
;
4497 chan
->hs_hcon
= hs_hcon
;
4498 chan
->fcs
= L2CAP_FCS_NONE
;
4499 conn
->mtu
= hdev
->block_mtu
;
4508 rsp
.scid
= cpu_to_le16(scid
);
4509 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_BAD_AMP
);
4510 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4512 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
4518 static void l2cap_send_move_chan_req(struct l2cap_chan
*chan
, u8 dest_amp_id
)
4520 struct l2cap_move_chan_req req
;
4523 BT_DBG("chan %p, dest_amp_id %d", chan
, dest_amp_id
);
4525 ident
= l2cap_get_ident(chan
->conn
);
4526 chan
->ident
= ident
;
4528 req
.icid
= cpu_to_le16(chan
->scid
);
4529 req
.dest_amp_id
= dest_amp_id
;
4531 l2cap_send_cmd(chan
->conn
, ident
, L2CAP_MOVE_CHAN_REQ
, sizeof(req
),
4534 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4537 static void l2cap_send_move_chan_rsp(struct l2cap_chan
*chan
, u16 result
)
4539 struct l2cap_move_chan_rsp rsp
;
4541 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4543 rsp
.icid
= cpu_to_le16(chan
->dcid
);
4544 rsp
.result
= cpu_to_le16(result
);
4546 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_RSP
,
4550 static void l2cap_send_move_chan_cfm(struct l2cap_chan
*chan
, u16 result
)
4552 struct l2cap_move_chan_cfm cfm
;
4554 BT_DBG("chan %p, result 0x%4.4x", chan
, result
);
4556 chan
->ident
= l2cap_get_ident(chan
->conn
);
4558 cfm
.icid
= cpu_to_le16(chan
->scid
);
4559 cfm
.result
= cpu_to_le16(result
);
4561 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_MOVE_CHAN_CFM
,
4564 __set_chan_timer(chan
, L2CAP_MOVE_TIMEOUT
);
4567 static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn
*conn
, u16 icid
)
4569 struct l2cap_move_chan_cfm cfm
;
4571 BT_DBG("conn %p, icid 0x%4.4x", conn
, icid
);
4573 cfm
.icid
= cpu_to_le16(icid
);
4574 cfm
.result
= __constant_cpu_to_le16(L2CAP_MC_UNCONFIRMED
);
4576 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_MOVE_CHAN_CFM
,
4580 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
4583 struct l2cap_move_chan_cfm_rsp rsp
;
4585 BT_DBG("icid 0x%4.4x", icid
);
4587 rsp
.icid
= cpu_to_le16(icid
);
4588 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
4591 static void __release_logical_link(struct l2cap_chan
*chan
)
4593 chan
->hs_hchan
= NULL
;
4594 chan
->hs_hcon
= NULL
;
4596 /* Placeholder - release the logical link */
4599 static void l2cap_logical_fail(struct l2cap_chan
*chan
)
4601 /* Logical link setup failed */
4602 if (chan
->state
!= BT_CONNECTED
) {
4603 /* Create channel failure, disconnect */
4604 l2cap_send_disconn_req(chan
, ECONNRESET
);
4608 switch (chan
->move_role
) {
4609 case L2CAP_MOVE_ROLE_RESPONDER
:
4610 l2cap_move_done(chan
);
4611 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_SUPP
);
4613 case L2CAP_MOVE_ROLE_INITIATOR
:
4614 if (chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_COMP
||
4615 chan
->move_state
== L2CAP_MOVE_WAIT_LOGICAL_CFM
) {
4616 /* Remote has only sent pending or
4617 * success responses, clean up
4619 l2cap_move_done(chan
);
4622 /* Other amp move states imply that the move
4623 * has already aborted
4625 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
4630 static void l2cap_logical_finish_create(struct l2cap_chan
*chan
,
4631 struct hci_chan
*hchan
)
4633 struct l2cap_conf_rsp rsp
;
4635 chan
->hs_hchan
= hchan
;
4636 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4638 l2cap_send_efs_conf_rsp(chan
, &rsp
, chan
->ident
, 0);
4640 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
4643 set_default_fcs(chan
);
4645 err
= l2cap_ertm_init(chan
);
4647 l2cap_send_disconn_req(chan
, -err
);
4649 l2cap_chan_ready(chan
);
4653 static void l2cap_logical_finish_move(struct l2cap_chan
*chan
,
4654 struct hci_chan
*hchan
)
4656 chan
->hs_hcon
= hchan
->conn
;
4657 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4659 BT_DBG("move_state %d", chan
->move_state
);
4661 switch (chan
->move_state
) {
4662 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4663 /* Move confirm will be sent after a success
4664 * response is received
4666 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4668 case L2CAP_MOVE_WAIT_LOGICAL_CFM
:
4669 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4670 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4671 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
4672 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4673 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
4674 } else if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4675 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4676 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4680 /* Move was not in expected state, free the channel */
4681 __release_logical_link(chan
);
4683 chan
->move_state
= L2CAP_MOVE_STABLE
;
4687 /* Call with chan locked */
4688 void l2cap_logical_cfm(struct l2cap_chan
*chan
, struct hci_chan
*hchan
,
4691 BT_DBG("chan %p, hchan %p, status %d", chan
, hchan
, status
);
4694 l2cap_logical_fail(chan
);
4695 __release_logical_link(chan
);
4699 if (chan
->state
!= BT_CONNECTED
) {
4700 /* Ignore logical link if channel is on BR/EDR */
4701 if (chan
->local_amp_id
!= AMP_ID_BREDR
)
4702 l2cap_logical_finish_create(chan
, hchan
);
4704 l2cap_logical_finish_move(chan
, hchan
);
4708 void l2cap_move_start(struct l2cap_chan
*chan
)
4710 BT_DBG("chan %p", chan
);
4712 if (chan
->local_amp_id
== AMP_ID_BREDR
) {
4713 if (chan
->chan_policy
!= BT_CHANNEL_POLICY_AMP_PREFERRED
)
4715 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4716 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4717 /* Placeholder - start physical link setup */
4719 chan
->move_role
= L2CAP_MOVE_ROLE_INITIATOR
;
4720 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
4722 l2cap_move_setup(chan
);
4723 l2cap_send_move_chan_req(chan
, 0);
4727 static void l2cap_do_create(struct l2cap_chan
*chan
, int result
,
4728 u8 local_amp_id
, u8 remote_amp_id
)
4730 BT_DBG("chan %p state %s %u -> %u", chan
, state_to_string(chan
->state
),
4731 local_amp_id
, remote_amp_id
);
4733 chan
->fcs
= L2CAP_FCS_NONE
;
4735 /* Outgoing channel on AMP */
4736 if (chan
->state
== BT_CONNECT
) {
4737 if (result
== L2CAP_CR_SUCCESS
) {
4738 chan
->local_amp_id
= local_amp_id
;
4739 l2cap_send_create_chan_req(chan
, remote_amp_id
);
4741 /* Revert to BR/EDR connect */
4742 l2cap_send_conn_req(chan
);
4748 /* Incoming channel on AMP */
4749 if (__l2cap_no_conn_pending(chan
)) {
4750 struct l2cap_conn_rsp rsp
;
4752 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4753 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4755 if (result
== L2CAP_CR_SUCCESS
) {
4756 /* Send successful response */
4757 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_SUCCESS
);
4758 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4760 /* Send negative response */
4761 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
4762 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
4765 l2cap_send_cmd(chan
->conn
, chan
->ident
, L2CAP_CREATE_CHAN_RSP
,
4768 if (result
== L2CAP_CR_SUCCESS
) {
4769 l2cap_state_change(chan
, BT_CONFIG
);
4770 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
4771 l2cap_send_cmd(chan
->conn
, l2cap_get_ident(chan
->conn
),
4773 l2cap_build_conf_req(chan
, buf
), buf
);
4774 chan
->num_conf_req
++;
4779 static void l2cap_do_move_initiate(struct l2cap_chan
*chan
, u8 local_amp_id
,
4782 l2cap_move_setup(chan
);
4783 chan
->move_id
= local_amp_id
;
4784 chan
->move_state
= L2CAP_MOVE_WAIT_RSP
;
4786 l2cap_send_move_chan_req(chan
, remote_amp_id
);
4789 static void l2cap_do_move_respond(struct l2cap_chan
*chan
, int result
)
4791 struct hci_chan
*hchan
= NULL
;
4793 /* Placeholder - get hci_chan for logical link */
4796 if (hchan
->state
== BT_CONNECTED
) {
4797 /* Logical link is ready to go */
4798 chan
->hs_hcon
= hchan
->conn
;
4799 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
4800 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4801 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_SUCCESS
);
4803 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
4805 /* Wait for logical link to be ready */
4806 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4809 /* Logical link not available */
4810 l2cap_send_move_chan_rsp(chan
, L2CAP_MR_NOT_ALLOWED
);
4814 static void l2cap_do_move_cancel(struct l2cap_chan
*chan
, int result
)
4816 if (chan
->move_role
== L2CAP_MOVE_ROLE_RESPONDER
) {
4818 if (result
== -EINVAL
)
4819 rsp_result
= L2CAP_MR_BAD_ID
;
4821 rsp_result
= L2CAP_MR_NOT_ALLOWED
;
4823 l2cap_send_move_chan_rsp(chan
, rsp_result
);
4826 chan
->move_role
= L2CAP_MOVE_ROLE_NONE
;
4827 chan
->move_state
= L2CAP_MOVE_STABLE
;
4829 /* Restart data transmission */
4830 l2cap_ertm_send(chan
);
4833 /* Invoke with locked chan */
4834 void __l2cap_physical_cfm(struct l2cap_chan
*chan
, int result
)
4836 u8 local_amp_id
= chan
->local_amp_id
;
4837 u8 remote_amp_id
= chan
->remote_amp_id
;
4839 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4840 chan
, result
, local_amp_id
, remote_amp_id
);
4842 if (chan
->state
== BT_DISCONN
|| chan
->state
== BT_CLOSED
) {
4843 l2cap_chan_unlock(chan
);
4847 if (chan
->state
!= BT_CONNECTED
) {
4848 l2cap_do_create(chan
, result
, local_amp_id
, remote_amp_id
);
4849 } else if (result
!= L2CAP_MR_SUCCESS
) {
4850 l2cap_do_move_cancel(chan
, result
);
4852 switch (chan
->move_role
) {
4853 case L2CAP_MOVE_ROLE_INITIATOR
:
4854 l2cap_do_move_initiate(chan
, local_amp_id
,
4857 case L2CAP_MOVE_ROLE_RESPONDER
:
4858 l2cap_do_move_respond(chan
, result
);
4861 l2cap_do_move_cancel(chan
, result
);
4867 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
4868 struct l2cap_cmd_hdr
*cmd
,
4869 u16 cmd_len
, void *data
)
4871 struct l2cap_move_chan_req
*req
= data
;
4872 struct l2cap_move_chan_rsp rsp
;
4873 struct l2cap_chan
*chan
;
4875 u16 result
= L2CAP_MR_NOT_ALLOWED
;
4877 if (cmd_len
!= sizeof(*req
))
4880 icid
= le16_to_cpu(req
->icid
);
4882 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid
, req
->dest_amp_id
);
4884 if (!conn
->hs_enabled
)
4887 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
4889 rsp
.icid
= cpu_to_le16(icid
);
4890 rsp
.result
= __constant_cpu_to_le16(L2CAP_MR_NOT_ALLOWED
);
4891 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_MOVE_CHAN_RSP
,
4896 chan
->ident
= cmd
->ident
;
4898 if (chan
->scid
< L2CAP_CID_DYN_START
||
4899 chan
->chan_policy
== BT_CHANNEL_POLICY_BREDR_ONLY
||
4900 (chan
->mode
!= L2CAP_MODE_ERTM
&&
4901 chan
->mode
!= L2CAP_MODE_STREAMING
)) {
4902 result
= L2CAP_MR_NOT_ALLOWED
;
4903 goto send_move_response
;
4906 if (chan
->local_amp_id
== req
->dest_amp_id
) {
4907 result
= L2CAP_MR_SAME_ID
;
4908 goto send_move_response
;
4911 if (req
->dest_amp_id
!= AMP_ID_BREDR
) {
4912 struct hci_dev
*hdev
;
4913 hdev
= hci_dev_get(req
->dest_amp_id
);
4914 if (!hdev
|| hdev
->dev_type
!= HCI_AMP
||
4915 !test_bit(HCI_UP
, &hdev
->flags
)) {
4919 result
= L2CAP_MR_BAD_ID
;
4920 goto send_move_response
;
4925 /* Detect a move collision. Only send a collision response
4926 * if this side has "lost", otherwise proceed with the move.
4927 * The winner has the larger bd_addr.
4929 if ((__chan_is_moving(chan
) ||
4930 chan
->move_role
!= L2CAP_MOVE_ROLE_NONE
) &&
4931 bacmp(&conn
->hcon
->src
, &conn
->hcon
->dst
) > 0) {
4932 result
= L2CAP_MR_COLLISION
;
4933 goto send_move_response
;
4936 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
4937 l2cap_move_setup(chan
);
4938 chan
->move_id
= req
->dest_amp_id
;
4941 if (req
->dest_amp_id
== AMP_ID_BREDR
) {
4942 /* Moving to BR/EDR */
4943 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4944 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4945 result
= L2CAP_MR_PEND
;
4947 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM
;
4948 result
= L2CAP_MR_SUCCESS
;
4951 chan
->move_state
= L2CAP_MOVE_WAIT_PREPARE
;
4952 /* Placeholder - uncomment when amp functions are available */
4953 /*amp_accept_physical(chan, req->dest_amp_id);*/
4954 result
= L2CAP_MR_PEND
;
4958 l2cap_send_move_chan_rsp(chan
, result
);
4960 l2cap_chan_unlock(chan
);
4965 static void l2cap_move_continue(struct l2cap_conn
*conn
, u16 icid
, u16 result
)
4967 struct l2cap_chan
*chan
;
4968 struct hci_chan
*hchan
= NULL
;
4970 chan
= l2cap_get_chan_by_scid(conn
, icid
);
4972 l2cap_send_move_chan_cfm_icid(conn
, icid
);
4976 __clear_chan_timer(chan
);
4977 if (result
== L2CAP_MR_PEND
)
4978 __set_chan_timer(chan
, L2CAP_MOVE_ERTX_TIMEOUT
);
4980 switch (chan
->move_state
) {
4981 case L2CAP_MOVE_WAIT_LOGICAL_COMP
:
4982 /* Move confirm will be sent when logical link
4985 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
4987 case L2CAP_MOVE_WAIT_RSP_SUCCESS
:
4988 if (result
== L2CAP_MR_PEND
) {
4990 } else if (test_bit(CONN_LOCAL_BUSY
,
4991 &chan
->conn_state
)) {
4992 chan
->move_state
= L2CAP_MOVE_WAIT_LOCAL_BUSY
;
4994 /* Logical link is up or moving to BR/EDR,
4997 chan
->move_state
= L2CAP_MOVE_WAIT_CONFIRM_RSP
;
4998 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5001 case L2CAP_MOVE_WAIT_RSP
:
5003 if (result
== L2CAP_MR_SUCCESS
) {
5004 /* Remote is ready, send confirm immediately
5005 * after logical link is ready
5007 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_CFM
;
5009 /* Both logical link and move success
5010 * are required to confirm
5012 chan
->move_state
= L2CAP_MOVE_WAIT_LOGICAL_COMP
;
5015 /* Placeholder - get hci_chan for logical link */
5017 /* Logical link not available */
5018 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5022 /* If the logical link is not yet connected, do not
5023 * send confirmation.
5025 if (hchan
->state
!= BT_CONNECTED
)
5028 /* Logical link is already ready to go */
5030 chan
->hs_hcon
= hchan
->conn
;
5031 chan
->hs_hcon
->l2cap_data
= chan
->conn
;
5033 if (result
== L2CAP_MR_SUCCESS
) {
5034 /* Can confirm now */
5035 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_CONFIRMED
);
5037 /* Now only need move success
5040 chan
->move_state
= L2CAP_MOVE_WAIT_RSP_SUCCESS
;
5043 l2cap_logical_cfm(chan
, hchan
, L2CAP_MR_SUCCESS
);
5046 /* Any other amp move state means the move failed. */
5047 chan
->move_id
= chan
->local_amp_id
;
5048 l2cap_move_done(chan
);
5049 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5052 l2cap_chan_unlock(chan
);
5055 static void l2cap_move_fail(struct l2cap_conn
*conn
, u8 ident
, u16 icid
,
5058 struct l2cap_chan
*chan
;
5060 chan
= l2cap_get_chan_by_ident(conn
, ident
);
5062 /* Could not locate channel, icid is best guess */
5063 l2cap_send_move_chan_cfm_icid(conn
, icid
);
5067 __clear_chan_timer(chan
);
5069 if (chan
->move_role
== L2CAP_MOVE_ROLE_INITIATOR
) {
5070 if (result
== L2CAP_MR_COLLISION
) {
5071 chan
->move_role
= L2CAP_MOVE_ROLE_RESPONDER
;
5073 /* Cleanup - cancel move */
5074 chan
->move_id
= chan
->local_amp_id
;
5075 l2cap_move_done(chan
);
5079 l2cap_send_move_chan_cfm(chan
, L2CAP_MC_UNCONFIRMED
);
5081 l2cap_chan_unlock(chan
);
5084 static int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
5085 struct l2cap_cmd_hdr
*cmd
,
5086 u16 cmd_len
, void *data
)
5088 struct l2cap_move_chan_rsp
*rsp
= data
;
5091 if (cmd_len
!= sizeof(*rsp
))
5094 icid
= le16_to_cpu(rsp
->icid
);
5095 result
= le16_to_cpu(rsp
->result
);
5097 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5099 if (result
== L2CAP_MR_SUCCESS
|| result
== L2CAP_MR_PEND
)
5100 l2cap_move_continue(conn
, icid
, result
);
5102 l2cap_move_fail(conn
, cmd
->ident
, icid
, result
);
5107 static int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
5108 struct l2cap_cmd_hdr
*cmd
,
5109 u16 cmd_len
, void *data
)
5111 struct l2cap_move_chan_cfm
*cfm
= data
;
5112 struct l2cap_chan
*chan
;
5115 if (cmd_len
!= sizeof(*cfm
))
5118 icid
= le16_to_cpu(cfm
->icid
);
5119 result
= le16_to_cpu(cfm
->result
);
5121 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid
, result
);
5123 chan
= l2cap_get_chan_by_dcid(conn
, icid
);
5125 /* Spec requires a response even if the icid was not found */
5126 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5130 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM
) {
5131 if (result
== L2CAP_MC_CONFIRMED
) {
5132 chan
->local_amp_id
= chan
->move_id
;
5133 if (chan
->local_amp_id
== AMP_ID_BREDR
)
5134 __release_logical_link(chan
);
5136 chan
->move_id
= chan
->local_amp_id
;
5139 l2cap_move_done(chan
);
5142 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
5144 l2cap_chan_unlock(chan
);
5149 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
5150 struct l2cap_cmd_hdr
*cmd
,
5151 u16 cmd_len
, void *data
)
5153 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
5154 struct l2cap_chan
*chan
;
5157 if (cmd_len
!= sizeof(*rsp
))
5160 icid
= le16_to_cpu(rsp
->icid
);
5162 BT_DBG("icid 0x%4.4x", icid
);
5164 chan
= l2cap_get_chan_by_scid(conn
, icid
);
5168 __clear_chan_timer(chan
);
5170 if (chan
->move_state
== L2CAP_MOVE_WAIT_CONFIRM_RSP
) {
5171 chan
->local_amp_id
= chan
->move_id
;
5173 if (chan
->local_amp_id
== AMP_ID_BREDR
&& chan
->hs_hchan
)
5174 __release_logical_link(chan
);
5176 l2cap_move_done(chan
);
5179 l2cap_chan_unlock(chan
);
5184 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
5189 if (min
> max
|| min
< 6 || max
> 3200)
5192 if (to_multiplier
< 10 || to_multiplier
> 3200)
5195 if (max
>= to_multiplier
* 8)
5198 max_latency
= (to_multiplier
* 8 / max
) - 1;
5199 if (latency
> 499 || latency
> max_latency
)
5205 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
5206 struct l2cap_cmd_hdr
*cmd
,
5207 u16 cmd_len
, u8
*data
)
5209 struct hci_conn
*hcon
= conn
->hcon
;
5210 struct l2cap_conn_param_update_req
*req
;
5211 struct l2cap_conn_param_update_rsp rsp
;
5212 u16 min
, max
, latency
, to_multiplier
;
5215 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
5218 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
5221 req
= (struct l2cap_conn_param_update_req
*) data
;
5222 min
= __le16_to_cpu(req
->min
);
5223 max
= __le16_to_cpu(req
->max
);
5224 latency
= __le16_to_cpu(req
->latency
);
5225 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
5227 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5228 min
, max
, latency
, to_multiplier
);
5230 memset(&rsp
, 0, sizeof(rsp
));
5232 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
5234 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
5236 rsp
.result
= __constant_cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
5238 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
5242 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
5247 static int l2cap_le_connect_rsp(struct l2cap_conn
*conn
,
5248 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5251 struct l2cap_le_conn_rsp
*rsp
= (struct l2cap_le_conn_rsp
*) data
;
5252 u16 dcid
, mtu
, mps
, credits
, result
;
5253 struct l2cap_chan
*chan
;
5256 if (cmd_len
< sizeof(*rsp
))
5259 dcid
= __le16_to_cpu(rsp
->dcid
);
5260 mtu
= __le16_to_cpu(rsp
->mtu
);
5261 mps
= __le16_to_cpu(rsp
->mps
);
5262 credits
= __le16_to_cpu(rsp
->credits
);
5263 result
= __le16_to_cpu(rsp
->result
);
5265 if (result
== L2CAP_CR_SUCCESS
&& (mtu
< 23 || mps
< 23))
5268 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5269 dcid
, mtu
, mps
, credits
, result
);
5271 mutex_lock(&conn
->chan_lock
);
5273 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
5281 l2cap_chan_lock(chan
);
5284 case L2CAP_CR_SUCCESS
:
5288 chan
->remote_mps
= mps
;
5289 l2cap_chan_ready(chan
);
5293 l2cap_chan_del(chan
, ECONNREFUSED
);
5297 l2cap_chan_unlock(chan
);
5300 mutex_unlock(&conn
->chan_lock
);
5305 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
5306 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5311 switch (cmd
->code
) {
5312 case L2CAP_COMMAND_REJ
:
5313 l2cap_command_rej(conn
, cmd
, cmd_len
, data
);
5316 case L2CAP_CONN_REQ
:
5317 err
= l2cap_connect_req(conn
, cmd
, cmd_len
, data
);
5320 case L2CAP_CONN_RSP
:
5321 case L2CAP_CREATE_CHAN_RSP
:
5322 l2cap_connect_create_rsp(conn
, cmd
, cmd_len
, data
);
5325 case L2CAP_CONF_REQ
:
5326 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
5329 case L2CAP_CONF_RSP
:
5330 l2cap_config_rsp(conn
, cmd
, cmd_len
, data
);
5333 case L2CAP_DISCONN_REQ
:
5334 err
= l2cap_disconnect_req(conn
, cmd
, cmd_len
, data
);
5337 case L2CAP_DISCONN_RSP
:
5338 l2cap_disconnect_rsp(conn
, cmd
, cmd_len
, data
);
5341 case L2CAP_ECHO_REQ
:
5342 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
5345 case L2CAP_ECHO_RSP
:
5348 case L2CAP_INFO_REQ
:
5349 err
= l2cap_information_req(conn
, cmd
, cmd_len
, data
);
5352 case L2CAP_INFO_RSP
:
5353 l2cap_information_rsp(conn
, cmd
, cmd_len
, data
);
5356 case L2CAP_CREATE_CHAN_REQ
:
5357 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
5360 case L2CAP_MOVE_CHAN_REQ
:
5361 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
5364 case L2CAP_MOVE_CHAN_RSP
:
5365 l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
5368 case L2CAP_MOVE_CHAN_CFM
:
5369 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
5372 case L2CAP_MOVE_CHAN_CFM_RSP
:
5373 l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
5377 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
5385 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
5386 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
5389 switch (cmd
->code
) {
5390 case L2CAP_COMMAND_REJ
:
5393 case L2CAP_CONN_PARAM_UPDATE_REQ
:
5394 return l2cap_conn_param_update_req(conn
, cmd
, cmd_len
, data
);
5396 case L2CAP_CONN_PARAM_UPDATE_RSP
:
5399 case L2CAP_LE_CONN_RSP
:
5400 l2cap_le_connect_rsp(conn
, cmd
, cmd_len
, data
);
5404 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
5409 static inline void l2cap_le_sig_channel(struct l2cap_conn
*conn
,
5410 struct sk_buff
*skb
)
5412 struct hci_conn
*hcon
= conn
->hcon
;
5413 struct l2cap_cmd_hdr
*cmd
;
5417 if (hcon
->type
!= LE_LINK
)
5420 if (skb
->len
< L2CAP_CMD_HDR_SIZE
)
5423 cmd
= (void *) skb
->data
;
5424 skb_pull(skb
, L2CAP_CMD_HDR_SIZE
);
5426 len
= le16_to_cpu(cmd
->len
);
5428 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
->code
, len
, cmd
->ident
);
5430 if (len
!= skb
->len
|| !cmd
->ident
) {
5431 BT_DBG("corrupted command");
5435 err
= l2cap_le_sig_cmd(conn
, cmd
, len
, skb
->data
);
5437 struct l2cap_cmd_rej_unk rej
;
5439 BT_ERR("Wrong link type (%d)", err
);
5441 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5442 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
5450 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
5451 struct sk_buff
*skb
)
5453 struct hci_conn
*hcon
= conn
->hcon
;
5454 u8
*data
= skb
->data
;
5456 struct l2cap_cmd_hdr cmd
;
5459 l2cap_raw_recv(conn
, skb
);
5461 if (hcon
->type
!= ACL_LINK
)
5464 while (len
>= L2CAP_CMD_HDR_SIZE
) {
5466 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
5467 data
+= L2CAP_CMD_HDR_SIZE
;
5468 len
-= L2CAP_CMD_HDR_SIZE
;
5470 cmd_len
= le16_to_cpu(cmd
.len
);
5472 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
,
5475 if (cmd_len
> len
|| !cmd
.ident
) {
5476 BT_DBG("corrupted command");
5480 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
5482 struct l2cap_cmd_rej_unk rej
;
5484 BT_ERR("Wrong link type (%d)", err
);
5486 rej
.reason
= __constant_cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
5487 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
,
5499 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
5501 u16 our_fcs
, rcv_fcs
;
5504 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
5505 hdr_size
= L2CAP_EXT_HDR_SIZE
;
5507 hdr_size
= L2CAP_ENH_HDR_SIZE
;
5509 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
5510 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
5511 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
5512 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
5514 if (our_fcs
!= rcv_fcs
)
5520 static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
5522 struct l2cap_ctrl control
;
5524 BT_DBG("chan %p", chan
);
5526 memset(&control
, 0, sizeof(control
));
5529 control
.reqseq
= chan
->buffer_seq
;
5530 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5532 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5533 control
.super
= L2CAP_SUPER_RNR
;
5534 l2cap_send_sframe(chan
, &control
);
5537 if (test_and_clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
5538 chan
->unacked_frames
> 0)
5539 __set_retrans_timer(chan
);
5541 /* Send pending iframes */
5542 l2cap_ertm_send(chan
);
5544 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
5545 test_bit(CONN_SEND_FBIT
, &chan
->conn_state
)) {
5546 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5549 control
.super
= L2CAP_SUPER_RR
;
5550 l2cap_send_sframe(chan
, &control
);
5554 static void append_skb_frag(struct sk_buff
*skb
, struct sk_buff
*new_frag
,
5555 struct sk_buff
**last_frag
)
5557 /* skb->len reflects data in skb as well as all fragments
5558 * skb->data_len reflects only data in fragments
5560 if (!skb_has_frag_list(skb
))
5561 skb_shinfo(skb
)->frag_list
= new_frag
;
5563 new_frag
->next
= NULL
;
5565 (*last_frag
)->next
= new_frag
;
5566 *last_frag
= new_frag
;
5568 skb
->len
+= new_frag
->len
;
5569 skb
->data_len
+= new_frag
->len
;
5570 skb
->truesize
+= new_frag
->truesize
;
5573 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
,
5574 struct l2cap_ctrl
*control
)
5578 switch (control
->sar
) {
5579 case L2CAP_SAR_UNSEGMENTED
:
5583 err
= chan
->ops
->recv(chan
, skb
);
5586 case L2CAP_SAR_START
:
5590 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
5591 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
5593 if (chan
->sdu_len
> chan
->imtu
) {
5598 if (skb
->len
>= chan
->sdu_len
)
5602 chan
->sdu_last_frag
= skb
;
5608 case L2CAP_SAR_CONTINUE
:
5612 append_skb_frag(chan
->sdu
, skb
,
5613 &chan
->sdu_last_frag
);
5616 if (chan
->sdu
->len
>= chan
->sdu_len
)
5626 append_skb_frag(chan
->sdu
, skb
,
5627 &chan
->sdu_last_frag
);
5630 if (chan
->sdu
->len
!= chan
->sdu_len
)
5633 err
= chan
->ops
->recv(chan
, chan
->sdu
);
5636 /* Reassembly complete */
5638 chan
->sdu_last_frag
= NULL
;
5646 kfree_skb(chan
->sdu
);
5648 chan
->sdu_last_frag
= NULL
;
5655 static int l2cap_resegment(struct l2cap_chan
*chan
)
5661 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
5665 if (chan
->mode
!= L2CAP_MODE_ERTM
)
5668 event
= busy
? L2CAP_EV_LOCAL_BUSY_DETECTED
: L2CAP_EV_LOCAL_BUSY_CLEAR
;
5669 l2cap_tx(chan
, NULL
, NULL
, event
);
5672 static int l2cap_rx_queued_iframes(struct l2cap_chan
*chan
)
5675 /* Pass sequential frames to l2cap_reassemble_sdu()
5676 * until a gap is encountered.
5679 BT_DBG("chan %p", chan
);
5681 while (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5682 struct sk_buff
*skb
;
5683 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5684 chan
->buffer_seq
, skb_queue_len(&chan
->srej_q
));
5686 skb
= l2cap_ertm_seq_in_queue(&chan
->srej_q
, chan
->buffer_seq
);
5691 skb_unlink(skb
, &chan
->srej_q
);
5692 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
5693 err
= l2cap_reassemble_sdu(chan
, skb
, &bt_cb(skb
)->control
);
5698 if (skb_queue_empty(&chan
->srej_q
)) {
5699 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
5700 l2cap_send_ack(chan
);
5706 static void l2cap_handle_srej(struct l2cap_chan
*chan
,
5707 struct l2cap_ctrl
*control
)
5709 struct sk_buff
*skb
;
5711 BT_DBG("chan %p, control %p", chan
, control
);
5713 if (control
->reqseq
== chan
->next_tx_seq
) {
5714 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5715 l2cap_send_disconn_req(chan
, ECONNRESET
);
5719 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5722 BT_DBG("Seq %d not available for retransmission",
5727 if (chan
->max_tx
!= 0 && bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5728 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5729 l2cap_send_disconn_req(chan
, ECONNRESET
);
5733 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5735 if (control
->poll
) {
5736 l2cap_pass_to_tx(chan
, control
);
5738 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5739 l2cap_retransmit(chan
, control
);
5740 l2cap_ertm_send(chan
);
5742 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5743 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5744 chan
->srej_save_reqseq
= control
->reqseq
;
5747 l2cap_pass_to_tx_fbit(chan
, control
);
5749 if (control
->final
) {
5750 if (chan
->srej_save_reqseq
!= control
->reqseq
||
5751 !test_and_clear_bit(CONN_SREJ_ACT
,
5753 l2cap_retransmit(chan
, control
);
5755 l2cap_retransmit(chan
, control
);
5756 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
) {
5757 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5758 chan
->srej_save_reqseq
= control
->reqseq
;
5764 static void l2cap_handle_rej(struct l2cap_chan
*chan
,
5765 struct l2cap_ctrl
*control
)
5767 struct sk_buff
*skb
;
5769 BT_DBG("chan %p, control %p", chan
, control
);
5771 if (control
->reqseq
== chan
->next_tx_seq
) {
5772 BT_DBG("Invalid reqseq %d, disconnecting", control
->reqseq
);
5773 l2cap_send_disconn_req(chan
, ECONNRESET
);
5777 skb
= l2cap_ertm_seq_in_queue(&chan
->tx_q
, control
->reqseq
);
5779 if (chan
->max_tx
&& skb
&&
5780 bt_cb(skb
)->control
.retries
>= chan
->max_tx
) {
5781 BT_DBG("Retry limit exceeded (%d)", chan
->max_tx
);
5782 l2cap_send_disconn_req(chan
, ECONNRESET
);
5786 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5788 l2cap_pass_to_tx(chan
, control
);
5790 if (control
->final
) {
5791 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
5792 l2cap_retransmit_all(chan
, control
);
5794 l2cap_retransmit_all(chan
, control
);
5795 l2cap_ertm_send(chan
);
5796 if (chan
->tx_state
== L2CAP_TX_STATE_WAIT_F
)
5797 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
5801 static u8
l2cap_classify_txseq(struct l2cap_chan
*chan
, u16 txseq
)
5803 BT_DBG("chan %p, txseq %d", chan
, txseq
);
5805 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan
->last_acked_seq
,
5806 chan
->expected_tx_seq
);
5808 if (chan
->rx_state
== L2CAP_RX_STATE_SREJ_SENT
) {
5809 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
5811 /* See notes below regarding "double poll" and
5814 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
5815 BT_DBG("Invalid/Ignore - after SREJ");
5816 return L2CAP_TXSEQ_INVALID_IGNORE
;
5818 BT_DBG("Invalid - in window after SREJ sent");
5819 return L2CAP_TXSEQ_INVALID
;
5823 if (chan
->srej_list
.head
== txseq
) {
5824 BT_DBG("Expected SREJ");
5825 return L2CAP_TXSEQ_EXPECTED_SREJ
;
5828 if (l2cap_ertm_seq_in_queue(&chan
->srej_q
, txseq
)) {
5829 BT_DBG("Duplicate SREJ - txseq already stored");
5830 return L2CAP_TXSEQ_DUPLICATE_SREJ
;
5833 if (l2cap_seq_list_contains(&chan
->srej_list
, txseq
)) {
5834 BT_DBG("Unexpected SREJ - not requested");
5835 return L2CAP_TXSEQ_UNEXPECTED_SREJ
;
5839 if (chan
->expected_tx_seq
== txseq
) {
5840 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >=
5842 BT_DBG("Invalid - txseq outside tx window");
5843 return L2CAP_TXSEQ_INVALID
;
5846 return L2CAP_TXSEQ_EXPECTED
;
5850 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) <
5851 __seq_offset(chan
, chan
->expected_tx_seq
, chan
->last_acked_seq
)) {
5852 BT_DBG("Duplicate - expected_tx_seq later than txseq");
5853 return L2CAP_TXSEQ_DUPLICATE
;
5856 if (__seq_offset(chan
, txseq
, chan
->last_acked_seq
) >= chan
->tx_win
) {
5857 /* A source of invalid packets is a "double poll" condition,
5858 * where delays cause us to send multiple poll packets. If
5859 * the remote stack receives and processes both polls,
5860 * sequence numbers can wrap around in such a way that a
5861 * resent frame has a sequence number that looks like new data
5862 * with a sequence gap. This would trigger an erroneous SREJ
5865 * Fortunately, this is impossible with a tx window that's
5866 * less than half of the maximum sequence number, which allows
5867 * invalid frames to be safely ignored.
5869 * With tx window sizes greater than half of the tx window
5870 * maximum, the frame is invalid and cannot be ignored. This
5871 * causes a disconnect.
5874 if (chan
->tx_win
<= ((chan
->tx_win_max
+ 1) >> 1)) {
5875 BT_DBG("Invalid/Ignore - txseq outside tx window");
5876 return L2CAP_TXSEQ_INVALID_IGNORE
;
5878 BT_DBG("Invalid - txseq outside tx window");
5879 return L2CAP_TXSEQ_INVALID
;
5882 BT_DBG("Unexpected - txseq indicates missing frames");
5883 return L2CAP_TXSEQ_UNEXPECTED
;
5887 static int l2cap_rx_state_recv(struct l2cap_chan
*chan
,
5888 struct l2cap_ctrl
*control
,
5889 struct sk_buff
*skb
, u8 event
)
5892 bool skb_in_use
= false;
5894 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
5898 case L2CAP_EV_RECV_IFRAME
:
5899 switch (l2cap_classify_txseq(chan
, control
->txseq
)) {
5900 case L2CAP_TXSEQ_EXPECTED
:
5901 l2cap_pass_to_tx(chan
, control
);
5903 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5904 BT_DBG("Busy, discarding expected seq %d",
5909 chan
->expected_tx_seq
= __next_seq(chan
,
5912 chan
->buffer_seq
= chan
->expected_tx_seq
;
5915 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
5919 if (control
->final
) {
5920 if (!test_and_clear_bit(CONN_REJ_ACT
,
5921 &chan
->conn_state
)) {
5923 l2cap_retransmit_all(chan
, control
);
5924 l2cap_ertm_send(chan
);
5928 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
))
5929 l2cap_send_ack(chan
);
5931 case L2CAP_TXSEQ_UNEXPECTED
:
5932 l2cap_pass_to_tx(chan
, control
);
5934 /* Can't issue SREJ frames in the local busy state.
5935 * Drop this frame, it will be seen as missing
5936 * when local busy is exited.
5938 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
5939 BT_DBG("Busy, discarding unexpected seq %d",
5944 /* There was a gap in the sequence, so an SREJ
5945 * must be sent for each missing frame. The
5946 * current frame is stored for later use.
5948 skb_queue_tail(&chan
->srej_q
, skb
);
5950 BT_DBG("Queued %p (queue len %d)", skb
,
5951 skb_queue_len(&chan
->srej_q
));
5953 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
5954 l2cap_seq_list_clear(&chan
->srej_list
);
5955 l2cap_send_srej(chan
, control
->txseq
);
5957 chan
->rx_state
= L2CAP_RX_STATE_SREJ_SENT
;
5959 case L2CAP_TXSEQ_DUPLICATE
:
5960 l2cap_pass_to_tx(chan
, control
);
5962 case L2CAP_TXSEQ_INVALID_IGNORE
:
5964 case L2CAP_TXSEQ_INVALID
:
5966 l2cap_send_disconn_req(chan
, ECONNRESET
);
5970 case L2CAP_EV_RECV_RR
:
5971 l2cap_pass_to_tx(chan
, control
);
5972 if (control
->final
) {
5973 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5975 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
) &&
5976 !__chan_is_moving(chan
)) {
5978 l2cap_retransmit_all(chan
, control
);
5981 l2cap_ertm_send(chan
);
5982 } else if (control
->poll
) {
5983 l2cap_send_i_or_rr_or_rnr(chan
);
5985 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
5986 &chan
->conn_state
) &&
5987 chan
->unacked_frames
)
5988 __set_retrans_timer(chan
);
5990 l2cap_ertm_send(chan
);
5993 case L2CAP_EV_RECV_RNR
:
5994 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
5995 l2cap_pass_to_tx(chan
, control
);
5996 if (control
&& control
->poll
) {
5997 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
5998 l2cap_send_rr_or_rnr(chan
, 0);
6000 __clear_retrans_timer(chan
);
6001 l2cap_seq_list_clear(&chan
->retrans_list
);
6003 case L2CAP_EV_RECV_REJ
:
6004 l2cap_handle_rej(chan
, control
);
6006 case L2CAP_EV_RECV_SREJ
:
6007 l2cap_handle_srej(chan
, control
);
6013 if (skb
&& !skb_in_use
) {
6014 BT_DBG("Freeing %p", skb
);
6021 static int l2cap_rx_state_srej_sent(struct l2cap_chan
*chan
,
6022 struct l2cap_ctrl
*control
,
6023 struct sk_buff
*skb
, u8 event
)
6026 u16 txseq
= control
->txseq
;
6027 bool skb_in_use
= false;
6029 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6033 case L2CAP_EV_RECV_IFRAME
:
6034 switch (l2cap_classify_txseq(chan
, txseq
)) {
6035 case L2CAP_TXSEQ_EXPECTED
:
6036 /* Keep frame for reassembly later */
6037 l2cap_pass_to_tx(chan
, control
);
6038 skb_queue_tail(&chan
->srej_q
, skb
);
6040 BT_DBG("Queued %p (queue len %d)", skb
,
6041 skb_queue_len(&chan
->srej_q
));
6043 chan
->expected_tx_seq
= __next_seq(chan
, txseq
);
6045 case L2CAP_TXSEQ_EXPECTED_SREJ
:
6046 l2cap_seq_list_pop(&chan
->srej_list
);
6048 l2cap_pass_to_tx(chan
, control
);
6049 skb_queue_tail(&chan
->srej_q
, skb
);
6051 BT_DBG("Queued %p (queue len %d)", skb
,
6052 skb_queue_len(&chan
->srej_q
));
6054 err
= l2cap_rx_queued_iframes(chan
);
6059 case L2CAP_TXSEQ_UNEXPECTED
:
6060 /* Got a frame that can't be reassembled yet.
6061 * Save it for later, and send SREJs to cover
6062 * the missing frames.
6064 skb_queue_tail(&chan
->srej_q
, skb
);
6066 BT_DBG("Queued %p (queue len %d)", skb
,
6067 skb_queue_len(&chan
->srej_q
));
6069 l2cap_pass_to_tx(chan
, control
);
6070 l2cap_send_srej(chan
, control
->txseq
);
6072 case L2CAP_TXSEQ_UNEXPECTED_SREJ
:
6073 /* This frame was requested with an SREJ, but
6074 * some expected retransmitted frames are
6075 * missing. Request retransmission of missing
6078 skb_queue_tail(&chan
->srej_q
, skb
);
6080 BT_DBG("Queued %p (queue len %d)", skb
,
6081 skb_queue_len(&chan
->srej_q
));
6083 l2cap_pass_to_tx(chan
, control
);
6084 l2cap_send_srej_list(chan
, control
->txseq
);
6086 case L2CAP_TXSEQ_DUPLICATE_SREJ
:
6087 /* We've already queued this frame. Drop this copy. */
6088 l2cap_pass_to_tx(chan
, control
);
6090 case L2CAP_TXSEQ_DUPLICATE
:
6091 /* Expecting a later sequence number, so this frame
6092 * was already received. Ignore it completely.
6095 case L2CAP_TXSEQ_INVALID_IGNORE
:
6097 case L2CAP_TXSEQ_INVALID
:
6099 l2cap_send_disconn_req(chan
, ECONNRESET
);
6103 case L2CAP_EV_RECV_RR
:
6104 l2cap_pass_to_tx(chan
, control
);
6105 if (control
->final
) {
6106 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6108 if (!test_and_clear_bit(CONN_REJ_ACT
,
6109 &chan
->conn_state
)) {
6111 l2cap_retransmit_all(chan
, control
);
6114 l2cap_ertm_send(chan
);
6115 } else if (control
->poll
) {
6116 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6117 &chan
->conn_state
) &&
6118 chan
->unacked_frames
) {
6119 __set_retrans_timer(chan
);
6122 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6123 l2cap_send_srej_tail(chan
);
6125 if (test_and_clear_bit(CONN_REMOTE_BUSY
,
6126 &chan
->conn_state
) &&
6127 chan
->unacked_frames
)
6128 __set_retrans_timer(chan
);
6130 l2cap_send_ack(chan
);
6133 case L2CAP_EV_RECV_RNR
:
6134 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6135 l2cap_pass_to_tx(chan
, control
);
6136 if (control
->poll
) {
6137 l2cap_send_srej_tail(chan
);
6139 struct l2cap_ctrl rr_control
;
6140 memset(&rr_control
, 0, sizeof(rr_control
));
6141 rr_control
.sframe
= 1;
6142 rr_control
.super
= L2CAP_SUPER_RR
;
6143 rr_control
.reqseq
= chan
->buffer_seq
;
6144 l2cap_send_sframe(chan
, &rr_control
);
6148 case L2CAP_EV_RECV_REJ
:
6149 l2cap_handle_rej(chan
, control
);
6151 case L2CAP_EV_RECV_SREJ
:
6152 l2cap_handle_srej(chan
, control
);
6156 if (skb
&& !skb_in_use
) {
6157 BT_DBG("Freeing %p", skb
);
6164 static int l2cap_finish_move(struct l2cap_chan
*chan
)
6166 BT_DBG("chan %p", chan
);
6168 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6171 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6173 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6175 return l2cap_resegment(chan
);
6178 static int l2cap_rx_state_wait_p(struct l2cap_chan
*chan
,
6179 struct l2cap_ctrl
*control
,
6180 struct sk_buff
*skb
, u8 event
)
6184 BT_DBG("chan %p, control %p, skb %p, event %d", chan
, control
, skb
,
6190 l2cap_process_reqseq(chan
, control
->reqseq
);
6192 if (!skb_queue_empty(&chan
->tx_q
))
6193 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6195 chan
->tx_send_head
= NULL
;
6197 /* Rewind next_tx_seq to the point expected
6200 chan
->next_tx_seq
= control
->reqseq
;
6201 chan
->unacked_frames
= 0;
6203 err
= l2cap_finish_move(chan
);
6207 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
6208 l2cap_send_i_or_rr_or_rnr(chan
);
6210 if (event
== L2CAP_EV_RECV_IFRAME
)
6213 return l2cap_rx_state_recv(chan
, control
, NULL
, event
);
6216 static int l2cap_rx_state_wait_f(struct l2cap_chan
*chan
,
6217 struct l2cap_ctrl
*control
,
6218 struct sk_buff
*skb
, u8 event
)
6222 if (!control
->final
)
6225 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
6227 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
6228 l2cap_process_reqseq(chan
, control
->reqseq
);
6230 if (!skb_queue_empty(&chan
->tx_q
))
6231 chan
->tx_send_head
= skb_peek(&chan
->tx_q
);
6233 chan
->tx_send_head
= NULL
;
6235 /* Rewind next_tx_seq to the point expected
6238 chan
->next_tx_seq
= control
->reqseq
;
6239 chan
->unacked_frames
= 0;
6242 chan
->conn
->mtu
= chan
->hs_hcon
->hdev
->block_mtu
;
6244 chan
->conn
->mtu
= chan
->conn
->hcon
->hdev
->acl_mtu
;
6246 err
= l2cap_resegment(chan
);
6249 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6254 static bool __valid_reqseq(struct l2cap_chan
*chan
, u16 reqseq
)
6256 /* Make sure reqseq is for a packet that has been sent but not acked */
6259 unacked
= __seq_offset(chan
, chan
->next_tx_seq
, chan
->expected_ack_seq
);
6260 return __seq_offset(chan
, chan
->next_tx_seq
, reqseq
) <= unacked
;
6263 static int l2cap_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6264 struct sk_buff
*skb
, u8 event
)
6268 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan
,
6269 control
, skb
, event
, chan
->rx_state
);
6271 if (__valid_reqseq(chan
, control
->reqseq
)) {
6272 switch (chan
->rx_state
) {
6273 case L2CAP_RX_STATE_RECV
:
6274 err
= l2cap_rx_state_recv(chan
, control
, skb
, event
);
6276 case L2CAP_RX_STATE_SREJ_SENT
:
6277 err
= l2cap_rx_state_srej_sent(chan
, control
, skb
,
6280 case L2CAP_RX_STATE_WAIT_P
:
6281 err
= l2cap_rx_state_wait_p(chan
, control
, skb
, event
);
6283 case L2CAP_RX_STATE_WAIT_F
:
6284 err
= l2cap_rx_state_wait_f(chan
, control
, skb
, event
);
6291 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6292 control
->reqseq
, chan
->next_tx_seq
,
6293 chan
->expected_ack_seq
);
6294 l2cap_send_disconn_req(chan
, ECONNRESET
);
6300 static int l2cap_stream_rx(struct l2cap_chan
*chan
, struct l2cap_ctrl
*control
,
6301 struct sk_buff
*skb
)
6305 BT_DBG("chan %p, control %p, skb %p, state %d", chan
, control
, skb
,
6308 if (l2cap_classify_txseq(chan
, control
->txseq
) ==
6309 L2CAP_TXSEQ_EXPECTED
) {
6310 l2cap_pass_to_tx(chan
, control
);
6312 BT_DBG("buffer_seq %d->%d", chan
->buffer_seq
,
6313 __next_seq(chan
, chan
->buffer_seq
));
6315 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
6317 l2cap_reassemble_sdu(chan
, skb
, control
);
6320 kfree_skb(chan
->sdu
);
6323 chan
->sdu_last_frag
= NULL
;
6327 BT_DBG("Freeing %p", skb
);
6332 chan
->last_acked_seq
= control
->txseq
;
6333 chan
->expected_tx_seq
= __next_seq(chan
, control
->txseq
);
6338 static int l2cap_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
6340 struct l2cap_ctrl
*control
= &bt_cb(skb
)->control
;
6344 __unpack_control(chan
, skb
);
6349 * We can just drop the corrupted I-frame here.
6350 * Receiver will miss it and start proper recovery
6351 * procedures and ask for retransmission.
6353 if (l2cap_check_fcs(chan
, skb
))
6356 if (!control
->sframe
&& control
->sar
== L2CAP_SAR_START
)
6357 len
-= L2CAP_SDULEN_SIZE
;
6359 if (chan
->fcs
== L2CAP_FCS_CRC16
)
6360 len
-= L2CAP_FCS_SIZE
;
6362 if (len
> chan
->mps
) {
6363 l2cap_send_disconn_req(chan
, ECONNRESET
);
6367 if (!control
->sframe
) {
6370 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6371 control
->sar
, control
->reqseq
, control
->final
,
6374 /* Validate F-bit - F=0 always valid, F=1 only
6375 * valid in TX WAIT_F
6377 if (control
->final
&& chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
)
6380 if (chan
->mode
!= L2CAP_MODE_STREAMING
) {
6381 event
= L2CAP_EV_RECV_IFRAME
;
6382 err
= l2cap_rx(chan
, control
, skb
, event
);
6384 err
= l2cap_stream_rx(chan
, control
, skb
);
6388 l2cap_send_disconn_req(chan
, ECONNRESET
);
6390 const u8 rx_func_to_event
[4] = {
6391 L2CAP_EV_RECV_RR
, L2CAP_EV_RECV_REJ
,
6392 L2CAP_EV_RECV_RNR
, L2CAP_EV_RECV_SREJ
6395 /* Only I-frames are expected in streaming mode */
6396 if (chan
->mode
== L2CAP_MODE_STREAMING
)
6399 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6400 control
->reqseq
, control
->final
, control
->poll
,
6404 BT_ERR("Trailing bytes: %d in sframe", len
);
6405 l2cap_send_disconn_req(chan
, ECONNRESET
);
6409 /* Validate F and P bits */
6410 if (control
->final
&& (control
->poll
||
6411 chan
->tx_state
!= L2CAP_TX_STATE_WAIT_F
))
6414 event
= rx_func_to_event
[control
->super
];
6415 if (l2cap_rx(chan
, control
, skb
, event
))
6416 l2cap_send_disconn_req(chan
, ECONNRESET
);
6426 static void l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
,
6427 struct sk_buff
*skb
)
6429 struct l2cap_chan
*chan
;
6431 chan
= l2cap_get_chan_by_scid(conn
, cid
);
6433 if (cid
== L2CAP_CID_A2MP
) {
6434 chan
= a2mp_channel_create(conn
, skb
);
6440 l2cap_chan_lock(chan
);
6442 BT_DBG("unknown cid 0x%4.4x", cid
);
6443 /* Drop packet and return */
6449 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6451 if (chan
->state
!= BT_CONNECTED
)
6454 switch (chan
->mode
) {
6455 case L2CAP_MODE_BASIC
:
6456 /* If socket recv buffers overflows we drop data here
6457 * which is *bad* because L2CAP has to be reliable.
6458 * But we don't have any other choice. L2CAP doesn't
6459 * provide flow control mechanism. */
6461 if (chan
->imtu
< skb
->len
)
6464 if (!chan
->ops
->recv(chan
, skb
))
6468 case L2CAP_MODE_ERTM
:
6469 case L2CAP_MODE_STREAMING
:
6470 l2cap_data_rcv(chan
, skb
);
6474 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
6482 l2cap_chan_unlock(chan
);
6485 static void l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
,
6486 struct sk_buff
*skb
)
6488 struct hci_conn
*hcon
= conn
->hcon
;
6489 struct l2cap_chan
*chan
;
6491 if (hcon
->type
!= ACL_LINK
)
6494 chan
= l2cap_global_chan_by_psm(0, psm
, &hcon
->src
, &hcon
->dst
,
6499 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6501 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
6504 if (chan
->imtu
< skb
->len
)
6507 /* Store remote BD_ADDR and PSM for msg_name */
6508 bacpy(&bt_cb(skb
)->bdaddr
, &hcon
->dst
);
6509 bt_cb(skb
)->psm
= psm
;
6511 if (!chan
->ops
->recv(chan
, skb
))
6518 static void l2cap_att_channel(struct l2cap_conn
*conn
,
6519 struct sk_buff
*skb
)
6521 struct hci_conn
*hcon
= conn
->hcon
;
6522 struct l2cap_chan
*chan
;
6524 if (hcon
->type
!= LE_LINK
)
6527 chan
= l2cap_global_chan_by_scid(BT_CONNECTED
, L2CAP_CID_ATT
,
6528 &hcon
->src
, &hcon
->dst
);
6532 BT_DBG("chan %p, len %d", chan
, skb
->len
);
6534 if (hci_blacklist_lookup(hcon
->hdev
, &hcon
->dst
, hcon
->dst_type
))
6537 if (chan
->imtu
< skb
->len
)
6540 if (!chan
->ops
->recv(chan
, skb
))
6547 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
6549 struct l2cap_hdr
*lh
= (void *) skb
->data
;
6553 skb_pull(skb
, L2CAP_HDR_SIZE
);
6554 cid
= __le16_to_cpu(lh
->cid
);
6555 len
= __le16_to_cpu(lh
->len
);
6557 if (len
!= skb
->len
) {
6562 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
6565 case L2CAP_CID_SIGNALING
:
6566 l2cap_sig_channel(conn
, skb
);
6569 case L2CAP_CID_CONN_LESS
:
6570 psm
= get_unaligned((__le16
*) skb
->data
);
6571 skb_pull(skb
, L2CAP_PSMLEN_SIZE
);
6572 l2cap_conless_channel(conn
, psm
, skb
);
6576 l2cap_att_channel(conn
, skb
);
6579 case L2CAP_CID_LE_SIGNALING
:
6580 l2cap_le_sig_channel(conn
, skb
);
6584 if (smp_sig_channel(conn
, skb
))
6585 l2cap_conn_del(conn
->hcon
, EACCES
);
6589 l2cap_data_channel(conn
, cid
, skb
);
6594 /* ---- L2CAP interface with lower layer (HCI) ---- */
6596 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
6598 int exact
= 0, lm1
= 0, lm2
= 0;
6599 struct l2cap_chan
*c
;
6601 BT_DBG("hdev %s, bdaddr %pMR", hdev
->name
, bdaddr
);
6603 /* Find listening sockets and check their link_mode */
6604 read_lock(&chan_list_lock
);
6605 list_for_each_entry(c
, &chan_list
, global_l
) {
6606 if (c
->state
!= BT_LISTEN
)
6609 if (!bacmp(&c
->src
, &hdev
->bdaddr
)) {
6610 lm1
|= HCI_LM_ACCEPT
;
6611 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
6612 lm1
|= HCI_LM_MASTER
;
6614 } else if (!bacmp(&c
->src
, BDADDR_ANY
)) {
6615 lm2
|= HCI_LM_ACCEPT
;
6616 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
6617 lm2
|= HCI_LM_MASTER
;
6620 read_unlock(&chan_list_lock
);
6622 return exact
? lm1
: lm2
;
6625 void l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
6627 struct l2cap_conn
*conn
;
6629 BT_DBG("hcon %p bdaddr %pMR status %d", hcon
, &hcon
->dst
, status
);
6632 conn
= l2cap_conn_add(hcon
);
6634 l2cap_conn_ready(conn
);
6636 l2cap_conn_del(hcon
, bt_to_errno(status
));
6640 int l2cap_disconn_ind(struct hci_conn
*hcon
)
6642 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6644 BT_DBG("hcon %p", hcon
);
6647 return HCI_ERROR_REMOTE_USER_TERM
;
6648 return conn
->disc_reason
;
6651 void l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
6653 BT_DBG("hcon %p reason %d", hcon
, reason
);
6655 l2cap_conn_del(hcon
, bt_to_errno(reason
));
6658 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
6660 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
6663 if (encrypt
== 0x00) {
6664 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
6665 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
6666 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
6667 l2cap_chan_close(chan
, ECONNREFUSED
);
6669 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
6670 __clear_chan_timer(chan
);
6674 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
6676 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6677 struct l2cap_chan
*chan
;
6682 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn
, status
, encrypt
);
6684 if (hcon
->type
== LE_LINK
) {
6685 if (!status
&& encrypt
)
6686 smp_distribute_keys(conn
, 0);
6687 cancel_delayed_work(&conn
->security_timer
);
6690 mutex_lock(&conn
->chan_lock
);
6692 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
6693 l2cap_chan_lock(chan
);
6695 BT_DBG("chan %p scid 0x%4.4x state %s", chan
, chan
->scid
,
6696 state_to_string(chan
->state
));
6698 if (chan
->chan_type
== L2CAP_CHAN_CONN_FIX_A2MP
) {
6699 l2cap_chan_unlock(chan
);
6703 if (chan
->scid
== L2CAP_CID_ATT
) {
6704 if (!status
&& encrypt
) {
6705 chan
->sec_level
= hcon
->sec_level
;
6706 l2cap_chan_ready(chan
);
6709 l2cap_chan_unlock(chan
);
6713 if (!__l2cap_no_conn_pending(chan
)) {
6714 l2cap_chan_unlock(chan
);
6718 if (!status
&& (chan
->state
== BT_CONNECTED
||
6719 chan
->state
== BT_CONFIG
)) {
6720 chan
->ops
->resume(chan
);
6721 l2cap_check_encryption(chan
, encrypt
);
6722 l2cap_chan_unlock(chan
);
6726 if (chan
->state
== BT_CONNECT
) {
6728 l2cap_start_connection(chan
);
6730 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
6731 } else if (chan
->state
== BT_CONNECT2
) {
6732 struct l2cap_conn_rsp rsp
;
6736 if (test_bit(FLAG_DEFER_SETUP
, &chan
->flags
)) {
6737 res
= L2CAP_CR_PEND
;
6738 stat
= L2CAP_CS_AUTHOR_PEND
;
6739 chan
->ops
->defer(chan
);
6741 l2cap_state_change(chan
, BT_CONFIG
);
6742 res
= L2CAP_CR_SUCCESS
;
6743 stat
= L2CAP_CS_NO_INFO
;
6746 l2cap_state_change(chan
, BT_DISCONN
);
6747 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
6748 res
= L2CAP_CR_SEC_BLOCK
;
6749 stat
= L2CAP_CS_NO_INFO
;
6752 rsp
.scid
= cpu_to_le16(chan
->dcid
);
6753 rsp
.dcid
= cpu_to_le16(chan
->scid
);
6754 rsp
.result
= cpu_to_le16(res
);
6755 rsp
.status
= cpu_to_le16(stat
);
6756 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
6759 if (!test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
6760 res
== L2CAP_CR_SUCCESS
) {
6762 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
6763 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
6765 l2cap_build_conf_req(chan
, buf
),
6767 chan
->num_conf_req
++;
6771 l2cap_chan_unlock(chan
);
6774 mutex_unlock(&conn
->chan_lock
);
6779 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
6781 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
6782 struct l2cap_hdr
*hdr
;
6785 /* For AMP controller do not create l2cap conn */
6786 if (!conn
&& hcon
->hdev
->dev_type
!= HCI_BREDR
)
6790 conn
= l2cap_conn_add(hcon
);
6795 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
6799 case ACL_START_NO_FLUSH
:
6802 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
6803 kfree_skb(conn
->rx_skb
);
6804 conn
->rx_skb
= NULL
;
6806 l2cap_conn_unreliable(conn
, ECOMM
);
6809 /* Start fragment always begin with Basic L2CAP header */
6810 if (skb
->len
< L2CAP_HDR_SIZE
) {
6811 BT_ERR("Frame is too short (len %d)", skb
->len
);
6812 l2cap_conn_unreliable(conn
, ECOMM
);
6816 hdr
= (struct l2cap_hdr
*) skb
->data
;
6817 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
6819 if (len
== skb
->len
) {
6820 /* Complete frame received */
6821 l2cap_recv_frame(conn
, skb
);
6825 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
6827 if (skb
->len
> len
) {
6828 BT_ERR("Frame is too long (len %d, expected len %d)",
6830 l2cap_conn_unreliable(conn
, ECOMM
);
6834 /* Allocate skb for the complete frame (with header) */
6835 conn
->rx_skb
= bt_skb_alloc(len
, GFP_KERNEL
);
6839 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
6841 conn
->rx_len
= len
- skb
->len
;
6845 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
6847 if (!conn
->rx_len
) {
6848 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
6849 l2cap_conn_unreliable(conn
, ECOMM
);
6853 if (skb
->len
> conn
->rx_len
) {
6854 BT_ERR("Fragment is too long (len %d, expected %d)",
6855 skb
->len
, conn
->rx_len
);
6856 kfree_skb(conn
->rx_skb
);
6857 conn
->rx_skb
= NULL
;
6859 l2cap_conn_unreliable(conn
, ECOMM
);
6863 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
6865 conn
->rx_len
-= skb
->len
;
6867 if (!conn
->rx_len
) {
6868 /* Complete frame received. l2cap_recv_frame
6869 * takes ownership of the skb so set the global
6870 * rx_skb pointer to NULL first.
6872 struct sk_buff
*rx_skb
= conn
->rx_skb
;
6873 conn
->rx_skb
= NULL
;
6874 l2cap_recv_frame(conn
, rx_skb
);
6884 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
6886 struct l2cap_chan
*c
;
6888 read_lock(&chan_list_lock
);
6890 list_for_each_entry(c
, &chan_list
, global_l
) {
6891 seq_printf(f
, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
6893 c
->state
, __le16_to_cpu(c
->psm
),
6894 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
6895 c
->sec_level
, c
->mode
);
6898 read_unlock(&chan_list_lock
);
6903 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
6905 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
6908 static const struct file_operations l2cap_debugfs_fops
= {
6909 .open
= l2cap_debugfs_open
,
6911 .llseek
= seq_lseek
,
6912 .release
= single_release
,
6915 static struct dentry
*l2cap_debugfs
;
6917 int __init
l2cap_init(void)
6921 err
= l2cap_init_sockets();
6925 if (IS_ERR_OR_NULL(bt_debugfs
))
6928 l2cap_debugfs
= debugfs_create_file("l2cap", 0444, bt_debugfs
,
6929 NULL
, &l2cap_debugfs_fops
);
6934 void l2cap_exit(void)
6936 debugfs_remove(l2cap_debugfs
);
6937 l2cap_cleanup_sockets();
6940 module_param(disable_ertm
, bool, 0644);
6941 MODULE_PARM_DESC(disable_ertm
, "Disable enhanced retransmission mode");