2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
19 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
22 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
24 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
26 SOFTWARE IS DISCLAIMED.
29 /* Bluetooth L2CAP core. */
31 #include <linux/module.h>
33 #include <linux/types.h>
34 #include <linux/capability.h>
35 #include <linux/errno.h>
36 #include <linux/kernel.h>
37 #include <linux/sched.h>
38 #include <linux/slab.h>
39 #include <linux/poll.h>
40 #include <linux/fcntl.h>
41 #include <linux/init.h>
42 #include <linux/interrupt.h>
43 #include <linux/socket.h>
44 #include <linux/skbuff.h>
45 #include <linux/list.h>
46 #include <linux/device.h>
47 #include <linux/debugfs.h>
48 #include <linux/seq_file.h>
49 #include <linux/uaccess.h>
50 #include <linux/crc16.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
63 static u8 l2cap_fixed_chan
[8] = { L2CAP_FC_L2CAP
, };
/* Global list of every registered L2CAP channel. */
static LIST_HEAD(chan_list);
/* Protects chan_list. */
static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
69 u8 code
, u8 ident
, u16 dlen
, void *data
);
70 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
,
/* Forward declaration: build a configuration request into data. */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
/* Forward declaration: send a disconnection request for chan. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn,
					struct l2cap_chan *chan, int err);
76 /* ---- L2CAP channels ---- */
78 static struct l2cap_chan
*__l2cap_get_chan_by_dcid(struct l2cap_conn
*conn
, u16 cid
)
82 list_for_each_entry(c
, &conn
->chan_l
, list
) {
89 static struct l2cap_chan
*__l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
93 list_for_each_entry(c
, &conn
->chan_l
, list
) {
100 /* Find channel with given SCID.
101 * Returns locked socket */
102 static struct l2cap_chan
*l2cap_get_chan_by_scid(struct l2cap_conn
*conn
, u16 cid
)
104 struct l2cap_chan
*c
;
106 mutex_lock(&conn
->chan_lock
);
107 c
= __l2cap_get_chan_by_scid(conn
, cid
);
108 mutex_unlock(&conn
->chan_lock
);
113 static struct l2cap_chan
*__l2cap_get_chan_by_ident(struct l2cap_conn
*conn
, u8 ident
)
115 struct l2cap_chan
*c
;
117 list_for_each_entry(c
, &conn
->chan_l
, list
) {
118 if (c
->ident
== ident
)
124 static struct l2cap_chan
*__l2cap_global_chan_by_addr(__le16 psm
, bdaddr_t
*src
)
126 struct l2cap_chan
*c
;
128 list_for_each_entry(c
, &chan_list
, global_l
) {
129 if (c
->sport
== psm
&& !bacmp(&bt_sk(c
->sk
)->src
, src
))
135 int l2cap_add_psm(struct l2cap_chan
*chan
, bdaddr_t
*src
, __le16 psm
)
139 write_lock(&chan_list_lock
);
141 if (psm
&& __l2cap_global_chan_by_addr(psm
, src
)) {
154 for (p
= 0x1001; p
< 0x1100; p
+= 2)
155 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p
), src
)) {
156 chan
->psm
= cpu_to_le16(p
);
157 chan
->sport
= cpu_to_le16(p
);
164 write_unlock(&chan_list_lock
);
/* Register a fixed source CID on chan, serialized by the global channel-list
 * write lock.
 * NOTE(review): interior lines of this fragment are missing from this
 * extraction — the assignment(s) between the lock/unlock pair and the return
 * value are not visible; confirm against the full source. */
168 int l2cap_add_scid(struct l2cap_chan
*chan
, __u16 scid
)
170 write_lock(&chan_list_lock
);
174 write_unlock(&chan_list_lock
);
179 static u16
l2cap_alloc_cid(struct l2cap_conn
*conn
)
181 u16 cid
= L2CAP_CID_DYN_START
;
183 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
184 if (!__l2cap_get_chan_by_scid(conn
, cid
))
/* Debug-log the channel state transition and notify the channel owner via the
 * ops->state_change callback.  Lock-free variant: caller holds the needed lock.
 * NOTE(review): the statement that stores the new state into chan->state is
 * not visible in this extraction (fused line 196 is missing) — confirm
 * against the full source before relying on this fragment. */
191 static void __l2cap_state_change(struct l2cap_chan
*chan
, int state
)
193 BT_DBG("chan %p %s -> %s", chan
, state_to_string(chan
->state
),
194 state_to_string(state
));
197 chan
->ops
->state_change(chan
->data
, state
);
/* Wrapper around __l2cap_state_change() that takes the channel's socket.
 * NOTE(review): the lines between the sk assignment and the call (fused lines
 * 203-204) and after it (line 206) are missing from this extraction —
 * presumably socket lock/unlock; verify against the full source. */
200 static void l2cap_state_change(struct l2cap_chan
*chan
, int state
)
202 struct sock
*sk
= chan
->sk
;
205 __l2cap_state_change(chan
, state
);
/* Record an error on the channel's socket (unlocked variant).
 * NOTE(review): the statement that actually assigns err to the socket (fused
 * line 213) is not visible in this extraction — confirm against the full
 * source. */
209 static inline void __l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
211 struct sock
*sk
= chan
->sk
;
/* Wrapper around __l2cap_chan_set_err() that takes the channel's socket.
 * NOTE(review): lines around the call (fused 220 and 222) are missing from
 * this extraction — presumably socket lock/unlock; verify against the full
 * source. */
216 static inline void l2cap_chan_set_err(struct l2cap_chan
*chan
, int err
)
218 struct sock
*sk
= chan
->sk
;
221 __l2cap_chan_set_err(chan
, err
);
225 /* ---- L2CAP sequence number lists ---- */
227 /* For ERTM, ordered lists of sequence numbers must be tracked for
228 * SREJ requests that are received and for frames that are to be
229 * retransmitted. These seq_list functions implement a singly-linked
230 * list in an array, where membership in the list can also be checked
231 * in constant time. Items can also be added to the tail of the list
232 * and removed from the head in constant time, without further memory
236 static int l2cap_seq_list_init(struct l2cap_seq_list
*seq_list
, u16 size
)
238 size_t alloc_size
, i
;
240 /* Allocated size is a power of 2 to map sequence numbers
241 * (which may be up to 14 bits) in to a smaller array that is
242 * sized for the negotiated ERTM transmit windows.
244 alloc_size
= roundup_pow_of_two(size
);
246 seq_list
->list
= kmalloc(sizeof(u16
) * alloc_size
, GFP_KERNEL
);
250 seq_list
->mask
= alloc_size
- 1;
251 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
252 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
253 for (i
= 0; i
< alloc_size
; i
++)
254 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
259 static inline void l2cap_seq_list_free(struct l2cap_seq_list
*seq_list
)
261 kfree(seq_list
->list
);
/* Constant-time membership test: a sequence number is on the list iff its
 * slot in the backing array is not L2CAP_SEQ_LIST_CLEAR.
 * NOTE(review): the continuation line carrying the second parameter
 * (presumably u16 seq, given the body) is missing from this extraction. */
264 static inline bool l2cap_seq_list_contains(struct l2cap_seq_list
*seq_list
,
267 /* Constant-time check for list membership */
268 return seq_list
->list
[seq
& seq_list
->mask
] != L2CAP_SEQ_LIST_CLEAR
;
271 static u16
l2cap_seq_list_remove(struct l2cap_seq_list
*seq_list
, u16 seq
)
273 u16 mask
= seq_list
->mask
;
275 if (seq_list
->head
== L2CAP_SEQ_LIST_CLEAR
) {
276 /* In case someone tries to pop the head of an empty list */
277 return L2CAP_SEQ_LIST_CLEAR
;
278 } else if (seq_list
->head
== seq
) {
279 /* Head can be removed in constant time */
280 seq_list
->head
= seq_list
->list
[seq
& mask
];
281 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
283 if (seq_list
->head
== L2CAP_SEQ_LIST_TAIL
) {
284 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
285 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
288 /* Walk the list to find the sequence number */
289 u16 prev
= seq_list
->head
;
290 while (seq_list
->list
[prev
& mask
] != seq
) {
291 prev
= seq_list
->list
[prev
& mask
];
292 if (prev
== L2CAP_SEQ_LIST_TAIL
)
293 return L2CAP_SEQ_LIST_CLEAR
;
296 /* Unlink the number from the list and clear it */
297 seq_list
->list
[prev
& mask
] = seq_list
->list
[seq
& mask
];
298 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_CLEAR
;
299 if (seq_list
->tail
== seq
)
300 seq_list
->tail
= prev
;
305 static inline u16
l2cap_seq_list_pop(struct l2cap_seq_list
*seq_list
)
307 /* Remove the head in constant time */
308 return l2cap_seq_list_remove(seq_list
, seq_list
->head
);
311 static void l2cap_seq_list_clear(struct l2cap_seq_list
*seq_list
)
313 if (seq_list
->head
!= L2CAP_SEQ_LIST_CLEAR
) {
315 for (i
= 0; i
<= seq_list
->mask
; i
++)
316 seq_list
->list
[i
] = L2CAP_SEQ_LIST_CLEAR
;
318 seq_list
->head
= L2CAP_SEQ_LIST_CLEAR
;
319 seq_list
->tail
= L2CAP_SEQ_LIST_CLEAR
;
323 static void l2cap_seq_list_append(struct l2cap_seq_list
*seq_list
, u16 seq
)
325 u16 mask
= seq_list
->mask
;
327 /* All appends happen in constant time */
329 if (seq_list
->list
[seq
& mask
] == L2CAP_SEQ_LIST_CLEAR
) {
330 if (seq_list
->tail
== L2CAP_SEQ_LIST_CLEAR
)
331 seq_list
->head
= seq
;
333 seq_list
->list
[seq_list
->tail
& mask
] = seq
;
335 seq_list
->tail
= seq
;
336 seq_list
->list
[seq
& mask
] = L2CAP_SEQ_LIST_TAIL
;
340 static void l2cap_chan_timeout(struct work_struct
*work
)
342 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
344 struct l2cap_conn
*conn
= chan
->conn
;
347 BT_DBG("chan %p state %s", chan
, state_to_string(chan
->state
));
349 mutex_lock(&conn
->chan_lock
);
350 l2cap_chan_lock(chan
);
352 if (chan
->state
== BT_CONNECTED
|| chan
->state
== BT_CONFIG
)
353 reason
= ECONNREFUSED
;
354 else if (chan
->state
== BT_CONNECT
&&
355 chan
->sec_level
!= BT_SECURITY_SDP
)
356 reason
= ECONNREFUSED
;
360 l2cap_chan_close(chan
, reason
);
362 l2cap_chan_unlock(chan
);
364 chan
->ops
->close(chan
->data
);
365 mutex_unlock(&conn
->chan_lock
);
367 l2cap_chan_put(chan
);
370 struct l2cap_chan
*l2cap_chan_create(void)
372 struct l2cap_chan
*chan
;
374 chan
= kzalloc(sizeof(*chan
), GFP_ATOMIC
);
378 mutex_init(&chan
->lock
);
380 write_lock(&chan_list_lock
);
381 list_add(&chan
->global_l
, &chan_list
);
382 write_unlock(&chan_list_lock
);
384 INIT_DELAYED_WORK(&chan
->chan_timer
, l2cap_chan_timeout
);
386 chan
->state
= BT_OPEN
;
388 atomic_set(&chan
->refcnt
, 1);
390 BT_DBG("chan %p", chan
);
395 void l2cap_chan_destroy(struct l2cap_chan
*chan
)
397 write_lock(&chan_list_lock
);
398 list_del(&chan
->global_l
);
399 write_unlock(&chan_list_lock
);
401 l2cap_chan_put(chan
);
404 void l2cap_chan_set_defaults(struct l2cap_chan
*chan
)
406 chan
->fcs
= L2CAP_FCS_CRC16
;
407 chan
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
408 chan
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
409 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
410 chan
->sec_level
= BT_SECURITY_LOW
;
412 set_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
415 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
417 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
418 __le16_to_cpu(chan
->psm
), chan
->dcid
);
420 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
424 switch (chan
->chan_type
) {
425 case L2CAP_CHAN_CONN_ORIENTED
:
426 if (conn
->hcon
->type
== LE_LINK
) {
428 chan
->omtu
= L2CAP_LE_DEFAULT_MTU
;
429 chan
->scid
= L2CAP_CID_LE_DATA
;
430 chan
->dcid
= L2CAP_CID_LE_DATA
;
432 /* Alloc CID for connection-oriented socket */
433 chan
->scid
= l2cap_alloc_cid(conn
);
434 chan
->omtu
= L2CAP_DEFAULT_MTU
;
438 case L2CAP_CHAN_CONN_LESS
:
439 /* Connectionless socket */
440 chan
->scid
= L2CAP_CID_CONN_LESS
;
441 chan
->dcid
= L2CAP_CID_CONN_LESS
;
442 chan
->omtu
= L2CAP_DEFAULT_MTU
;
446 /* Raw socket can send/recv signalling messages only */
447 chan
->scid
= L2CAP_CID_SIGNALING
;
448 chan
->dcid
= L2CAP_CID_SIGNALING
;
449 chan
->omtu
= L2CAP_DEFAULT_MTU
;
452 chan
->local_id
= L2CAP_BESTEFFORT_ID
;
453 chan
->local_stype
= L2CAP_SERV_BESTEFFORT
;
454 chan
->local_msdu
= L2CAP_DEFAULT_MAX_SDU_SIZE
;
455 chan
->local_sdu_itime
= L2CAP_DEFAULT_SDU_ITIME
;
456 chan
->local_acc_lat
= L2CAP_DEFAULT_ACC_LAT
;
457 chan
->local_flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
459 l2cap_chan_hold(chan
);
461 list_add(&chan
->list
, &conn
->chan_l
);
464 static void l2cap_chan_add(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
)
466 mutex_lock(&conn
->chan_lock
);
467 __l2cap_chan_add(conn
, chan
);
468 mutex_unlock(&conn
->chan_lock
);
471 static void l2cap_chan_del(struct l2cap_chan
*chan
, int err
)
473 struct sock
*sk
= chan
->sk
;
474 struct l2cap_conn
*conn
= chan
->conn
;
475 struct sock
*parent
= bt_sk(sk
)->parent
;
477 __clear_chan_timer(chan
);
479 BT_DBG("chan %p, conn %p, err %d", chan
, conn
, err
);
482 /* Delete from channel list */
483 list_del(&chan
->list
);
485 l2cap_chan_put(chan
);
488 hci_conn_put(conn
->hcon
);
493 __l2cap_state_change(chan
, BT_CLOSED
);
494 sock_set_flag(sk
, SOCK_ZAPPED
);
497 __l2cap_chan_set_err(chan
, err
);
500 bt_accept_unlink(sk
);
501 parent
->sk_data_ready(parent
, 0);
503 sk
->sk_state_change(sk
);
507 if (!(test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
) &&
508 test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)))
511 skb_queue_purge(&chan
->tx_q
);
513 if (chan
->mode
== L2CAP_MODE_ERTM
) {
514 struct srej_list
*l
, *tmp
;
516 __clear_retrans_timer(chan
);
517 __clear_monitor_timer(chan
);
518 __clear_ack_timer(chan
);
520 skb_queue_purge(&chan
->srej_q
);
522 l2cap_seq_list_free(&chan
->srej_list
);
523 l2cap_seq_list_free(&chan
->retrans_list
);
524 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
531 static void l2cap_chan_cleanup_listen(struct sock
*parent
)
535 BT_DBG("parent %p", parent
);
537 /* Close not yet accepted channels */
538 while ((sk
= bt_accept_dequeue(parent
, NULL
))) {
539 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
541 l2cap_chan_lock(chan
);
542 __clear_chan_timer(chan
);
543 l2cap_chan_close(chan
, ECONNRESET
);
544 l2cap_chan_unlock(chan
);
546 chan
->ops
->close(chan
->data
);
550 void l2cap_chan_close(struct l2cap_chan
*chan
, int reason
)
552 struct l2cap_conn
*conn
= chan
->conn
;
553 struct sock
*sk
= chan
->sk
;
555 BT_DBG("chan %p state %s sk %p", chan
,
556 state_to_string(chan
->state
), sk
);
558 switch (chan
->state
) {
561 l2cap_chan_cleanup_listen(sk
);
563 __l2cap_state_change(chan
, BT_CLOSED
);
564 sock_set_flag(sk
, SOCK_ZAPPED
);
570 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
571 conn
->hcon
->type
== ACL_LINK
) {
572 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
573 l2cap_send_disconn_req(conn
, chan
, reason
);
575 l2cap_chan_del(chan
, reason
);
579 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&&
580 conn
->hcon
->type
== ACL_LINK
) {
581 struct l2cap_conn_rsp rsp
;
584 if (bt_sk(sk
)->defer_setup
)
585 result
= L2CAP_CR_SEC_BLOCK
;
587 result
= L2CAP_CR_BAD_PSM
;
588 l2cap_state_change(chan
, BT_DISCONN
);
590 rsp
.scid
= cpu_to_le16(chan
->dcid
);
591 rsp
.dcid
= cpu_to_le16(chan
->scid
);
592 rsp
.result
= cpu_to_le16(result
);
593 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
594 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
598 l2cap_chan_del(chan
, reason
);
603 l2cap_chan_del(chan
, reason
);
608 sock_set_flag(sk
, SOCK_ZAPPED
);
614 static inline u8
l2cap_get_auth_type(struct l2cap_chan
*chan
)
616 if (chan
->chan_type
== L2CAP_CHAN_RAW
) {
617 switch (chan
->sec_level
) {
618 case BT_SECURITY_HIGH
:
619 return HCI_AT_DEDICATED_BONDING_MITM
;
620 case BT_SECURITY_MEDIUM
:
621 return HCI_AT_DEDICATED_BONDING
;
623 return HCI_AT_NO_BONDING
;
625 } else if (chan
->psm
== cpu_to_le16(0x0001)) {
626 if (chan
->sec_level
== BT_SECURITY_LOW
)
627 chan
->sec_level
= BT_SECURITY_SDP
;
629 if (chan
->sec_level
== BT_SECURITY_HIGH
)
630 return HCI_AT_NO_BONDING_MITM
;
632 return HCI_AT_NO_BONDING
;
634 switch (chan
->sec_level
) {
635 case BT_SECURITY_HIGH
:
636 return HCI_AT_GENERAL_BONDING_MITM
;
637 case BT_SECURITY_MEDIUM
:
638 return HCI_AT_GENERAL_BONDING
;
640 return HCI_AT_NO_BONDING
;
645 /* Service level security */
646 int l2cap_chan_check_security(struct l2cap_chan
*chan
)
648 struct l2cap_conn
*conn
= chan
->conn
;
651 auth_type
= l2cap_get_auth_type(chan
);
653 return hci_conn_security(conn
->hcon
, chan
->sec_level
, auth_type
);
/* Allocate the next signalling-command identifier under conn->lock.
 * NOTE(review): the wrap-around assignment taken when tx_ident exceeds 128
 * (fused line 669) and the return statement are not visible in this
 * extraction — confirm against the full source. */
656 static u8
l2cap_get_ident(struct l2cap_conn
*conn
)
660 /* Get next available identificator.
661 * 1 - 128 are used by kernel.
662 * 129 - 199 are reserved.
663 * 200 - 254 are used by utilities like l2ping, etc.
666 spin_lock(&conn
->lock
);
668 if (++conn
->tx_ident
> 128)
673 spin_unlock(&conn
->lock
);
678 static void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
680 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
683 BT_DBG("code 0x%2.2x", code
);
688 if (lmp_no_flush_capable(conn
->hcon
->hdev
))
689 flags
= ACL_START_NO_FLUSH
;
693 bt_cb(skb
)->force_active
= BT_POWER_FORCE_ACTIVE_ON
;
694 skb
->priority
= HCI_PRIO_MAX
;
696 hci_send_acl(conn
->hchan
, skb
, flags
);
699 static void l2cap_do_send(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
701 struct hci_conn
*hcon
= chan
->conn
->hcon
;
704 BT_DBG("chan %p, skb %p len %d priority %u", chan
, skb
, skb
->len
,
707 if (!test_bit(FLAG_FLUSHABLE
, &chan
->flags
) &&
708 lmp_no_flush_capable(hcon
->hdev
))
709 flags
= ACL_START_NO_FLUSH
;
713 bt_cb(skb
)->force_active
= test_bit(FLAG_FORCE_ACTIVE
, &chan
->flags
);
714 hci_send_acl(chan
->conn
->hchan
, skb
, flags
);
717 static void __unpack_enhanced_control(u16 enh
, struct l2cap_ctrl
*control
)
719 control
->reqseq
= (enh
& L2CAP_CTRL_REQSEQ
) >> L2CAP_CTRL_REQSEQ_SHIFT
;
720 control
->final
= (enh
& L2CAP_CTRL_FINAL
) >> L2CAP_CTRL_FINAL_SHIFT
;
722 if (enh
& L2CAP_CTRL_FRAME_TYPE
) {
725 control
->poll
= (enh
& L2CAP_CTRL_POLL
) >> L2CAP_CTRL_POLL_SHIFT
;
726 control
->super
= (enh
& L2CAP_CTRL_SUPERVISE
) >> L2CAP_CTRL_SUPER_SHIFT
;
733 control
->sar
= (enh
& L2CAP_CTRL_SAR
) >> L2CAP_CTRL_SAR_SHIFT
;
734 control
->txseq
= (enh
& L2CAP_CTRL_TXSEQ
) >> L2CAP_CTRL_TXSEQ_SHIFT
;
741 static void __unpack_extended_control(u32 ext
, struct l2cap_ctrl
*control
)
743 control
->reqseq
= (ext
& L2CAP_EXT_CTRL_REQSEQ
) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
744 control
->final
= (ext
& L2CAP_EXT_CTRL_FINAL
) >> L2CAP_EXT_CTRL_FINAL_SHIFT
;
746 if (ext
& L2CAP_EXT_CTRL_FRAME_TYPE
) {
749 control
->poll
= (ext
& L2CAP_EXT_CTRL_POLL
) >> L2CAP_EXT_CTRL_POLL_SHIFT
;
750 control
->super
= (ext
& L2CAP_EXT_CTRL_SUPERVISE
) >> L2CAP_EXT_CTRL_SUPER_SHIFT
;
757 control
->sar
= (ext
& L2CAP_EXT_CTRL_SAR
) >> L2CAP_EXT_CTRL_SAR_SHIFT
;
758 control
->txseq
= (ext
& L2CAP_EXT_CTRL_TXSEQ
) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
765 static inline void __unpack_control(struct l2cap_chan
*chan
,
768 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
769 __unpack_extended_control(get_unaligned_le32(skb
->data
),
770 &bt_cb(skb
)->control
);
772 __unpack_enhanced_control(get_unaligned_le16(skb
->data
),
773 &bt_cb(skb
)->control
);
777 static u32
__pack_extended_control(struct l2cap_ctrl
*control
)
781 packed
= control
->reqseq
<< L2CAP_EXT_CTRL_REQSEQ_SHIFT
;
782 packed
|= control
->final
<< L2CAP_EXT_CTRL_FINAL_SHIFT
;
784 if (control
->sframe
) {
785 packed
|= control
->poll
<< L2CAP_EXT_CTRL_POLL_SHIFT
;
786 packed
|= control
->super
<< L2CAP_EXT_CTRL_SUPER_SHIFT
;
787 packed
|= L2CAP_EXT_CTRL_FRAME_TYPE
;
789 packed
|= control
->sar
<< L2CAP_EXT_CTRL_SAR_SHIFT
;
790 packed
|= control
->txseq
<< L2CAP_EXT_CTRL_TXSEQ_SHIFT
;
796 static u16
__pack_enhanced_control(struct l2cap_ctrl
*control
)
800 packed
= control
->reqseq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
801 packed
|= control
->final
<< L2CAP_CTRL_FINAL_SHIFT
;
803 if (control
->sframe
) {
804 packed
|= control
->poll
<< L2CAP_CTRL_POLL_SHIFT
;
805 packed
|= control
->super
<< L2CAP_CTRL_SUPER_SHIFT
;
806 packed
|= L2CAP_CTRL_FRAME_TYPE
;
808 packed
|= control
->sar
<< L2CAP_CTRL_SAR_SHIFT
;
809 packed
|= control
->txseq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
815 static inline void __pack_control(struct l2cap_chan
*chan
,
816 struct l2cap_ctrl
*control
,
819 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
)) {
820 put_unaligned_le32(__pack_extended_control(control
),
821 skb
->data
+ L2CAP_HDR_SIZE
);
823 put_unaligned_le16(__pack_enhanced_control(control
),
824 skb
->data
+ L2CAP_HDR_SIZE
);
828 static inline void l2cap_send_sframe(struct l2cap_chan
*chan
, u32 control
)
831 struct l2cap_hdr
*lh
;
832 struct l2cap_conn
*conn
= chan
->conn
;
835 if (chan
->state
!= BT_CONNECTED
)
838 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
839 hlen
= L2CAP_EXT_HDR_SIZE
;
841 hlen
= L2CAP_ENH_HDR_SIZE
;
843 if (chan
->fcs
== L2CAP_FCS_CRC16
)
844 hlen
+= L2CAP_FCS_SIZE
;
846 BT_DBG("chan %p, control 0x%8.8x", chan
, control
);
848 count
= min_t(unsigned int, conn
->mtu
, hlen
);
850 control
|= __set_sframe(chan
);
852 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
853 control
|= __set_ctrl_final(chan
);
855 if (test_and_clear_bit(CONN_SEND_PBIT
, &chan
->conn_state
))
856 control
|= __set_ctrl_poll(chan
);
858 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
862 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
863 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
864 lh
->cid
= cpu_to_le16(chan
->dcid
);
866 __put_control(chan
, control
, skb_put(skb
, __ctrl_size(chan
)));
868 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
869 u16 fcs
= crc16(0, (u8
*)lh
, count
- L2CAP_FCS_SIZE
);
870 put_unaligned_le16(fcs
, skb_put(skb
, L2CAP_FCS_SIZE
));
873 skb
->priority
= HCI_PRIO_MAX
;
874 l2cap_do_send(chan
, skb
);
877 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan
*chan
, u32 control
)
879 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
880 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
881 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
883 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
885 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
887 l2cap_send_sframe(chan
, control
);
890 static inline int __l2cap_no_conn_pending(struct l2cap_chan
*chan
)
892 return !test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
895 static void l2cap_send_conn_req(struct l2cap_chan
*chan
)
897 struct l2cap_conn
*conn
= chan
->conn
;
898 struct l2cap_conn_req req
;
900 req
.scid
= cpu_to_le16(chan
->scid
);
903 chan
->ident
= l2cap_get_ident(conn
);
905 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
907 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_REQ
, sizeof(req
), &req
);
910 static void l2cap_chan_ready(struct l2cap_chan
*chan
)
912 struct sock
*sk
= chan
->sk
;
917 parent
= bt_sk(sk
)->parent
;
919 BT_DBG("sk %p, parent %p", sk
, parent
);
921 chan
->conf_state
= 0;
922 __clear_chan_timer(chan
);
924 __l2cap_state_change(chan
, BT_CONNECTED
);
925 sk
->sk_state_change(sk
);
928 parent
->sk_data_ready(parent
, 0);
933 static void l2cap_do_start(struct l2cap_chan
*chan
)
935 struct l2cap_conn
*conn
= chan
->conn
;
937 if (conn
->hcon
->type
== LE_LINK
) {
938 l2cap_chan_ready(chan
);
942 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
943 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
946 if (l2cap_chan_check_security(chan
) &&
947 __l2cap_no_conn_pending(chan
))
948 l2cap_send_conn_req(chan
);
950 struct l2cap_info_req req
;
951 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
953 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
954 conn
->info_ident
= l2cap_get_ident(conn
);
956 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
958 l2cap_send_cmd(conn
, conn
->info_ident
,
959 L2CAP_INFO_REQ
, sizeof(req
), &req
);
963 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
965 u32 local_feat_mask
= l2cap_feat_mask
;
967 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
970 case L2CAP_MODE_ERTM
:
971 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
972 case L2CAP_MODE_STREAMING
:
973 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
979 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct l2cap_chan
*chan
, int err
)
981 struct sock
*sk
= chan
->sk
;
982 struct l2cap_disconn_req req
;
987 if (chan
->mode
== L2CAP_MODE_ERTM
) {
988 __clear_retrans_timer(chan
);
989 __clear_monitor_timer(chan
);
990 __clear_ack_timer(chan
);
993 req
.dcid
= cpu_to_le16(chan
->dcid
);
994 req
.scid
= cpu_to_le16(chan
->scid
);
995 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
996 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
999 __l2cap_state_change(chan
, BT_DISCONN
);
1000 __l2cap_chan_set_err(chan
, err
);
1004 /* ---- L2CAP connections ---- */
1005 static void l2cap_conn_start(struct l2cap_conn
*conn
)
1007 struct l2cap_chan
*chan
, *tmp
;
1009 BT_DBG("conn %p", conn
);
1011 mutex_lock(&conn
->chan_lock
);
1013 list_for_each_entry_safe(chan
, tmp
, &conn
->chan_l
, list
) {
1014 struct sock
*sk
= chan
->sk
;
1016 l2cap_chan_lock(chan
);
1018 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1019 l2cap_chan_unlock(chan
);
1023 if (chan
->state
== BT_CONNECT
) {
1024 if (!l2cap_chan_check_security(chan
) ||
1025 !__l2cap_no_conn_pending(chan
)) {
1026 l2cap_chan_unlock(chan
);
1030 if (!l2cap_mode_supported(chan
->mode
, conn
->feat_mask
)
1031 && test_bit(CONF_STATE2_DEVICE
,
1032 &chan
->conf_state
)) {
1033 l2cap_chan_close(chan
, ECONNRESET
);
1034 l2cap_chan_unlock(chan
);
1038 l2cap_send_conn_req(chan
);
1040 } else if (chan
->state
== BT_CONNECT2
) {
1041 struct l2cap_conn_rsp rsp
;
1043 rsp
.scid
= cpu_to_le16(chan
->dcid
);
1044 rsp
.dcid
= cpu_to_le16(chan
->scid
);
1046 if (l2cap_chan_check_security(chan
)) {
1048 if (bt_sk(sk
)->defer_setup
) {
1049 struct sock
*parent
= bt_sk(sk
)->parent
;
1050 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1051 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
1053 parent
->sk_data_ready(parent
, 0);
1056 __l2cap_state_change(chan
, BT_CONFIG
);
1057 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1058 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1062 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
1063 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
1066 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
1069 if (test_bit(CONF_REQ_SENT
, &chan
->conf_state
) ||
1070 rsp
.result
!= L2CAP_CR_SUCCESS
) {
1071 l2cap_chan_unlock(chan
);
1075 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
1076 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
1077 l2cap_build_conf_req(chan
, buf
), buf
);
1078 chan
->num_conf_req
++;
1081 l2cap_chan_unlock(chan
);
1084 mutex_unlock(&conn
->chan_lock
);
1087 /* Find socket with cid and source/destination bdaddr.
1088 * Returns closest match, locked.
1090 static struct l2cap_chan
*l2cap_global_chan_by_scid(int state
, u16 cid
,
1094 struct l2cap_chan
*c
, *c1
= NULL
;
1096 read_lock(&chan_list_lock
);
1098 list_for_each_entry(c
, &chan_list
, global_l
) {
1099 struct sock
*sk
= c
->sk
;
1101 if (state
&& c
->state
!= state
)
1104 if (c
->scid
== cid
) {
1105 int src_match
, dst_match
;
1106 int src_any
, dst_any
;
1109 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1110 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1111 if (src_match
&& dst_match
) {
1112 read_unlock(&chan_list_lock
);
1117 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1118 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1119 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1120 (src_any
&& dst_any
))
1125 read_unlock(&chan_list_lock
);
1130 static void l2cap_le_conn_ready(struct l2cap_conn
*conn
)
1132 struct sock
*parent
, *sk
;
1133 struct l2cap_chan
*chan
, *pchan
;
1137 /* Check if we have socket listening on cid */
1138 pchan
= l2cap_global_chan_by_scid(BT_LISTEN
, L2CAP_CID_LE_DATA
,
1139 conn
->src
, conn
->dst
);
1147 /* Check for backlog size */
1148 if (sk_acceptq_is_full(parent
)) {
1149 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
1153 chan
= pchan
->ops
->new_connection(pchan
->data
);
1159 hci_conn_hold(conn
->hcon
);
1161 bacpy(&bt_sk(sk
)->src
, conn
->src
);
1162 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
1164 bt_accept_enqueue(parent
, sk
);
1166 l2cap_chan_add(conn
, chan
);
1168 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1170 __l2cap_state_change(chan
, BT_CONNECTED
);
1171 parent
->sk_data_ready(parent
, 0);
1174 release_sock(parent
);
1177 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
1179 struct l2cap_chan
*chan
;
1181 BT_DBG("conn %p", conn
);
1183 if (!conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1184 l2cap_le_conn_ready(conn
);
1186 if (conn
->hcon
->out
&& conn
->hcon
->type
== LE_LINK
)
1187 smp_conn_security(conn
, conn
->hcon
->pending_sec_level
);
1189 mutex_lock(&conn
->chan_lock
);
1191 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1193 l2cap_chan_lock(chan
);
1195 if (conn
->hcon
->type
== LE_LINK
) {
1196 if (smp_conn_security(conn
, chan
->sec_level
))
1197 l2cap_chan_ready(chan
);
1199 } else if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1200 struct sock
*sk
= chan
->sk
;
1201 __clear_chan_timer(chan
);
1203 __l2cap_state_change(chan
, BT_CONNECTED
);
1204 sk
->sk_state_change(sk
);
1207 } else if (chan
->state
== BT_CONNECT
)
1208 l2cap_do_start(chan
);
1210 l2cap_chan_unlock(chan
);
1213 mutex_unlock(&conn
->chan_lock
);
1216 /* Notify sockets that we cannot guaranty reliability anymore */
1217 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
1219 struct l2cap_chan
*chan
;
1221 BT_DBG("conn %p", conn
);
1223 mutex_lock(&conn
->chan_lock
);
1225 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
1226 if (test_bit(FLAG_FORCE_RELIABLE
, &chan
->flags
))
1227 __l2cap_chan_set_err(chan
, err
);
1230 mutex_unlock(&conn
->chan_lock
);
/* Information-request timer expired: mark the feature-mask exchange as done,
 * clear the outstanding identifier, and kick channel setup for channels that
 * were waiting on the exchange.
 * NOTE(review): the continuation line of the container_of() expression
 * (fused line 1236, presumably info_timer.work) is missing from this
 * extraction. */
1233 static void l2cap_info_timeout(struct work_struct
*work
)
1235 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1238 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
1239 conn
->info_ident
= 0;
1241 l2cap_conn_start(conn
);
1244 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
1246 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1247 struct l2cap_chan
*chan
, *l
;
1252 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
1254 kfree_skb(conn
->rx_skb
);
1256 mutex_lock(&conn
->chan_lock
);
1259 list_for_each_entry_safe(chan
, l
, &conn
->chan_l
, list
) {
1260 l2cap_chan_hold(chan
);
1261 l2cap_chan_lock(chan
);
1263 l2cap_chan_del(chan
, err
);
1265 l2cap_chan_unlock(chan
);
1267 chan
->ops
->close(chan
->data
);
1268 l2cap_chan_put(chan
);
1271 mutex_unlock(&conn
->chan_lock
);
1273 hci_chan_del(conn
->hchan
);
1275 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
1276 cancel_delayed_work_sync(&conn
->info_timer
);
1278 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND
, &hcon
->flags
)) {
1279 cancel_delayed_work_sync(&conn
->security_timer
);
1280 smp_chan_destroy(conn
);
1283 hcon
->l2cap_data
= NULL
;
1287 static void security_timeout(struct work_struct
*work
)
1289 struct l2cap_conn
*conn
= container_of(work
, struct l2cap_conn
,
1290 security_timer
.work
);
1292 l2cap_conn_del(conn
->hcon
, ETIMEDOUT
);
1295 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
1297 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
1298 struct hci_chan
*hchan
;
1303 hchan
= hci_chan_create(hcon
);
1307 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
1309 hci_chan_del(hchan
);
1313 hcon
->l2cap_data
= conn
;
1315 conn
->hchan
= hchan
;
1317 BT_DBG("hcon %p conn %p hchan %p", hcon
, conn
, hchan
);
1319 if (hcon
->hdev
->le_mtu
&& hcon
->type
== LE_LINK
)
1320 conn
->mtu
= hcon
->hdev
->le_mtu
;
1322 conn
->mtu
= hcon
->hdev
->acl_mtu
;
1324 conn
->src
= &hcon
->hdev
->bdaddr
;
1325 conn
->dst
= &hcon
->dst
;
1327 conn
->feat_mask
= 0;
1329 spin_lock_init(&conn
->lock
);
1330 mutex_init(&conn
->chan_lock
);
1332 INIT_LIST_HEAD(&conn
->chan_l
);
1334 if (hcon
->type
== LE_LINK
)
1335 INIT_DELAYED_WORK(&conn
->security_timer
, security_timeout
);
1337 INIT_DELAYED_WORK(&conn
->info_timer
, l2cap_info_timeout
);
1339 conn
->disc_reason
= HCI_ERROR_REMOTE_USER_TERM
;
1344 /* ---- Socket interface ---- */
1346 /* Find socket with psm and source / destination bdaddr.
1347 * Returns closest match.
1349 static struct l2cap_chan
*l2cap_global_chan_by_psm(int state
, __le16 psm
,
1353 struct l2cap_chan
*c
, *c1
= NULL
;
1355 read_lock(&chan_list_lock
);
1357 list_for_each_entry(c
, &chan_list
, global_l
) {
1358 struct sock
*sk
= c
->sk
;
1360 if (state
&& c
->state
!= state
)
1363 if (c
->psm
== psm
) {
1364 int src_match
, dst_match
;
1365 int src_any
, dst_any
;
1368 src_match
= !bacmp(&bt_sk(sk
)->src
, src
);
1369 dst_match
= !bacmp(&bt_sk(sk
)->dst
, dst
);
1370 if (src_match
&& dst_match
) {
1371 read_unlock(&chan_list_lock
);
1376 src_any
= !bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
);
1377 dst_any
= !bacmp(&bt_sk(sk
)->dst
, BDADDR_ANY
);
1378 if ((src_match
&& dst_any
) || (src_any
&& dst_match
) ||
1379 (src_any
&& dst_any
))
1384 read_unlock(&chan_list_lock
);
1389 int l2cap_chan_connect(struct l2cap_chan
*chan
, __le16 psm
, u16 cid
,
1390 bdaddr_t
*dst
, u8 dst_type
)
1392 struct sock
*sk
= chan
->sk
;
1393 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1394 struct l2cap_conn
*conn
;
1395 struct hci_conn
*hcon
;
1396 struct hci_dev
*hdev
;
1400 BT_DBG("%s -> %s (type %u) psm 0x%2.2x", batostr(src
), batostr(dst
),
1401 dst_type
, __le16_to_cpu(chan
->psm
));
1403 hdev
= hci_get_route(dst
, src
);
1405 return -EHOSTUNREACH
;
1409 l2cap_chan_lock(chan
);
1411 /* PSM must be odd and lsb of upper byte must be 0 */
1412 if ((__le16_to_cpu(psm
) & 0x0101) != 0x0001 && !cid
&&
1413 chan
->chan_type
!= L2CAP_CHAN_RAW
) {
1418 if (chan
->chan_type
== L2CAP_CHAN_CONN_ORIENTED
&& !(psm
|| cid
)) {
1423 switch (chan
->mode
) {
1424 case L2CAP_MODE_BASIC
:
1426 case L2CAP_MODE_ERTM
:
1427 case L2CAP_MODE_STREAMING
:
1438 switch (sk
->sk_state
) {
1442 /* Already connecting */
1448 /* Already connected */
1464 /* Set destination address and psm */
1465 bacpy(&bt_sk(sk
)->dst
, dst
);
1472 auth_type
= l2cap_get_auth_type(chan
);
1474 if (chan
->dcid
== L2CAP_CID_LE_DATA
)
1475 hcon
= hci_connect(hdev
, LE_LINK
, dst
, dst_type
,
1476 chan
->sec_level
, auth_type
);
1478 hcon
= hci_connect(hdev
, ACL_LINK
, dst
, dst_type
,
1479 chan
->sec_level
, auth_type
);
1482 err
= PTR_ERR(hcon
);
1486 conn
= l2cap_conn_add(hcon
, 0);
1493 if (hcon
->type
== LE_LINK
) {
1496 if (!list_empty(&conn
->chan_l
)) {
1505 /* Update source addr of the socket */
1506 bacpy(src
, conn
->src
);
1508 l2cap_chan_unlock(chan
);
1509 l2cap_chan_add(conn
, chan
);
1510 l2cap_chan_lock(chan
);
1512 l2cap_state_change(chan
, BT_CONNECT
);
1513 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
1515 if (hcon
->state
== BT_CONNECTED
) {
1516 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
) {
1517 __clear_chan_timer(chan
);
1518 if (l2cap_chan_check_security(chan
))
1519 l2cap_state_change(chan
, BT_CONNECTED
);
1521 l2cap_do_start(chan
);
1527 l2cap_chan_unlock(chan
);
1528 hci_dev_unlock(hdev
);
1533 int __l2cap_wait_ack(struct sock
*sk
)
1535 struct l2cap_chan
*chan
= l2cap_pi(sk
)->chan
;
1536 DECLARE_WAITQUEUE(wait
, current
);
1540 add_wait_queue(sk_sleep(sk
), &wait
);
1541 set_current_state(TASK_INTERRUPTIBLE
);
1542 while (chan
->unacked_frames
> 0 && chan
->conn
) {
1546 if (signal_pending(current
)) {
1547 err
= sock_intr_errno(timeo
);
1552 timeo
= schedule_timeout(timeo
);
1554 set_current_state(TASK_INTERRUPTIBLE
);
1556 err
= sock_error(sk
);
1560 set_current_state(TASK_RUNNING
);
1561 remove_wait_queue(sk_sleep(sk
), &wait
);
1565 static void l2cap_monitor_timeout(struct work_struct
*work
)
1567 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1568 monitor_timer
.work
);
1570 BT_DBG("chan %p", chan
);
1572 l2cap_chan_lock(chan
);
1574 if (chan
->retry_count
>= chan
->remote_max_tx
) {
1575 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1576 l2cap_chan_unlock(chan
);
1577 l2cap_chan_put(chan
);
1581 chan
->retry_count
++;
1582 __set_monitor_timer(chan
);
1584 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1585 l2cap_chan_unlock(chan
);
1586 l2cap_chan_put(chan
);
1589 static void l2cap_retrans_timeout(struct work_struct
*work
)
1591 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
1592 retrans_timer
.work
);
1594 BT_DBG("chan %p", chan
);
1596 l2cap_chan_lock(chan
);
1598 chan
->retry_count
= 1;
1599 __set_monitor_timer(chan
);
1601 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
1603 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_POLL
);
1605 l2cap_chan_unlock(chan
);
1606 l2cap_chan_put(chan
);
1609 static void l2cap_drop_acked_frames(struct l2cap_chan
*chan
)
1611 struct sk_buff
*skb
;
1613 while ((skb
= skb_peek(&chan
->tx_q
)) &&
1614 chan
->unacked_frames
) {
1615 if (bt_cb(skb
)->control
.txseq
== chan
->expected_ack_seq
)
1618 skb
= skb_dequeue(&chan
->tx_q
);
1621 chan
->unacked_frames
--;
1624 if (!chan
->unacked_frames
)
1625 __clear_retrans_timer(chan
);
1628 static void l2cap_streaming_send(struct l2cap_chan
*chan
)
1630 struct sk_buff
*skb
;
1634 while ((skb
= skb_dequeue(&chan
->tx_q
))) {
1635 control
= __get_control(chan
, skb
->data
+ L2CAP_HDR_SIZE
);
1636 control
|= __set_txseq(chan
, chan
->next_tx_seq
);
1637 control
|= __set_ctrl_sar(chan
, bt_cb(skb
)->control
.sar
);
1638 __put_control(chan
, control
, skb
->data
+ L2CAP_HDR_SIZE
);
1640 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1641 fcs
= crc16(0, (u8
*)skb
->data
,
1642 skb
->len
- L2CAP_FCS_SIZE
);
1643 put_unaligned_le16(fcs
,
1644 skb
->data
+ skb
->len
- L2CAP_FCS_SIZE
);
1647 l2cap_do_send(chan
, skb
);
1649 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1653 static void l2cap_retransmit_one_frame(struct l2cap_chan
*chan
, u16 tx_seq
)
1655 struct sk_buff
*skb
, *tx_skb
;
1659 skb
= skb_peek(&chan
->tx_q
);
1663 while (bt_cb(skb
)->control
.txseq
!= tx_seq
) {
1664 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1667 skb
= skb_queue_next(&chan
->tx_q
, skb
);
1670 if (bt_cb(skb
)->control
.retries
== chan
->remote_max_tx
&&
1671 chan
->remote_max_tx
) {
1672 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1676 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1677 bt_cb(skb
)->control
.retries
++;
1679 control
= __get_control(chan
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1680 control
&= __get_sar_mask(chan
);
1682 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1683 control
|= __set_ctrl_final(chan
);
1685 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1686 control
|= __set_txseq(chan
, tx_seq
);
1688 __put_control(chan
, control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1690 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1691 fcs
= crc16(0, (u8
*)tx_skb
->data
,
1692 tx_skb
->len
- L2CAP_FCS_SIZE
);
1693 put_unaligned_le16(fcs
,
1694 tx_skb
->data
+ tx_skb
->len
- L2CAP_FCS_SIZE
);
1697 l2cap_do_send(chan
, tx_skb
);
1700 static int l2cap_ertm_send(struct l2cap_chan
*chan
)
1702 struct sk_buff
*skb
, *tx_skb
;
1707 if (chan
->state
!= BT_CONNECTED
)
1710 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
1713 while ((skb
= chan
->tx_send_head
) && (!l2cap_tx_window_full(chan
))) {
1715 if (bt_cb(skb
)->control
.retries
== chan
->remote_max_tx
&&
1716 chan
->remote_max_tx
) {
1717 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNABORTED
);
1721 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1723 bt_cb(skb
)->control
.retries
++;
1725 control
= __get_control(chan
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1726 control
&= __get_sar_mask(chan
);
1728 if (test_and_clear_bit(CONN_SEND_FBIT
, &chan
->conn_state
))
1729 control
|= __set_ctrl_final(chan
);
1731 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1732 control
|= __set_txseq(chan
, chan
->next_tx_seq
);
1733 control
|= __set_ctrl_sar(chan
, bt_cb(skb
)->control
.sar
);
1735 __put_control(chan
, control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1737 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
1738 fcs
= crc16(0, (u8
*)skb
->data
,
1739 tx_skb
->len
- L2CAP_FCS_SIZE
);
1740 put_unaligned_le16(fcs
, skb
->data
+
1741 tx_skb
->len
- L2CAP_FCS_SIZE
);
1744 l2cap_do_send(chan
, tx_skb
);
1746 __set_retrans_timer(chan
);
1748 bt_cb(skb
)->control
.txseq
= chan
->next_tx_seq
;
1750 chan
->next_tx_seq
= __next_seq(chan
, chan
->next_tx_seq
);
1752 if (bt_cb(skb
)->control
.retries
== 1) {
1753 chan
->unacked_frames
++;
1756 __clear_ack_timer(chan
);
1759 chan
->frames_sent
++;
1761 if (skb_queue_is_last(&chan
->tx_q
, skb
))
1762 chan
->tx_send_head
= NULL
;
1764 chan
->tx_send_head
= skb_queue_next(&chan
->tx_q
, skb
);
1770 static int l2cap_retransmit_frames(struct l2cap_chan
*chan
)
1774 if (!skb_queue_empty(&chan
->tx_q
))
1775 chan
->tx_send_head
= chan
->tx_q
.next
;
1777 chan
->next_tx_seq
= chan
->expected_ack_seq
;
1778 ret
= l2cap_ertm_send(chan
);
1782 static void __l2cap_send_ack(struct l2cap_chan
*chan
)
1786 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
1788 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
1789 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
1790 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
1791 l2cap_send_sframe(chan
, control
);
1795 if (l2cap_ertm_send(chan
) > 0)
1798 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
1799 l2cap_send_sframe(chan
, control
);
/* Send an immediate ack and cancel the pending delayed-ack timer. */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	__clear_ack_timer(chan);
	__l2cap_send_ack(chan);
}
1808 static void l2cap_send_srejtail(struct l2cap_chan
*chan
)
1810 struct srej_list
*tail
;
1813 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
1814 control
|= __set_ctrl_final(chan
);
1816 tail
= list_entry((&chan
->srej_l
)->prev
, struct srej_list
, list
);
1817 control
|= __set_reqseq(chan
, tail
->tx_seq
);
1819 l2cap_send_sframe(chan
, control
);
1822 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan
*chan
,
1823 struct msghdr
*msg
, int len
,
1824 int count
, struct sk_buff
*skb
)
1826 struct l2cap_conn
*conn
= chan
->conn
;
1827 struct sk_buff
**frag
;
1830 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1836 /* Continuation fragments (no L2CAP header) */
1837 frag
= &skb_shinfo(skb
)->frag_list
;
1839 struct sk_buff
*tmp
;
1841 count
= min_t(unsigned int, conn
->mtu
, len
);
1843 tmp
= chan
->ops
->alloc_skb(chan
, count
,
1844 msg
->msg_flags
& MSG_DONTWAIT
);
1846 return PTR_ERR(tmp
);
1850 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1853 (*frag
)->priority
= skb
->priority
;
1858 frag
= &(*frag
)->next
;
1864 static struct sk_buff
*l2cap_create_connless_pdu(struct l2cap_chan
*chan
,
1865 struct msghdr
*msg
, size_t len
,
1868 struct l2cap_conn
*conn
= chan
->conn
;
1869 struct sk_buff
*skb
;
1870 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ L2CAP_PSMLEN_SIZE
;
1871 struct l2cap_hdr
*lh
;
1873 BT_DBG("chan %p len %d priority %u", chan
, (int)len
, priority
);
1875 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1877 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
1878 msg
->msg_flags
& MSG_DONTWAIT
);
1882 skb
->priority
= priority
;
1884 /* Create L2CAP header */
1885 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1886 lh
->cid
= cpu_to_le16(chan
->dcid
);
1887 lh
->len
= cpu_to_le16(len
+ L2CAP_PSMLEN_SIZE
);
1888 put_unaligned(chan
->psm
, skb_put(skb
, L2CAP_PSMLEN_SIZE
));
1890 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1891 if (unlikely(err
< 0)) {
1893 return ERR_PTR(err
);
1898 static struct sk_buff
*l2cap_create_basic_pdu(struct l2cap_chan
*chan
,
1899 struct msghdr
*msg
, size_t len
,
1902 struct l2cap_conn
*conn
= chan
->conn
;
1903 struct sk_buff
*skb
;
1905 struct l2cap_hdr
*lh
;
1907 BT_DBG("chan %p len %d", chan
, (int)len
);
1909 count
= min_t(unsigned int, (conn
->mtu
- L2CAP_HDR_SIZE
), len
);
1911 skb
= chan
->ops
->alloc_skb(chan
, count
+ L2CAP_HDR_SIZE
,
1912 msg
->msg_flags
& MSG_DONTWAIT
);
1916 skb
->priority
= priority
;
1918 /* Create L2CAP header */
1919 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1920 lh
->cid
= cpu_to_le16(chan
->dcid
);
1921 lh
->len
= cpu_to_le16(len
);
1923 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1924 if (unlikely(err
< 0)) {
1926 return ERR_PTR(err
);
1931 static struct sk_buff
*l2cap_create_iframe_pdu(struct l2cap_chan
*chan
,
1932 struct msghdr
*msg
, size_t len
,
1935 struct l2cap_conn
*conn
= chan
->conn
;
1936 struct sk_buff
*skb
;
1937 int err
, count
, hlen
;
1938 struct l2cap_hdr
*lh
;
1940 BT_DBG("chan %p len %d", chan
, (int)len
);
1943 return ERR_PTR(-ENOTCONN
);
1945 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
1946 hlen
= L2CAP_EXT_HDR_SIZE
;
1948 hlen
= L2CAP_ENH_HDR_SIZE
;
1951 hlen
+= L2CAP_SDULEN_SIZE
;
1953 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1954 hlen
+= L2CAP_FCS_SIZE
;
1956 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1958 skb
= chan
->ops
->alloc_skb(chan
, count
+ hlen
,
1959 msg
->msg_flags
& MSG_DONTWAIT
);
1963 /* Create L2CAP header */
1964 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1965 lh
->cid
= cpu_to_le16(chan
->dcid
);
1966 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1968 __put_control(chan
, 0, skb_put(skb
, __ctrl_size(chan
)));
1971 put_unaligned_le16(sdulen
, skb_put(skb
, L2CAP_SDULEN_SIZE
));
1973 err
= l2cap_skbuff_fromiovec(chan
, msg
, len
, count
, skb
);
1974 if (unlikely(err
< 0)) {
1976 return ERR_PTR(err
);
1979 if (chan
->fcs
== L2CAP_FCS_CRC16
)
1980 put_unaligned_le16(0, skb_put(skb
, L2CAP_FCS_SIZE
));
1982 bt_cb(skb
)->control
.retries
= 0;
1986 static int l2cap_segment_sdu(struct l2cap_chan
*chan
,
1987 struct sk_buff_head
*seg_queue
,
1988 struct msghdr
*msg
, size_t len
)
1990 struct sk_buff
*skb
;
1996 BT_DBG("chan %p, msg %p, len %d", chan
, msg
, (int)len
);
1998 /* It is critical that ERTM PDUs fit in a single HCI fragment,
1999 * so fragmented skbs are not used. The HCI layer's handling
2000 * of fragmented skbs is not compatible with ERTM's queueing.
2003 /* PDU size is derived from the HCI MTU */
2004 pdu_len
= chan
->conn
->mtu
;
2006 pdu_len
= min_t(size_t, pdu_len
, L2CAP_BREDR_MAX_PAYLOAD
);
2008 /* Adjust for largest possible L2CAP overhead. */
2009 pdu_len
-= L2CAP_EXT_HDR_SIZE
+ L2CAP_FCS_SIZE
;
2011 /* Remote device may have requested smaller PDUs */
2012 pdu_len
= min_t(size_t, pdu_len
, chan
->remote_mps
);
2014 if (len
<= pdu_len
) {
2015 sar
= L2CAP_SAR_UNSEGMENTED
;
2019 sar
= L2CAP_SAR_START
;
2021 pdu_len
-= L2CAP_SDULEN_SIZE
;
2025 skb
= l2cap_create_iframe_pdu(chan
, msg
, pdu_len
, sdu_len
);
2028 __skb_queue_purge(seg_queue
);
2029 return PTR_ERR(skb
);
2032 bt_cb(skb
)->control
.sar
= sar
;
2033 __skb_queue_tail(seg_queue
, skb
);
2038 pdu_len
+= L2CAP_SDULEN_SIZE
;
2041 if (len
<= pdu_len
) {
2042 sar
= L2CAP_SAR_END
;
2045 sar
= L2CAP_SAR_CONTINUE
;
2052 int l2cap_chan_send(struct l2cap_chan
*chan
, struct msghdr
*msg
, size_t len
,
2055 struct sk_buff
*skb
;
2057 struct sk_buff_head seg_queue
;
2059 /* Connectionless channel */
2060 if (chan
->chan_type
== L2CAP_CHAN_CONN_LESS
) {
2061 skb
= l2cap_create_connless_pdu(chan
, msg
, len
, priority
);
2063 return PTR_ERR(skb
);
2065 l2cap_do_send(chan
, skb
);
2069 switch (chan
->mode
) {
2070 case L2CAP_MODE_BASIC
:
2071 /* Check outgoing MTU */
2072 if (len
> chan
->omtu
)
2075 /* Create a basic PDU */
2076 skb
= l2cap_create_basic_pdu(chan
, msg
, len
, priority
);
2078 return PTR_ERR(skb
);
2080 l2cap_do_send(chan
, skb
);
2084 case L2CAP_MODE_ERTM
:
2085 case L2CAP_MODE_STREAMING
:
2086 /* Check outgoing MTU */
2087 if (len
> chan
->omtu
) {
2092 __skb_queue_head_init(&seg_queue
);
2094 /* Do segmentation before calling in to the state machine,
2095 * since it's possible to block while waiting for memory
2098 err
= l2cap_segment_sdu(chan
, &seg_queue
, msg
, len
);
2100 /* The channel could have been closed while segmenting,
2101 * check that it is still connected.
2103 if (chan
->state
!= BT_CONNECTED
) {
2104 __skb_queue_purge(&seg_queue
);
2111 if (chan
->mode
== L2CAP_MODE_ERTM
&& chan
->tx_send_head
== NULL
)
2112 chan
->tx_send_head
= seg_queue
.next
;
2113 skb_queue_splice_tail_init(&seg_queue
, &chan
->tx_q
);
2115 if (chan
->mode
== L2CAP_MODE_ERTM
)
2116 err
= l2cap_ertm_send(chan
);
2118 l2cap_streaming_send(chan
);
2123 /* If the skbs were not queued for sending, they'll still be in
2124 * seg_queue and need to be purged.
2126 __skb_queue_purge(&seg_queue
);
2130 BT_DBG("bad state %1.1x", chan
->mode
);
2137 /* Copy frame to all raw sockets on that connection */
2138 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2140 struct sk_buff
*nskb
;
2141 struct l2cap_chan
*chan
;
2143 BT_DBG("conn %p", conn
);
2145 mutex_lock(&conn
->chan_lock
);
2147 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
2148 struct sock
*sk
= chan
->sk
;
2149 if (chan
->chan_type
!= L2CAP_CHAN_RAW
)
2152 /* Don't send frame to the socket it came from */
2155 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2159 if (chan
->ops
->recv(chan
->data
, nskb
))
2163 mutex_unlock(&conn
->chan_lock
);
2166 /* ---- L2CAP signalling commands ---- */
2167 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2168 u8 code
, u8 ident
, u16 dlen
, void *data
)
2170 struct sk_buff
*skb
, **frag
;
2171 struct l2cap_cmd_hdr
*cmd
;
2172 struct l2cap_hdr
*lh
;
2175 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2176 conn
, code
, ident
, dlen
);
2178 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2179 count
= min_t(unsigned int, conn
->mtu
, len
);
2181 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2185 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2186 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2188 if (conn
->hcon
->type
== LE_LINK
)
2189 lh
->cid
= cpu_to_le16(L2CAP_CID_LE_SIGNALING
);
2191 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2193 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2196 cmd
->len
= cpu_to_le16(dlen
);
2199 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2200 memcpy(skb_put(skb
, count
), data
, count
);
2206 /* Continuation fragments (no L2CAP header) */
2207 frag
= &skb_shinfo(skb
)->frag_list
;
2209 count
= min_t(unsigned int, conn
->mtu
, len
);
2211 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2215 memcpy(skb_put(*frag
, count
), data
, count
);
2220 frag
= &(*frag
)->next
;
2230 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2232 struct l2cap_conf_opt
*opt
= *ptr
;
2235 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2243 *val
= *((u8
*) opt
->val
);
2247 *val
= get_unaligned_le16(opt
->val
);
2251 *val
= get_unaligned_le32(opt
->val
);
2255 *val
= (unsigned long) opt
->val
;
2259 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2263 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2265 struct l2cap_conf_opt
*opt
= *ptr
;
2267 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2274 *((u8
*) opt
->val
) = val
;
2278 put_unaligned_le16(val
, opt
->val
);
2282 put_unaligned_le32(val
, opt
->val
);
2286 memcpy(opt
->val
, (void *) val
, len
);
2290 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2293 static void l2cap_add_opt_efs(void **ptr
, struct l2cap_chan
*chan
)
2295 struct l2cap_conf_efs efs
;
2297 switch (chan
->mode
) {
2298 case L2CAP_MODE_ERTM
:
2299 efs
.id
= chan
->local_id
;
2300 efs
.stype
= chan
->local_stype
;
2301 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2302 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2303 efs
.acc_lat
= cpu_to_le32(L2CAP_DEFAULT_ACC_LAT
);
2304 efs
.flush_to
= cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO
);
2307 case L2CAP_MODE_STREAMING
:
2309 efs
.stype
= L2CAP_SERV_BESTEFFORT
;
2310 efs
.msdu
= cpu_to_le16(chan
->local_msdu
);
2311 efs
.sdu_itime
= cpu_to_le32(chan
->local_sdu_itime
);
2320 l2cap_add_conf_opt(ptr
, L2CAP_CONF_EFS
, sizeof(efs
),
2321 (unsigned long) &efs
);
2324 static void l2cap_ack_timeout(struct work_struct
*work
)
2326 struct l2cap_chan
*chan
= container_of(work
, struct l2cap_chan
,
2329 BT_DBG("chan %p", chan
);
2331 l2cap_chan_lock(chan
);
2333 __l2cap_send_ack(chan
);
2335 l2cap_chan_unlock(chan
);
2337 l2cap_chan_put(chan
);
2340 static inline int l2cap_ertm_init(struct l2cap_chan
*chan
)
2344 chan
->next_tx_seq
= 0;
2345 chan
->expected_tx_seq
= 0;
2346 chan
->expected_ack_seq
= 0;
2347 chan
->unacked_frames
= 0;
2348 chan
->buffer_seq
= 0;
2349 chan
->num_acked
= 0;
2350 chan
->frames_sent
= 0;
2351 chan
->last_acked_seq
= 0;
2353 chan
->sdu_last_frag
= NULL
;
2356 skb_queue_head_init(&chan
->tx_q
);
2358 if (chan
->mode
!= L2CAP_MODE_ERTM
)
2361 chan
->rx_state
= L2CAP_RX_STATE_RECV
;
2362 chan
->tx_state
= L2CAP_TX_STATE_XMIT
;
2364 INIT_DELAYED_WORK(&chan
->retrans_timer
, l2cap_retrans_timeout
);
2365 INIT_DELAYED_WORK(&chan
->monitor_timer
, l2cap_monitor_timeout
);
2366 INIT_DELAYED_WORK(&chan
->ack_timer
, l2cap_ack_timeout
);
2368 skb_queue_head_init(&chan
->srej_q
);
2370 INIT_LIST_HEAD(&chan
->srej_l
);
2371 err
= l2cap_seq_list_init(&chan
->srej_list
, chan
->tx_win
);
2375 return l2cap_seq_list_init(&chan
->retrans_list
, chan
->remote_tx_win
);
2378 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2381 case L2CAP_MODE_STREAMING
:
2382 case L2CAP_MODE_ERTM
:
2383 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2387 return L2CAP_MODE_BASIC
;
2391 static inline bool __l2cap_ews_supported(struct l2cap_chan
*chan
)
2393 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_WINDOW
;
2396 static inline bool __l2cap_efs_supported(struct l2cap_chan
*chan
)
2398 return enable_hs
&& chan
->conn
->feat_mask
& L2CAP_FEAT_EXT_FLOW
;
2401 static inline void l2cap_txwin_setup(struct l2cap_chan
*chan
)
2403 if (chan
->tx_win
> L2CAP_DEFAULT_TX_WINDOW
&&
2404 __l2cap_ews_supported(chan
)) {
2405 /* use extended control field */
2406 set_bit(FLAG_EXT_CTRL
, &chan
->flags
);
2407 chan
->tx_win_max
= L2CAP_DEFAULT_EXT_WINDOW
;
2409 chan
->tx_win
= min_t(u16
, chan
->tx_win
,
2410 L2CAP_DEFAULT_TX_WINDOW
);
2411 chan
->tx_win_max
= L2CAP_DEFAULT_TX_WINDOW
;
2415 static int l2cap_build_conf_req(struct l2cap_chan
*chan
, void *data
)
2417 struct l2cap_conf_req
*req
= data
;
2418 struct l2cap_conf_rfc rfc
= { .mode
= chan
->mode
};
2419 void *ptr
= req
->data
;
2422 BT_DBG("chan %p", chan
);
2424 if (chan
->num_conf_req
|| chan
->num_conf_rsp
)
2427 switch (chan
->mode
) {
2428 case L2CAP_MODE_STREAMING
:
2429 case L2CAP_MODE_ERTM
:
2430 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
))
2433 if (__l2cap_efs_supported(chan
))
2434 set_bit(FLAG_EFS_ENABLE
, &chan
->flags
);
2438 chan
->mode
= l2cap_select_mode(rfc
.mode
, chan
->conn
->feat_mask
);
2443 if (chan
->imtu
!= L2CAP_DEFAULT_MTU
)
2444 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2446 switch (chan
->mode
) {
2447 case L2CAP_MODE_BASIC
:
2448 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_ERTM
) &&
2449 !(chan
->conn
->feat_mask
& L2CAP_FEAT_STREAMING
))
2452 rfc
.mode
= L2CAP_MODE_BASIC
;
2454 rfc
.max_transmit
= 0;
2455 rfc
.retrans_timeout
= 0;
2456 rfc
.monitor_timeout
= 0;
2457 rfc
.max_pdu_size
= 0;
2459 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2460 (unsigned long) &rfc
);
2463 case L2CAP_MODE_ERTM
:
2464 rfc
.mode
= L2CAP_MODE_ERTM
;
2465 rfc
.max_transmit
= chan
->max_tx
;
2466 rfc
.retrans_timeout
= 0;
2467 rfc
.monitor_timeout
= 0;
2469 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2470 L2CAP_EXT_HDR_SIZE
-
2473 rfc
.max_pdu_size
= cpu_to_le16(size
);
2475 l2cap_txwin_setup(chan
);
2477 rfc
.txwin_size
= min_t(u16
, chan
->tx_win
,
2478 L2CAP_DEFAULT_TX_WINDOW
);
2480 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2481 (unsigned long) &rfc
);
2483 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2484 l2cap_add_opt_efs(&ptr
, chan
);
2486 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2489 if (chan
->fcs
== L2CAP_FCS_NONE
||
2490 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2491 chan
->fcs
= L2CAP_FCS_NONE
;
2492 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2495 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
2496 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2500 case L2CAP_MODE_STREAMING
:
2501 rfc
.mode
= L2CAP_MODE_STREAMING
;
2503 rfc
.max_transmit
= 0;
2504 rfc
.retrans_timeout
= 0;
2505 rfc
.monitor_timeout
= 0;
2507 size
= min_t(u16
, L2CAP_DEFAULT_MAX_PDU_SIZE
, chan
->conn
->mtu
-
2508 L2CAP_EXT_HDR_SIZE
-
2511 rfc
.max_pdu_size
= cpu_to_le16(size
);
2513 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2514 (unsigned long) &rfc
);
2516 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
))
2517 l2cap_add_opt_efs(&ptr
, chan
);
2519 if (!(chan
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2522 if (chan
->fcs
== L2CAP_FCS_NONE
||
2523 test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
)) {
2524 chan
->fcs
= L2CAP_FCS_NONE
;
2525 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, chan
->fcs
);
2530 req
->dcid
= cpu_to_le16(chan
->dcid
);
2531 req
->flags
= cpu_to_le16(0);
/* Parse the remote's Configuration Request (stored in chan->conf_req)
 * and build our Configuration Response into @data.
 *
 * First pass decodes every option (MTU, flush timeout, RFC, FCS, EFS,
 * EWS), rejecting unknown non-hint options.  Then the requested mode is
 * reconciled with ours, output options are generated and the result
 * code (SUCCESS / UNACCEPT / PENDING / UNKNOWN) is filled in.  Returns
 * the response length or -ECONNREFUSED when negotiation must fail.
 *
 * NOTE(review): reconstructed from fragmented source with many
 * structural lines missing; the remote_efs tracking, the omtu
 * assignment and the hint handling in the default case are inferred —
 * verify carefully against upstream before relying on this.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint  = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			if (!enable_hs)
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
					chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != L2CAP_SERV_NOTRAFIC &&
					efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
							sizeof(efs),
							(unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND,
							&chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			rfc.retrans_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				__constant_cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
						le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
						le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
					sizeof(efs), (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
						chan->conn->mtu -
						L2CAP_EXT_HDR_SIZE -
						L2CAP_SDULEN_SIZE -
						L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid   = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags  = cpu_to_le16(0x0000);

	return ptr - data;
}
2756 static int l2cap_parse_conf_rsp(struct l2cap_chan
*chan
, void *rsp
, int len
, void *data
, u16
*result
)
2758 struct l2cap_conf_req
*req
= data
;
2759 void *ptr
= req
->data
;
2762 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2763 struct l2cap_conf_efs efs
;
2765 BT_DBG("chan %p, rsp %p, len %d, req %p", chan
, rsp
, len
, data
);
2767 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2768 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2771 case L2CAP_CONF_MTU
:
2772 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2773 *result
= L2CAP_CONF_UNACCEPT
;
2774 chan
->imtu
= L2CAP_DEFAULT_MIN_MTU
;
2777 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, chan
->imtu
);
2780 case L2CAP_CONF_FLUSH_TO
:
2781 chan
->flush_to
= val
;
2782 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2786 case L2CAP_CONF_RFC
:
2787 if (olen
== sizeof(rfc
))
2788 memcpy(&rfc
, (void *)val
, olen
);
2790 if (test_bit(CONF_STATE2_DEVICE
, &chan
->conf_state
) &&
2791 rfc
.mode
!= chan
->mode
)
2792 return -ECONNREFUSED
;
2796 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2797 sizeof(rfc
), (unsigned long) &rfc
);
2800 case L2CAP_CONF_EWS
:
2801 chan
->tx_win
= min_t(u16
, val
,
2802 L2CAP_DEFAULT_EXT_WINDOW
);
2803 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EWS
, 2,
2807 case L2CAP_CONF_EFS
:
2808 if (olen
== sizeof(efs
))
2809 memcpy(&efs
, (void *)val
, olen
);
2811 if (chan
->local_stype
!= L2CAP_SERV_NOTRAFIC
&&
2812 efs
.stype
!= L2CAP_SERV_NOTRAFIC
&&
2813 efs
.stype
!= chan
->local_stype
)
2814 return -ECONNREFUSED
;
2816 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_EFS
,
2817 sizeof(efs
), (unsigned long) &efs
);
2822 if (chan
->mode
== L2CAP_MODE_BASIC
&& chan
->mode
!= rfc
.mode
)
2823 return -ECONNREFUSED
;
2825 chan
->mode
= rfc
.mode
;
2827 if (*result
== L2CAP_CONF_SUCCESS
|| *result
== L2CAP_CONF_PENDING
) {
2829 case L2CAP_MODE_ERTM
:
2830 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2831 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2832 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2834 if (test_bit(FLAG_EFS_ENABLE
, &chan
->flags
)) {
2835 chan
->local_msdu
= le16_to_cpu(efs
.msdu
);
2836 chan
->local_sdu_itime
=
2837 le32_to_cpu(efs
.sdu_itime
);
2838 chan
->local_acc_lat
= le32_to_cpu(efs
.acc_lat
);
2839 chan
->local_flush_to
=
2840 le32_to_cpu(efs
.flush_to
);
2844 case L2CAP_MODE_STREAMING
:
2845 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2849 req
->dcid
= cpu_to_le16(chan
->dcid
);
2850 req
->flags
= cpu_to_le16(0x0000);
2855 static int l2cap_build_conf_rsp(struct l2cap_chan
*chan
, void *data
, u16 result
, u16 flags
)
2857 struct l2cap_conf_rsp
*rsp
= data
;
2858 void *ptr
= rsp
->data
;
2860 BT_DBG("chan %p", chan
);
2862 rsp
->scid
= cpu_to_le16(chan
->dcid
);
2863 rsp
->result
= cpu_to_le16(result
);
2864 rsp
->flags
= cpu_to_le16(flags
);
2869 void __l2cap_connect_rsp_defer(struct l2cap_chan
*chan
)
2871 struct l2cap_conn_rsp rsp
;
2872 struct l2cap_conn
*conn
= chan
->conn
;
2875 rsp
.scid
= cpu_to_le16(chan
->dcid
);
2876 rsp
.dcid
= cpu_to_le16(chan
->scid
);
2877 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
2878 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
2879 l2cap_send_cmd(conn
, chan
->ident
,
2880 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2882 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
2885 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
2886 l2cap_build_conf_req(chan
, buf
), buf
);
2887 chan
->num_conf_req
++;
2890 static void l2cap_conf_rfc_get(struct l2cap_chan
*chan
, void *rsp
, int len
)
2894 struct l2cap_conf_rfc rfc
;
2896 BT_DBG("chan %p, rsp %p, len %d", chan
, rsp
, len
);
2898 if ((chan
->mode
!= L2CAP_MODE_ERTM
) && (chan
->mode
!= L2CAP_MODE_STREAMING
))
2901 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2902 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2905 case L2CAP_CONF_RFC
:
2906 if (olen
== sizeof(rfc
))
2907 memcpy(&rfc
, (void *)val
, olen
);
2912 /* Use sane default values in case a misbehaving remote device
2913 * did not send an RFC option.
2915 rfc
.mode
= chan
->mode
;
2916 rfc
.retrans_timeout
= cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO
);
2917 rfc
.monitor_timeout
= cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO
);
2918 rfc
.max_pdu_size
= cpu_to_le16(chan
->imtu
);
2920 BT_ERR("Expected RFC option was not found, using defaults");
2924 case L2CAP_MODE_ERTM
:
2925 chan
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2926 chan
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2927 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2929 case L2CAP_MODE_STREAMING
:
2930 chan
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2934 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2936 struct l2cap_cmd_rej_unk
*rej
= (struct l2cap_cmd_rej_unk
*) data
;
2938 if (rej
->reason
!= L2CAP_REJ_NOT_UNDERSTOOD
)
2941 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2942 cmd
->ident
== conn
->info_ident
) {
2943 cancel_delayed_work(&conn
->info_timer
);
2945 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2946 conn
->info_ident
= 0;
2948 l2cap_conn_start(conn
);
2954 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2956 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2957 struct l2cap_conn_rsp rsp
;
2958 struct l2cap_chan
*chan
= NULL
, *pchan
;
2959 struct sock
*parent
, *sk
= NULL
;
2960 int result
, status
= L2CAP_CS_NO_INFO
;
2962 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2963 __le16 psm
= req
->psm
;
2965 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm
), scid
);
2967 /* Check if we have socket listening on psm */
2968 pchan
= l2cap_global_chan_by_psm(BT_LISTEN
, psm
, conn
->src
, conn
->dst
);
2970 result
= L2CAP_CR_BAD_PSM
;
2976 mutex_lock(&conn
->chan_lock
);
2979 /* Check if the ACL is secure enough (if not SDP) */
2980 if (psm
!= cpu_to_le16(0x0001) &&
2981 !hci_conn_check_link_mode(conn
->hcon
)) {
2982 conn
->disc_reason
= HCI_ERROR_AUTH_FAILURE
;
2983 result
= L2CAP_CR_SEC_BLOCK
;
2987 result
= L2CAP_CR_NO_MEM
;
2989 /* Check for backlog size */
2990 if (sk_acceptq_is_full(parent
)) {
2991 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2995 chan
= pchan
->ops
->new_connection(pchan
->data
);
3001 /* Check if we already have channel with that dcid */
3002 if (__l2cap_get_chan_by_dcid(conn
, scid
)) {
3003 sock_set_flag(sk
, SOCK_ZAPPED
);
3004 chan
->ops
->close(chan
->data
);
3008 hci_conn_hold(conn
->hcon
);
3010 bacpy(&bt_sk(sk
)->src
, conn
->src
);
3011 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
3015 bt_accept_enqueue(parent
, sk
);
3017 __l2cap_chan_add(conn
, chan
);
3021 __set_chan_timer(chan
, sk
->sk_sndtimeo
);
3023 chan
->ident
= cmd
->ident
;
3025 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
3026 if (l2cap_chan_check_security(chan
)) {
3027 if (bt_sk(sk
)->defer_setup
) {
3028 __l2cap_state_change(chan
, BT_CONNECT2
);
3029 result
= L2CAP_CR_PEND
;
3030 status
= L2CAP_CS_AUTHOR_PEND
;
3031 parent
->sk_data_ready(parent
, 0);
3033 __l2cap_state_change(chan
, BT_CONFIG
);
3034 result
= L2CAP_CR_SUCCESS
;
3035 status
= L2CAP_CS_NO_INFO
;
3038 __l2cap_state_change(chan
, BT_CONNECT2
);
3039 result
= L2CAP_CR_PEND
;
3040 status
= L2CAP_CS_AUTHEN_PEND
;
3043 __l2cap_state_change(chan
, BT_CONNECT2
);
3044 result
= L2CAP_CR_PEND
;
3045 status
= L2CAP_CS_NO_INFO
;
3049 release_sock(parent
);
3050 mutex_unlock(&conn
->chan_lock
);
3053 rsp
.scid
= cpu_to_le16(scid
);
3054 rsp
.dcid
= cpu_to_le16(dcid
);
3055 rsp
.result
= cpu_to_le16(result
);
3056 rsp
.status
= cpu_to_le16(status
);
3057 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
3059 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
3060 struct l2cap_info_req info
;
3061 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3063 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
3064 conn
->info_ident
= l2cap_get_ident(conn
);
3066 schedule_delayed_work(&conn
->info_timer
, L2CAP_INFO_TIMEOUT
);
3068 l2cap_send_cmd(conn
, conn
->info_ident
,
3069 L2CAP_INFO_REQ
, sizeof(info
), &info
);
3072 if (chan
&& !test_bit(CONF_REQ_SENT
, &chan
->conf_state
) &&
3073 result
== L2CAP_CR_SUCCESS
) {
3075 set_bit(CONF_REQ_SENT
, &chan
->conf_state
);
3076 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3077 l2cap_build_conf_req(chan
, buf
), buf
);
3078 chan
->num_conf_req
++;
3084 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3086 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
3087 u16 scid
, dcid
, result
, status
;
3088 struct l2cap_chan
*chan
;
3092 scid
= __le16_to_cpu(rsp
->scid
);
3093 dcid
= __le16_to_cpu(rsp
->dcid
);
3094 result
= __le16_to_cpu(rsp
->result
);
3095 status
= __le16_to_cpu(rsp
->status
);
3097 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
3098 dcid
, scid
, result
, status
);
3100 mutex_lock(&conn
->chan_lock
);
3103 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3109 chan
= __l2cap_get_chan_by_ident(conn
, cmd
->ident
);
3118 l2cap_chan_lock(chan
);
3121 case L2CAP_CR_SUCCESS
:
3122 l2cap_state_change(chan
, BT_CONFIG
);
3125 clear_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3127 if (test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
))
3130 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3131 l2cap_build_conf_req(chan
, req
), req
);
3132 chan
->num_conf_req
++;
3136 set_bit(CONF_CONNECT_PEND
, &chan
->conf_state
);
3140 l2cap_chan_del(chan
, ECONNREFUSED
);
3144 l2cap_chan_unlock(chan
);
3147 mutex_unlock(&conn
->chan_lock
);
3152 static inline void set_default_fcs(struct l2cap_chan
*chan
)
3154 /* FCS is enabled only in ERTM or streaming mode, if one or both
3157 if (chan
->mode
!= L2CAP_MODE_ERTM
&& chan
->mode
!= L2CAP_MODE_STREAMING
)
3158 chan
->fcs
= L2CAP_FCS_NONE
;
3159 else if (!test_bit(CONF_NO_FCS_RECV
, &chan
->conf_state
))
3160 chan
->fcs
= L2CAP_FCS_CRC16
;
3163 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3165 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3168 struct l2cap_chan
*chan
;
3171 dcid
= __le16_to_cpu(req
->dcid
);
3172 flags
= __le16_to_cpu(req
->flags
);
3174 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3176 chan
= l2cap_get_chan_by_scid(conn
, dcid
);
3180 l2cap_chan_lock(chan
);
3182 if (chan
->state
!= BT_CONFIG
&& chan
->state
!= BT_CONNECT2
) {
3183 struct l2cap_cmd_rej_cid rej
;
3185 rej
.reason
= cpu_to_le16(L2CAP_REJ_INVALID_CID
);
3186 rej
.scid
= cpu_to_le16(chan
->scid
);
3187 rej
.dcid
= cpu_to_le16(chan
->dcid
);
3189 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3194 /* Reject if config buffer is too small. */
3195 len
= cmd_len
- sizeof(*req
);
3196 if (len
< 0 || chan
->conf_len
+ len
> sizeof(chan
->conf_req
)) {
3197 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3198 l2cap_build_conf_rsp(chan
, rsp
,
3199 L2CAP_CONF_REJECT
, flags
), rsp
);
3204 memcpy(chan
->conf_req
+ chan
->conf_len
, req
->data
, len
);
3205 chan
->conf_len
+= len
;
3207 if (flags
& 0x0001) {
3208 /* Incomplete config. Send empty response. */
3209 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3210 l2cap_build_conf_rsp(chan
, rsp
,
3211 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
3215 /* Complete config. */
3216 len
= l2cap_parse_conf_req(chan
, rsp
);
3218 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3222 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3223 chan
->num_conf_rsp
++;
3225 /* Reset config buffer. */
3228 if (!test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
))
3231 if (test_bit(CONF_INPUT_DONE
, &chan
->conf_state
)) {
3232 set_default_fcs(chan
);
3234 l2cap_state_change(chan
, BT_CONNECTED
);
3236 if (chan
->mode
== L2CAP_MODE_ERTM
||
3237 chan
->mode
== L2CAP_MODE_STREAMING
)
3238 err
= l2cap_ertm_init(chan
);
3241 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3243 l2cap_chan_ready(chan
);
3248 if (!test_and_set_bit(CONF_REQ_SENT
, &chan
->conf_state
)) {
3250 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3251 l2cap_build_conf_req(chan
, buf
), buf
);
3252 chan
->num_conf_req
++;
3255 /* Got Conf Rsp PENDING from remote side and asume we sent
3256 Conf Rsp PENDING in the code above */
3257 if (test_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
) &&
3258 test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3260 /* check compatibility */
3262 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3263 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3265 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3266 l2cap_build_conf_rsp(chan
, rsp
,
3267 L2CAP_CONF_SUCCESS
, 0x0000), rsp
);
3271 l2cap_chan_unlock(chan
);
3275 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3277 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3278 u16 scid
, flags
, result
;
3279 struct l2cap_chan
*chan
;
3280 int len
= le16_to_cpu(cmd
->len
) - sizeof(*rsp
);
3283 scid
= __le16_to_cpu(rsp
->scid
);
3284 flags
= __le16_to_cpu(rsp
->flags
);
3285 result
= __le16_to_cpu(rsp
->result
);
3287 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid
, flags
,
3290 chan
= l2cap_get_chan_by_scid(conn
, scid
);
3294 l2cap_chan_lock(chan
);
3297 case L2CAP_CONF_SUCCESS
:
3298 l2cap_conf_rfc_get(chan
, rsp
->data
, len
);
3299 clear_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3302 case L2CAP_CONF_PENDING
:
3303 set_bit(CONF_REM_CONF_PEND
, &chan
->conf_state
);
3305 if (test_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
)) {
3308 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3311 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3315 /* check compatibility */
3317 clear_bit(CONF_LOC_CONF_PEND
, &chan
->conf_state
);
3318 set_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
);
3320 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3321 l2cap_build_conf_rsp(chan
, buf
,
3322 L2CAP_CONF_SUCCESS
, 0x0000), buf
);
3326 case L2CAP_CONF_UNACCEPT
:
3327 if (chan
->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3330 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3331 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3335 /* throw out any old stored conf requests */
3336 result
= L2CAP_CONF_SUCCESS
;
3337 len
= l2cap_parse_conf_rsp(chan
, rsp
->data
, len
,
3340 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3344 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3345 L2CAP_CONF_REQ
, len
, req
);
3346 chan
->num_conf_req
++;
3347 if (result
!= L2CAP_CONF_SUCCESS
)
3353 l2cap_chan_set_err(chan
, ECONNRESET
);
3355 __set_chan_timer(chan
, L2CAP_DISC_REJ_TIMEOUT
);
3356 l2cap_send_disconn_req(conn
, chan
, ECONNRESET
);
3363 set_bit(CONF_INPUT_DONE
, &chan
->conf_state
);
3365 if (test_bit(CONF_OUTPUT_DONE
, &chan
->conf_state
)) {
3366 set_default_fcs(chan
);
3368 l2cap_state_change(chan
, BT_CONNECTED
);
3369 if (chan
->mode
== L2CAP_MODE_ERTM
||
3370 chan
->mode
== L2CAP_MODE_STREAMING
)
3371 err
= l2cap_ertm_init(chan
);
3374 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
3376 l2cap_chan_ready(chan
);
3380 l2cap_chan_unlock(chan
);
3384 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3386 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3387 struct l2cap_disconn_rsp rsp
;
3389 struct l2cap_chan
*chan
;
3392 scid
= __le16_to_cpu(req
->scid
);
3393 dcid
= __le16_to_cpu(req
->dcid
);
3395 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3397 mutex_lock(&conn
->chan_lock
);
3399 chan
= __l2cap_get_chan_by_scid(conn
, dcid
);
3401 mutex_unlock(&conn
->chan_lock
);
3405 l2cap_chan_lock(chan
);
3409 rsp
.dcid
= cpu_to_le16(chan
->scid
);
3410 rsp
.scid
= cpu_to_le16(chan
->dcid
);
3411 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3414 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3417 l2cap_chan_hold(chan
);
3418 l2cap_chan_del(chan
, ECONNRESET
);
3420 l2cap_chan_unlock(chan
);
3422 chan
->ops
->close(chan
->data
);
3423 l2cap_chan_put(chan
);
3425 mutex_unlock(&conn
->chan_lock
);
3430 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3432 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3434 struct l2cap_chan
*chan
;
3436 scid
= __le16_to_cpu(rsp
->scid
);
3437 dcid
= __le16_to_cpu(rsp
->dcid
);
3439 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3441 mutex_lock(&conn
->chan_lock
);
3443 chan
= __l2cap_get_chan_by_scid(conn
, scid
);
3445 mutex_unlock(&conn
->chan_lock
);
3449 l2cap_chan_lock(chan
);
3451 l2cap_chan_hold(chan
);
3452 l2cap_chan_del(chan
, 0);
3454 l2cap_chan_unlock(chan
);
3456 chan
->ops
->close(chan
->data
);
3457 l2cap_chan_put(chan
);
3459 mutex_unlock(&conn
->chan_lock
);
3464 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3466 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3469 type
= __le16_to_cpu(req
->type
);
3471 BT_DBG("type 0x%4.4x", type
);
3473 if (type
== L2CAP_IT_FEAT_MASK
) {
3475 u32 feat_mask
= l2cap_feat_mask
;
3476 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3477 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3478 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3480 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3483 feat_mask
|= L2CAP_FEAT_EXT_FLOW
3484 | L2CAP_FEAT_EXT_WINDOW
;
3486 put_unaligned_le32(feat_mask
, rsp
->data
);
3487 l2cap_send_cmd(conn
, cmd
->ident
,
3488 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3489 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3491 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3494 l2cap_fixed_chan
[0] |= L2CAP_FC_A2MP
;
3496 l2cap_fixed_chan
[0] &= ~L2CAP_FC_A2MP
;
3498 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3499 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3500 memcpy(rsp
->data
, l2cap_fixed_chan
, sizeof(l2cap_fixed_chan
));
3501 l2cap_send_cmd(conn
, cmd
->ident
,
3502 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3504 struct l2cap_info_rsp rsp
;
3505 rsp
.type
= cpu_to_le16(type
);
3506 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3507 l2cap_send_cmd(conn
, cmd
->ident
,
3508 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3514 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3516 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3519 type
= __le16_to_cpu(rsp
->type
);
3520 result
= __le16_to_cpu(rsp
->result
);
3522 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3524 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3525 if (cmd
->ident
!= conn
->info_ident
||
3526 conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
)
3529 cancel_delayed_work(&conn
->info_timer
);
3531 if (result
!= L2CAP_IR_SUCCESS
) {
3532 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3533 conn
->info_ident
= 0;
3535 l2cap_conn_start(conn
);
3541 case L2CAP_IT_FEAT_MASK
:
3542 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3544 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3545 struct l2cap_info_req req
;
3546 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3548 conn
->info_ident
= l2cap_get_ident(conn
);
3550 l2cap_send_cmd(conn
, conn
->info_ident
,
3551 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3553 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3554 conn
->info_ident
= 0;
3556 l2cap_conn_start(conn
);
3560 case L2CAP_IT_FIXED_CHAN
:
3561 conn
->fixed_chan_mask
= rsp
->data
[0];
3562 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3563 conn
->info_ident
= 0;
3565 l2cap_conn_start(conn
);
3572 static inline int l2cap_create_channel_req(struct l2cap_conn
*conn
,
3573 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
,
3576 struct l2cap_create_chan_req
*req
= data
;
3577 struct l2cap_create_chan_rsp rsp
;
3580 if (cmd_len
!= sizeof(*req
))
3586 psm
= le16_to_cpu(req
->psm
);
3587 scid
= le16_to_cpu(req
->scid
);
3589 BT_DBG("psm %d, scid %d, amp_id %d", psm
, scid
, req
->amp_id
);
3591 /* Placeholder: Always reject */
3593 rsp
.scid
= cpu_to_le16(scid
);
3594 rsp
.result
= __constant_cpu_to_le16(L2CAP_CR_NO_MEM
);
3595 rsp
.status
= __constant_cpu_to_le16(L2CAP_CS_NO_INFO
);
3597 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CREATE_CHAN_RSP
,
/* A Create Channel Response (AMP) carries the same payload layout as a
 * Connect Response, so delegate to the common handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, void *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
3611 static void l2cap_send_move_chan_rsp(struct l2cap_conn
*conn
, u8 ident
,
3612 u16 icid
, u16 result
)
3614 struct l2cap_move_chan_rsp rsp
;
3616 BT_DBG("icid %d, result %d", icid
, result
);
3618 rsp
.icid
= cpu_to_le16(icid
);
3619 rsp
.result
= cpu_to_le16(result
);
3621 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_RSP
, sizeof(rsp
), &rsp
);
3624 static void l2cap_send_move_chan_cfm(struct l2cap_conn
*conn
,
3625 struct l2cap_chan
*chan
, u16 icid
, u16 result
)
3627 struct l2cap_move_chan_cfm cfm
;
3630 BT_DBG("icid %d, result %d", icid
, result
);
3632 ident
= l2cap_get_ident(conn
);
3634 chan
->ident
= ident
;
3636 cfm
.icid
= cpu_to_le16(icid
);
3637 cfm
.result
= cpu_to_le16(result
);
3639 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM
, sizeof(cfm
), &cfm
);
3642 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn
*conn
, u8 ident
,
3645 struct l2cap_move_chan_cfm_rsp rsp
;
3647 BT_DBG("icid %d", icid
);
3649 rsp
.icid
= cpu_to_le16(icid
);
3650 l2cap_send_cmd(conn
, ident
, L2CAP_MOVE_CHAN_CFM_RSP
, sizeof(rsp
), &rsp
);
3653 static inline int l2cap_move_channel_req(struct l2cap_conn
*conn
,
3654 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3656 struct l2cap_move_chan_req
*req
= data
;
3658 u16 result
= L2CAP_MR_NOT_ALLOWED
;
3660 if (cmd_len
!= sizeof(*req
))
3663 icid
= le16_to_cpu(req
->icid
);
3665 BT_DBG("icid %d, dest_amp_id %d", icid
, req
->dest_amp_id
);
3670 /* Placeholder: Always refuse */
3671 l2cap_send_move_chan_rsp(conn
, cmd
->ident
, icid
, result
);
3676 static inline int l2cap_move_channel_rsp(struct l2cap_conn
*conn
,
3677 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3679 struct l2cap_move_chan_rsp
*rsp
= data
;
3682 if (cmd_len
!= sizeof(*rsp
))
3685 icid
= le16_to_cpu(rsp
->icid
);
3686 result
= le16_to_cpu(rsp
->result
);
3688 BT_DBG("icid %d, result %d", icid
, result
);
3690 /* Placeholder: Always unconfirmed */
3691 l2cap_send_move_chan_cfm(conn
, NULL
, icid
, L2CAP_MC_UNCONFIRMED
);
3696 static inline int l2cap_move_channel_confirm(struct l2cap_conn
*conn
,
3697 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3699 struct l2cap_move_chan_cfm
*cfm
= data
;
3702 if (cmd_len
!= sizeof(*cfm
))
3705 icid
= le16_to_cpu(cfm
->icid
);
3706 result
= le16_to_cpu(cfm
->result
);
3708 BT_DBG("icid %d, result %d", icid
, result
);
3710 l2cap_send_move_chan_cfm_rsp(conn
, cmd
->ident
, icid
);
3715 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn
*conn
,
3716 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, void *data
)
3718 struct l2cap_move_chan_cfm_rsp
*rsp
= data
;
3721 if (cmd_len
!= sizeof(*rsp
))
3724 icid
= le16_to_cpu(rsp
->icid
);
3726 BT_DBG("icid %d", icid
);
3731 static inline int l2cap_check_conn_param(u16 min
, u16 max
, u16 latency
,
3736 if (min
> max
|| min
< 6 || max
> 3200)
3739 if (to_multiplier
< 10 || to_multiplier
> 3200)
3742 if (max
>= to_multiplier
* 8)
3745 max_latency
= (to_multiplier
* 8 / max
) - 1;
3746 if (latency
> 499 || latency
> max_latency
)
3752 static inline int l2cap_conn_param_update_req(struct l2cap_conn
*conn
,
3753 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3755 struct hci_conn
*hcon
= conn
->hcon
;
3756 struct l2cap_conn_param_update_req
*req
;
3757 struct l2cap_conn_param_update_rsp rsp
;
3758 u16 min
, max
, latency
, to_multiplier
, cmd_len
;
3761 if (!(hcon
->link_mode
& HCI_LM_MASTER
))
3764 cmd_len
= __le16_to_cpu(cmd
->len
);
3765 if (cmd_len
!= sizeof(struct l2cap_conn_param_update_req
))
3768 req
= (struct l2cap_conn_param_update_req
*) data
;
3769 min
= __le16_to_cpu(req
->min
);
3770 max
= __le16_to_cpu(req
->max
);
3771 latency
= __le16_to_cpu(req
->latency
);
3772 to_multiplier
= __le16_to_cpu(req
->to_multiplier
);
3774 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3775 min
, max
, latency
, to_multiplier
);
3777 memset(&rsp
, 0, sizeof(rsp
));
3779 err
= l2cap_check_conn_param(min
, max
, latency
, to_multiplier
);
3781 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_REJECTED
);
3783 rsp
.result
= cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED
);
3785 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_PARAM_UPDATE_RSP
,
3789 hci_le_conn_update(hcon
, min
, max
, latency
, to_multiplier
);
3794 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn
*conn
,
3795 struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3799 switch (cmd
->code
) {
3800 case L2CAP_COMMAND_REJ
:
3801 l2cap_command_rej(conn
, cmd
, data
);
3804 case L2CAP_CONN_REQ
:
3805 err
= l2cap_connect_req(conn
, cmd
, data
);
3808 case L2CAP_CONN_RSP
:
3809 err
= l2cap_connect_rsp(conn
, cmd
, data
);
3812 case L2CAP_CONF_REQ
:
3813 err
= l2cap_config_req(conn
, cmd
, cmd_len
, data
);
3816 case L2CAP_CONF_RSP
:
3817 err
= l2cap_config_rsp(conn
, cmd
, data
);
3820 case L2CAP_DISCONN_REQ
:
3821 err
= l2cap_disconnect_req(conn
, cmd
, data
);
3824 case L2CAP_DISCONN_RSP
:
3825 err
= l2cap_disconnect_rsp(conn
, cmd
, data
);
3828 case L2CAP_ECHO_REQ
:
3829 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3832 case L2CAP_ECHO_RSP
:
3835 case L2CAP_INFO_REQ
:
3836 err
= l2cap_information_req(conn
, cmd
, data
);
3839 case L2CAP_INFO_RSP
:
3840 err
= l2cap_information_rsp(conn
, cmd
, data
);
3843 case L2CAP_CREATE_CHAN_REQ
:
3844 err
= l2cap_create_channel_req(conn
, cmd
, cmd_len
, data
);
3847 case L2CAP_CREATE_CHAN_RSP
:
3848 err
= l2cap_create_channel_rsp(conn
, cmd
, data
);
3851 case L2CAP_MOVE_CHAN_REQ
:
3852 err
= l2cap_move_channel_req(conn
, cmd
, cmd_len
, data
);
3855 case L2CAP_MOVE_CHAN_RSP
:
3856 err
= l2cap_move_channel_rsp(conn
, cmd
, cmd_len
, data
);
3859 case L2CAP_MOVE_CHAN_CFM
:
3860 err
= l2cap_move_channel_confirm(conn
, cmd
, cmd_len
, data
);
3863 case L2CAP_MOVE_CHAN_CFM_RSP
:
3864 err
= l2cap_move_channel_confirm_rsp(conn
, cmd
, cmd_len
, data
);
3868 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd
->code
);
3876 static inline int l2cap_le_sig_cmd(struct l2cap_conn
*conn
,
3877 struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3879 switch (cmd
->code
) {
3880 case L2CAP_COMMAND_REJ
:
3883 case L2CAP_CONN_PARAM_UPDATE_REQ
:
3884 return l2cap_conn_param_update_req(conn
, cmd
, data
);
3886 case L2CAP_CONN_PARAM_UPDATE_RSP
:
3890 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd
->code
);
3895 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
,
3896 struct sk_buff
*skb
)
3898 u8
*data
= skb
->data
;
3900 struct l2cap_cmd_hdr cmd
;
3903 l2cap_raw_recv(conn
, skb
);
3905 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3907 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3908 data
+= L2CAP_CMD_HDR_SIZE
;
3909 len
-= L2CAP_CMD_HDR_SIZE
;
3911 cmd_len
= le16_to_cpu(cmd
.len
);
3913 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3915 if (cmd_len
> len
|| !cmd
.ident
) {
3916 BT_DBG("corrupted command");
3920 if (conn
->hcon
->type
== LE_LINK
)
3921 err
= l2cap_le_sig_cmd(conn
, &cmd
, data
);
3923 err
= l2cap_bredr_sig_cmd(conn
, &cmd
, cmd_len
, data
);
3926 struct l2cap_cmd_rej_unk rej
;
3928 BT_ERR("Wrong link type (%d)", err
);
3930 /* FIXME: Map err to a valid reason */
3931 rej
.reason
= cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD
);
3932 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3942 static int l2cap_check_fcs(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
3944 u16 our_fcs
, rcv_fcs
;
3947 if (test_bit(FLAG_EXT_CTRL
, &chan
->flags
))
3948 hdr_size
= L2CAP_EXT_HDR_SIZE
;
3950 hdr_size
= L2CAP_ENH_HDR_SIZE
;
3952 if (chan
->fcs
== L2CAP_FCS_CRC16
) {
3953 skb_trim(skb
, skb
->len
- L2CAP_FCS_SIZE
);
3954 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3955 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3957 if (our_fcs
!= rcv_fcs
)
3963 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan
*chan
)
3967 chan
->frames_sent
= 0;
3969 control
|= __set_reqseq(chan
, chan
->buffer_seq
);
3971 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
3972 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RNR
);
3973 l2cap_send_sframe(chan
, control
);
3974 set_bit(CONN_RNR_SENT
, &chan
->conn_state
);
3977 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
))
3978 l2cap_retransmit_frames(chan
);
3980 l2cap_ertm_send(chan
);
3982 if (!test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
) &&
3983 chan
->frames_sent
== 0) {
3984 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
3985 l2cap_send_sframe(chan
, control
);
3989 static int l2cap_add_to_srej_queue(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u16 tx_seq
, u8 sar
)
3991 struct sk_buff
*next_skb
;
3992 int tx_seq_offset
, next_tx_seq_offset
;
3994 bt_cb(skb
)->control
.txseq
= tx_seq
;
3995 bt_cb(skb
)->control
.sar
= sar
;
3997 next_skb
= skb_peek(&chan
->srej_q
);
3999 tx_seq_offset
= __seq_offset(chan
, tx_seq
, chan
->buffer_seq
);
4002 if (bt_cb(next_skb
)->control
.txseq
== tx_seq
)
4005 next_tx_seq_offset
= __seq_offset(chan
,
4006 bt_cb(next_skb
)->control
.txseq
, chan
->buffer_seq
);
4008 if (next_tx_seq_offset
> tx_seq_offset
) {
4009 __skb_queue_before(&chan
->srej_q
, next_skb
, skb
);
4013 if (skb_queue_is_last(&chan
->srej_q
, next_skb
))
4016 next_skb
= skb_queue_next(&chan
->srej_q
, next_skb
);
4019 __skb_queue_tail(&chan
->srej_q
, skb
);
4024 static void append_skb_frag(struct sk_buff
*skb
,
4025 struct sk_buff
*new_frag
, struct sk_buff
**last_frag
)
4027 /* skb->len reflects data in skb as well as all fragments
4028 * skb->data_len reflects only data in fragments
4030 if (!skb_has_frag_list(skb
))
4031 skb_shinfo(skb
)->frag_list
= new_frag
;
4033 new_frag
->next
= NULL
;
4035 (*last_frag
)->next
= new_frag
;
4036 *last_frag
= new_frag
;
4038 skb
->len
+= new_frag
->len
;
4039 skb
->data_len
+= new_frag
->len
;
4040 skb
->truesize
+= new_frag
->truesize
;
4043 static int l2cap_reassemble_sdu(struct l2cap_chan
*chan
, struct sk_buff
*skb
, u32 control
)
4047 switch (__get_ctrl_sar(chan
, control
)) {
4048 case L2CAP_SAR_UNSEGMENTED
:
4052 err
= chan
->ops
->recv(chan
->data
, skb
);
4055 case L2CAP_SAR_START
:
4059 chan
->sdu_len
= get_unaligned_le16(skb
->data
);
4060 skb_pull(skb
, L2CAP_SDULEN_SIZE
);
4062 if (chan
->sdu_len
> chan
->imtu
) {
4067 if (skb
->len
>= chan
->sdu_len
)
4071 chan
->sdu_last_frag
= skb
;
4077 case L2CAP_SAR_CONTINUE
:
4081 append_skb_frag(chan
->sdu
, skb
,
4082 &chan
->sdu_last_frag
);
4085 if (chan
->sdu
->len
>= chan
->sdu_len
)
4095 append_skb_frag(chan
->sdu
, skb
,
4096 &chan
->sdu_last_frag
);
4099 if (chan
->sdu
->len
!= chan
->sdu_len
)
4102 err
= chan
->ops
->recv(chan
->data
, chan
->sdu
);
4105 /* Reassembly complete */
4107 chan
->sdu_last_frag
= NULL
;
4115 kfree_skb(chan
->sdu
);
4117 chan
->sdu_last_frag
= NULL
;
4124 static void l2cap_ertm_enter_local_busy(struct l2cap_chan
*chan
)
4126 BT_DBG("chan %p, Enter local busy", chan
);
4128 set_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
4129 l2cap_seq_list_clear(&chan
->srej_list
);
4131 __set_ack_timer(chan
);
4134 static void l2cap_ertm_exit_local_busy(struct l2cap_chan
*chan
)
4138 if (!test_bit(CONN_RNR_SENT
, &chan
->conn_state
))
4141 control
= __set_reqseq(chan
, chan
->buffer_seq
);
4142 control
|= __set_ctrl_poll(chan
);
4143 control
|= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
4144 l2cap_send_sframe(chan
, control
);
4145 chan
->retry_count
= 1;
4147 __clear_retrans_timer(chan
);
4148 __set_monitor_timer(chan
);
4150 set_bit(CONN_WAIT_F
, &chan
->conn_state
);
4153 clear_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
);
4154 clear_bit(CONN_RNR_SENT
, &chan
->conn_state
);
4156 BT_DBG("chan %p, Exit local busy", chan
);
4159 void l2cap_chan_busy(struct l2cap_chan
*chan
, int busy
)
4161 if (chan
->mode
== L2CAP_MODE_ERTM
) {
4163 l2cap_ertm_enter_local_busy(chan
);
4165 l2cap_ertm_exit_local_busy(chan
);
4169 static void l2cap_check_srej_gap(struct l2cap_chan
*chan
, u16 tx_seq
)
4171 struct sk_buff
*skb
;
4174 while ((skb
= skb_peek(&chan
->srej_q
)) &&
4175 !test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4178 if (bt_cb(skb
)->control
.txseq
!= tx_seq
)
4181 skb
= skb_dequeue(&chan
->srej_q
);
4182 control
= __set_ctrl_sar(chan
, bt_cb(skb
)->control
.sar
);
4183 err
= l2cap_reassemble_sdu(chan
, skb
, control
);
4186 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4190 chan
->buffer_seq_srej
= __next_seq(chan
, chan
->buffer_seq_srej
);
4191 tx_seq
= __next_seq(chan
, tx_seq
);
4195 static void l2cap_resend_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
4197 struct srej_list
*l
, *tmp
;
4200 list_for_each_entry_safe(l
, tmp
, &chan
->srej_l
, list
) {
4201 if (l
->tx_seq
== tx_seq
) {
4206 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
4207 control
|= __set_reqseq(chan
, l
->tx_seq
);
4208 l2cap_send_sframe(chan
, control
);
4210 list_add_tail(&l
->list
, &chan
->srej_l
);
4214 static int l2cap_send_srejframe(struct l2cap_chan
*chan
, u16 tx_seq
)
4216 struct srej_list
*new;
4219 while (tx_seq
!= chan
->expected_tx_seq
) {
4220 control
= __set_ctrl_super(chan
, L2CAP_SUPER_SREJ
);
4221 control
|= __set_reqseq(chan
, chan
->expected_tx_seq
);
4222 l2cap_seq_list_append(&chan
->srej_list
, chan
->expected_tx_seq
);
4223 l2cap_send_sframe(chan
, control
);
4225 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
4229 new->tx_seq
= chan
->expected_tx_seq
;
4231 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
4233 list_add_tail(&new->list
, &chan
->srej_l
);
4236 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
4241 static inline int l2cap_data_channel_iframe(struct l2cap_chan
*chan
, u32 rx_control
, struct sk_buff
*skb
)
4243 u16 tx_seq
= __get_txseq(chan
, rx_control
);
4244 u16 req_seq
= __get_reqseq(chan
, rx_control
);
4245 u8 sar
= __get_ctrl_sar(chan
, rx_control
);
4246 int tx_seq_offset
, expected_tx_seq_offset
;
4247 int num_to_ack
= (chan
->tx_win
/6) + 1;
4250 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan
, skb
->len
,
4251 tx_seq
, rx_control
);
4253 if (__is_ctrl_final(chan
, rx_control
) &&
4254 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4255 __clear_monitor_timer(chan
);
4256 if (chan
->unacked_frames
> 0)
4257 __set_retrans_timer(chan
);
4258 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
4261 chan
->expected_ack_seq
= req_seq
;
4262 l2cap_drop_acked_frames(chan
);
4264 tx_seq_offset
= __seq_offset(chan
, tx_seq
, chan
->buffer_seq
);
4266 /* invalid tx_seq */
4267 if (tx_seq_offset
>= chan
->tx_win
) {
4268 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4272 if (test_bit(CONN_LOCAL_BUSY
, &chan
->conn_state
)) {
4273 if (!test_bit(CONN_RNR_SENT
, &chan
->conn_state
))
4274 l2cap_send_ack(chan
);
4278 if (tx_seq
== chan
->expected_tx_seq
)
4281 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4282 struct srej_list
*first
;
4284 first
= list_first_entry(&chan
->srej_l
,
4285 struct srej_list
, list
);
4286 if (tx_seq
== first
->tx_seq
) {
4287 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
4288 l2cap_check_srej_gap(chan
, tx_seq
);
4290 list_del(&first
->list
);
4293 if (list_empty(&chan
->srej_l
)) {
4294 chan
->buffer_seq
= chan
->buffer_seq_srej
;
4295 clear_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
4296 l2cap_send_ack(chan
);
4297 BT_DBG("chan %p, Exit SREJ_SENT", chan
);
4300 struct srej_list
*l
;
4302 /* duplicated tx_seq */
4303 if (l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
) < 0)
4306 list_for_each_entry(l
, &chan
->srej_l
, list
) {
4307 if (l
->tx_seq
== tx_seq
) {
4308 l2cap_resend_srejframe(chan
, tx_seq
);
4313 err
= l2cap_send_srejframe(chan
, tx_seq
);
4315 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
4320 expected_tx_seq_offset
= __seq_offset(chan
,
4321 chan
->expected_tx_seq
, chan
->buffer_seq
);
4323 /* duplicated tx_seq */
4324 if (tx_seq_offset
< expected_tx_seq_offset
)
4327 set_bit(CONN_SREJ_SENT
, &chan
->conn_state
);
4329 BT_DBG("chan %p, Enter SREJ", chan
);
4331 INIT_LIST_HEAD(&chan
->srej_l
);
4332 chan
->buffer_seq_srej
= chan
->buffer_seq
;
4334 __skb_queue_head_init(&chan
->srej_q
);
4335 l2cap_add_to_srej_queue(chan
, skb
, tx_seq
, sar
);
4337 /* Set P-bit only if there are some I-frames to ack. */
4338 if (__clear_ack_timer(chan
))
4339 set_bit(CONN_SEND_PBIT
, &chan
->conn_state
);
4341 err
= l2cap_send_srejframe(chan
, tx_seq
);
4343 l2cap_send_disconn_req(chan
->conn
, chan
, -err
);
4350 chan
->expected_tx_seq
= __next_seq(chan
, chan
->expected_tx_seq
);
4352 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4353 bt_cb(skb
)->control
.txseq
= tx_seq
;
4354 bt_cb(skb
)->control
.sar
= sar
;
4355 __skb_queue_tail(&chan
->srej_q
, skb
);
4359 err
= l2cap_reassemble_sdu(chan
, skb
, rx_control
);
4360 chan
->buffer_seq
= __next_seq(chan
, chan
->buffer_seq
);
4363 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4367 if (__is_ctrl_final(chan
, rx_control
)) {
4368 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4369 l2cap_retransmit_frames(chan
);
4373 chan
->num_acked
= (chan
->num_acked
+ 1) % num_to_ack
;
4374 if (chan
->num_acked
== num_to_ack
- 1)
4375 l2cap_send_ack(chan
);
4377 __set_ack_timer(chan
);
4386 static inline void l2cap_data_channel_rrframe(struct l2cap_chan
*chan
, u32 rx_control
)
4388 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
,
4389 __get_reqseq(chan
, rx_control
), rx_control
);
4391 chan
->expected_ack_seq
= __get_reqseq(chan
, rx_control
);
4392 l2cap_drop_acked_frames(chan
);
4394 if (__is_ctrl_poll(chan
, rx_control
)) {
4395 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4396 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4397 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4398 (chan
->unacked_frames
> 0))
4399 __set_retrans_timer(chan
);
4401 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4402 l2cap_send_srejtail(chan
);
4404 l2cap_send_i_or_rr_or_rnr(chan
);
4407 } else if (__is_ctrl_final(chan
, rx_control
)) {
4408 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4410 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4411 l2cap_retransmit_frames(chan
);
4414 if (test_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
) &&
4415 (chan
->unacked_frames
> 0))
4416 __set_retrans_timer(chan
);
4418 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4419 if (test_bit(CONN_SREJ_SENT
, &chan
->conn_state
))
4420 l2cap_send_ack(chan
);
4422 l2cap_ertm_send(chan
);
4426 static inline void l2cap_data_channel_rejframe(struct l2cap_chan
*chan
, u32 rx_control
)
4428 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4430 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4432 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4434 chan
->expected_ack_seq
= tx_seq
;
4435 l2cap_drop_acked_frames(chan
);
4437 if (__is_ctrl_final(chan
, rx_control
)) {
4438 if (!test_and_clear_bit(CONN_REJ_ACT
, &chan
->conn_state
))
4439 l2cap_retransmit_frames(chan
);
4441 l2cap_retransmit_frames(chan
);
4443 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
))
4444 set_bit(CONN_REJ_ACT
, &chan
->conn_state
);
4447 static inline void l2cap_data_channel_srejframe(struct l2cap_chan
*chan
, u32 rx_control
)
4449 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4451 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4453 clear_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4455 if (__is_ctrl_poll(chan
, rx_control
)) {
4456 chan
->expected_ack_seq
= tx_seq
;
4457 l2cap_drop_acked_frames(chan
);
4459 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4460 l2cap_retransmit_one_frame(chan
, tx_seq
);
4462 l2cap_ertm_send(chan
);
4464 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4465 chan
->srej_save_reqseq
= tx_seq
;
4466 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4468 } else if (__is_ctrl_final(chan
, rx_control
)) {
4469 if (test_bit(CONN_SREJ_ACT
, &chan
->conn_state
) &&
4470 chan
->srej_save_reqseq
== tx_seq
)
4471 clear_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4473 l2cap_retransmit_one_frame(chan
, tx_seq
);
4475 l2cap_retransmit_one_frame(chan
, tx_seq
);
4476 if (test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4477 chan
->srej_save_reqseq
= tx_seq
;
4478 set_bit(CONN_SREJ_ACT
, &chan
->conn_state
);
4483 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan
*chan
, u32 rx_control
)
4485 u16 tx_seq
= __get_reqseq(chan
, rx_control
);
4487 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan
, tx_seq
, rx_control
);
4489 set_bit(CONN_REMOTE_BUSY
, &chan
->conn_state
);
4490 chan
->expected_ack_seq
= tx_seq
;
4491 l2cap_drop_acked_frames(chan
);
4493 if (__is_ctrl_poll(chan
, rx_control
))
4494 set_bit(CONN_SEND_FBIT
, &chan
->conn_state
);
4496 if (!test_bit(CONN_SREJ_SENT
, &chan
->conn_state
)) {
4497 __clear_retrans_timer(chan
);
4498 if (__is_ctrl_poll(chan
, rx_control
))
4499 l2cap_send_rr_or_rnr(chan
, L2CAP_CTRL_FINAL
);
4503 if (__is_ctrl_poll(chan
, rx_control
)) {
4504 l2cap_send_srejtail(chan
);
4506 rx_control
= __set_ctrl_super(chan
, L2CAP_SUPER_RR
);
4507 l2cap_send_sframe(chan
, rx_control
);
4511 static inline int l2cap_data_channel_sframe(struct l2cap_chan
*chan
, u32 rx_control
, struct sk_buff
*skb
)
4513 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan
, rx_control
, skb
->len
);
4515 if (__is_ctrl_final(chan
, rx_control
) &&
4516 test_bit(CONN_WAIT_F
, &chan
->conn_state
)) {
4517 __clear_monitor_timer(chan
);
4518 if (chan
->unacked_frames
> 0)
4519 __set_retrans_timer(chan
);
4520 clear_bit(CONN_WAIT_F
, &chan
->conn_state
);
4523 switch (__get_ctrl_super(chan
, rx_control
)) {
4524 case L2CAP_SUPER_RR
:
4525 l2cap_data_channel_rrframe(chan
, rx_control
);
4528 case L2CAP_SUPER_REJ
:
4529 l2cap_data_channel_rejframe(chan
, rx_control
);
4532 case L2CAP_SUPER_SREJ
:
4533 l2cap_data_channel_srejframe(chan
, rx_control
);
4536 case L2CAP_SUPER_RNR
:
4537 l2cap_data_channel_rnrframe(chan
, rx_control
);
4545 static int l2cap_ertm_data_rcv(struct l2cap_chan
*chan
, struct sk_buff
*skb
)
4549 int len
, next_tx_seq_offset
, req_seq_offset
;
4551 __unpack_control(chan
, skb
);
4553 control
= __get_control(chan
, skb
->data
);
4554 skb_pull(skb
, __ctrl_size(chan
));
4558 * We can just drop the corrupted I-frame here.
4559 * Receiver will miss it and start proper recovery
4560 * procedures and ask retransmission.
4562 if (l2cap_check_fcs(chan
, skb
))
4565 if (__is_sar_start(chan
, control
) && !__is_sframe(chan
, control
))
4566 len
-= L2CAP_SDULEN_SIZE
;
4568 if (chan
->fcs
== L2CAP_FCS_CRC16
)
4569 len
-= L2CAP_FCS_SIZE
;
4571 if (len
> chan
->mps
) {
4572 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4576 req_seq
= __get_reqseq(chan
, control
);
4578 req_seq_offset
= __seq_offset(chan
, req_seq
, chan
->expected_ack_seq
);
4580 next_tx_seq_offset
= __seq_offset(chan
, chan
->next_tx_seq
,
4581 chan
->expected_ack_seq
);
4583 /* check for invalid req-seq */
4584 if (req_seq_offset
> next_tx_seq_offset
) {
4585 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4589 if (!__is_sframe(chan
, control
)) {
4591 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4595 l2cap_data_channel_iframe(chan
, control
, skb
);
4599 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4603 l2cap_data_channel_sframe(chan
, control
, skb
);
4613 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4615 struct l2cap_chan
*chan
;
4620 chan
= l2cap_get_chan_by_scid(conn
, cid
);
4622 BT_DBG("unknown cid 0x%4.4x", cid
);
4623 /* Drop packet and return */
4628 l2cap_chan_lock(chan
);
4630 BT_DBG("chan %p, len %d", chan
, skb
->len
);
4632 if (chan
->state
!= BT_CONNECTED
)
4635 switch (chan
->mode
) {
4636 case L2CAP_MODE_BASIC
:
4637 /* If socket recv buffers overflows we drop data here
4638 * which is *bad* because L2CAP has to be reliable.
4639 * But we don't have any other choice. L2CAP doesn't
4640 * provide flow control mechanism. */
4642 if (chan
->imtu
< skb
->len
)
4645 if (!chan
->ops
->recv(chan
->data
, skb
))
4649 case L2CAP_MODE_ERTM
:
4650 l2cap_ertm_data_rcv(chan
, skb
);
4654 case L2CAP_MODE_STREAMING
:
4655 control
= __get_control(chan
, skb
->data
);
4656 skb_pull(skb
, __ctrl_size(chan
));
4659 if (l2cap_check_fcs(chan
, skb
))
4662 if (__is_sar_start(chan
, control
))
4663 len
-= L2CAP_SDULEN_SIZE
;
4665 if (chan
->fcs
== L2CAP_FCS_CRC16
)
4666 len
-= L2CAP_FCS_SIZE
;
4668 if (len
> chan
->mps
|| len
< 0 || __is_sframe(chan
, control
))
4671 tx_seq
= __get_txseq(chan
, control
);
4673 if (chan
->expected_tx_seq
!= tx_seq
) {
4674 /* Frame(s) missing - must discard partial SDU */
4675 kfree_skb(chan
->sdu
);
4677 chan
->sdu_last_frag
= NULL
;
4680 /* TODO: Notify userland of missing data */
4683 chan
->expected_tx_seq
= __next_seq(chan
, tx_seq
);
4685 if (l2cap_reassemble_sdu(chan
, skb
, control
) == -EMSGSIZE
)
4686 l2cap_send_disconn_req(chan
->conn
, chan
, ECONNRESET
);
4691 BT_DBG("chan %p: bad mode 0x%2.2x", chan
, chan
->mode
);
4699 l2cap_chan_unlock(chan
);
4704 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4706 struct l2cap_chan
*chan
;
4708 chan
= l2cap_global_chan_by_psm(0, psm
, conn
->src
, conn
->dst
);
4712 BT_DBG("chan %p, len %d", chan
, skb
->len
);
4714 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
4717 if (chan
->imtu
< skb
->len
)
4720 if (!chan
->ops
->recv(chan
->data
, skb
))
4729 static inline int l2cap_att_channel(struct l2cap_conn
*conn
, u16 cid
,
4730 struct sk_buff
*skb
)
4732 struct l2cap_chan
*chan
;
4734 chan
= l2cap_global_chan_by_scid(0, cid
, conn
->src
, conn
->dst
);
4738 BT_DBG("chan %p, len %d", chan
, skb
->len
);
4740 if (chan
->state
!= BT_BOUND
&& chan
->state
!= BT_CONNECTED
)
4743 if (chan
->imtu
< skb
->len
)
4746 if (!chan
->ops
->recv(chan
->data
, skb
))
4755 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4757 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4761 skb_pull(skb
, L2CAP_HDR_SIZE
);
4762 cid
= __le16_to_cpu(lh
->cid
);
4763 len
= __le16_to_cpu(lh
->len
);
4765 if (len
!= skb
->len
) {
4770 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4773 case L2CAP_CID_LE_SIGNALING
:
4774 case L2CAP_CID_SIGNALING
:
4775 l2cap_sig_channel(conn
, skb
);
4778 case L2CAP_CID_CONN_LESS
:
4779 psm
= get_unaligned((__le16
*) skb
->data
);
4781 l2cap_conless_channel(conn
, psm
, skb
);
4784 case L2CAP_CID_LE_DATA
:
4785 l2cap_att_channel(conn
, cid
, skb
);
4789 if (smp_sig_channel(conn
, skb
))
4790 l2cap_conn_del(conn
->hcon
, EACCES
);
4794 l2cap_data_channel(conn
, cid
, skb
);
4799 /* ---- L2CAP interface with lower layer (HCI) ---- */
4801 int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
)
4803 int exact
= 0, lm1
= 0, lm2
= 0;
4804 struct l2cap_chan
*c
;
4806 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4808 /* Find listening sockets and check their link_mode */
4809 read_lock(&chan_list_lock
);
4810 list_for_each_entry(c
, &chan_list
, global_l
) {
4811 struct sock
*sk
= c
->sk
;
4813 if (c
->state
!= BT_LISTEN
)
4816 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4817 lm1
|= HCI_LM_ACCEPT
;
4818 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4819 lm1
|= HCI_LM_MASTER
;
4821 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4822 lm2
|= HCI_LM_ACCEPT
;
4823 if (test_bit(FLAG_ROLE_SWITCH
, &c
->flags
))
4824 lm2
|= HCI_LM_MASTER
;
4827 read_unlock(&chan_list_lock
);
4829 return exact
? lm1
: lm2
;
4832 int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4834 struct l2cap_conn
*conn
;
4836 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4839 conn
= l2cap_conn_add(hcon
, status
);
4841 l2cap_conn_ready(conn
);
4843 l2cap_conn_del(hcon
, bt_to_errno(status
));
4848 int l2cap_disconn_ind(struct hci_conn
*hcon
)
4850 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4852 BT_DBG("hcon %p", hcon
);
4855 return HCI_ERROR_REMOTE_USER_TERM
;
4856 return conn
->disc_reason
;
4859 int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4861 BT_DBG("hcon %p reason %d", hcon
, reason
);
4863 l2cap_conn_del(hcon
, bt_to_errno(reason
));
4867 static inline void l2cap_check_encryption(struct l2cap_chan
*chan
, u8 encrypt
)
4869 if (chan
->chan_type
!= L2CAP_CHAN_CONN_ORIENTED
)
4872 if (encrypt
== 0x00) {
4873 if (chan
->sec_level
== BT_SECURITY_MEDIUM
) {
4874 __set_chan_timer(chan
, L2CAP_ENC_TIMEOUT
);
4875 } else if (chan
->sec_level
== BT_SECURITY_HIGH
)
4876 l2cap_chan_close(chan
, ECONNREFUSED
);
4878 if (chan
->sec_level
== BT_SECURITY_MEDIUM
)
4879 __clear_chan_timer(chan
);
4883 int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4885 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4886 struct l2cap_chan
*chan
;
4891 BT_DBG("conn %p", conn
);
4893 if (hcon
->type
== LE_LINK
) {
4894 if (!status
&& encrypt
)
4895 smp_distribute_keys(conn
, 0);
4896 cancel_delayed_work(&conn
->security_timer
);
4899 mutex_lock(&conn
->chan_lock
);
4901 list_for_each_entry(chan
, &conn
->chan_l
, list
) {
4902 l2cap_chan_lock(chan
);
4904 BT_DBG("chan->scid %d", chan
->scid
);
4906 if (chan
->scid
== L2CAP_CID_LE_DATA
) {
4907 if (!status
&& encrypt
) {
4908 chan
->sec_level
= hcon
->sec_level
;
4909 l2cap_chan_ready(chan
);
4912 l2cap_chan_unlock(chan
);
4916 if (test_bit(CONF_CONNECT_PEND
, &chan
->conf_state
)) {
4917 l2cap_chan_unlock(chan
);
4921 if (!status
&& (chan
->state
== BT_CONNECTED
||
4922 chan
->state
== BT_CONFIG
)) {
4923 struct sock
*sk
= chan
->sk
;
4925 bt_sk(sk
)->suspended
= false;
4926 sk
->sk_state_change(sk
);
4928 l2cap_check_encryption(chan
, encrypt
);
4929 l2cap_chan_unlock(chan
);
4933 if (chan
->state
== BT_CONNECT
) {
4935 l2cap_send_conn_req(chan
);
4937 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
4939 } else if (chan
->state
== BT_CONNECT2
) {
4940 struct sock
*sk
= chan
->sk
;
4941 struct l2cap_conn_rsp rsp
;
4947 if (bt_sk(sk
)->defer_setup
) {
4948 struct sock
*parent
= bt_sk(sk
)->parent
;
4949 res
= L2CAP_CR_PEND
;
4950 stat
= L2CAP_CS_AUTHOR_PEND
;
4952 parent
->sk_data_ready(parent
, 0);
4954 __l2cap_state_change(chan
, BT_CONFIG
);
4955 res
= L2CAP_CR_SUCCESS
;
4956 stat
= L2CAP_CS_NO_INFO
;
4959 __l2cap_state_change(chan
, BT_DISCONN
);
4960 __set_chan_timer(chan
, L2CAP_DISC_TIMEOUT
);
4961 res
= L2CAP_CR_SEC_BLOCK
;
4962 stat
= L2CAP_CS_NO_INFO
;
4967 rsp
.scid
= cpu_to_le16(chan
->dcid
);
4968 rsp
.dcid
= cpu_to_le16(chan
->scid
);
4969 rsp
.result
= cpu_to_le16(res
);
4970 rsp
.status
= cpu_to_le16(stat
);
4971 l2cap_send_cmd(conn
, chan
->ident
, L2CAP_CONN_RSP
,
4975 l2cap_chan_unlock(chan
);
4978 mutex_unlock(&conn
->chan_lock
);
4983 int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4985 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4988 conn
= l2cap_conn_add(hcon
, 0);
4993 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4995 if (!(flags
& ACL_CONT
)) {
4996 struct l2cap_hdr
*hdr
;
4997 struct l2cap_chan
*chan
;
5002 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
5003 kfree_skb(conn
->rx_skb
);
5004 conn
->rx_skb
= NULL
;
5006 l2cap_conn_unreliable(conn
, ECOMM
);
5009 /* Start fragment always begin with Basic L2CAP header */
5010 if (skb
->len
< L2CAP_HDR_SIZE
) {
5011 BT_ERR("Frame is too short (len %d)", skb
->len
);
5012 l2cap_conn_unreliable(conn
, ECOMM
);
5016 hdr
= (struct l2cap_hdr
*) skb
->data
;
5017 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
5018 cid
= __le16_to_cpu(hdr
->cid
);
5020 if (len
== skb
->len
) {
5021 /* Complete frame received */
5022 l2cap_recv_frame(conn
, skb
);
5026 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
5028 if (skb
->len
> len
) {
5029 BT_ERR("Frame is too long (len %d, expected len %d)",
5031 l2cap_conn_unreliable(conn
, ECOMM
);
5035 chan
= l2cap_get_chan_by_scid(conn
, cid
);
5037 if (chan
&& chan
->sk
) {
5038 struct sock
*sk
= chan
->sk
;
5041 if (chan
->imtu
< len
- L2CAP_HDR_SIZE
) {
5042 BT_ERR("Frame exceeding recv MTU (len %d, "
5046 l2cap_conn_unreliable(conn
, ECOMM
);
5052 /* Allocate skb for the complete frame (with header) */
5053 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
5057 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5059 conn
->rx_len
= len
- skb
->len
;
5061 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
5063 if (!conn
->rx_len
) {
5064 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
5065 l2cap_conn_unreliable(conn
, ECOMM
);
5069 if (skb
->len
> conn
->rx_len
) {
5070 BT_ERR("Fragment is too long (len %d, expected %d)",
5071 skb
->len
, conn
->rx_len
);
5072 kfree_skb(conn
->rx_skb
);
5073 conn
->rx_skb
= NULL
;
5075 l2cap_conn_unreliable(conn
, ECOMM
);
5079 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
5081 conn
->rx_len
-= skb
->len
;
5083 if (!conn
->rx_len
) {
5084 /* Complete frame received */
5085 l2cap_recv_frame(conn
, conn
->rx_skb
);
5086 conn
->rx_skb
= NULL
;
5095 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
5097 struct l2cap_chan
*c
;
5099 read_lock(&chan_list_lock
);
5101 list_for_each_entry(c
, &chan_list
, global_l
) {
5102 struct sock
*sk
= c
->sk
;
5104 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
5105 batostr(&bt_sk(sk
)->src
),
5106 batostr(&bt_sk(sk
)->dst
),
5107 c
->state
, __le16_to_cpu(c
->psm
),
5108 c
->scid
, c
->dcid
, c
->imtu
, c
->omtu
,
5109 c
->sec_level
, c
->mode
);
5112 read_unlock(&chan_list_lock
);
5117 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
5119 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
5122 static const struct file_operations l2cap_debugfs_fops
= {
5123 .open
= l2cap_debugfs_open
,
5125 .llseek
= seq_lseek
,
5126 .release
= single_release
,
/* debugfs entry created in l2cap_init() and removed in l2cap_exit() */
static struct dentry *l2cap_debugfs;
5131 int __init
l2cap_init(void)
5135 err
= l2cap_init_sockets();
5140 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
5141 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
5143 BT_ERR("Failed to create L2CAP debug file");
5149 void l2cap_exit(void)
5151 debugfs_remove(l2cap_debugfs
);
5152 l2cap_cleanup_sockets();
/* Writable module parameter: set to disable ERTM channel mode negotiation */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");