2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm
= 0;
60 static u32 l2cap_feat_mask
= L2CAP_FEAT_FIXED_CHAN
;
61 static u8 l2cap_fixed_chan
[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops
;
65 static struct workqueue_struct
*_busy_wq
;
67 static struct bt_sock_list l2cap_sk_list
= {
68 .lock
= __RW_LOCK_UNLOCKED(l2cap_sk_list
.lock
)
71 static void l2cap_busy_work(struct work_struct
*work
);
73 static void __l2cap_sock_close(struct sock
*sk
, int reason
);
74 static void l2cap_sock_close(struct sock
*sk
);
75 static void l2cap_sock_kill(struct sock
*sk
);
77 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
78 u8 code
, u8 ident
, u16 dlen
, void *data
);
80 /* ---- L2CAP timers ---- */
81 static void l2cap_sock_timeout(unsigned long arg
)
83 struct sock
*sk
= (struct sock
*) arg
;
86 BT_DBG("sock %p state %d", sk
, sk
->sk_state
);
90 if (sk
->sk_state
== BT_CONNECTED
|| sk
->sk_state
== BT_CONFIG
)
91 reason
= ECONNREFUSED
;
92 else if (sk
->sk_state
== BT_CONNECT
&&
93 l2cap_pi(sk
)->sec_level
!= BT_SECURITY_SDP
)
94 reason
= ECONNREFUSED
;
98 __l2cap_sock_close(sk
, reason
);
/* Arm the socket's generic timer to fire `timeout` jiffies from now.
 * sk_reset_timer() both starts a stopped timer and re-arms a pending one,
 * and takes a socket reference that the timer handler releases.
 * NOTE(review): reconstructed from a garbled extraction; only the braces
 * were missing from the visible text — all statements are original. */
static void l2cap_sock_set_timer(struct sock *sk, long timeout)
{
	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
}
/* Cancel a pending socket timer, if any. sk_stop_timer() also drops the
 * socket reference held by the timer when it successfully deletes it.
 * NOTE(review): reconstructed from a garbled extraction; only the braces
 * were missing from the visible text — all statements are original. */
static void l2cap_sock_clear_timer(struct sock *sk)
{
	BT_DBG("sock %p state %d", sk, sk->sk_state);
	sk_stop_timer(sk, &sk->sk_timer);
}
118 /* ---- L2CAP channels ---- */
119 static struct sock
*__l2cap_get_chan_by_dcid(struct l2cap_chan_list
*l
, u16 cid
)
122 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
123 if (l2cap_pi(s
)->dcid
== cid
)
129 static struct sock
*__l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
132 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
133 if (l2cap_pi(s
)->scid
== cid
)
139 /* Find channel with given SCID.
140 * Returns locked socket */
141 static inline struct sock
*l2cap_get_chan_by_scid(struct l2cap_chan_list
*l
, u16 cid
)
145 s
= __l2cap_get_chan_by_scid(l
, cid
);
148 read_unlock(&l
->lock
);
152 static struct sock
*__l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
155 for (s
= l
->head
; s
; s
= l2cap_pi(s
)->next_c
) {
156 if (l2cap_pi(s
)->ident
== ident
)
162 static inline struct sock
*l2cap_get_chan_by_ident(struct l2cap_chan_list
*l
, u8 ident
)
166 s
= __l2cap_get_chan_by_ident(l
, ident
);
169 read_unlock(&l
->lock
);
173 static u16
l2cap_alloc_cid(struct l2cap_chan_list
*l
)
175 u16 cid
= L2CAP_CID_DYN_START
;
177 for (; cid
< L2CAP_CID_DYN_END
; cid
++) {
178 if (!__l2cap_get_chan_by_scid(l
, cid
))
185 static inline void __l2cap_chan_link(struct l2cap_chan_list
*l
, struct sock
*sk
)
190 l2cap_pi(l
->head
)->prev_c
= sk
;
192 l2cap_pi(sk
)->next_c
= l
->head
;
193 l2cap_pi(sk
)->prev_c
= NULL
;
197 static inline void l2cap_chan_unlink(struct l2cap_chan_list
*l
, struct sock
*sk
)
199 struct sock
*next
= l2cap_pi(sk
)->next_c
, *prev
= l2cap_pi(sk
)->prev_c
;
201 write_lock_bh(&l
->lock
);
206 l2cap_pi(next
)->prev_c
= prev
;
208 l2cap_pi(prev
)->next_c
= next
;
209 write_unlock_bh(&l
->lock
);
214 static void __l2cap_chan_add(struct l2cap_conn
*conn
, struct sock
*sk
, struct sock
*parent
)
216 struct l2cap_chan_list
*l
= &conn
->chan_list
;
218 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn
,
219 l2cap_pi(sk
)->psm
, l2cap_pi(sk
)->dcid
);
221 conn
->disc_reason
= 0x13;
223 l2cap_pi(sk
)->conn
= conn
;
225 if (sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
) {
226 /* Alloc CID for connection-oriented socket */
227 l2cap_pi(sk
)->scid
= l2cap_alloc_cid(l
);
228 } else if (sk
->sk_type
== SOCK_DGRAM
) {
229 /* Connectionless socket */
230 l2cap_pi(sk
)->scid
= L2CAP_CID_CONN_LESS
;
231 l2cap_pi(sk
)->dcid
= L2CAP_CID_CONN_LESS
;
232 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
234 /* Raw socket can send/recv signalling messages only */
235 l2cap_pi(sk
)->scid
= L2CAP_CID_SIGNALING
;
236 l2cap_pi(sk
)->dcid
= L2CAP_CID_SIGNALING
;
237 l2cap_pi(sk
)->omtu
= L2CAP_DEFAULT_MTU
;
240 __l2cap_chan_link(l
, sk
);
243 bt_accept_enqueue(parent
, sk
);
247 * Must be called on the locked socket. */
248 static void l2cap_chan_del(struct sock
*sk
, int err
)
250 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
251 struct sock
*parent
= bt_sk(sk
)->parent
;
253 l2cap_sock_clear_timer(sk
);
255 BT_DBG("sk %p, conn %p, err %d", sk
, conn
, err
);
258 /* Unlink from channel list */
259 l2cap_chan_unlink(&conn
->chan_list
, sk
);
260 l2cap_pi(sk
)->conn
= NULL
;
261 hci_conn_put(conn
->hcon
);
264 sk
->sk_state
= BT_CLOSED
;
265 sock_set_flag(sk
, SOCK_ZAPPED
);
271 bt_accept_unlink(sk
);
272 parent
->sk_data_ready(parent
, 0);
274 sk
->sk_state_change(sk
);
276 skb_queue_purge(TX_QUEUE(sk
));
278 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
279 struct srej_list
*l
, *tmp
;
281 del_timer(&l2cap_pi(sk
)->retrans_timer
);
282 del_timer(&l2cap_pi(sk
)->monitor_timer
);
283 del_timer(&l2cap_pi(sk
)->ack_timer
);
285 skb_queue_purge(SREJ_QUEUE(sk
));
286 skb_queue_purge(BUSY_QUEUE(sk
));
288 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
295 /* Service level security */
296 static inline int l2cap_check_security(struct sock
*sk
)
298 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
301 if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
302 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
303 auth_type
= HCI_AT_NO_BONDING_MITM
;
305 auth_type
= HCI_AT_NO_BONDING
;
307 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
308 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
310 switch (l2cap_pi(sk
)->sec_level
) {
311 case BT_SECURITY_HIGH
:
312 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
314 case BT_SECURITY_MEDIUM
:
315 auth_type
= HCI_AT_GENERAL_BONDING
;
318 auth_type
= HCI_AT_NO_BONDING
;
323 return hci_conn_security(conn
->hcon
, l2cap_pi(sk
)->sec_level
,
327 static inline u8
l2cap_get_ident(struct l2cap_conn
*conn
)
331 /* Get next available identificator.
332 * 1 - 128 are used by kernel.
333 * 129 - 199 are reserved.
334 * 200 - 254 are used by utilities like l2ping, etc.
337 spin_lock_bh(&conn
->lock
);
339 if (++conn
->tx_ident
> 128)
344 spin_unlock_bh(&conn
->lock
);
349 static inline void l2cap_send_cmd(struct l2cap_conn
*conn
, u8 ident
, u8 code
, u16 len
, void *data
)
351 struct sk_buff
*skb
= l2cap_build_cmd(conn
, code
, ident
, len
, data
);
353 BT_DBG("code 0x%2.2x", code
);
358 hci_send_acl(conn
->hcon
, skb
, 0);
361 static inline void l2cap_send_sframe(struct l2cap_pinfo
*pi
, u16 control
)
364 struct l2cap_hdr
*lh
;
365 struct l2cap_conn
*conn
= pi
->conn
;
366 struct sock
*sk
= (struct sock
*)pi
;
367 int count
, hlen
= L2CAP_HDR_SIZE
+ 2;
369 if (sk
->sk_state
!= BT_CONNECTED
)
372 if (pi
->fcs
== L2CAP_FCS_CRC16
)
375 BT_DBG("pi %p, control 0x%2.2x", pi
, control
);
377 count
= min_t(unsigned int, conn
->mtu
, hlen
);
378 control
|= L2CAP_CTRL_FRAME_TYPE
;
380 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
381 control
|= L2CAP_CTRL_FINAL
;
382 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
385 if (pi
->conn_state
& L2CAP_CONN_SEND_PBIT
) {
386 control
|= L2CAP_CTRL_POLL
;
387 pi
->conn_state
&= ~L2CAP_CONN_SEND_PBIT
;
390 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
394 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
395 lh
->len
= cpu_to_le16(hlen
- L2CAP_HDR_SIZE
);
396 lh
->cid
= cpu_to_le16(pi
->dcid
);
397 put_unaligned_le16(control
, skb_put(skb
, 2));
399 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
400 u16 fcs
= crc16(0, (u8
*)lh
, count
- 2);
401 put_unaligned_le16(fcs
, skb_put(skb
, 2));
404 hci_send_acl(pi
->conn
->hcon
, skb
, 0);
/* Send an ERTM supervisory frame: RNR (Receiver Not Ready) while we are
 * locally busy, RR (Receiver Ready) otherwise.  The current buffer_seq is
 * folded into the ReqSeq field so the frame also acknowledges received
 * I-frames.  When RNR is sent, L2CAP_CONN_RNR_SENT is recorded so the
 * busy-clear path knows a matching RR must follow.
 * NOTE(review): reconstructed from a garbled extraction; the braces and
 * the `} else` line were missing from the visible text — the else branch
 * is implied by the RCV_READY assignment, but confirm against the
 * original net/bluetooth/l2cap.c. */
static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
{
	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
	} else
		control |= L2CAP_SUPER_RCV_READY;

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(pi, control);
}
/* True when no L2CAP Connect Request is currently outstanding on this
 * channel (the CONNECT_PEND flag is set when a request is sent and cleared
 * on response).  Callers use this to avoid sending a duplicate request.
 * NOTE(review): reconstructed from a garbled extraction; only the braces
 * were missing from the visible text. */
static inline int __l2cap_no_conn_pending(struct sock *sk)
{
	return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
}
425 static void l2cap_do_start(struct sock
*sk
)
427 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
429 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) {
430 if (!(conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
))
433 if (l2cap_check_security(sk
) && __l2cap_no_conn_pending(sk
)) {
434 struct l2cap_conn_req req
;
435 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
436 req
.psm
= l2cap_pi(sk
)->psm
;
438 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
439 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
441 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
442 L2CAP_CONN_REQ
, sizeof(req
), &req
);
445 struct l2cap_info_req req
;
446 req
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
448 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
449 conn
->info_ident
= l2cap_get_ident(conn
);
451 mod_timer(&conn
->info_timer
, jiffies
+
452 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
454 l2cap_send_cmd(conn
, conn
->info_ident
,
455 L2CAP_INFO_REQ
, sizeof(req
), &req
);
459 static inline int l2cap_mode_supported(__u8 mode
, __u32 feat_mask
)
461 u32 local_feat_mask
= l2cap_feat_mask
;
463 local_feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
;
466 case L2CAP_MODE_ERTM
:
467 return L2CAP_FEAT_ERTM
& feat_mask
& local_feat_mask
;
468 case L2CAP_MODE_STREAMING
:
469 return L2CAP_FEAT_STREAMING
& feat_mask
& local_feat_mask
;
475 static void l2cap_send_disconn_req(struct l2cap_conn
*conn
, struct sock
*sk
, int err
)
477 struct l2cap_disconn_req req
;
482 skb_queue_purge(TX_QUEUE(sk
));
484 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
) {
485 del_timer(&l2cap_pi(sk
)->retrans_timer
);
486 del_timer(&l2cap_pi(sk
)->monitor_timer
);
487 del_timer(&l2cap_pi(sk
)->ack_timer
);
490 req
.dcid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
491 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
492 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
493 L2CAP_DISCONN_REQ
, sizeof(req
), &req
);
495 sk
->sk_state
= BT_DISCONN
;
499 /* ---- L2CAP connections ---- */
500 static void l2cap_conn_start(struct l2cap_conn
*conn
)
502 struct l2cap_chan_list
*l
= &conn
->chan_list
;
503 struct sock_del_list del
, *tmp1
, *tmp2
;
506 BT_DBG("conn %p", conn
);
508 INIT_LIST_HEAD(&del
.list
);
512 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
515 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
516 sk
->sk_type
!= SOCK_STREAM
) {
521 if (sk
->sk_state
== BT_CONNECT
) {
522 if (l2cap_check_security(sk
) &&
523 __l2cap_no_conn_pending(sk
)) {
524 struct l2cap_conn_req req
;
526 if (!l2cap_mode_supported(l2cap_pi(sk
)->mode
,
528 && l2cap_pi(sk
)->conf_state
&
529 L2CAP_CONF_STATE2_DEVICE
) {
530 tmp1
= kzalloc(sizeof(struct srej_list
),
533 list_add_tail(&tmp1
->list
, &del
.list
);
538 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
539 req
.psm
= l2cap_pi(sk
)->psm
;
541 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
542 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
544 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
545 L2CAP_CONN_REQ
, sizeof(req
), &req
);
547 } else if (sk
->sk_state
== BT_CONNECT2
) {
548 struct l2cap_conn_rsp rsp
;
549 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
550 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
552 if (l2cap_check_security(sk
)) {
553 if (bt_sk(sk
)->defer_setup
) {
554 struct sock
*parent
= bt_sk(sk
)->parent
;
555 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
556 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHOR_PEND
);
557 parent
->sk_data_ready(parent
, 0);
560 sk
->sk_state
= BT_CONFIG
;
561 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
562 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
565 rsp
.result
= cpu_to_le16(L2CAP_CR_PEND
);
566 rsp
.status
= cpu_to_le16(L2CAP_CS_AUTHEN_PEND
);
569 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
570 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
576 read_unlock(&l
->lock
);
578 list_for_each_entry_safe(tmp1
, tmp2
, &del
.list
, list
) {
579 bh_lock_sock(tmp1
->sk
);
580 __l2cap_sock_close(tmp1
->sk
, ECONNRESET
);
581 bh_unlock_sock(tmp1
->sk
);
582 list_del(&tmp1
->list
);
587 static void l2cap_conn_ready(struct l2cap_conn
*conn
)
589 struct l2cap_chan_list
*l
= &conn
->chan_list
;
592 BT_DBG("conn %p", conn
);
596 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
599 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
600 sk
->sk_type
!= SOCK_STREAM
) {
601 l2cap_sock_clear_timer(sk
);
602 sk
->sk_state
= BT_CONNECTED
;
603 sk
->sk_state_change(sk
);
604 } else if (sk
->sk_state
== BT_CONNECT
)
610 read_unlock(&l
->lock
);
613 /* Notify sockets that we cannot guaranty reliability anymore */
614 static void l2cap_conn_unreliable(struct l2cap_conn
*conn
, int err
)
616 struct l2cap_chan_list
*l
= &conn
->chan_list
;
619 BT_DBG("conn %p", conn
);
623 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
624 if (l2cap_pi(sk
)->force_reliable
)
628 read_unlock(&l
->lock
);
/* Timer callback: the remote never answered our Information Request.
 * Give up waiting — mark the feature-mask exchange as done (with whatever
 * defaults we have), clear the outstanding ident, and kick off any
 * channels that were parked waiting on the exchange.
 * NOTE(review): reconstructed from a garbled extraction; only braces and
 * blank lines were missing from the visible text. */
static void l2cap_info_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
641 static struct l2cap_conn
*l2cap_conn_add(struct hci_conn
*hcon
, u8 status
)
643 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
648 conn
= kzalloc(sizeof(struct l2cap_conn
), GFP_ATOMIC
);
652 hcon
->l2cap_data
= conn
;
655 BT_DBG("hcon %p conn %p", hcon
, conn
);
657 conn
->mtu
= hcon
->hdev
->acl_mtu
;
658 conn
->src
= &hcon
->hdev
->bdaddr
;
659 conn
->dst
= &hcon
->dst
;
663 spin_lock_init(&conn
->lock
);
664 rwlock_init(&conn
->chan_list
.lock
);
666 setup_timer(&conn
->info_timer
, l2cap_info_timeout
,
667 (unsigned long) conn
);
669 conn
->disc_reason
= 0x13;
674 static void l2cap_conn_del(struct hci_conn
*hcon
, int err
)
676 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
682 BT_DBG("hcon %p conn %p, err %d", hcon
, conn
, err
);
684 kfree_skb(conn
->rx_skb
);
687 while ((sk
= conn
->chan_list
.head
)) {
689 l2cap_chan_del(sk
, err
);
694 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
)
695 del_timer_sync(&conn
->info_timer
);
697 hcon
->l2cap_data
= NULL
;
/* Locked wrapper around __l2cap_chan_add(): inserts the channel socket
 * into the connection's channel list under the list's write lock
 * (bottom-half-safe, since the list is also walked from BH context).
 * NOTE(review): reconstructed from a garbled extraction; only the braces
 * were missing from the visible text. */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk, parent);
	write_unlock_bh(&l->lock);
}
709 /* ---- Socket interface ---- */
710 static struct sock
*__l2cap_get_sock_by_addr(__le16 psm
, bdaddr_t
*src
)
713 struct hlist_node
*node
;
714 sk_for_each(sk
, node
, &l2cap_sk_list
.head
)
715 if (l2cap_pi(sk
)->sport
== psm
&& !bacmp(&bt_sk(sk
)->src
, src
))
722 /* Find socket with psm and source bdaddr.
723 * Returns closest match.
725 static struct sock
*__l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
727 struct sock
*sk
= NULL
, *sk1
= NULL
;
728 struct hlist_node
*node
;
730 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
731 if (state
&& sk
->sk_state
!= state
)
734 if (l2cap_pi(sk
)->psm
== psm
) {
736 if (!bacmp(&bt_sk(sk
)->src
, src
))
740 if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
))
744 return node
? sk
: sk1
;
747 /* Find socket with given address (psm, src).
748 * Returns locked socket */
749 static inline struct sock
*l2cap_get_sock_by_psm(int state
, __le16 psm
, bdaddr_t
*src
)
752 read_lock(&l2cap_sk_list
.lock
);
753 s
= __l2cap_get_sock_by_psm(state
, psm
, src
);
756 read_unlock(&l2cap_sk_list
.lock
);
760 static void l2cap_sock_destruct(struct sock
*sk
)
764 skb_queue_purge(&sk
->sk_receive_queue
);
765 skb_queue_purge(&sk
->sk_write_queue
);
768 static void l2cap_sock_cleanup_listen(struct sock
*parent
)
772 BT_DBG("parent %p", parent
);
774 /* Close not yet accepted channels */
775 while ((sk
= bt_accept_dequeue(parent
, NULL
)))
776 l2cap_sock_close(sk
);
778 parent
->sk_state
= BT_CLOSED
;
779 sock_set_flag(parent
, SOCK_ZAPPED
);
782 /* Kill socket (only if zapped and orphan)
783 * Must be called on unlocked socket.
785 static void l2cap_sock_kill(struct sock
*sk
)
787 if (!sock_flag(sk
, SOCK_ZAPPED
) || sk
->sk_socket
)
790 BT_DBG("sk %p state %d", sk
, sk
->sk_state
);
792 /* Kill poor orphan */
793 bt_sock_unlink(&l2cap_sk_list
, sk
);
794 sock_set_flag(sk
, SOCK_DEAD
);
798 static void __l2cap_sock_close(struct sock
*sk
, int reason
)
800 BT_DBG("sk %p state %d socket %p", sk
, sk
->sk_state
, sk
->sk_socket
);
802 switch (sk
->sk_state
) {
804 l2cap_sock_cleanup_listen(sk
);
809 if (sk
->sk_type
== SOCK_SEQPACKET
||
810 sk
->sk_type
== SOCK_STREAM
) {
811 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
813 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
814 l2cap_send_disconn_req(conn
, sk
, reason
);
816 l2cap_chan_del(sk
, reason
);
820 if (sk
->sk_type
== SOCK_SEQPACKET
||
821 sk
->sk_type
== SOCK_STREAM
) {
822 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
823 struct l2cap_conn_rsp rsp
;
826 if (bt_sk(sk
)->defer_setup
)
827 result
= L2CAP_CR_SEC_BLOCK
;
829 result
= L2CAP_CR_BAD_PSM
;
831 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
832 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
833 rsp
.result
= cpu_to_le16(result
);
834 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
835 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
836 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
838 l2cap_chan_del(sk
, reason
);
843 l2cap_chan_del(sk
, reason
);
847 sock_set_flag(sk
, SOCK_ZAPPED
);
852 /* Must be called on unlocked socket. */
853 static void l2cap_sock_close(struct sock
*sk
)
855 l2cap_sock_clear_timer(sk
);
857 __l2cap_sock_close(sk
, ECONNRESET
);
862 static void l2cap_sock_init(struct sock
*sk
, struct sock
*parent
)
864 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
869 sk
->sk_type
= parent
->sk_type
;
870 bt_sk(sk
)->defer_setup
= bt_sk(parent
)->defer_setup
;
872 pi
->imtu
= l2cap_pi(parent
)->imtu
;
873 pi
->omtu
= l2cap_pi(parent
)->omtu
;
874 pi
->conf_state
= l2cap_pi(parent
)->conf_state
;
875 pi
->mode
= l2cap_pi(parent
)->mode
;
876 pi
->fcs
= l2cap_pi(parent
)->fcs
;
877 pi
->max_tx
= l2cap_pi(parent
)->max_tx
;
878 pi
->tx_win
= l2cap_pi(parent
)->tx_win
;
879 pi
->sec_level
= l2cap_pi(parent
)->sec_level
;
880 pi
->role_switch
= l2cap_pi(parent
)->role_switch
;
881 pi
->force_reliable
= l2cap_pi(parent
)->force_reliable
;
883 pi
->imtu
= L2CAP_DEFAULT_MTU
;
885 if (enable_ertm
&& sk
->sk_type
== SOCK_STREAM
) {
886 pi
->mode
= L2CAP_MODE_ERTM
;
887 pi
->conf_state
|= L2CAP_CONF_STATE2_DEVICE
;
889 pi
->mode
= L2CAP_MODE_BASIC
;
891 pi
->max_tx
= L2CAP_DEFAULT_MAX_TX
;
892 pi
->fcs
= L2CAP_FCS_CRC16
;
893 pi
->tx_win
= L2CAP_DEFAULT_TX_WINDOW
;
894 pi
->sec_level
= BT_SECURITY_LOW
;
896 pi
->force_reliable
= 0;
899 /* Default config options */
901 pi
->flush_to
= L2CAP_DEFAULT_FLUSH_TO
;
902 skb_queue_head_init(TX_QUEUE(sk
));
903 skb_queue_head_init(SREJ_QUEUE(sk
));
904 skb_queue_head_init(BUSY_QUEUE(sk
));
905 INIT_LIST_HEAD(SREJ_LIST(sk
));
908 static struct proto l2cap_proto
= {
910 .owner
= THIS_MODULE
,
911 .obj_size
= sizeof(struct l2cap_pinfo
)
914 static struct sock
*l2cap_sock_alloc(struct net
*net
, struct socket
*sock
, int proto
, gfp_t prio
)
918 sk
= sk_alloc(net
, PF_BLUETOOTH
, prio
, &l2cap_proto
);
922 sock_init_data(sock
, sk
);
923 INIT_LIST_HEAD(&bt_sk(sk
)->accept_q
);
925 sk
->sk_destruct
= l2cap_sock_destruct
;
926 sk
->sk_sndtimeo
= msecs_to_jiffies(L2CAP_CONN_TIMEOUT
);
928 sock_reset_flag(sk
, SOCK_ZAPPED
);
930 sk
->sk_protocol
= proto
;
931 sk
->sk_state
= BT_OPEN
;
933 setup_timer(&sk
->sk_timer
, l2cap_sock_timeout
, (unsigned long) sk
);
935 bt_sock_link(&l2cap_sk_list
, sk
);
939 static int l2cap_sock_create(struct net
*net
, struct socket
*sock
, int protocol
,
944 BT_DBG("sock %p", sock
);
946 sock
->state
= SS_UNCONNECTED
;
948 if (sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
&&
949 sock
->type
!= SOCK_DGRAM
&& sock
->type
!= SOCK_RAW
)
950 return -ESOCKTNOSUPPORT
;
952 if (sock
->type
== SOCK_RAW
&& !kern
&& !capable(CAP_NET_RAW
))
955 sock
->ops
= &l2cap_sock_ops
;
957 sk
= l2cap_sock_alloc(net
, sock
, protocol
, GFP_ATOMIC
);
961 l2cap_sock_init(sk
, NULL
);
965 static int l2cap_sock_bind(struct socket
*sock
, struct sockaddr
*addr
, int alen
)
967 struct sock
*sk
= sock
->sk
;
968 struct sockaddr_l2 la
;
973 if (!addr
|| addr
->sa_family
!= AF_BLUETOOTH
)
976 memset(&la
, 0, sizeof(la
));
977 len
= min_t(unsigned int, sizeof(la
), alen
);
978 memcpy(&la
, addr
, len
);
985 if (sk
->sk_state
!= BT_OPEN
) {
990 if (la
.l2_psm
&& __le16_to_cpu(la
.l2_psm
) < 0x1001 &&
991 !capable(CAP_NET_BIND_SERVICE
)) {
996 write_lock_bh(&l2cap_sk_list
.lock
);
998 if (la
.l2_psm
&& __l2cap_get_sock_by_addr(la
.l2_psm
, &la
.l2_bdaddr
)) {
1001 /* Save source address */
1002 bacpy(&bt_sk(sk
)->src
, &la
.l2_bdaddr
);
1003 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1004 l2cap_pi(sk
)->sport
= la
.l2_psm
;
1005 sk
->sk_state
= BT_BOUND
;
1007 if (__le16_to_cpu(la
.l2_psm
) == 0x0001 ||
1008 __le16_to_cpu(la
.l2_psm
) == 0x0003)
1009 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
1012 write_unlock_bh(&l2cap_sk_list
.lock
);
1019 static int l2cap_do_connect(struct sock
*sk
)
1021 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1022 bdaddr_t
*dst
= &bt_sk(sk
)->dst
;
1023 struct l2cap_conn
*conn
;
1024 struct hci_conn
*hcon
;
1025 struct hci_dev
*hdev
;
1029 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src
), batostr(dst
),
1032 hdev
= hci_get_route(dst
, src
);
1034 return -EHOSTUNREACH
;
1036 hci_dev_lock_bh(hdev
);
1040 if (sk
->sk_type
== SOCK_RAW
) {
1041 switch (l2cap_pi(sk
)->sec_level
) {
1042 case BT_SECURITY_HIGH
:
1043 auth_type
= HCI_AT_DEDICATED_BONDING_MITM
;
1045 case BT_SECURITY_MEDIUM
:
1046 auth_type
= HCI_AT_DEDICATED_BONDING
;
1049 auth_type
= HCI_AT_NO_BONDING
;
1052 } else if (l2cap_pi(sk
)->psm
== cpu_to_le16(0x0001)) {
1053 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
1054 auth_type
= HCI_AT_NO_BONDING_MITM
;
1056 auth_type
= HCI_AT_NO_BONDING
;
1058 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_LOW
)
1059 l2cap_pi(sk
)->sec_level
= BT_SECURITY_SDP
;
1061 switch (l2cap_pi(sk
)->sec_level
) {
1062 case BT_SECURITY_HIGH
:
1063 auth_type
= HCI_AT_GENERAL_BONDING_MITM
;
1065 case BT_SECURITY_MEDIUM
:
1066 auth_type
= HCI_AT_GENERAL_BONDING
;
1069 auth_type
= HCI_AT_NO_BONDING
;
1074 hcon
= hci_connect(hdev
, ACL_LINK
, dst
,
1075 l2cap_pi(sk
)->sec_level
, auth_type
);
1079 conn
= l2cap_conn_add(hcon
, 0);
1087 /* Update source addr of the socket */
1088 bacpy(src
, conn
->src
);
1090 l2cap_chan_add(conn
, sk
, NULL
);
1092 sk
->sk_state
= BT_CONNECT
;
1093 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
1095 if (hcon
->state
== BT_CONNECTED
) {
1096 if (sk
->sk_type
!= SOCK_SEQPACKET
&&
1097 sk
->sk_type
!= SOCK_STREAM
) {
1098 l2cap_sock_clear_timer(sk
);
1099 sk
->sk_state
= BT_CONNECTED
;
1105 hci_dev_unlock_bh(hdev
);
1110 static int l2cap_sock_connect(struct socket
*sock
, struct sockaddr
*addr
, int alen
, int flags
)
1112 struct sock
*sk
= sock
->sk
;
1113 struct sockaddr_l2 la
;
1116 BT_DBG("sk %p", sk
);
1118 if (!addr
|| alen
< sizeof(addr
->sa_family
) ||
1119 addr
->sa_family
!= AF_BLUETOOTH
)
1122 memset(&la
, 0, sizeof(la
));
1123 len
= min_t(unsigned int, sizeof(la
), alen
);
1124 memcpy(&la
, addr
, len
);
1131 if ((sk
->sk_type
== SOCK_SEQPACKET
|| sk
->sk_type
== SOCK_STREAM
)
1137 switch (l2cap_pi(sk
)->mode
) {
1138 case L2CAP_MODE_BASIC
:
1140 case L2CAP_MODE_ERTM
:
1141 case L2CAP_MODE_STREAMING
:
1150 switch (sk
->sk_state
) {
1154 /* Already connecting */
1158 /* Already connected */
1171 /* Set destination address and psm */
1172 bacpy(&bt_sk(sk
)->dst
, &la
.l2_bdaddr
);
1173 l2cap_pi(sk
)->psm
= la
.l2_psm
;
1175 err
= l2cap_do_connect(sk
);
1180 err
= bt_sock_wait_state(sk
, BT_CONNECTED
,
1181 sock_sndtimeo(sk
, flags
& O_NONBLOCK
));
1187 static int l2cap_sock_listen(struct socket
*sock
, int backlog
)
1189 struct sock
*sk
= sock
->sk
;
1192 BT_DBG("sk %p backlog %d", sk
, backlog
);
1196 if ((sock
->type
!= SOCK_SEQPACKET
&& sock
->type
!= SOCK_STREAM
)
1197 || sk
->sk_state
!= BT_BOUND
) {
1202 switch (l2cap_pi(sk
)->mode
) {
1203 case L2CAP_MODE_BASIC
:
1205 case L2CAP_MODE_ERTM
:
1206 case L2CAP_MODE_STREAMING
:
1215 if (!l2cap_pi(sk
)->psm
) {
1216 bdaddr_t
*src
= &bt_sk(sk
)->src
;
1221 write_lock_bh(&l2cap_sk_list
.lock
);
1223 for (psm
= 0x1001; psm
< 0x1100; psm
+= 2)
1224 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm
), src
)) {
1225 l2cap_pi(sk
)->psm
= cpu_to_le16(psm
);
1226 l2cap_pi(sk
)->sport
= cpu_to_le16(psm
);
1231 write_unlock_bh(&l2cap_sk_list
.lock
);
1237 sk
->sk_max_ack_backlog
= backlog
;
1238 sk
->sk_ack_backlog
= 0;
1239 sk
->sk_state
= BT_LISTEN
;
1246 static int l2cap_sock_accept(struct socket
*sock
, struct socket
*newsock
, int flags
)
1248 DECLARE_WAITQUEUE(wait
, current
);
1249 struct sock
*sk
= sock
->sk
, *nsk
;
1253 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1255 if (sk
->sk_state
!= BT_LISTEN
) {
1260 timeo
= sock_rcvtimeo(sk
, flags
& O_NONBLOCK
);
1262 BT_DBG("sk %p timeo %ld", sk
, timeo
);
1264 /* Wait for an incoming connection. (wake-one). */
1265 add_wait_queue_exclusive(sk_sleep(sk
), &wait
);
1266 while (!(nsk
= bt_accept_dequeue(sk
, newsock
))) {
1267 set_current_state(TASK_INTERRUPTIBLE
);
1274 timeo
= schedule_timeout(timeo
);
1275 lock_sock_nested(sk
, SINGLE_DEPTH_NESTING
);
1277 if (sk
->sk_state
!= BT_LISTEN
) {
1282 if (signal_pending(current
)) {
1283 err
= sock_intr_errno(timeo
);
1287 set_current_state(TASK_RUNNING
);
1288 remove_wait_queue(sk_sleep(sk
), &wait
);
1293 newsock
->state
= SS_CONNECTED
;
1295 BT_DBG("new socket %p", nsk
);
1302 static int l2cap_sock_getname(struct socket
*sock
, struct sockaddr
*addr
, int *len
, int peer
)
1304 struct sockaddr_l2
*la
= (struct sockaddr_l2
*) addr
;
1305 struct sock
*sk
= sock
->sk
;
1307 BT_DBG("sock %p, sk %p", sock
, sk
);
1309 addr
->sa_family
= AF_BLUETOOTH
;
1310 *len
= sizeof(struct sockaddr_l2
);
1313 la
->l2_psm
= l2cap_pi(sk
)->psm
;
1314 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->dst
);
1315 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1317 la
->l2_psm
= l2cap_pi(sk
)->sport
;
1318 bacpy(&la
->l2_bdaddr
, &bt_sk(sk
)->src
);
1319 la
->l2_cid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1325 static int __l2cap_wait_ack(struct sock
*sk
)
1327 DECLARE_WAITQUEUE(wait
, current
);
1331 add_wait_queue(sk_sleep(sk
), &wait
);
1332 while ((l2cap_pi(sk
)->unacked_frames
> 0 && l2cap_pi(sk
)->conn
)) {
1333 set_current_state(TASK_INTERRUPTIBLE
);
1338 if (signal_pending(current
)) {
1339 err
= sock_intr_errno(timeo
);
1344 timeo
= schedule_timeout(timeo
);
1347 err
= sock_error(sk
);
1351 set_current_state(TASK_RUNNING
);
1352 remove_wait_queue(sk_sleep(sk
), &wait
);
1356 static void l2cap_monitor_timeout(unsigned long arg
)
1358 struct sock
*sk
= (void *) arg
;
1360 BT_DBG("sk %p", sk
);
1363 if (l2cap_pi(sk
)->retry_count
>= l2cap_pi(sk
)->remote_max_tx
) {
1364 l2cap_send_disconn_req(l2cap_pi(sk
)->conn
, sk
, ECONNABORTED
);
1369 l2cap_pi(sk
)->retry_count
++;
1370 __mod_monitor_timer();
1372 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1376 static void l2cap_retrans_timeout(unsigned long arg
)
1378 struct sock
*sk
= (void *) arg
;
1380 BT_DBG("sk %p", sk
);
1383 l2cap_pi(sk
)->retry_count
= 1;
1384 __mod_monitor_timer();
1386 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
1388 l2cap_send_rr_or_rnr(l2cap_pi(sk
), L2CAP_CTRL_POLL
);
1392 static void l2cap_drop_acked_frames(struct sock
*sk
)
1394 struct sk_buff
*skb
;
1396 while ((skb
= skb_peek(TX_QUEUE(sk
))) &&
1397 l2cap_pi(sk
)->unacked_frames
) {
1398 if (bt_cb(skb
)->tx_seq
== l2cap_pi(sk
)->expected_ack_seq
)
1401 skb
= skb_dequeue(TX_QUEUE(sk
));
1404 l2cap_pi(sk
)->unacked_frames
--;
1407 if (!l2cap_pi(sk
)->unacked_frames
)
1408 del_timer(&l2cap_pi(sk
)->retrans_timer
);
/* Hand a fully built L2CAP frame to the HCI layer for transmission on
 * this channel's ACL link.  Ownership of the skb passes to hci_send_acl();
 * the caller must not touch it afterwards.
 * NOTE(review): reconstructed from a garbled extraction; only braces and
 * blank lines were missing from the visible text. */
static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);

	hci_send_acl(pi->conn->hcon, skb, 0);
}
1420 static int l2cap_streaming_send(struct sock
*sk
)
1422 struct sk_buff
*skb
, *tx_skb
;
1423 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1426 while ((skb
= sk
->sk_send_head
)) {
1427 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1429 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1430 control
|= pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
;
1431 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1433 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1434 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1435 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1438 l2cap_do_send(sk
, tx_skb
);
1440 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1442 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1443 sk
->sk_send_head
= NULL
;
1445 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1447 skb
= skb_dequeue(TX_QUEUE(sk
));
1453 static void l2cap_retransmit_one_frame(struct sock
*sk
, u8 tx_seq
)
1455 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1456 struct sk_buff
*skb
, *tx_skb
;
1459 skb
= skb_peek(TX_QUEUE(sk
));
1464 if (bt_cb(skb
)->tx_seq
== tx_seq
)
1467 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1470 } while ((skb
= skb_queue_next(TX_QUEUE(sk
), skb
)));
1472 if (pi
->remote_max_tx
&&
1473 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1474 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1478 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1479 bt_cb(skb
)->retries
++;
1480 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1482 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1483 control
|= L2CAP_CTRL_FINAL
;
1484 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1487 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1488 | (tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1490 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1492 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1493 fcs
= crc16(0, (u8
*)tx_skb
->data
, tx_skb
->len
- 2);
1494 put_unaligned_le16(fcs
, tx_skb
->data
+ tx_skb
->len
- 2);
1497 l2cap_do_send(sk
, tx_skb
);
1500 static int l2cap_ertm_send(struct sock
*sk
)
1502 struct sk_buff
*skb
, *tx_skb
;
1503 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1507 if (sk
->sk_state
!= BT_CONNECTED
)
1510 while ((skb
= sk
->sk_send_head
) && (!l2cap_tx_window_full(sk
))) {
1512 if (pi
->remote_max_tx
&&
1513 bt_cb(skb
)->retries
== pi
->remote_max_tx
) {
1514 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNABORTED
);
1518 tx_skb
= skb_clone(skb
, GFP_ATOMIC
);
1520 bt_cb(skb
)->retries
++;
1522 control
= get_unaligned_le16(tx_skb
->data
+ L2CAP_HDR_SIZE
);
1523 control
&= L2CAP_CTRL_SAR
;
1525 if (pi
->conn_state
& L2CAP_CONN_SEND_FBIT
) {
1526 control
|= L2CAP_CTRL_FINAL
;
1527 pi
->conn_state
&= ~L2CAP_CONN_SEND_FBIT
;
1529 control
|= (pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
)
1530 | (pi
->next_tx_seq
<< L2CAP_CTRL_TXSEQ_SHIFT
);
1531 put_unaligned_le16(control
, tx_skb
->data
+ L2CAP_HDR_SIZE
);
1534 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
1535 fcs
= crc16(0, (u8
*)skb
->data
, tx_skb
->len
- 2);
1536 put_unaligned_le16(fcs
, skb
->data
+ tx_skb
->len
- 2);
1539 l2cap_do_send(sk
, tx_skb
);
1541 __mod_retrans_timer();
1543 bt_cb(skb
)->tx_seq
= pi
->next_tx_seq
;
1544 pi
->next_tx_seq
= (pi
->next_tx_seq
+ 1) % 64;
1546 pi
->unacked_frames
++;
1549 if (skb_queue_is_last(TX_QUEUE(sk
), skb
))
1550 sk
->sk_send_head
= NULL
;
1552 sk
->sk_send_head
= skb_queue_next(TX_QUEUE(sk
), skb
);
1560 static int l2cap_retransmit_frames(struct sock
*sk
)
1562 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1565 if (!skb_queue_empty(TX_QUEUE(sk
)))
1566 sk
->sk_send_head
= TX_QUEUE(sk
)->next
;
1568 pi
->next_tx_seq
= pi
->expected_ack_seq
;
1569 ret
= l2cap_ertm_send(sk
);
1573 static void l2cap_send_ack(struct l2cap_pinfo
*pi
)
1575 struct sock
*sk
= (struct sock
*)pi
;
1578 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1580 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
1581 control
|= L2CAP_SUPER_RCV_NOT_READY
;
1582 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
1583 l2cap_send_sframe(pi
, control
);
1587 if (l2cap_ertm_send(sk
) > 0)
1590 control
|= L2CAP_SUPER_RCV_READY
;
1591 l2cap_send_sframe(pi
, control
);
1594 static void l2cap_send_srejtail(struct sock
*sk
)
1596 struct srej_list
*tail
;
1599 control
= L2CAP_SUPER_SELECT_REJECT
;
1600 control
|= L2CAP_CTRL_FINAL
;
1602 tail
= list_entry(SREJ_LIST(sk
)->prev
, struct srej_list
, list
);
1603 control
|= tail
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
1605 l2cap_send_sframe(l2cap_pi(sk
), control
);
1608 static inline int l2cap_skbuff_fromiovec(struct sock
*sk
, struct msghdr
*msg
, int len
, int count
, struct sk_buff
*skb
)
1610 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1611 struct sk_buff
**frag
;
1614 if (memcpy_fromiovec(skb_put(skb
, count
), msg
->msg_iov
, count
))
1620 /* Continuation fragments (no L2CAP header) */
1621 frag
= &skb_shinfo(skb
)->frag_list
;
1623 count
= min_t(unsigned int, conn
->mtu
, len
);
1625 *frag
= bt_skb_send_alloc(sk
, count
, msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1628 if (memcpy_fromiovec(skb_put(*frag
, count
), msg
->msg_iov
, count
))
1634 frag
= &(*frag
)->next
;
1640 static struct sk_buff
*l2cap_create_connless_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1642 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1643 struct sk_buff
*skb
;
1644 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1645 struct l2cap_hdr
*lh
;
1647 BT_DBG("sk %p len %d", sk
, (int)len
);
1649 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1650 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1651 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1653 return ERR_PTR(-ENOMEM
);
1655 /* Create L2CAP header */
1656 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1657 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1658 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1659 put_unaligned_le16(l2cap_pi(sk
)->psm
, skb_put(skb
, 2));
1661 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1662 if (unlikely(err
< 0)) {
1664 return ERR_PTR(err
);
1669 static struct sk_buff
*l2cap_create_basic_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1671 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1672 struct sk_buff
*skb
;
1673 int err
, count
, hlen
= L2CAP_HDR_SIZE
;
1674 struct l2cap_hdr
*lh
;
1676 BT_DBG("sk %p len %d", sk
, (int)len
);
1678 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1679 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1680 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1682 return ERR_PTR(-ENOMEM
);
1684 /* Create L2CAP header */
1685 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1686 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1687 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1689 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1690 if (unlikely(err
< 0)) {
1692 return ERR_PTR(err
);
1697 static struct sk_buff
*l2cap_create_iframe_pdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
, u16 control
, u16 sdulen
)
1699 struct l2cap_conn
*conn
= l2cap_pi(sk
)->conn
;
1700 struct sk_buff
*skb
;
1701 int err
, count
, hlen
= L2CAP_HDR_SIZE
+ 2;
1702 struct l2cap_hdr
*lh
;
1704 BT_DBG("sk %p len %d", sk
, (int)len
);
1707 return ERR_PTR(-ENOTCONN
);
1712 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1715 count
= min_t(unsigned int, (conn
->mtu
- hlen
), len
);
1716 skb
= bt_skb_send_alloc(sk
, count
+ hlen
,
1717 msg
->msg_flags
& MSG_DONTWAIT
, &err
);
1719 return ERR_PTR(-ENOMEM
);
1721 /* Create L2CAP header */
1722 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
1723 lh
->cid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1724 lh
->len
= cpu_to_le16(len
+ (hlen
- L2CAP_HDR_SIZE
));
1725 put_unaligned_le16(control
, skb_put(skb
, 2));
1727 put_unaligned_le16(sdulen
, skb_put(skb
, 2));
1729 err
= l2cap_skbuff_fromiovec(sk
, msg
, len
, count
, skb
);
1730 if (unlikely(err
< 0)) {
1732 return ERR_PTR(err
);
1735 if (l2cap_pi(sk
)->fcs
== L2CAP_FCS_CRC16
)
1736 put_unaligned_le16(0, skb_put(skb
, 2));
1738 bt_cb(skb
)->retries
= 0;
1742 static inline int l2cap_sar_segment_sdu(struct sock
*sk
, struct msghdr
*msg
, size_t len
)
1744 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1745 struct sk_buff
*skb
;
1746 struct sk_buff_head sar_queue
;
1750 skb_queue_head_init(&sar_queue
);
1751 control
= L2CAP_SDU_START
;
1752 skb
= l2cap_create_iframe_pdu(sk
, msg
, pi
->remote_mps
, control
, len
);
1754 return PTR_ERR(skb
);
1756 __skb_queue_tail(&sar_queue
, skb
);
1757 len
-= pi
->remote_mps
;
1758 size
+= pi
->remote_mps
;
1763 if (len
> pi
->remote_mps
) {
1764 control
= L2CAP_SDU_CONTINUE
;
1765 buflen
= pi
->remote_mps
;
1767 control
= L2CAP_SDU_END
;
1771 skb
= l2cap_create_iframe_pdu(sk
, msg
, buflen
, control
, 0);
1773 skb_queue_purge(&sar_queue
);
1774 return PTR_ERR(skb
);
1777 __skb_queue_tail(&sar_queue
, skb
);
1781 skb_queue_splice_tail(&sar_queue
, TX_QUEUE(sk
));
1782 if (sk
->sk_send_head
== NULL
)
1783 sk
->sk_send_head
= sar_queue
.next
;
1788 static int l2cap_sock_sendmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
)
1790 struct sock
*sk
= sock
->sk
;
1791 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
1792 struct sk_buff
*skb
;
1796 BT_DBG("sock %p, sk %p", sock
, sk
);
1798 err
= sock_error(sk
);
1802 if (msg
->msg_flags
& MSG_OOB
)
1807 if (sk
->sk_state
!= BT_CONNECTED
) {
1812 /* Connectionless channel */
1813 if (sk
->sk_type
== SOCK_DGRAM
) {
1814 skb
= l2cap_create_connless_pdu(sk
, msg
, len
);
1818 l2cap_do_send(sk
, skb
);
1825 case L2CAP_MODE_BASIC
:
1826 /* Check outgoing MTU */
1827 if (len
> pi
->omtu
) {
1832 /* Create a basic PDU */
1833 skb
= l2cap_create_basic_pdu(sk
, msg
, len
);
1839 l2cap_do_send(sk
, skb
);
1843 case L2CAP_MODE_ERTM
:
1844 case L2CAP_MODE_STREAMING
:
1845 /* Entire SDU fits into one PDU */
1846 if (len
<= pi
->remote_mps
) {
1847 control
= L2CAP_SDU_UNSEGMENTED
;
1848 skb
= l2cap_create_iframe_pdu(sk
, msg
, len
, control
, 0);
1853 __skb_queue_tail(TX_QUEUE(sk
), skb
);
1855 if (sk
->sk_send_head
== NULL
)
1856 sk
->sk_send_head
= skb
;
1859 /* Segment SDU into multiples PDUs */
1860 err
= l2cap_sar_segment_sdu(sk
, msg
, len
);
1865 if (pi
->mode
== L2CAP_MODE_STREAMING
) {
1866 err
= l2cap_streaming_send(sk
);
1868 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
&&
1869 pi
->conn_state
&& L2CAP_CONN_WAIT_F
) {
1873 err
= l2cap_ertm_send(sk
);
1881 BT_DBG("bad state %1.1x", pi
->mode
);
1890 static int l2cap_sock_recvmsg(struct kiocb
*iocb
, struct socket
*sock
, struct msghdr
*msg
, size_t len
, int flags
)
1892 struct sock
*sk
= sock
->sk
;
1896 if (sk
->sk_state
== BT_CONNECT2
&& bt_sk(sk
)->defer_setup
) {
1897 struct l2cap_conn_rsp rsp
;
1899 sk
->sk_state
= BT_CONFIG
;
1901 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
1902 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
1903 rsp
.result
= cpu_to_le16(L2CAP_CR_SUCCESS
);
1904 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
1905 l2cap_send_cmd(l2cap_pi(sk
)->conn
, l2cap_pi(sk
)->ident
,
1906 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
1914 return bt_sock_recvmsg(iocb
, sock
, msg
, len
, flags
);
1917 static int l2cap_sock_setsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, unsigned int optlen
)
1919 struct sock
*sk
= sock
->sk
;
1920 struct l2cap_options opts
;
1924 BT_DBG("sk %p", sk
);
1930 opts
.imtu
= l2cap_pi(sk
)->imtu
;
1931 opts
.omtu
= l2cap_pi(sk
)->omtu
;
1932 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
1933 opts
.mode
= l2cap_pi(sk
)->mode
;
1934 opts
.fcs
= l2cap_pi(sk
)->fcs
;
1935 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
1936 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
1938 len
= min_t(unsigned int, sizeof(opts
), optlen
);
1939 if (copy_from_user((char *) &opts
, optval
, len
)) {
1944 if (opts
.txwin_size
> L2CAP_DEFAULT_TX_WINDOW
) {
1949 l2cap_pi(sk
)->mode
= opts
.mode
;
1950 switch (l2cap_pi(sk
)->mode
) {
1951 case L2CAP_MODE_BASIC
:
1952 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_STATE2_DEVICE
;
1954 case L2CAP_MODE_ERTM
:
1955 case L2CAP_MODE_STREAMING
:
1964 l2cap_pi(sk
)->imtu
= opts
.imtu
;
1965 l2cap_pi(sk
)->omtu
= opts
.omtu
;
1966 l2cap_pi(sk
)->fcs
= opts
.fcs
;
1967 l2cap_pi(sk
)->max_tx
= opts
.max_tx
;
1968 l2cap_pi(sk
)->tx_win
= (__u8
)opts
.txwin_size
;
1972 if (get_user(opt
, (u32 __user
*) optval
)) {
1977 if (opt
& L2CAP_LM_AUTH
)
1978 l2cap_pi(sk
)->sec_level
= BT_SECURITY_LOW
;
1979 if (opt
& L2CAP_LM_ENCRYPT
)
1980 l2cap_pi(sk
)->sec_level
= BT_SECURITY_MEDIUM
;
1981 if (opt
& L2CAP_LM_SECURE
)
1982 l2cap_pi(sk
)->sec_level
= BT_SECURITY_HIGH
;
1984 l2cap_pi(sk
)->role_switch
= (opt
& L2CAP_LM_MASTER
);
1985 l2cap_pi(sk
)->force_reliable
= (opt
& L2CAP_LM_RELIABLE
);
1997 static int l2cap_sock_setsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, unsigned int optlen
)
1999 struct sock
*sk
= sock
->sk
;
2000 struct bt_security sec
;
2004 BT_DBG("sk %p", sk
);
2006 if (level
== SOL_L2CAP
)
2007 return l2cap_sock_setsockopt_old(sock
, optname
, optval
, optlen
);
2009 if (level
!= SOL_BLUETOOTH
)
2010 return -ENOPROTOOPT
;
2016 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
2017 && sk
->sk_type
!= SOCK_RAW
) {
2022 sec
.level
= BT_SECURITY_LOW
;
2024 len
= min_t(unsigned int, sizeof(sec
), optlen
);
2025 if (copy_from_user((char *) &sec
, optval
, len
)) {
2030 if (sec
.level
< BT_SECURITY_LOW
||
2031 sec
.level
> BT_SECURITY_HIGH
) {
2036 l2cap_pi(sk
)->sec_level
= sec
.level
;
2039 case BT_DEFER_SETUP
:
2040 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
2045 if (get_user(opt
, (u32 __user
*) optval
)) {
2050 bt_sk(sk
)->defer_setup
= opt
;
2062 static int l2cap_sock_getsockopt_old(struct socket
*sock
, int optname
, char __user
*optval
, int __user
*optlen
)
2064 struct sock
*sk
= sock
->sk
;
2065 struct l2cap_options opts
;
2066 struct l2cap_conninfo cinfo
;
2070 BT_DBG("sk %p", sk
);
2072 if (get_user(len
, optlen
))
2079 opts
.imtu
= l2cap_pi(sk
)->imtu
;
2080 opts
.omtu
= l2cap_pi(sk
)->omtu
;
2081 opts
.flush_to
= l2cap_pi(sk
)->flush_to
;
2082 opts
.mode
= l2cap_pi(sk
)->mode
;
2083 opts
.fcs
= l2cap_pi(sk
)->fcs
;
2084 opts
.max_tx
= l2cap_pi(sk
)->max_tx
;
2085 opts
.txwin_size
= (__u16
)l2cap_pi(sk
)->tx_win
;
2087 len
= min_t(unsigned int, len
, sizeof(opts
));
2088 if (copy_to_user(optval
, (char *) &opts
, len
))
2094 switch (l2cap_pi(sk
)->sec_level
) {
2095 case BT_SECURITY_LOW
:
2096 opt
= L2CAP_LM_AUTH
;
2098 case BT_SECURITY_MEDIUM
:
2099 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
;
2101 case BT_SECURITY_HIGH
:
2102 opt
= L2CAP_LM_AUTH
| L2CAP_LM_ENCRYPT
|
2110 if (l2cap_pi(sk
)->role_switch
)
2111 opt
|= L2CAP_LM_MASTER
;
2113 if (l2cap_pi(sk
)->force_reliable
)
2114 opt
|= L2CAP_LM_RELIABLE
;
2116 if (put_user(opt
, (u32 __user
*) optval
))
2120 case L2CAP_CONNINFO
:
2121 if (sk
->sk_state
!= BT_CONNECTED
&&
2122 !(sk
->sk_state
== BT_CONNECT2
&&
2123 bt_sk(sk
)->defer_setup
)) {
2128 cinfo
.hci_handle
= l2cap_pi(sk
)->conn
->hcon
->handle
;
2129 memcpy(cinfo
.dev_class
, l2cap_pi(sk
)->conn
->hcon
->dev_class
, 3);
2131 len
= min_t(unsigned int, len
, sizeof(cinfo
));
2132 if (copy_to_user(optval
, (char *) &cinfo
, len
))
2146 static int l2cap_sock_getsockopt(struct socket
*sock
, int level
, int optname
, char __user
*optval
, int __user
*optlen
)
2148 struct sock
*sk
= sock
->sk
;
2149 struct bt_security sec
;
2152 BT_DBG("sk %p", sk
);
2154 if (level
== SOL_L2CAP
)
2155 return l2cap_sock_getsockopt_old(sock
, optname
, optval
, optlen
);
2157 if (level
!= SOL_BLUETOOTH
)
2158 return -ENOPROTOOPT
;
2160 if (get_user(len
, optlen
))
2167 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
2168 && sk
->sk_type
!= SOCK_RAW
) {
2173 sec
.level
= l2cap_pi(sk
)->sec_level
;
2175 len
= min_t(unsigned int, len
, sizeof(sec
));
2176 if (copy_to_user(optval
, (char *) &sec
, len
))
2181 case BT_DEFER_SETUP
:
2182 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_LISTEN
) {
2187 if (put_user(bt_sk(sk
)->defer_setup
, (u32 __user
*) optval
))
2201 static int l2cap_sock_shutdown(struct socket
*sock
, int how
)
2203 struct sock
*sk
= sock
->sk
;
2206 BT_DBG("sock %p, sk %p", sock
, sk
);
2212 if (!sk
->sk_shutdown
) {
2213 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
2214 err
= __l2cap_wait_ack(sk
);
2216 sk
->sk_shutdown
= SHUTDOWN_MASK
;
2217 l2cap_sock_clear_timer(sk
);
2218 __l2cap_sock_close(sk
, 0);
2220 if (sock_flag(sk
, SOCK_LINGER
) && sk
->sk_lingertime
)
2221 err
= bt_sock_wait_state(sk
, BT_CLOSED
,
2225 if (!err
&& sk
->sk_err
)
2232 static int l2cap_sock_release(struct socket
*sock
)
2234 struct sock
*sk
= sock
->sk
;
2237 BT_DBG("sock %p, sk %p", sock
, sk
);
2242 err
= l2cap_sock_shutdown(sock
, 2);
2245 l2cap_sock_kill(sk
);
2249 static void l2cap_chan_ready(struct sock
*sk
)
2251 struct sock
*parent
= bt_sk(sk
)->parent
;
2253 BT_DBG("sk %p, parent %p", sk
, parent
);
2255 l2cap_pi(sk
)->conf_state
= 0;
2256 l2cap_sock_clear_timer(sk
);
2259 /* Outgoing channel.
2260 * Wake up socket sleeping on connect.
2262 sk
->sk_state
= BT_CONNECTED
;
2263 sk
->sk_state_change(sk
);
2265 /* Incoming channel.
2266 * Wake up socket sleeping on accept.
2268 parent
->sk_data_ready(parent
, 0);
2272 /* Copy frame to all raw sockets on that connection */
2273 static void l2cap_raw_recv(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
2275 struct l2cap_chan_list
*l
= &conn
->chan_list
;
2276 struct sk_buff
*nskb
;
2279 BT_DBG("conn %p", conn
);
2281 read_lock(&l
->lock
);
2282 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
2283 if (sk
->sk_type
!= SOCK_RAW
)
2286 /* Don't send frame to the socket it came from */
2289 nskb
= skb_clone(skb
, GFP_ATOMIC
);
2293 if (sock_queue_rcv_skb(sk
, nskb
))
2296 read_unlock(&l
->lock
);
2299 /* ---- L2CAP signalling commands ---- */
2300 static struct sk_buff
*l2cap_build_cmd(struct l2cap_conn
*conn
,
2301 u8 code
, u8 ident
, u16 dlen
, void *data
)
2303 struct sk_buff
*skb
, **frag
;
2304 struct l2cap_cmd_hdr
*cmd
;
2305 struct l2cap_hdr
*lh
;
2308 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2309 conn
, code
, ident
, dlen
);
2311 len
= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
+ dlen
;
2312 count
= min_t(unsigned int, conn
->mtu
, len
);
2314 skb
= bt_skb_alloc(count
, GFP_ATOMIC
);
2318 lh
= (struct l2cap_hdr
*) skb_put(skb
, L2CAP_HDR_SIZE
);
2319 lh
->len
= cpu_to_le16(L2CAP_CMD_HDR_SIZE
+ dlen
);
2320 lh
->cid
= cpu_to_le16(L2CAP_CID_SIGNALING
);
2322 cmd
= (struct l2cap_cmd_hdr
*) skb_put(skb
, L2CAP_CMD_HDR_SIZE
);
2325 cmd
->len
= cpu_to_le16(dlen
);
2328 count
-= L2CAP_HDR_SIZE
+ L2CAP_CMD_HDR_SIZE
;
2329 memcpy(skb_put(skb
, count
), data
, count
);
2335 /* Continuation fragments (no L2CAP header) */
2336 frag
= &skb_shinfo(skb
)->frag_list
;
2338 count
= min_t(unsigned int, conn
->mtu
, len
);
2340 *frag
= bt_skb_alloc(count
, GFP_ATOMIC
);
2344 memcpy(skb_put(*frag
, count
), data
, count
);
2349 frag
= &(*frag
)->next
;
2359 static inline int l2cap_get_conf_opt(void **ptr
, int *type
, int *olen
, unsigned long *val
)
2361 struct l2cap_conf_opt
*opt
= *ptr
;
2364 len
= L2CAP_CONF_OPT_SIZE
+ opt
->len
;
2372 *val
= *((u8
*) opt
->val
);
2376 *val
= __le16_to_cpu(*((__le16
*) opt
->val
));
2380 *val
= __le32_to_cpu(*((__le32
*) opt
->val
));
2384 *val
= (unsigned long) opt
->val
;
2388 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type
, opt
->len
, *val
);
2392 static void l2cap_add_conf_opt(void **ptr
, u8 type
, u8 len
, unsigned long val
)
2394 struct l2cap_conf_opt
*opt
= *ptr
;
2396 BT_DBG("type 0x%2.2x len %d val 0x%lx", type
, len
, val
);
2403 *((u8
*) opt
->val
) = val
;
2407 *((__le16
*) opt
->val
) = cpu_to_le16(val
);
2411 *((__le32
*) opt
->val
) = cpu_to_le32(val
);
2415 memcpy(opt
->val
, (void *) val
, len
);
2419 *ptr
+= L2CAP_CONF_OPT_SIZE
+ len
;
2422 static void l2cap_ack_timeout(unsigned long arg
)
2424 struct sock
*sk
= (void *) arg
;
2427 l2cap_send_ack(l2cap_pi(sk
));
2431 static inline void l2cap_ertm_init(struct sock
*sk
)
2433 l2cap_pi(sk
)->expected_ack_seq
= 0;
2434 l2cap_pi(sk
)->unacked_frames
= 0;
2435 l2cap_pi(sk
)->buffer_seq
= 0;
2436 l2cap_pi(sk
)->num_acked
= 0;
2437 l2cap_pi(sk
)->frames_sent
= 0;
2439 setup_timer(&l2cap_pi(sk
)->retrans_timer
,
2440 l2cap_retrans_timeout
, (unsigned long) sk
);
2441 setup_timer(&l2cap_pi(sk
)->monitor_timer
,
2442 l2cap_monitor_timeout
, (unsigned long) sk
);
2443 setup_timer(&l2cap_pi(sk
)->ack_timer
,
2444 l2cap_ack_timeout
, (unsigned long) sk
);
2446 __skb_queue_head_init(SREJ_QUEUE(sk
));
2447 __skb_queue_head_init(BUSY_QUEUE(sk
));
2449 INIT_WORK(&l2cap_pi(sk
)->busy_work
, l2cap_busy_work
);
2452 static inline __u8
l2cap_select_mode(__u8 mode
, __u16 remote_feat_mask
)
2455 case L2CAP_MODE_STREAMING
:
2456 case L2CAP_MODE_ERTM
:
2457 if (l2cap_mode_supported(mode
, remote_feat_mask
))
2461 return L2CAP_MODE_BASIC
;
2465 static int l2cap_build_conf_req(struct sock
*sk
, void *data
)
2467 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2468 struct l2cap_conf_req
*req
= data
;
2469 struct l2cap_conf_rfc rfc
= { .mode
= pi
->mode
};
2470 void *ptr
= req
->data
;
2472 BT_DBG("sk %p", sk
);
2474 if (pi
->num_conf_req
|| pi
->num_conf_rsp
)
2478 case L2CAP_MODE_STREAMING
:
2479 case L2CAP_MODE_ERTM
:
2480 if (pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)
2485 pi
->mode
= l2cap_select_mode(rfc
.mode
, pi
->conn
->feat_mask
);
2491 case L2CAP_MODE_BASIC
:
2492 if (pi
->imtu
!= L2CAP_DEFAULT_MTU
)
2493 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->imtu
);
2495 rfc
.mode
= L2CAP_MODE_BASIC
;
2497 rfc
.max_transmit
= 0;
2498 rfc
.retrans_timeout
= 0;
2499 rfc
.monitor_timeout
= 0;
2500 rfc
.max_pdu_size
= 0;
2504 case L2CAP_MODE_ERTM
:
2505 rfc
.mode
= L2CAP_MODE_ERTM
;
2506 rfc
.txwin_size
= pi
->tx_win
;
2507 rfc
.max_transmit
= pi
->max_tx
;
2508 rfc
.retrans_timeout
= 0;
2509 rfc
.monitor_timeout
= 0;
2510 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2511 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2512 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2514 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2517 if (pi
->fcs
== L2CAP_FCS_NONE
||
2518 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2519 pi
->fcs
= L2CAP_FCS_NONE
;
2520 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2524 case L2CAP_MODE_STREAMING
:
2525 rfc
.mode
= L2CAP_MODE_STREAMING
;
2527 rfc
.max_transmit
= 0;
2528 rfc
.retrans_timeout
= 0;
2529 rfc
.monitor_timeout
= 0;
2530 rfc
.max_pdu_size
= cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE
);
2531 if (L2CAP_DEFAULT_MAX_PDU_SIZE
> pi
->conn
->mtu
- 10)
2532 rfc
.max_pdu_size
= cpu_to_le16(pi
->conn
->mtu
- 10);
2534 if (!(pi
->conn
->feat_mask
& L2CAP_FEAT_FCS
))
2537 if (pi
->fcs
== L2CAP_FCS_NONE
||
2538 pi
->conf_state
& L2CAP_CONF_NO_FCS_RECV
) {
2539 pi
->fcs
= L2CAP_FCS_NONE
;
2540 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FCS
, 1, pi
->fcs
);
2545 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
, sizeof(rfc
),
2546 (unsigned long) &rfc
);
2548 /* FIXME: Need actual value of the flush timeout */
2549 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2550 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2552 req
->dcid
= cpu_to_le16(pi
->dcid
);
2553 req
->flags
= cpu_to_le16(0);
2558 static int l2cap_parse_conf_req(struct sock
*sk
, void *data
)
2560 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2561 struct l2cap_conf_rsp
*rsp
= data
;
2562 void *ptr
= rsp
->data
;
2563 void *req
= pi
->conf_req
;
2564 int len
= pi
->conf_len
;
2565 int type
, hint
, olen
;
2567 struct l2cap_conf_rfc rfc
= { .mode
= L2CAP_MODE_BASIC
};
2568 u16 mtu
= L2CAP_DEFAULT_MTU
;
2569 u16 result
= L2CAP_CONF_SUCCESS
;
2571 BT_DBG("sk %p", sk
);
2573 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2574 len
-= l2cap_get_conf_opt(&req
, &type
, &olen
, &val
);
2576 hint
= type
& L2CAP_CONF_HINT
;
2577 type
&= L2CAP_CONF_MASK
;
2580 case L2CAP_CONF_MTU
:
2584 case L2CAP_CONF_FLUSH_TO
:
2588 case L2CAP_CONF_QOS
:
2591 case L2CAP_CONF_RFC
:
2592 if (olen
== sizeof(rfc
))
2593 memcpy(&rfc
, (void *) val
, olen
);
2596 case L2CAP_CONF_FCS
:
2597 if (val
== L2CAP_FCS_NONE
)
2598 pi
->conf_state
|= L2CAP_CONF_NO_FCS_RECV
;
2606 result
= L2CAP_CONF_UNKNOWN
;
2607 *((u8
*) ptr
++) = type
;
2612 if (pi
->num_conf_rsp
|| pi
->num_conf_req
)
2616 case L2CAP_MODE_STREAMING
:
2617 case L2CAP_MODE_ERTM
:
2618 if (!(pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
)) {
2619 pi
->mode
= l2cap_select_mode(rfc
.mode
,
2620 pi
->conn
->feat_mask
);
2624 if (pi
->mode
!= rfc
.mode
)
2625 return -ECONNREFUSED
;
2631 if (pi
->mode
!= rfc
.mode
) {
2632 result
= L2CAP_CONF_UNACCEPT
;
2633 rfc
.mode
= pi
->mode
;
2635 if (pi
->num_conf_rsp
== 1)
2636 return -ECONNREFUSED
;
2638 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2639 sizeof(rfc
), (unsigned long) &rfc
);
2643 if (result
== L2CAP_CONF_SUCCESS
) {
2644 /* Configure output options and let the other side know
2645 * which ones we don't like. */
2647 if (mtu
< L2CAP_DEFAULT_MIN_MTU
)
2648 result
= L2CAP_CONF_UNACCEPT
;
2651 pi
->conf_state
|= L2CAP_CONF_MTU_DONE
;
2653 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2656 case L2CAP_MODE_BASIC
:
2657 pi
->fcs
= L2CAP_FCS_NONE
;
2658 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2661 case L2CAP_MODE_ERTM
:
2662 pi
->remote_tx_win
= rfc
.txwin_size
;
2663 pi
->remote_max_tx
= rfc
.max_transmit
;
2664 if (rfc
.max_pdu_size
> pi
->conn
->mtu
- 10)
2665 rfc
.max_pdu_size
= le16_to_cpu(pi
->conn
->mtu
- 10);
2667 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2669 rfc
.retrans_timeout
=
2670 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO
);
2671 rfc
.monitor_timeout
=
2672 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO
);
2674 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2676 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2677 sizeof(rfc
), (unsigned long) &rfc
);
2681 case L2CAP_MODE_STREAMING
:
2682 if (rfc
.max_pdu_size
> pi
->conn
->mtu
- 10)
2683 rfc
.max_pdu_size
= le16_to_cpu(pi
->conn
->mtu
- 10);
2685 pi
->remote_mps
= le16_to_cpu(rfc
.max_pdu_size
);
2687 pi
->conf_state
|= L2CAP_CONF_MODE_DONE
;
2689 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2690 sizeof(rfc
), (unsigned long) &rfc
);
2695 result
= L2CAP_CONF_UNACCEPT
;
2697 memset(&rfc
, 0, sizeof(rfc
));
2698 rfc
.mode
= pi
->mode
;
2701 if (result
== L2CAP_CONF_SUCCESS
)
2702 pi
->conf_state
|= L2CAP_CONF_OUTPUT_DONE
;
2704 rsp
->scid
= cpu_to_le16(pi
->dcid
);
2705 rsp
->result
= cpu_to_le16(result
);
2706 rsp
->flags
= cpu_to_le16(0x0000);
2711 static int l2cap_parse_conf_rsp(struct sock
*sk
, void *rsp
, int len
, void *data
, u16
*result
)
2713 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2714 struct l2cap_conf_req
*req
= data
;
2715 void *ptr
= req
->data
;
2718 struct l2cap_conf_rfc rfc
;
2720 BT_DBG("sk %p, rsp %p, len %d, req %p", sk
, rsp
, len
, data
);
2722 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2723 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2726 case L2CAP_CONF_MTU
:
2727 if (val
< L2CAP_DEFAULT_MIN_MTU
) {
2728 *result
= L2CAP_CONF_UNACCEPT
;
2729 pi
->omtu
= L2CAP_DEFAULT_MIN_MTU
;
2732 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_MTU
, 2, pi
->omtu
);
2735 case L2CAP_CONF_FLUSH_TO
:
2737 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_FLUSH_TO
,
2741 case L2CAP_CONF_RFC
:
2742 if (olen
== sizeof(rfc
))
2743 memcpy(&rfc
, (void *)val
, olen
);
2745 if ((pi
->conf_state
& L2CAP_CONF_STATE2_DEVICE
) &&
2746 rfc
.mode
!= pi
->mode
)
2747 return -ECONNREFUSED
;
2751 l2cap_add_conf_opt(&ptr
, L2CAP_CONF_RFC
,
2752 sizeof(rfc
), (unsigned long) &rfc
);
2757 if (pi
->mode
== L2CAP_MODE_BASIC
&& pi
->mode
!= rfc
.mode
)
2758 return -ECONNREFUSED
;
2760 pi
->mode
= rfc
.mode
;
2762 if (*result
== L2CAP_CONF_SUCCESS
) {
2764 case L2CAP_MODE_ERTM
:
2765 pi
->remote_tx_win
= rfc
.txwin_size
;
2766 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2767 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2768 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2770 case L2CAP_MODE_STREAMING
:
2771 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2775 req
->dcid
= cpu_to_le16(pi
->dcid
);
2776 req
->flags
= cpu_to_le16(0x0000);
2781 static int l2cap_build_conf_rsp(struct sock
*sk
, void *data
, u16 result
, u16 flags
)
2783 struct l2cap_conf_rsp
*rsp
= data
;
2784 void *ptr
= rsp
->data
;
2786 BT_DBG("sk %p", sk
);
2788 rsp
->scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
2789 rsp
->result
= cpu_to_le16(result
);
2790 rsp
->flags
= cpu_to_le16(flags
);
2795 static void l2cap_conf_rfc_get(struct sock
*sk
, void *rsp
, int len
)
2797 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
2800 struct l2cap_conf_rfc rfc
;
2802 BT_DBG("sk %p, rsp %p, len %d", sk
, rsp
, len
);
2804 if ((pi
->mode
!= L2CAP_MODE_ERTM
) && (pi
->mode
!= L2CAP_MODE_STREAMING
))
2807 while (len
>= L2CAP_CONF_OPT_SIZE
) {
2808 len
-= l2cap_get_conf_opt(&rsp
, &type
, &olen
, &val
);
2811 case L2CAP_CONF_RFC
:
2812 if (olen
== sizeof(rfc
))
2813 memcpy(&rfc
, (void *)val
, olen
);
2820 case L2CAP_MODE_ERTM
:
2821 pi
->remote_tx_win
= rfc
.txwin_size
;
2822 pi
->retrans_timeout
= le16_to_cpu(rfc
.retrans_timeout
);
2823 pi
->monitor_timeout
= le16_to_cpu(rfc
.monitor_timeout
);
2824 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2826 case L2CAP_MODE_STREAMING
:
2827 pi
->mps
= le16_to_cpu(rfc
.max_pdu_size
);
2831 static inline int l2cap_command_rej(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2833 struct l2cap_cmd_rej
*rej
= (struct l2cap_cmd_rej
*) data
;
2835 if (rej
->reason
!= 0x0000)
2838 if ((conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_SENT
) &&
2839 cmd
->ident
== conn
->info_ident
) {
2840 del_timer(&conn
->info_timer
);
2842 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
2843 conn
->info_ident
= 0;
2845 l2cap_conn_start(conn
);
2851 static inline int l2cap_connect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2853 struct l2cap_chan_list
*list
= &conn
->chan_list
;
2854 struct l2cap_conn_req
*req
= (struct l2cap_conn_req
*) data
;
2855 struct l2cap_conn_rsp rsp
;
2856 struct sock
*sk
, *parent
;
2857 int result
, status
= L2CAP_CS_NO_INFO
;
2859 u16 dcid
= 0, scid
= __le16_to_cpu(req
->scid
);
2860 __le16 psm
= req
->psm
;
2862 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm
, scid
);
2864 /* Check if we have socket listening on psm */
2865 parent
= l2cap_get_sock_by_psm(BT_LISTEN
, psm
, conn
->src
);
2867 result
= L2CAP_CR_BAD_PSM
;
2871 /* Check if the ACL is secure enough (if not SDP) */
2872 if (psm
!= cpu_to_le16(0x0001) &&
2873 !hci_conn_check_link_mode(conn
->hcon
)) {
2874 conn
->disc_reason
= 0x05;
2875 result
= L2CAP_CR_SEC_BLOCK
;
2879 result
= L2CAP_CR_NO_MEM
;
2881 /* Check for backlog size */
2882 if (sk_acceptq_is_full(parent
)) {
2883 BT_DBG("backlog full %d", parent
->sk_ack_backlog
);
2887 sk
= l2cap_sock_alloc(sock_net(parent
), NULL
, BTPROTO_L2CAP
, GFP_ATOMIC
);
2891 write_lock_bh(&list
->lock
);
2893 /* Check if we already have channel with that dcid */
2894 if (__l2cap_get_chan_by_dcid(list
, scid
)) {
2895 write_unlock_bh(&list
->lock
);
2896 sock_set_flag(sk
, SOCK_ZAPPED
);
2897 l2cap_sock_kill(sk
);
2901 hci_conn_hold(conn
->hcon
);
2903 l2cap_sock_init(sk
, parent
);
2904 bacpy(&bt_sk(sk
)->src
, conn
->src
);
2905 bacpy(&bt_sk(sk
)->dst
, conn
->dst
);
2906 l2cap_pi(sk
)->psm
= psm
;
2907 l2cap_pi(sk
)->dcid
= scid
;
2909 __l2cap_chan_add(conn
, sk
, parent
);
2910 dcid
= l2cap_pi(sk
)->scid
;
2912 l2cap_sock_set_timer(sk
, sk
->sk_sndtimeo
);
2914 l2cap_pi(sk
)->ident
= cmd
->ident
;
2916 if (conn
->info_state
& L2CAP_INFO_FEAT_MASK_REQ_DONE
) {
2917 if (l2cap_check_security(sk
)) {
2918 if (bt_sk(sk
)->defer_setup
) {
2919 sk
->sk_state
= BT_CONNECT2
;
2920 result
= L2CAP_CR_PEND
;
2921 status
= L2CAP_CS_AUTHOR_PEND
;
2922 parent
->sk_data_ready(parent
, 0);
2924 sk
->sk_state
= BT_CONFIG
;
2925 result
= L2CAP_CR_SUCCESS
;
2926 status
= L2CAP_CS_NO_INFO
;
2929 sk
->sk_state
= BT_CONNECT2
;
2930 result
= L2CAP_CR_PEND
;
2931 status
= L2CAP_CS_AUTHEN_PEND
;
2934 sk
->sk_state
= BT_CONNECT2
;
2935 result
= L2CAP_CR_PEND
;
2936 status
= L2CAP_CS_NO_INFO
;
2939 write_unlock_bh(&list
->lock
);
2942 bh_unlock_sock(parent
);
2945 rsp
.scid
= cpu_to_le16(scid
);
2946 rsp
.dcid
= cpu_to_le16(dcid
);
2947 rsp
.result
= cpu_to_le16(result
);
2948 rsp
.status
= cpu_to_le16(status
);
2949 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
2951 if (result
== L2CAP_CR_PEND
&& status
== L2CAP_CS_NO_INFO
) {
2952 struct l2cap_info_req info
;
2953 info
.type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
2955 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_SENT
;
2956 conn
->info_ident
= l2cap_get_ident(conn
);
2958 mod_timer(&conn
->info_timer
, jiffies
+
2959 msecs_to_jiffies(L2CAP_INFO_TIMEOUT
));
2961 l2cap_send_cmd(conn
, conn
->info_ident
,
2962 L2CAP_INFO_REQ
, sizeof(info
), &info
);
2968 static inline int l2cap_connect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
2970 struct l2cap_conn_rsp
*rsp
= (struct l2cap_conn_rsp
*) data
;
2971 u16 scid
, dcid
, result
, status
;
2975 scid
= __le16_to_cpu(rsp
->scid
);
2976 dcid
= __le16_to_cpu(rsp
->dcid
);
2977 result
= __le16_to_cpu(rsp
->result
);
2978 status
= __le16_to_cpu(rsp
->status
);
2980 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid
, scid
, result
, status
);
2983 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
2987 sk
= l2cap_get_chan_by_ident(&conn
->chan_list
, cmd
->ident
);
2993 case L2CAP_CR_SUCCESS
:
2994 sk
->sk_state
= BT_CONFIG
;
2995 l2cap_pi(sk
)->ident
= 0;
2996 l2cap_pi(sk
)->dcid
= dcid
;
2997 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_REQ_SENT
;
2998 l2cap_pi(sk
)->conf_state
&= ~L2CAP_CONF_CONNECT_PEND
;
3000 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3001 l2cap_build_conf_req(sk
, req
), req
);
3002 l2cap_pi(sk
)->num_conf_req
++;
3006 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
3010 l2cap_chan_del(sk
, ECONNREFUSED
);
3018 static inline int l2cap_config_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u16 cmd_len
, u8
*data
)
3020 struct l2cap_conf_req
*req
= (struct l2cap_conf_req
*) data
;
3026 dcid
= __le16_to_cpu(req
->dcid
);
3027 flags
= __le16_to_cpu(req
->flags
);
3029 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid
, flags
);
3031 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
3035 if (sk
->sk_state
!= BT_CONFIG
) {
3036 struct l2cap_cmd_rej rej
;
3038 rej
.reason
= cpu_to_le16(0x0002);
3039 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_COMMAND_REJ
,
3044 /* Reject if config buffer is too small. */
3045 len
= cmd_len
- sizeof(*req
);
3046 if (l2cap_pi(sk
)->conf_len
+ len
> sizeof(l2cap_pi(sk
)->conf_req
)) {
3047 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3048 l2cap_build_conf_rsp(sk
, rsp
,
3049 L2CAP_CONF_REJECT
, flags
), rsp
);
3054 memcpy(l2cap_pi(sk
)->conf_req
+ l2cap_pi(sk
)->conf_len
, req
->data
, len
);
3055 l2cap_pi(sk
)->conf_len
+= len
;
3057 if (flags
& 0x0001) {
3058 /* Incomplete config. Send empty response. */
3059 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
,
3060 l2cap_build_conf_rsp(sk
, rsp
,
3061 L2CAP_CONF_SUCCESS
, 0x0001), rsp
);
3065 /* Complete config. */
3066 len
= l2cap_parse_conf_req(sk
, rsp
);
3068 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3072 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_CONF_RSP
, len
, rsp
);
3073 l2cap_pi(sk
)->num_conf_rsp
++;
3075 /* Reset config buffer. */
3076 l2cap_pi(sk
)->conf_len
= 0;
3078 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
))
3081 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_INPUT_DONE
) {
3082 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
3083 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
3084 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
3086 sk
->sk_state
= BT_CONNECTED
;
3088 l2cap_pi(sk
)->next_tx_seq
= 0;
3089 l2cap_pi(sk
)->expected_tx_seq
= 0;
3090 __skb_queue_head_init(TX_QUEUE(sk
));
3091 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3092 l2cap_ertm_init(sk
);
3094 l2cap_chan_ready(sk
);
3098 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_REQ_SENT
)) {
3100 l2cap_send_cmd(conn
, l2cap_get_ident(conn
), L2CAP_CONF_REQ
,
3101 l2cap_build_conf_req(sk
, buf
), buf
);
3102 l2cap_pi(sk
)->num_conf_req
++;
3110 static inline int l2cap_config_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3112 struct l2cap_conf_rsp
*rsp
= (struct l2cap_conf_rsp
*)data
;
3113 u16 scid
, flags
, result
;
3115 int len
= cmd
->len
- sizeof(*rsp
);
3117 scid
= __le16_to_cpu(rsp
->scid
);
3118 flags
= __le16_to_cpu(rsp
->flags
);
3119 result
= __le16_to_cpu(rsp
->result
);
3121 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3122 scid
, flags
, result
);
3124 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3129 case L2CAP_CONF_SUCCESS
:
3130 l2cap_conf_rfc_get(sk
, rsp
->data
, len
);
3133 case L2CAP_CONF_UNACCEPT
:
3134 if (l2cap_pi(sk
)->num_conf_rsp
<= L2CAP_CONF_MAX_CONF_RSP
) {
3137 if (len
> sizeof(req
) - sizeof(struct l2cap_conf_req
)) {
3138 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3142 /* throw out any old stored conf requests */
3143 result
= L2CAP_CONF_SUCCESS
;
3144 len
= l2cap_parse_conf_rsp(sk
, rsp
->data
,
3147 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3151 l2cap_send_cmd(conn
, l2cap_get_ident(conn
),
3152 L2CAP_CONF_REQ
, len
, req
);
3153 l2cap_pi(sk
)->num_conf_req
++;
3154 if (result
!= L2CAP_CONF_SUCCESS
)
3160 sk
->sk_err
= ECONNRESET
;
3161 l2cap_sock_set_timer(sk
, HZ
* 5);
3162 l2cap_send_disconn_req(conn
, sk
, ECONNRESET
);
3169 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_INPUT_DONE
;
3171 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_OUTPUT_DONE
) {
3172 if (!(l2cap_pi(sk
)->conf_state
& L2CAP_CONF_NO_FCS_RECV
) ||
3173 l2cap_pi(sk
)->fcs
!= L2CAP_FCS_NONE
)
3174 l2cap_pi(sk
)->fcs
= L2CAP_FCS_CRC16
;
3176 sk
->sk_state
= BT_CONNECTED
;
3177 l2cap_pi(sk
)->next_tx_seq
= 0;
3178 l2cap_pi(sk
)->expected_tx_seq
= 0;
3179 __skb_queue_head_init(TX_QUEUE(sk
));
3180 if (l2cap_pi(sk
)->mode
== L2CAP_MODE_ERTM
)
3181 l2cap_ertm_init(sk
);
3183 l2cap_chan_ready(sk
);
3191 static inline int l2cap_disconnect_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3193 struct l2cap_disconn_req
*req
= (struct l2cap_disconn_req
*) data
;
3194 struct l2cap_disconn_rsp rsp
;
3198 scid
= __le16_to_cpu(req
->scid
);
3199 dcid
= __le16_to_cpu(req
->dcid
);
3201 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid
, dcid
);
3203 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, dcid
);
3207 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
3208 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
3209 l2cap_send_cmd(conn
, cmd
->ident
, L2CAP_DISCONN_RSP
, sizeof(rsp
), &rsp
);
3211 sk
->sk_shutdown
= SHUTDOWN_MASK
;
3213 l2cap_chan_del(sk
, ECONNRESET
);
3216 l2cap_sock_kill(sk
);
3220 static inline int l2cap_disconnect_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3222 struct l2cap_disconn_rsp
*rsp
= (struct l2cap_disconn_rsp
*) data
;
3226 scid
= __le16_to_cpu(rsp
->scid
);
3227 dcid
= __le16_to_cpu(rsp
->dcid
);
3229 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid
, scid
);
3231 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, scid
);
3235 l2cap_chan_del(sk
, 0);
3238 l2cap_sock_kill(sk
);
3242 static inline int l2cap_information_req(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3244 struct l2cap_info_req
*req
= (struct l2cap_info_req
*) data
;
3247 type
= __le16_to_cpu(req
->type
);
3249 BT_DBG("type 0x%4.4x", type
);
3251 if (type
== L2CAP_IT_FEAT_MASK
) {
3253 u32 feat_mask
= l2cap_feat_mask
;
3254 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3255 rsp
->type
= cpu_to_le16(L2CAP_IT_FEAT_MASK
);
3256 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3258 feat_mask
|= L2CAP_FEAT_ERTM
| L2CAP_FEAT_STREAMING
3260 put_unaligned_le32(feat_mask
, rsp
->data
);
3261 l2cap_send_cmd(conn
, cmd
->ident
,
3262 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3263 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3265 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) buf
;
3266 rsp
->type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3267 rsp
->result
= cpu_to_le16(L2CAP_IR_SUCCESS
);
3268 memcpy(buf
+ 4, l2cap_fixed_chan
, 8);
3269 l2cap_send_cmd(conn
, cmd
->ident
,
3270 L2CAP_INFO_RSP
, sizeof(buf
), buf
);
3272 struct l2cap_info_rsp rsp
;
3273 rsp
.type
= cpu_to_le16(type
);
3274 rsp
.result
= cpu_to_le16(L2CAP_IR_NOTSUPP
);
3275 l2cap_send_cmd(conn
, cmd
->ident
,
3276 L2CAP_INFO_RSP
, sizeof(rsp
), &rsp
);
3282 static inline int l2cap_information_rsp(struct l2cap_conn
*conn
, struct l2cap_cmd_hdr
*cmd
, u8
*data
)
3284 struct l2cap_info_rsp
*rsp
= (struct l2cap_info_rsp
*) data
;
3287 type
= __le16_to_cpu(rsp
->type
);
3288 result
= __le16_to_cpu(rsp
->result
);
3290 BT_DBG("type 0x%4.4x result 0x%2.2x", type
, result
);
3292 del_timer(&conn
->info_timer
);
3294 if (type
== L2CAP_IT_FEAT_MASK
) {
3295 conn
->feat_mask
= get_unaligned_le32(rsp
->data
);
3297 if (conn
->feat_mask
& L2CAP_FEAT_FIXED_CHAN
) {
3298 struct l2cap_info_req req
;
3299 req
.type
= cpu_to_le16(L2CAP_IT_FIXED_CHAN
);
3301 conn
->info_ident
= l2cap_get_ident(conn
);
3303 l2cap_send_cmd(conn
, conn
->info_ident
,
3304 L2CAP_INFO_REQ
, sizeof(req
), &req
);
3306 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3307 conn
->info_ident
= 0;
3309 l2cap_conn_start(conn
);
3311 } else if (type
== L2CAP_IT_FIXED_CHAN
) {
3312 conn
->info_state
|= L2CAP_INFO_FEAT_MASK_REQ_DONE
;
3313 conn
->info_ident
= 0;
3315 l2cap_conn_start(conn
);
3321 static inline void l2cap_sig_channel(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
3323 u8
*data
= skb
->data
;
3325 struct l2cap_cmd_hdr cmd
;
3328 l2cap_raw_recv(conn
, skb
);
3330 while (len
>= L2CAP_CMD_HDR_SIZE
) {
3332 memcpy(&cmd
, data
, L2CAP_CMD_HDR_SIZE
);
3333 data
+= L2CAP_CMD_HDR_SIZE
;
3334 len
-= L2CAP_CMD_HDR_SIZE
;
3336 cmd_len
= le16_to_cpu(cmd
.len
);
3338 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd
.code
, cmd_len
, cmd
.ident
);
3340 if (cmd_len
> len
|| !cmd
.ident
) {
3341 BT_DBG("corrupted command");
3346 case L2CAP_COMMAND_REJ
:
3347 l2cap_command_rej(conn
, &cmd
, data
);
3350 case L2CAP_CONN_REQ
:
3351 err
= l2cap_connect_req(conn
, &cmd
, data
);
3354 case L2CAP_CONN_RSP
:
3355 err
= l2cap_connect_rsp(conn
, &cmd
, data
);
3358 case L2CAP_CONF_REQ
:
3359 err
= l2cap_config_req(conn
, &cmd
, cmd_len
, data
);
3362 case L2CAP_CONF_RSP
:
3363 err
= l2cap_config_rsp(conn
, &cmd
, data
);
3366 case L2CAP_DISCONN_REQ
:
3367 err
= l2cap_disconnect_req(conn
, &cmd
, data
);
3370 case L2CAP_DISCONN_RSP
:
3371 err
= l2cap_disconnect_rsp(conn
, &cmd
, data
);
3374 case L2CAP_ECHO_REQ
:
3375 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_ECHO_RSP
, cmd_len
, data
);
3378 case L2CAP_ECHO_RSP
:
3381 case L2CAP_INFO_REQ
:
3382 err
= l2cap_information_req(conn
, &cmd
, data
);
3385 case L2CAP_INFO_RSP
:
3386 err
= l2cap_information_rsp(conn
, &cmd
, data
);
3390 BT_ERR("Unknown signaling command 0x%2.2x", cmd
.code
);
3396 struct l2cap_cmd_rej rej
;
3397 BT_DBG("error %d", err
);
3399 /* FIXME: Map err to a valid reason */
3400 rej
.reason
= cpu_to_le16(0);
3401 l2cap_send_cmd(conn
, cmd
.ident
, L2CAP_COMMAND_REJ
, sizeof(rej
), &rej
);
3411 static int l2cap_check_fcs(struct l2cap_pinfo
*pi
, struct sk_buff
*skb
)
3413 u16 our_fcs
, rcv_fcs
;
3414 int hdr_size
= L2CAP_HDR_SIZE
+ 2;
3416 if (pi
->fcs
== L2CAP_FCS_CRC16
) {
3417 skb_trim(skb
, skb
->len
- 2);
3418 rcv_fcs
= get_unaligned_le16(skb
->data
+ skb
->len
);
3419 our_fcs
= crc16(0, skb
->data
- hdr_size
, skb
->len
+ hdr_size
);
3421 if (our_fcs
!= rcv_fcs
)
3427 static inline void l2cap_send_i_or_rr_or_rnr(struct sock
*sk
)
3429 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3432 pi
->frames_sent
= 0;
3434 control
|= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3436 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3437 control
|= L2CAP_SUPER_RCV_NOT_READY
;
3438 l2cap_send_sframe(pi
, control
);
3439 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3442 if (pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
)
3443 l2cap_retransmit_frames(sk
);
3445 l2cap_ertm_send(sk
);
3447 if (!(pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) &&
3448 pi
->frames_sent
== 0) {
3449 control
|= L2CAP_SUPER_RCV_READY
;
3450 l2cap_send_sframe(pi
, control
);
3454 static int l2cap_add_to_srej_queue(struct sock
*sk
, struct sk_buff
*skb
, u8 tx_seq
, u8 sar
)
3456 struct sk_buff
*next_skb
;
3457 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3458 int tx_seq_offset
, next_tx_seq_offset
;
3460 bt_cb(skb
)->tx_seq
= tx_seq
;
3461 bt_cb(skb
)->sar
= sar
;
3463 next_skb
= skb_peek(SREJ_QUEUE(sk
));
3465 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3469 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3470 if (tx_seq_offset
< 0)
3471 tx_seq_offset
+= 64;
3474 if (bt_cb(next_skb
)->tx_seq
== tx_seq
)
3477 next_tx_seq_offset
= (bt_cb(next_skb
)->tx_seq
-
3478 pi
->buffer_seq
) % 64;
3479 if (next_tx_seq_offset
< 0)
3480 next_tx_seq_offset
+= 64;
3482 if (next_tx_seq_offset
> tx_seq_offset
) {
3483 __skb_queue_before(SREJ_QUEUE(sk
), next_skb
, skb
);
3487 if (skb_queue_is_last(SREJ_QUEUE(sk
), next_skb
))
3490 } while ((next_skb
= skb_queue_next(SREJ_QUEUE(sk
), next_skb
)));
3492 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3497 static int l2cap_ertm_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3499 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3500 struct sk_buff
*_skb
;
3503 switch (control
& L2CAP_CTRL_SAR
) {
3504 case L2CAP_SDU_UNSEGMENTED
:
3505 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3508 err
= sock_queue_rcv_skb(sk
, skb
);
3514 case L2CAP_SDU_START
:
3515 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
)
3518 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3520 if (pi
->sdu_len
> pi
->imtu
)
3523 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3527 /* pull sdu_len bytes only after alloc, because of Local Busy
3528 * condition we have to be sure that this will be executed
3529 * only once, i.e., when alloc does not fail */
3532 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3534 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3535 pi
->partial_sdu_len
= skb
->len
;
3538 case L2CAP_SDU_CONTINUE
:
3539 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3545 pi
->partial_sdu_len
+= skb
->len
;
3546 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3549 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3554 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3560 if (!(pi
->conn_state
& L2CAP_CONN_SAR_RETRY
)) {
3561 pi
->partial_sdu_len
+= skb
->len
;
3563 if (pi
->partial_sdu_len
> pi
->imtu
)
3566 if (pi
->partial_sdu_len
!= pi
->sdu_len
)
3569 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3572 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3574 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3578 err
= sock_queue_rcv_skb(sk
, _skb
);
3581 pi
->conn_state
|= L2CAP_CONN_SAR_RETRY
;
3585 pi
->conn_state
&= ~L2CAP_CONN_SAR_RETRY
;
3586 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3600 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3605 static void l2cap_busy_work(struct work_struct
*work
)
3607 DECLARE_WAITQUEUE(wait
, current
);
3608 struct l2cap_pinfo
*pi
=
3609 container_of(work
, struct l2cap_pinfo
, busy_work
);
3610 struct sock
*sk
= (struct sock
*)pi
;
3611 int n_tries
= 0, timeo
= HZ
/5, err
;
3612 struct sk_buff
*skb
;
3617 add_wait_queue(sk_sleep(sk
), &wait
);
3618 while ((skb
= skb_peek(BUSY_QUEUE(sk
)))) {
3619 set_current_state(TASK_INTERRUPTIBLE
);
3621 if (n_tries
++ > L2CAP_LOCAL_BUSY_TRIES
) {
3623 l2cap_send_disconn_req(pi
->conn
, sk
, EBUSY
);
3630 if (signal_pending(current
)) {
3631 err
= sock_intr_errno(timeo
);
3636 timeo
= schedule_timeout(timeo
);
3639 err
= sock_error(sk
);
3643 while ((skb
= skb_dequeue(BUSY_QUEUE(sk
)))) {
3644 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3645 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3647 skb_queue_head(BUSY_QUEUE(sk
), skb
);
3651 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3658 if (!(pi
->conn_state
& L2CAP_CONN_RNR_SENT
))
3661 control
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3662 control
|= L2CAP_SUPER_RCV_READY
| L2CAP_CTRL_POLL
;
3663 l2cap_send_sframe(pi
, control
);
3664 l2cap_pi(sk
)->retry_count
= 1;
3666 del_timer(&pi
->retrans_timer
);
3667 __mod_monitor_timer();
3669 l2cap_pi(sk
)->conn_state
|= L2CAP_CONN_WAIT_F
;
3672 pi
->conn_state
&= ~L2CAP_CONN_LOCAL_BUSY
;
3673 pi
->conn_state
&= ~L2CAP_CONN_RNR_SENT
;
3675 BT_DBG("sk %p, Exit local busy", sk
);
3677 set_current_state(TASK_RUNNING
);
3678 remove_wait_queue(sk_sleep(sk
), &wait
);
3683 static int l2cap_push_rx_skb(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3685 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3688 if (pi
->conn_state
& L2CAP_CONN_LOCAL_BUSY
) {
3689 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3690 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3694 err
= l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3696 pi
->buffer_seq
= (pi
->buffer_seq
+ 1) % 64;
3700 /* Busy Condition */
3701 BT_DBG("sk %p, Enter local busy", sk
);
3703 pi
->conn_state
|= L2CAP_CONN_LOCAL_BUSY
;
3704 bt_cb(skb
)->sar
= control
>> L2CAP_CTRL_SAR_SHIFT
;
3705 __skb_queue_tail(BUSY_QUEUE(sk
), skb
);
3707 sctrl
= pi
->buffer_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3708 sctrl
|= L2CAP_SUPER_RCV_NOT_READY
;
3709 l2cap_send_sframe(pi
, sctrl
);
3711 pi
->conn_state
|= L2CAP_CONN_RNR_SENT
;
3713 del_timer(&pi
->ack_timer
);
3715 queue_work(_busy_wq
, &pi
->busy_work
);
3720 static int l2cap_streaming_reassembly_sdu(struct sock
*sk
, struct sk_buff
*skb
, u16 control
)
3722 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3723 struct sk_buff
*_skb
;
3727 * TODO: We have to notify the userland if some data is lost with the
3731 switch (control
& L2CAP_CTRL_SAR
) {
3732 case L2CAP_SDU_UNSEGMENTED
:
3733 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3738 err
= sock_queue_rcv_skb(sk
, skb
);
3744 case L2CAP_SDU_START
:
3745 if (pi
->conn_state
& L2CAP_CONN_SAR_SDU
) {
3750 pi
->sdu_len
= get_unaligned_le16(skb
->data
);
3753 if (pi
->sdu_len
> pi
->imtu
) {
3758 pi
->sdu
= bt_skb_alloc(pi
->sdu_len
, GFP_ATOMIC
);
3764 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3766 pi
->conn_state
|= L2CAP_CONN_SAR_SDU
;
3767 pi
->partial_sdu_len
= skb
->len
;
3771 case L2CAP_SDU_CONTINUE
:
3772 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3775 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3777 pi
->partial_sdu_len
+= skb
->len
;
3778 if (pi
->partial_sdu_len
> pi
->sdu_len
)
3786 if (!(pi
->conn_state
& L2CAP_CONN_SAR_SDU
))
3789 memcpy(skb_put(pi
->sdu
, skb
->len
), skb
->data
, skb
->len
);
3791 pi
->conn_state
&= ~L2CAP_CONN_SAR_SDU
;
3792 pi
->partial_sdu_len
+= skb
->len
;
3794 if (pi
->partial_sdu_len
> pi
->imtu
)
3797 if (pi
->partial_sdu_len
== pi
->sdu_len
) {
3798 _skb
= skb_clone(pi
->sdu
, GFP_ATOMIC
);
3799 err
= sock_queue_rcv_skb(sk
, _skb
);
3814 static void l2cap_check_srej_gap(struct sock
*sk
, u8 tx_seq
)
3816 struct sk_buff
*skb
;
3819 while ((skb
= skb_peek(SREJ_QUEUE(sk
)))) {
3820 if (bt_cb(skb
)->tx_seq
!= tx_seq
)
3823 skb
= skb_dequeue(SREJ_QUEUE(sk
));
3824 control
= bt_cb(skb
)->sar
<< L2CAP_CTRL_SAR_SHIFT
;
3825 l2cap_ertm_reassembly_sdu(sk
, skb
, control
);
3826 l2cap_pi(sk
)->buffer_seq_srej
=
3827 (l2cap_pi(sk
)->buffer_seq_srej
+ 1) % 64;
3828 tx_seq
= (tx_seq
+ 1) % 64;
3832 static void l2cap_resend_srejframe(struct sock
*sk
, u8 tx_seq
)
3834 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3835 struct srej_list
*l
, *tmp
;
3838 list_for_each_entry_safe(l
, tmp
, SREJ_LIST(sk
), list
) {
3839 if (l
->tx_seq
== tx_seq
) {
3844 control
= L2CAP_SUPER_SELECT_REJECT
;
3845 control
|= l
->tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3846 l2cap_send_sframe(pi
, control
);
3848 list_add_tail(&l
->list
, SREJ_LIST(sk
));
3852 static void l2cap_send_srejframe(struct sock
*sk
, u8 tx_seq
)
3854 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3855 struct srej_list
*new;
3858 while (tx_seq
!= pi
->expected_tx_seq
) {
3859 control
= L2CAP_SUPER_SELECT_REJECT
;
3860 control
|= pi
->expected_tx_seq
<< L2CAP_CTRL_REQSEQ_SHIFT
;
3861 l2cap_send_sframe(pi
, control
);
3863 new = kzalloc(sizeof(struct srej_list
), GFP_ATOMIC
);
3864 new->tx_seq
= pi
->expected_tx_seq
;
3865 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3866 list_add_tail(&new->list
, SREJ_LIST(sk
));
3868 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3871 static inline int l2cap_data_channel_iframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
3873 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
3874 u8 tx_seq
= __get_txseq(rx_control
);
3875 u8 req_seq
= __get_reqseq(rx_control
);
3876 u8 sar
= rx_control
>> L2CAP_CTRL_SAR_SHIFT
;
3877 int tx_seq_offset
, expected_tx_seq_offset
;
3878 int num_to_ack
= (pi
->tx_win
/6) + 1;
3881 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk
, skb
->len
, tx_seq
,
3884 if (L2CAP_CTRL_FINAL
& rx_control
&&
3885 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
3886 del_timer(&pi
->monitor_timer
);
3887 if (pi
->unacked_frames
> 0)
3888 __mod_retrans_timer();
3889 pi
->conn_state
&= ~L2CAP_CONN_WAIT_F
;
3892 pi
->expected_ack_seq
= req_seq
;
3893 l2cap_drop_acked_frames(sk
);
3895 if (tx_seq
== pi
->expected_tx_seq
)
3898 tx_seq_offset
= (tx_seq
- pi
->buffer_seq
) % 64;
3899 if (tx_seq_offset
< 0)
3900 tx_seq_offset
+= 64;
3902 /* invalid tx_seq */
3903 if (tx_seq_offset
>= pi
->tx_win
) {
3904 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
3908 if (pi
->conn_state
== L2CAP_CONN_LOCAL_BUSY
)
3911 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3912 struct srej_list
*first
;
3914 first
= list_first_entry(SREJ_LIST(sk
),
3915 struct srej_list
, list
);
3916 if (tx_seq
== first
->tx_seq
) {
3917 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3918 l2cap_check_srej_gap(sk
, tx_seq
);
3920 list_del(&first
->list
);
3923 if (list_empty(SREJ_LIST(sk
))) {
3924 pi
->buffer_seq
= pi
->buffer_seq_srej
;
3925 pi
->conn_state
&= ~L2CAP_CONN_SREJ_SENT
;
3927 BT_DBG("sk %p, Exit SREJ_SENT", sk
);
3930 struct srej_list
*l
;
3932 /* duplicated tx_seq */
3933 if (l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
) < 0)
3936 list_for_each_entry(l
, SREJ_LIST(sk
), list
) {
3937 if (l
->tx_seq
== tx_seq
) {
3938 l2cap_resend_srejframe(sk
, tx_seq
);
3942 l2cap_send_srejframe(sk
, tx_seq
);
3945 expected_tx_seq_offset
=
3946 (pi
->expected_tx_seq
- pi
->buffer_seq
) % 64;
3947 if (expected_tx_seq_offset
< 0)
3948 expected_tx_seq_offset
+= 64;
3950 /* duplicated tx_seq */
3951 if (tx_seq_offset
< expected_tx_seq_offset
)
3954 pi
->conn_state
|= L2CAP_CONN_SREJ_SENT
;
3956 BT_DBG("sk %p, Enter SREJ", sk
);
3958 INIT_LIST_HEAD(SREJ_LIST(sk
));
3959 pi
->buffer_seq_srej
= pi
->buffer_seq
;
3961 __skb_queue_head_init(SREJ_QUEUE(sk
));
3962 __skb_queue_head_init(BUSY_QUEUE(sk
));
3963 l2cap_add_to_srej_queue(sk
, skb
, tx_seq
, sar
);
3965 pi
->conn_state
|= L2CAP_CONN_SEND_PBIT
;
3967 l2cap_send_srejframe(sk
, tx_seq
);
3969 del_timer(&pi
->ack_timer
);
3974 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
3976 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
3977 bt_cb(skb
)->tx_seq
= tx_seq
;
3978 bt_cb(skb
)->sar
= sar
;
3979 __skb_queue_tail(SREJ_QUEUE(sk
), skb
);
3983 err
= l2cap_push_rx_skb(sk
, skb
, rx_control
);
3987 if (rx_control
& L2CAP_CTRL_FINAL
) {
3988 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
3989 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
3991 l2cap_retransmit_frames(sk
);
3996 pi
->num_acked
= (pi
->num_acked
+ 1) % num_to_ack
;
3997 if (pi
->num_acked
== num_to_ack
- 1)
4007 static inline void l2cap_data_channel_rrframe(struct sock
*sk
, u16 rx_control
)
4009 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4011 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, __get_reqseq(rx_control
),
4014 pi
->expected_ack_seq
= __get_reqseq(rx_control
);
4015 l2cap_drop_acked_frames(sk
);
4017 if (rx_control
& L2CAP_CTRL_POLL
) {
4018 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4019 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4020 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
4021 (pi
->unacked_frames
> 0))
4022 __mod_retrans_timer();
4024 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4025 l2cap_send_srejtail(sk
);
4027 l2cap_send_i_or_rr_or_rnr(sk
);
4030 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
4031 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4033 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4034 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4036 l2cap_retransmit_frames(sk
);
4039 if ((pi
->conn_state
& L2CAP_CONN_REMOTE_BUSY
) &&
4040 (pi
->unacked_frames
> 0))
4041 __mod_retrans_timer();
4043 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4044 if (pi
->conn_state
& L2CAP_CONN_SREJ_SENT
) {
4047 l2cap_ertm_send(sk
);
4052 static inline void l2cap_data_channel_rejframe(struct sock
*sk
, u16 rx_control
)
4054 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4055 u8 tx_seq
= __get_reqseq(rx_control
);
4057 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4059 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4061 pi
->expected_ack_seq
= tx_seq
;
4062 l2cap_drop_acked_frames(sk
);
4064 if (rx_control
& L2CAP_CTRL_FINAL
) {
4065 if (pi
->conn_state
& L2CAP_CONN_REJ_ACT
)
4066 pi
->conn_state
&= ~L2CAP_CONN_REJ_ACT
;
4068 l2cap_retransmit_frames(sk
);
4070 l2cap_retransmit_frames(sk
);
4072 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
)
4073 pi
->conn_state
|= L2CAP_CONN_REJ_ACT
;
4076 static inline void l2cap_data_channel_srejframe(struct sock
*sk
, u16 rx_control
)
4078 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4079 u8 tx_seq
= __get_reqseq(rx_control
);
4081 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4083 pi
->conn_state
&= ~L2CAP_CONN_REMOTE_BUSY
;
4085 if (rx_control
& L2CAP_CTRL_POLL
) {
4086 pi
->expected_ack_seq
= tx_seq
;
4087 l2cap_drop_acked_frames(sk
);
4089 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4090 l2cap_retransmit_one_frame(sk
, tx_seq
);
4092 l2cap_ertm_send(sk
);
4094 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
4095 pi
->srej_save_reqseq
= tx_seq
;
4096 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
4098 } else if (rx_control
& L2CAP_CTRL_FINAL
) {
4099 if ((pi
->conn_state
& L2CAP_CONN_SREJ_ACT
) &&
4100 pi
->srej_save_reqseq
== tx_seq
)
4101 pi
->conn_state
&= ~L2CAP_CONN_SREJ_ACT
;
4103 l2cap_retransmit_one_frame(sk
, tx_seq
);
4105 l2cap_retransmit_one_frame(sk
, tx_seq
);
4106 if (pi
->conn_state
& L2CAP_CONN_WAIT_F
) {
4107 pi
->srej_save_reqseq
= tx_seq
;
4108 pi
->conn_state
|= L2CAP_CONN_SREJ_ACT
;
4113 static inline void l2cap_data_channel_rnrframe(struct sock
*sk
, u16 rx_control
)
4115 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4116 u8 tx_seq
= __get_reqseq(rx_control
);
4118 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk
, tx_seq
, rx_control
);
4120 pi
->conn_state
|= L2CAP_CONN_REMOTE_BUSY
;
4121 pi
->expected_ack_seq
= tx_seq
;
4122 l2cap_drop_acked_frames(sk
);
4124 if (rx_control
& L2CAP_CTRL_POLL
)
4125 pi
->conn_state
|= L2CAP_CONN_SEND_FBIT
;
4127 if (!(pi
->conn_state
& L2CAP_CONN_SREJ_SENT
)) {
4128 del_timer(&pi
->retrans_timer
);
4129 if (rx_control
& L2CAP_CTRL_POLL
)
4130 l2cap_send_rr_or_rnr(pi
, L2CAP_CTRL_FINAL
);
4134 if (rx_control
& L2CAP_CTRL_POLL
)
4135 l2cap_send_srejtail(sk
);
4137 l2cap_send_sframe(pi
, L2CAP_SUPER_RCV_READY
);
4140 static inline int l2cap_data_channel_sframe(struct sock
*sk
, u16 rx_control
, struct sk_buff
*skb
)
4142 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk
, rx_control
, skb
->len
);
4144 if (L2CAP_CTRL_FINAL
& rx_control
&&
4145 l2cap_pi(sk
)->conn_state
& L2CAP_CONN_WAIT_F
) {
4146 del_timer(&l2cap_pi(sk
)->monitor_timer
);
4147 if (l2cap_pi(sk
)->unacked_frames
> 0)
4148 __mod_retrans_timer();
4149 l2cap_pi(sk
)->conn_state
&= ~L2CAP_CONN_WAIT_F
;
4152 switch (rx_control
& L2CAP_CTRL_SUPERVISE
) {
4153 case L2CAP_SUPER_RCV_READY
:
4154 l2cap_data_channel_rrframe(sk
, rx_control
);
4157 case L2CAP_SUPER_REJECT
:
4158 l2cap_data_channel_rejframe(sk
, rx_control
);
4161 case L2CAP_SUPER_SELECT_REJECT
:
4162 l2cap_data_channel_srejframe(sk
, rx_control
);
4165 case L2CAP_SUPER_RCV_NOT_READY
:
4166 l2cap_data_channel_rnrframe(sk
, rx_control
);
4174 static inline int l2cap_data_channel(struct l2cap_conn
*conn
, u16 cid
, struct sk_buff
*skb
)
4177 struct l2cap_pinfo
*pi
;
4180 int len
, next_tx_seq_offset
, req_seq_offset
;
4182 sk
= l2cap_get_chan_by_scid(&conn
->chan_list
, cid
);
4184 BT_DBG("unknown cid 0x%4.4x", cid
);
4190 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4192 if (sk
->sk_state
!= BT_CONNECTED
)
4196 case L2CAP_MODE_BASIC
:
4197 /* If socket recv buffers overflows we drop data here
4198 * which is *bad* because L2CAP has to be reliable.
4199 * But we don't have any other choice. L2CAP doesn't
4200 * provide flow control mechanism. */
4202 if (pi
->imtu
< skb
->len
)
4205 if (!sock_queue_rcv_skb(sk
, skb
))
4209 case L2CAP_MODE_ERTM
:
4210 control
= get_unaligned_le16(skb
->data
);
4215 * We can just drop the corrupted I-frame here.
4216 * Receiver will miss it and start proper recovery
4217 * procedures and ask retransmission.
4219 if (l2cap_check_fcs(pi
, skb
))
4222 if (__is_sar_start(control
) && __is_iframe(control
))
4225 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4228 if (len
> pi
->mps
) {
4229 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4233 req_seq
= __get_reqseq(control
);
4234 req_seq_offset
= (req_seq
- pi
->expected_ack_seq
) % 64;
4235 if (req_seq_offset
< 0)
4236 req_seq_offset
+= 64;
4238 next_tx_seq_offset
=
4239 (pi
->next_tx_seq
- pi
->expected_ack_seq
) % 64;
4240 if (next_tx_seq_offset
< 0)
4241 next_tx_seq_offset
+= 64;
4243 /* check for invalid req-seq */
4244 if (req_seq_offset
> next_tx_seq_offset
) {
4245 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4249 if (__is_iframe(control
)) {
4251 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4255 l2cap_data_channel_iframe(sk
, control
, skb
);
4258 l2cap_send_disconn_req(pi
->conn
, sk
, ECONNRESET
);
4262 l2cap_data_channel_sframe(sk
, control
, skb
);
4267 case L2CAP_MODE_STREAMING
:
4268 control
= get_unaligned_le16(skb
->data
);
4272 if (l2cap_check_fcs(pi
, skb
))
4275 if (__is_sar_start(control
))
4278 if (pi
->fcs
== L2CAP_FCS_CRC16
)
4281 if (len
> pi
->mps
|| len
< 0 || __is_sframe(control
))
4284 tx_seq
= __get_txseq(control
);
4286 if (pi
->expected_tx_seq
== tx_seq
)
4287 pi
->expected_tx_seq
= (pi
->expected_tx_seq
+ 1) % 64;
4289 pi
->expected_tx_seq
= (tx_seq
+ 1) % 64;
4291 l2cap_streaming_reassembly_sdu(sk
, skb
, control
);
4296 BT_DBG("sk %p: bad mode 0x%2.2x", sk
, pi
->mode
);
4310 static inline int l2cap_conless_channel(struct l2cap_conn
*conn
, __le16 psm
, struct sk_buff
*skb
)
4314 sk
= l2cap_get_sock_by_psm(0, psm
, conn
->src
);
4318 BT_DBG("sk %p, len %d", sk
, skb
->len
);
4320 if (sk
->sk_state
!= BT_BOUND
&& sk
->sk_state
!= BT_CONNECTED
)
4323 if (l2cap_pi(sk
)->imtu
< skb
->len
)
4326 if (!sock_queue_rcv_skb(sk
, skb
))
4338 static void l2cap_recv_frame(struct l2cap_conn
*conn
, struct sk_buff
*skb
)
4340 struct l2cap_hdr
*lh
= (void *) skb
->data
;
4344 skb_pull(skb
, L2CAP_HDR_SIZE
);
4345 cid
= __le16_to_cpu(lh
->cid
);
4346 len
= __le16_to_cpu(lh
->len
);
4348 if (len
!= skb
->len
) {
4353 BT_DBG("len %d, cid 0x%4.4x", len
, cid
);
4356 case L2CAP_CID_SIGNALING
:
4357 l2cap_sig_channel(conn
, skb
);
4360 case L2CAP_CID_CONN_LESS
:
4361 psm
= get_unaligned_le16(skb
->data
);
4363 l2cap_conless_channel(conn
, psm
, skb
);
4367 l2cap_data_channel(conn
, cid
, skb
);
4372 /* ---- L2CAP interface with lower layer (HCI) ---- */
4374 static int l2cap_connect_ind(struct hci_dev
*hdev
, bdaddr_t
*bdaddr
, u8 type
)
4376 int exact
= 0, lm1
= 0, lm2
= 0;
4377 register struct sock
*sk
;
4378 struct hlist_node
*node
;
4380 if (type
!= ACL_LINK
)
4383 BT_DBG("hdev %s, bdaddr %s", hdev
->name
, batostr(bdaddr
));
4385 /* Find listening sockets and check their link_mode */
4386 read_lock(&l2cap_sk_list
.lock
);
4387 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4388 if (sk
->sk_state
!= BT_LISTEN
)
4391 if (!bacmp(&bt_sk(sk
)->src
, &hdev
->bdaddr
)) {
4392 lm1
|= HCI_LM_ACCEPT
;
4393 if (l2cap_pi(sk
)->role_switch
)
4394 lm1
|= HCI_LM_MASTER
;
4396 } else if (!bacmp(&bt_sk(sk
)->src
, BDADDR_ANY
)) {
4397 lm2
|= HCI_LM_ACCEPT
;
4398 if (l2cap_pi(sk
)->role_switch
)
4399 lm2
|= HCI_LM_MASTER
;
4402 read_unlock(&l2cap_sk_list
.lock
);
4404 return exact
? lm1
: lm2
;
4407 static int l2cap_connect_cfm(struct hci_conn
*hcon
, u8 status
)
4409 struct l2cap_conn
*conn
;
4411 BT_DBG("hcon %p bdaddr %s status %d", hcon
, batostr(&hcon
->dst
), status
);
4413 if (hcon
->type
!= ACL_LINK
)
4417 conn
= l2cap_conn_add(hcon
, status
);
4419 l2cap_conn_ready(conn
);
4421 l2cap_conn_del(hcon
, bt_err(status
));
4426 static int l2cap_disconn_ind(struct hci_conn
*hcon
)
4428 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4430 BT_DBG("hcon %p", hcon
);
4432 if (hcon
->type
!= ACL_LINK
|| !conn
)
4435 return conn
->disc_reason
;
4438 static int l2cap_disconn_cfm(struct hci_conn
*hcon
, u8 reason
)
4440 BT_DBG("hcon %p reason %d", hcon
, reason
);
4442 if (hcon
->type
!= ACL_LINK
)
4445 l2cap_conn_del(hcon
, bt_err(reason
));
4450 static inline void l2cap_check_encryption(struct sock
*sk
, u8 encrypt
)
4452 if (sk
->sk_type
!= SOCK_SEQPACKET
&& sk
->sk_type
!= SOCK_STREAM
)
4455 if (encrypt
== 0x00) {
4456 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
) {
4457 l2cap_sock_clear_timer(sk
);
4458 l2cap_sock_set_timer(sk
, HZ
* 5);
4459 } else if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_HIGH
)
4460 __l2cap_sock_close(sk
, ECONNREFUSED
);
4462 if (l2cap_pi(sk
)->sec_level
== BT_SECURITY_MEDIUM
)
4463 l2cap_sock_clear_timer(sk
);
4467 static int l2cap_security_cfm(struct hci_conn
*hcon
, u8 status
, u8 encrypt
)
4469 struct l2cap_chan_list
*l
;
4470 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4476 l
= &conn
->chan_list
;
4478 BT_DBG("conn %p", conn
);
4480 read_lock(&l
->lock
);
4482 for (sk
= l
->head
; sk
; sk
= l2cap_pi(sk
)->next_c
) {
4485 if (l2cap_pi(sk
)->conf_state
& L2CAP_CONF_CONNECT_PEND
) {
4490 if (!status
&& (sk
->sk_state
== BT_CONNECTED
||
4491 sk
->sk_state
== BT_CONFIG
)) {
4492 l2cap_check_encryption(sk
, encrypt
);
4497 if (sk
->sk_state
== BT_CONNECT
) {
4499 struct l2cap_conn_req req
;
4500 req
.scid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4501 req
.psm
= l2cap_pi(sk
)->psm
;
4503 l2cap_pi(sk
)->ident
= l2cap_get_ident(conn
);
4504 l2cap_pi(sk
)->conf_state
|= L2CAP_CONF_CONNECT_PEND
;
4506 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4507 L2CAP_CONN_REQ
, sizeof(req
), &req
);
4509 l2cap_sock_clear_timer(sk
);
4510 l2cap_sock_set_timer(sk
, HZ
/ 10);
4512 } else if (sk
->sk_state
== BT_CONNECT2
) {
4513 struct l2cap_conn_rsp rsp
;
4517 sk
->sk_state
= BT_CONFIG
;
4518 result
= L2CAP_CR_SUCCESS
;
4520 sk
->sk_state
= BT_DISCONN
;
4521 l2cap_sock_set_timer(sk
, HZ
/ 10);
4522 result
= L2CAP_CR_SEC_BLOCK
;
4525 rsp
.scid
= cpu_to_le16(l2cap_pi(sk
)->dcid
);
4526 rsp
.dcid
= cpu_to_le16(l2cap_pi(sk
)->scid
);
4527 rsp
.result
= cpu_to_le16(result
);
4528 rsp
.status
= cpu_to_le16(L2CAP_CS_NO_INFO
);
4529 l2cap_send_cmd(conn
, l2cap_pi(sk
)->ident
,
4530 L2CAP_CONN_RSP
, sizeof(rsp
), &rsp
);
4536 read_unlock(&l
->lock
);
4541 static int l2cap_recv_acldata(struct hci_conn
*hcon
, struct sk_buff
*skb
, u16 flags
)
4543 struct l2cap_conn
*conn
= hcon
->l2cap_data
;
4545 if (!conn
&& !(conn
= l2cap_conn_add(hcon
, 0)))
4548 BT_DBG("conn %p len %d flags 0x%x", conn
, skb
->len
, flags
);
4550 if (flags
& ACL_START
) {
4551 struct l2cap_hdr
*hdr
;
4555 BT_ERR("Unexpected start frame (len %d)", skb
->len
);
4556 kfree_skb(conn
->rx_skb
);
4557 conn
->rx_skb
= NULL
;
4559 l2cap_conn_unreliable(conn
, ECOMM
);
4563 BT_ERR("Frame is too short (len %d)", skb
->len
);
4564 l2cap_conn_unreliable(conn
, ECOMM
);
4568 hdr
= (struct l2cap_hdr
*) skb
->data
;
4569 len
= __le16_to_cpu(hdr
->len
) + L2CAP_HDR_SIZE
;
4571 if (len
== skb
->len
) {
4572 /* Complete frame received */
4573 l2cap_recv_frame(conn
, skb
);
4577 BT_DBG("Start: total len %d, frag len %d", len
, skb
->len
);
4579 if (skb
->len
> len
) {
4580 BT_ERR("Frame is too long (len %d, expected len %d)",
4582 l2cap_conn_unreliable(conn
, ECOMM
);
4586 /* Allocate skb for the complete frame (with header) */
4587 conn
->rx_skb
= bt_skb_alloc(len
, GFP_ATOMIC
);
4591 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4593 conn
->rx_len
= len
- skb
->len
;
4595 BT_DBG("Cont: frag len %d (expecting %d)", skb
->len
, conn
->rx_len
);
4597 if (!conn
->rx_len
) {
4598 BT_ERR("Unexpected continuation frame (len %d)", skb
->len
);
4599 l2cap_conn_unreliable(conn
, ECOMM
);
4603 if (skb
->len
> conn
->rx_len
) {
4604 BT_ERR("Fragment is too long (len %d, expected %d)",
4605 skb
->len
, conn
->rx_len
);
4606 kfree_skb(conn
->rx_skb
);
4607 conn
->rx_skb
= NULL
;
4609 l2cap_conn_unreliable(conn
, ECOMM
);
4613 skb_copy_from_linear_data(skb
, skb_put(conn
->rx_skb
, skb
->len
),
4615 conn
->rx_len
-= skb
->len
;
4617 if (!conn
->rx_len
) {
4618 /* Complete frame received */
4619 l2cap_recv_frame(conn
, conn
->rx_skb
);
4620 conn
->rx_skb
= NULL
;
4629 static int l2cap_debugfs_show(struct seq_file
*f
, void *p
)
4632 struct hlist_node
*node
;
4634 read_lock_bh(&l2cap_sk_list
.lock
);
4636 sk_for_each(sk
, node
, &l2cap_sk_list
.head
) {
4637 struct l2cap_pinfo
*pi
= l2cap_pi(sk
);
4639 seq_printf(f
, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4640 batostr(&bt_sk(sk
)->src
),
4641 batostr(&bt_sk(sk
)->dst
),
4642 sk
->sk_state
, __le16_to_cpu(pi
->psm
),
4644 pi
->imtu
, pi
->omtu
, pi
->sec_level
);
4647 read_unlock_bh(&l2cap_sk_list
.lock
);
4652 static int l2cap_debugfs_open(struct inode
*inode
, struct file
*file
)
4654 return single_open(file
, l2cap_debugfs_show
, inode
->i_private
);
4657 static const struct file_operations l2cap_debugfs_fops
= {
4658 .open
= l2cap_debugfs_open
,
4660 .llseek
= seq_lseek
,
4661 .release
= single_release
,
/* Dentry of the debugfs file, kept so l2cap_exit() can remove it. */
static struct dentry *l2cap_debugfs;
4666 static const struct proto_ops l2cap_sock_ops
= {
4667 .family
= PF_BLUETOOTH
,
4668 .owner
= THIS_MODULE
,
4669 .release
= l2cap_sock_release
,
4670 .bind
= l2cap_sock_bind
,
4671 .connect
= l2cap_sock_connect
,
4672 .listen
= l2cap_sock_listen
,
4673 .accept
= l2cap_sock_accept
,
4674 .getname
= l2cap_sock_getname
,
4675 .sendmsg
= l2cap_sock_sendmsg
,
4676 .recvmsg
= l2cap_sock_recvmsg
,
4677 .poll
= bt_sock_poll
,
4678 .ioctl
= bt_sock_ioctl
,
4679 .mmap
= sock_no_mmap
,
4680 .socketpair
= sock_no_socketpair
,
4681 .shutdown
= l2cap_sock_shutdown
,
4682 .setsockopt
= l2cap_sock_setsockopt
,
4683 .getsockopt
= l2cap_sock_getsockopt
4686 static const struct net_proto_family l2cap_sock_family_ops
= {
4687 .family
= PF_BLUETOOTH
,
4688 .owner
= THIS_MODULE
,
4689 .create
= l2cap_sock_create
,
4692 static struct hci_proto l2cap_hci_proto
= {
4694 .id
= HCI_PROTO_L2CAP
,
4695 .connect_ind
= l2cap_connect_ind
,
4696 .connect_cfm
= l2cap_connect_cfm
,
4697 .disconn_ind
= l2cap_disconn_ind
,
4698 .disconn_cfm
= l2cap_disconn_cfm
,
4699 .security_cfm
= l2cap_security_cfm
,
4700 .recv_acldata
= l2cap_recv_acldata
4703 static int __init
l2cap_init(void)
4707 err
= proto_register(&l2cap_proto
, 0);
4711 _busy_wq
= create_singlethread_workqueue("l2cap");
4715 err
= bt_sock_register(BTPROTO_L2CAP
, &l2cap_sock_family_ops
);
4717 BT_ERR("L2CAP socket registration failed");
4721 err
= hci_register_proto(&l2cap_hci_proto
);
4723 BT_ERR("L2CAP protocol registration failed");
4724 bt_sock_unregister(BTPROTO_L2CAP
);
4729 l2cap_debugfs
= debugfs_create_file("l2cap", 0444,
4730 bt_debugfs
, NULL
, &l2cap_debugfs_fops
);
4732 BT_ERR("Failed to create L2CAP debug file");
4735 BT_INFO("L2CAP ver %s", VERSION
);
4736 BT_INFO("L2CAP socket layer initialized");
4741 proto_unregister(&l2cap_proto
);
4745 static void __exit
l2cap_exit(void)
4747 debugfs_remove(l2cap_debugfs
);
4749 flush_workqueue(_busy_wq
);
4750 destroy_workqueue(_busy_wq
);
4752 if (bt_sock_unregister(BTPROTO_L2CAP
) < 0)
4753 BT_ERR("L2CAP socket unregistration failed");
4755 if (hci_unregister_proto(&l2cap_hci_proto
) < 0)
4756 BT_ERR("L2CAP protocol unregistration failed");
4758 proto_unregister(&l2cap_proto
);
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
}
EXPORT_SYMBOL(l2cap_load);
4769 module_init(l2cap_init
);
4770 module_exit(l2cap_exit
);
4772 module_param(enable_ertm
, bool, 0644);
4773 MODULE_PARM_DESC(enable_ertm
, "Enable enhanced retransmission mode");
4775 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4776 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION
);
4777 MODULE_VERSION(VERSION
);
4778 MODULE_LICENSE("GPL");
4779 MODULE_ALIAS("bt-proto-0");