Bluetooth: Move more vars to struct l2cap_chan
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57
58 int disable_ertm;
59
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
62
63 static struct workqueue_struct *_busy_wq;
64
65 struct bt_sock_list l2cap_sk_list = {
66 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
67 };
68
69 static void l2cap_busy_work(struct work_struct *work);
70
71 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
72 u8 code, u8 ident, u16 dlen, void *data);
73 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
74
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
76
77 /* ---- L2CAP channels ---- */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
79 {
80 struct l2cap_chan *c;
81
82 list_for_each_entry(c, &conn->chan_l, list) {
83 struct sock *s = c->sk;
84 if (l2cap_pi(s)->dcid == cid)
85 return c;
86 }
87 return NULL;
88
89 }
90
91 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
92 {
93 struct l2cap_chan *c;
94
95 list_for_each_entry(c, &conn->chan_l, list) {
96 struct sock *s = c->sk;
97 if (l2cap_pi(s)->scid == cid)
98 return c;
99 }
100 return NULL;
101 }
102
103 /* Find channel with given SCID.
104 * Returns locked socket */
105 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
106 {
107 struct l2cap_chan *c;
108
109 read_lock(&conn->chan_lock);
110 c = __l2cap_get_chan_by_scid(conn, cid);
111 if (c)
112 bh_lock_sock(c->sk);
113 read_unlock(&conn->chan_lock);
114 return c;
115 }
116
117 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
118 {
119 struct l2cap_chan *c;
120
121 list_for_each_entry(c, &conn->chan_l, list) {
122 if (c->ident == ident)
123 return c;
124 }
125 return NULL;
126 }
127
128 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
129 {
130 struct l2cap_chan *c;
131
132 read_lock(&conn->chan_lock);
133 c = __l2cap_get_chan_by_ident(conn, ident);
134 if (c)
135 bh_lock_sock(c->sk);
136 read_unlock(&conn->chan_lock);
137 return c;
138 }
139
140 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
141 {
142 u16 cid = L2CAP_CID_DYN_START;
143
144 for (; cid < L2CAP_CID_DYN_END; cid++) {
145 if (!__l2cap_get_chan_by_scid(conn, cid))
146 return cid;
147 }
148
149 return 0;
150 }
151
152 struct l2cap_chan *l2cap_chan_alloc(struct sock *sk)
153 {
154 struct l2cap_chan *chan;
155
156 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
157 if (!chan)
158 return NULL;
159
160 chan->sk = sk;
161
162 return chan;
163 }
164
/* Attach @chan to @conn: pick CIDs and default MTU for the socket type
 * and link the channel into conn->chan_l.  The caller must hold
 * conn->chan_lock for writing (see l2cap_chan_add()). */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* default disconnect reason (0x13) until something more
	 * specific is recorded */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection: fixed LE data CID, LE default MTU */
			l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
			l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
			l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			l2cap_pi(sk)->scid = l2cap_alloc_cid(conn);
			l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}

	/* The channel list below holds a reference on the socket;
	 * dropped via __sock_put() in l2cap_chan_del() */
	sock_hold(sk);

	list_add(&chan->list, &conn->chan_l);
}
203
/* Delete channel.
 * Must be called on the locked socket.
 * Unlinks @chan from its connection, marks the socket closed/zapped,
 * wakes whoever is waiting on it, then frees all ERTM state (queues,
 * timers, SREJ list) before freeing the channel itself. */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		write_lock_bh(&conn->chan_lock);
		list_del(&chan->list);
		write_unlock_bh(&conn->chan_lock);
		/* drop the reference taken by __l2cap_chan_add() */
		__sock_put(sk);

		l2cap_pi(sk)->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Pending accept(): detach and wake the listener */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	/* ERTM resources only exist once both configuration directions
	 * completed; otherwise just free the channel */
	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE &&
			chan->conf_state & L2CAP_CONF_INPUT_DONE))
		goto free;

	skb_queue_purge(&chan->tx_q);

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		del_timer(&chan->retrans_timer);
		del_timer(&chan->monitor_timer);
		del_timer(&chan->ack_timer);

		skb_queue_purge(&chan->srej_q);
		skb_queue_purge(&chan->busy_q);

		/* free outstanding SREJ bookkeeping entries */
		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			list_del(&l->list);
			kfree(l);
		}
	}

free:
	kfree(chan);
}
264
265 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
266 {
267 struct sock *sk = chan->sk;
268
269 if (sk->sk_type == SOCK_RAW) {
270 switch (chan->sec_level) {
271 case BT_SECURITY_HIGH:
272 return HCI_AT_DEDICATED_BONDING_MITM;
273 case BT_SECURITY_MEDIUM:
274 return HCI_AT_DEDICATED_BONDING;
275 default:
276 return HCI_AT_NO_BONDING;
277 }
278 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
279 if (chan->sec_level == BT_SECURITY_LOW)
280 chan->sec_level = BT_SECURITY_SDP;
281
282 if (chan->sec_level == BT_SECURITY_HIGH)
283 return HCI_AT_NO_BONDING_MITM;
284 else
285 return HCI_AT_NO_BONDING;
286 } else {
287 switch (chan->sec_level) {
288 case BT_SECURITY_HIGH:
289 return HCI_AT_GENERAL_BONDING_MITM;
290 case BT_SECURITY_MEDIUM:
291 return HCI_AT_GENERAL_BONDING;
292 default:
293 return HCI_AT_NO_BONDING;
294 }
295 }
296 }
297
298 /* Service level security */
299 static inline int l2cap_check_security(struct l2cap_chan *chan)
300 {
301 struct l2cap_conn *conn = l2cap_pi(chan->sk)->conn;
302 __u8 auth_type;
303
304 auth_type = l2cap_get_auth_type(chan);
305
306 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
307 }
308
309 u8 l2cap_get_ident(struct l2cap_conn *conn)
310 {
311 u8 id;
312
313 /* Get next available identificator.
314 * 1 - 128 are used by kernel.
315 * 129 - 199 are reserved.
316 * 200 - 254 are used by utilities like l2ping, etc.
317 */
318
319 spin_lock_bh(&conn->lock);
320
321 if (++conn->tx_ident > 128)
322 conn->tx_ident = 1;
323
324 id = conn->tx_ident;
325
326 spin_unlock_bh(&conn->lock);
327
328 return id;
329 }
330
331 void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
332 {
333 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
334 u8 flags;
335
336 BT_DBG("code 0x%2.2x", code);
337
338 if (!skb)
339 return;
340
341 if (lmp_no_flush_capable(conn->hcon->hdev))
342 flags = ACL_START_NO_FLUSH;
343 else
344 flags = ACL_START;
345
346 hci_send_acl(conn->hcon, skb, flags);
347 }
348
/* Build and transmit one ERTM S-frame on @chan with the given @control
 * bits, adding the F-bit/P-bit when a pending request is flagged in
 * conn_state and appending a CRC16 FCS when negotiated. */
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
	struct l2cap_conn *conn = pi->conn;
	struct sock *sk = (struct sock *)pi;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* header + 16-bit control */
	u8 flags;

	if (sk->sk_state != BT_CONNECTED)
		return;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the FCS */

	BT_DBG("chan %p, control 0x%2.2x", chan, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	/* consume a pending F-bit request.
	 * NOTE(review): the flag is cleared before the allocation below;
	 * if bt_skb_alloc() fails the pending F/P bit is lost — confirm
	 * this is acceptable for the state machine */
	if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
		control |= L2CAP_CTRL_FINAL;
		chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* consume a pending P-bit request */
	if (chan->conn_state & L2CAP_CONN_SEND_PBIT) {
		control |= L2CAP_CTRL_POLL;
		chan->conn_state &= ~L2CAP_CONN_SEND_PBIT;
	}

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(pi->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	/* FCS covers everything before it (header + control) */
	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	hci_send_acl(pi->conn->hcon, skb, flags);
}
401
402 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
403 {
404 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
405 control |= L2CAP_SUPER_RCV_NOT_READY;
406 chan->conn_state |= L2CAP_CONN_RNR_SENT;
407 } else
408 control |= L2CAP_SUPER_RCV_READY;
409
410 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
411
412 l2cap_send_sframe(chan, control);
413 }
414
415 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
416 {
417 return !(chan->conf_state & L2CAP_CONF_CONNECT_PEND);
418 }
419
/* Start connection establishment for @chan: when the remote feature
 * mask is already known, send a Connect Request (subject to security);
 * otherwise first query the features with an Information Request and
 * let l2cap_conn_start() resume once it is answered or times out. */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* feature exchange still in flight: wait for it */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(chan) &&
				__l2cap_no_conn_pending(chan)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			chan->ident = l2cap_get_ident(conn);
			chan->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* bound the wait for the peer's answer */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
455
456 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
457 {
458 u32 local_feat_mask = l2cap_feat_mask;
459 if (!disable_ertm)
460 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
461
462 switch (mode) {
463 case L2CAP_MODE_ERTM:
464 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
465 case L2CAP_MODE_STREAMING:
466 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
467 default:
468 return 0x00;
469 }
470 }
471
/* Send a Disconnect Request for @chan, stop its ERTM timers and move
 * the socket into BT_DISCONN, recording @err for userspace. */
void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	sk = chan->sk;

	/* stop all ERTM timers before tearing the channel down */
	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		del_timer(&chan->retrans_timer);
		del_timer(&chan->monitor_timer);
		del_timer(&chan->ack_timer);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
496
/* ---- L2CAP connections ---- */

/* Advance the connect state machine of every channel on @conn: emit
 * Connect Requests for channels in BT_CONNECT and Connect Responses
 * (plus the initial Configure Request) for channels in BT_CONNECT2.
 * Typically called once the remote feature mask becomes known. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* only connection-oriented channels take part */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			struct l2cap_conn_req req;

			if (!l2cap_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				bh_unlock_sock(sk);
				continue;
			}

			/* channel mandates a mode the peer doesn't
			 * support: close it */
			if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
					conn->feat_mask)
					&& chan->conf_state &
					L2CAP_CONF_STATE2_DEVICE) {
				/* __l2cap_sock_close() calls list_del(chan)
				 * so release the lock.
				 * NOTE(review): the outer lock was taken with
				 * read_lock() but is dropped/re-taken here
				 * with the _bh variants — confirm this
				 * imbalance is intentional in this context */
				read_unlock_bh(&conn->chan_lock);
				__l2cap_sock_close(sk, ECONNRESET);
				read_lock_bh(&conn->chan_lock);
				bh_unlock_sock(sk);
				continue;
			}

			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
			req.psm = l2cap_pi(sk)->psm;

			chan->ident = l2cap_get_ident(conn);
			chan->conf_state |= L2CAP_CONF_CONNECT_PEND;

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);

		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			if (l2cap_check_security(chan)) {
				if (bt_sk(sk)->defer_setup) {
					/* userspace authorizes: report
					 * pending and wake the listener */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					parent->sk_data_ready(parent, 0);

				} else {
					sk->sk_state = BT_CONFIG;
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			/* configuration starts only after a successful
			 * response, and only once per channel */
			if (chan->conf_state & L2CAP_CONF_REQ_SENT ||
					rsp.result != L2CAP_CR_SUCCESS) {
				bh_unlock_sock(sk);
				continue;
			}

			chan->conf_state |= L2CAP_CONF_REQ_SENT;
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
591
/* Find socket with cid and source bdaddr.
 * Returns the closest match: an exact source-address match wins over a
 * BDADDR_ANY wildcard match.
 * NOTE(review): despite the historical "locked" claim, no socket lock
 * is taken here — callers lock the result themselves. */
static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->scid == cid) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node is non-NULL only when the loop broke early, i.e. an exact
	 * match was found; otherwise fall back to the wildcard candidate */
	return node ? sk : sk1;
}
621
/* A new LE link is up: if a socket is listening on the LE data CID for
 * our source address, create a child socket plus channel, attach them
 * to @conn and signal the listener's accept queue. */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!parent)
		return;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	chan = l2cap_chan_alloc(sk);
	if (!chan) {
		l2cap_sock_kill(sk);
		goto clean;
	}

	l2cap_pi(sk)->chan = chan;

	write_lock_bh(&conn->chan_lock);

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	sk->sk_state = BT_CONNECTED;
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&conn->chan_lock);

	/* success falls through: only the parent lock remains to drop */
clean:
	bh_unlock_sock(parent);
}
678
/* The link-level connection behind @conn is ready: complete LE and
 * connectionless/raw channels immediately and kick off the L2CAP
 * connect procedure for connection-oriented ones. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* incoming LE links may need a server channel created first */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		if (conn->hcon->type == LE_LINK) {
			/* no L2CAP connect procedure on LE: ready now */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		}

		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* connectionless and raw sockets need no setup */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(chan);

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
714
715 /* Notify sockets that we cannot guaranty reliability anymore */
716 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
717 {
718 struct l2cap_chan *chan;
719
720 BT_DBG("conn %p", conn);
721
722 read_lock(&conn->chan_lock);
723
724 list_for_each_entry(chan, &conn->chan_l, list) {
725 struct sock *sk = chan->sk;
726
727 if (chan->force_reliable)
728 sk->sk_err = err;
729 }
730
731 read_unlock(&conn->chan_lock);
732 }
733
734 static void l2cap_info_timeout(unsigned long arg)
735 {
736 struct l2cap_conn *conn = (void *) arg;
737
738 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
739 conn->info_ident = 0;
740
741 l2cap_conn_start(conn);
742 }
743
/* Create the per-link L2CAP state for @hcon, unless it already exists
 * or the link setup failed (@status != 0).  Returns the existing or
 * new connection, or NULL on allocation failure. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* LE links may advertise their own, separate MTU */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* LE has no information-request procedure, hence no timer */
	if (hcon->type != LE_LINK)
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* default disconnect reason (0x13) until updated */
	conn->disc_reason = 0x13;

	return conn;
}
783
/* Tear down the L2CAP state of @hcon: kill every channel with @err,
 * stop the info timer and free the connection object. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* drop a partially reassembled frame, if any */
	kfree_skb(conn->rx_skb);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		sk = chan->sk;
		bh_lock_sock(sk);
		l2cap_chan_del(chan, err);
		bh_unlock_sock(sk);
		l2cap_sock_kill(sk);
	}

	/* the info timer is only armed once a feature request was sent */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	kfree(conn);
}
812
/* Add @chan to @conn's channel list under the channel-list write lock. */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	write_lock_bh(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	write_unlock_bh(&conn->chan_lock);
}
819
820 /* ---- Socket interface ---- */
821
/* Find socket with psm and source bdaddr.
 * Returns the closest match: an exact source-address match wins over a
 * BDADDR_ANY wildcard match.  The returned socket is not locked. */
static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node is non-NULL only on early break (exact match); otherwise
	 * fall back to the wildcard candidate, which may be NULL */
	return node ? sk : sk1;
}
851
/* Initiate an outgoing connection for @chan: resolve the HCI route,
 * bring up (or reuse) the ACL/LE link, attach the channel and start
 * the L2CAP connect procedure.  Returns 0 or a negative errno. */
int l2cap_chan_connect(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(chan);

	/* the LE data CID selects an LE link instead of classic ACL */
	if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, chan);

	sk->sk_state = BT_CONNECT;
	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		/* link already up: raw/dgram sockets are done, the rest
		 * start the L2CAP connect procedure right away */
		if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			l2cap_sock_clear_timer(sk);
			if (l2cap_check_security(chan))
				sk->sk_state = BT_CONNECTED;
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
918
/* Wait (interruptibly, polling in HZ/5 slices) until all outstanding
 * ERTM I-frames are acked or the connection goes away.  Called with
 * the socket locked; the lock is dropped while sleeping.  Returns 0,
 * a signal errno, or the pending socket error. */
int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while ((l2cap_pi(sk)->chan->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* re-arm the poll interval when the previous one expired */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
949
/* ERTM monitor timer: the peer did not answer our poll.  Give up once
 * retry_count reaches remote_max_tx, otherwise poll again (P-bit). */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	if (chan->retry_count >= chan->remote_max_tx) {
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, chan, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	chan->retry_count++;
	/* presumably re-arms chan->monitor_timer; the macro references
	 * the local variable named 'chan' — do not rename it */
	__mod_monitor_timer();

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
970
/* ERTM retransmission timer: no ack arrived in time, so start the
 * poll (P-bit) procedure and wait for the peer's F-bit response. */
static void l2cap_retrans_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	chan->retry_count = 1;
	/* presumably re-arms chan->monitor_timer via the local 'chan' */
	__mod_monitor_timer();

	chan->conn_state |= L2CAP_CONN_WAIT_F;

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
987
988 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
989 {
990 struct sk_buff *skb;
991
992 while ((skb = skb_peek(&chan->tx_q)) &&
993 chan->unacked_frames) {
994 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
995 break;
996
997 skb = skb_dequeue(&chan->tx_q);
998 kfree_skb(skb);
999
1000 chan->unacked_frames--;
1001 }
1002
1003 if (!chan->unacked_frames)
1004 del_timer(&chan->retrans_timer);
1005 }
1006
1007 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1008 {
1009 struct sock *sk = chan->sk;
1010 struct hci_conn *hcon = l2cap_pi(sk)->conn->hcon;
1011 u16 flags;
1012
1013 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1014
1015 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1016 flags = ACL_START_NO_FLUSH;
1017 else
1018 flags = ACL_START;
1019
1020 hci_send_acl(hcon, skb, flags);
1021 }
1022
1023 void l2cap_streaming_send(struct l2cap_chan *chan)
1024 {
1025 struct sk_buff *skb;
1026 u16 control, fcs;
1027
1028 while ((skb = skb_dequeue(&chan->tx_q))) {
1029 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1030 control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1031 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1032
1033 if (chan->fcs == L2CAP_FCS_CRC16) {
1034 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1035 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1036 }
1037
1038 l2cap_do_send(chan, skb);
1039
1040 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1041 }
1042 }
1043
1044 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1045 {
1046 struct sock *sk = chan->sk;
1047 struct l2cap_pinfo *pi = l2cap_pi(sk);
1048 struct sk_buff *skb, *tx_skb;
1049 u16 control, fcs;
1050
1051 skb = skb_peek(&chan->tx_q);
1052 if (!skb)
1053 return;
1054
1055 do {
1056 if (bt_cb(skb)->tx_seq == tx_seq)
1057 break;
1058
1059 if (skb_queue_is_last(&chan->tx_q, skb))
1060 return;
1061
1062 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1063
1064 if (chan->remote_max_tx &&
1065 bt_cb(skb)->retries == chan->remote_max_tx) {
1066 l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
1067 return;
1068 }
1069
1070 tx_skb = skb_clone(skb, GFP_ATOMIC);
1071 bt_cb(skb)->retries++;
1072 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1073 control &= L2CAP_CTRL_SAR;
1074
1075 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1076 control |= L2CAP_CTRL_FINAL;
1077 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1078 }
1079
1080 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1081 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1082
1083 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1084
1085 if (chan->fcs == L2CAP_FCS_CRC16) {
1086 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1087 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1088 }
1089
1090 l2cap_do_send(chan, tx_skb);
1091 }
1092
1093 int l2cap_ertm_send(struct l2cap_chan *chan)
1094 {
1095 struct sk_buff *skb, *tx_skb;
1096 struct sock *sk = chan->sk;
1097 struct l2cap_pinfo *pi = l2cap_pi(sk);
1098 u16 control, fcs;
1099 int nsent = 0;
1100
1101 if (sk->sk_state != BT_CONNECTED)
1102 return -ENOTCONN;
1103
1104 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1105
1106 if (chan->remote_max_tx &&
1107 bt_cb(skb)->retries == chan->remote_max_tx) {
1108 l2cap_send_disconn_req(pi->conn, chan, ECONNABORTED);
1109 break;
1110 }
1111
1112 tx_skb = skb_clone(skb, GFP_ATOMIC);
1113
1114 bt_cb(skb)->retries++;
1115
1116 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1117 control &= L2CAP_CTRL_SAR;
1118
1119 if (chan->conn_state & L2CAP_CONN_SEND_FBIT) {
1120 control |= L2CAP_CTRL_FINAL;
1121 chan->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1122 }
1123 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1124 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1125 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1126
1127
1128 if (chan->fcs == L2CAP_FCS_CRC16) {
1129 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1130 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1131 }
1132
1133 l2cap_do_send(chan, tx_skb);
1134
1135 __mod_retrans_timer();
1136
1137 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1138 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1139
1140 if (bt_cb(skb)->retries == 1)
1141 chan->unacked_frames++;
1142
1143 chan->frames_sent++;
1144
1145 if (skb_queue_is_last(&chan->tx_q, skb))
1146 chan->tx_send_head = NULL;
1147 else
1148 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1149
1150 nsent++;
1151 }
1152
1153 return nsent;
1154 }
1155
1156 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1157 {
1158 int ret;
1159
1160 if (!skb_queue_empty(&chan->tx_q))
1161 chan->tx_send_head = chan->tx_q.next;
1162
1163 chan->next_tx_seq = chan->expected_ack_seq;
1164 ret = l2cap_ertm_send(chan);
1165 return ret;
1166 }
1167
1168 static void l2cap_send_ack(struct l2cap_chan *chan)
1169 {
1170 u16 control = 0;
1171
1172 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1173
1174 if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1175 control |= L2CAP_SUPER_RCV_NOT_READY;
1176 chan->conn_state |= L2CAP_CONN_RNR_SENT;
1177 l2cap_send_sframe(chan, control);
1178 return;
1179 }
1180
1181 if (l2cap_ertm_send(chan) > 0)
1182 return;
1183
1184 control |= L2CAP_SUPER_RCV_READY;
1185 l2cap_send_sframe(chan, control);
1186 }
1187
1188 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1189 {
1190 struct srej_list *tail;
1191 u16 control;
1192
1193 control = L2CAP_SUPER_SELECT_REJECT;
1194 control |= L2CAP_CTRL_FINAL;
1195
1196 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1197 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1198
1199 l2cap_send_sframe(chan, control);
1200 }
1201
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the skb's linear area (already sized by the caller),
 * the remainder is chained as MTU-sized fragments on the skb's
 * frag_list.  Returns bytes copied or a negative errno.  On failure
 * the fragments built so far stay attached to @skb, so the caller's
 * kfree_skb(skb) releases everything. */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1233
1234 struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1235 {
1236 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1237 struct sk_buff *skb;
1238 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1239 struct l2cap_hdr *lh;
1240
1241 BT_DBG("sk %p len %d", sk, (int)len);
1242
1243 count = min_t(unsigned int, (conn->mtu - hlen), len);
1244 skb = bt_skb_send_alloc(sk, count + hlen,
1245 msg->msg_flags & MSG_DONTWAIT, &err);
1246 if (!skb)
1247 return ERR_PTR(err);
1248
1249 /* Create L2CAP header */
1250 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1251 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1252 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1253 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1254
1255 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1256 if (unlikely(err < 0)) {
1257 kfree_skb(skb);
1258 return ERR_PTR(err);
1259 }
1260 return skb;
1261 }
1262
1263 struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1264 {
1265 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1266 struct sk_buff *skb;
1267 int err, count, hlen = L2CAP_HDR_SIZE;
1268 struct l2cap_hdr *lh;
1269
1270 BT_DBG("sk %p len %d", sk, (int)len);
1271
1272 count = min_t(unsigned int, (conn->mtu - hlen), len);
1273 skb = bt_skb_send_alloc(sk, count + hlen,
1274 msg->msg_flags & MSG_DONTWAIT, &err);
1275 if (!skb)
1276 return ERR_PTR(err);
1277
1278 /* Create L2CAP header */
1279 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1280 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1281 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1282
1283 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1284 if (unlikely(err < 0)) {
1285 kfree_skb(skb);
1286 return ERR_PTR(err);
1287 }
1288 return skb;
1289 }
1290
1291 struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1292 {
1293 struct sock *sk = chan->sk;
1294 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1295 struct sk_buff *skb;
1296 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1297 struct l2cap_hdr *lh;
1298
1299 BT_DBG("sk %p len %d", sk, (int)len);
1300
1301 if (!conn)
1302 return ERR_PTR(-ENOTCONN);
1303
1304 if (sdulen)
1305 hlen += 2;
1306
1307 if (chan->fcs == L2CAP_FCS_CRC16)
1308 hlen += 2;
1309
1310 count = min_t(unsigned int, (conn->mtu - hlen), len);
1311 skb = bt_skb_send_alloc(sk, count + hlen,
1312 msg->msg_flags & MSG_DONTWAIT, &err);
1313 if (!skb)
1314 return ERR_PTR(err);
1315
1316 /* Create L2CAP header */
1317 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1318 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1319 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1320 put_unaligned_le16(control, skb_put(skb, 2));
1321 if (sdulen)
1322 put_unaligned_le16(sdulen, skb_put(skb, 2));
1323
1324 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1325 if (unlikely(err < 0)) {
1326 kfree_skb(skb);
1327 return ERR_PTR(err);
1328 }
1329
1330 if (chan->fcs == L2CAP_FCS_CRC16)
1331 put_unaligned_le16(0, skb_put(skb, 2));
1332
1333 bt_cb(skb)->retries = 0;
1334 return skb;
1335 }
1336
int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	/* Segment an SDU larger than the remote MPS into a SAR sequence:
	 * one Start PDU (which carries the total SDU length), zero or
	 * more Continuation PDUs, and a final End PDU.  The complete
	 * sequence is built on a private queue first so that on error
	 * nothing is left half-queued, then spliced onto the channel's
	 * transmit queue.  Returns total bytes queued or a negative errno.
	 */
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* Start PDU: payload is one MPS worth, sdulen parameter = total len */
	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	while (len > 0) {
		size_t buflen;

		/* Middle fragments are MPS-sized; the last one takes the rest */
		if (len > chan->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = chan->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	/* If nothing was pending, transmission starts at our first PDU */
	if (chan->tx_send_head == NULL)
		chan->tx_send_head = sar_queue.next;

	return size;
}
1381
1382 static void l2cap_chan_ready(struct sock *sk)
1383 {
1384 struct sock *parent = bt_sk(sk)->parent;
1385 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1386
1387 BT_DBG("sk %p, parent %p", sk, parent);
1388
1389 chan->conf_state = 0;
1390 l2cap_sock_clear_timer(sk);
1391
1392 if (!parent) {
1393 /* Outgoing channel.
1394 * Wake up socket sleeping on connect.
1395 */
1396 sk->sk_state = BT_CONNECTED;
1397 sk->sk_state_change(sk);
1398 } else {
1399 /* Incoming channel.
1400 * Wake up socket sleeping on accept.
1401 */
1402 parent->sk_data_ready(parent, 0);
1403 }
1404 }
1405
1406 /* Copy frame to all raw sockets on that connection */
1407 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1408 {
1409 struct sk_buff *nskb;
1410 struct l2cap_chan *chan;
1411
1412 BT_DBG("conn %p", conn);
1413
1414 read_lock(&conn->chan_lock);
1415 list_for_each_entry(chan, &conn->chan_l, list) {
1416 struct sock *sk = chan->sk;
1417 if (sk->sk_type != SOCK_RAW)
1418 continue;
1419
1420 /* Don't send frame to the socket it came from */
1421 if (skb->sk == sk)
1422 continue;
1423 nskb = skb_clone(skb, GFP_ATOMIC);
1424 if (!nskb)
1425 continue;
1426
1427 if (sock_queue_rcv_skb(sk, nskb))
1428 kfree_skb(nskb);
1429 }
1430 read_unlock(&conn->chan_lock);
1431 }
1432
1433 /* ---- L2CAP signalling commands ---- */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	/* Build a signalling PDU: L2CAP header + command header + @dlen
	 * bytes of @data.  Data that does not fit into one buffer of
	 * conn->mtu bytes is chained as frag_list fragments without
	 * headers.  Returns NULL on allocation failure; the caller owns
	 * the returned skb (and its fragment chain).
	 */
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links carry signalling on a dedicated fixed channel */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* First chunk of payload shares the skb with the headers */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	/* Bytes still to copy after the head skb */
	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Freeing the head skb also frees the fragments chained so far */
	kfree_skb(skb);
	return NULL;
}
1496
1497 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1498 {
1499 struct l2cap_conf_opt *opt = *ptr;
1500 int len;
1501
1502 len = L2CAP_CONF_OPT_SIZE + opt->len;
1503 *ptr += len;
1504
1505 *type = opt->type;
1506 *olen = opt->len;
1507
1508 switch (opt->len) {
1509 case 1:
1510 *val = *((u8 *) opt->val);
1511 break;
1512
1513 case 2:
1514 *val = get_unaligned_le16(opt->val);
1515 break;
1516
1517 case 4:
1518 *val = get_unaligned_le32(opt->val);
1519 break;
1520
1521 default:
1522 *val = (unsigned long) opt->val;
1523 break;
1524 }
1525
1526 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1527 return len;
1528 }
1529
1530 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1531 {
1532 struct l2cap_conf_opt *opt = *ptr;
1533
1534 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1535
1536 opt->type = type;
1537 opt->len = len;
1538
1539 switch (len) {
1540 case 1:
1541 *((u8 *) opt->val) = val;
1542 break;
1543
1544 case 2:
1545 put_unaligned_le16(val, opt->val);
1546 break;
1547
1548 case 4:
1549 put_unaligned_le32(val, opt->val);
1550 break;
1551
1552 default:
1553 memcpy(opt->val, (void *) val, len);
1554 break;
1555 }
1556
1557 *ptr += L2CAP_CONF_OPT_SIZE + len;
1558 }
1559
1560 static void l2cap_ack_timeout(unsigned long arg)
1561 {
1562 struct l2cap_chan *chan = (void *) arg;
1563
1564 bh_lock_sock(chan->sk);
1565 l2cap_send_ack(chan);
1566 bh_unlock_sock(chan->sk);
1567 }
1568
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
	/* Initialize ERTM state for a freshly configured channel: reset
	 * the sequence-number bookkeeping, arm the ERTM timers, and set
	 * up the receive-side reassembly queues and busy handling.
	 */
	struct sock *sk = chan->sk;

	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	/* Retransmission, monitor and acknowledgement timers */
	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
							(unsigned long) chan);
	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
							(unsigned long) chan);
	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);

	/* Queues for out-of-sequence (SREJ) and local-busy buffering */
	skb_queue_head_init(&chan->srej_q);
	skb_queue_head_init(&chan->busy_q);

	INIT_LIST_HEAD(&chan->srej_l);

	INIT_WORK(&chan->busy_work, l2cap_busy_work);

	/* ERTM frames queued on the socket backlog need ERTM processing */
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1594
1595 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1596 {
1597 switch (mode) {
1598 case L2CAP_MODE_STREAMING:
1599 case L2CAP_MODE_ERTM:
1600 if (l2cap_mode_supported(mode, remote_feat_mask))
1601 return mode;
1602 /* fall through */
1603 default:
1604 return L2CAP_MODE_BASIC;
1605 }
1606 }
1607
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	/* Build our configure request in @data: MTU option (when not the
	 * default) plus an RFC option describing the chosen mode, and an
	 * FCS option when we want the checksum disabled.  Returns the
	 * length of the request.
	 */
	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the very first request */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2_DEVICE means the mode is mandated locally */
		if (chan->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only spell out basic mode if the peer could expect
		 * ERTM or streaming; otherwise it is implicit.
		 */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = chan->tx_win;
		rfc.max_transmit    = chan->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Keep the PDU size within what fits in the ACL MTU
		 * (10 bytes of worst-case L2CAP/ERTM overhead).
		 */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Ask to drop the FCS if we or the peer don't want it */
		if (chan->fcs == L2CAP_FCS_NONE ||
				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				chan->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1705
1706 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1707 {
1708 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
1709 struct l2cap_conf_rsp *rsp = data;
1710 void *ptr = rsp->data;
1711 void *req = chan->conf_req;
1712 int len = chan->conf_len;
1713 int type, hint, olen;
1714 unsigned long val;
1715 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
1716 u16 mtu = L2CAP_DEFAULT_MTU;
1717 u16 result = L2CAP_CONF_SUCCESS;
1718
1719 BT_DBG("chan %p", chan);
1720
1721 while (len >= L2CAP_CONF_OPT_SIZE) {
1722 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
1723
1724 hint = type & L2CAP_CONF_HINT;
1725 type &= L2CAP_CONF_MASK;
1726
1727 switch (type) {
1728 case L2CAP_CONF_MTU:
1729 mtu = val;
1730 break;
1731
1732 case L2CAP_CONF_FLUSH_TO:
1733 pi->flush_to = val;
1734 break;
1735
1736 case L2CAP_CONF_QOS:
1737 break;
1738
1739 case L2CAP_CONF_RFC:
1740 if (olen == sizeof(rfc))
1741 memcpy(&rfc, (void *) val, olen);
1742 break;
1743
1744 case L2CAP_CONF_FCS:
1745 if (val == L2CAP_FCS_NONE)
1746 chan->conf_state |= L2CAP_CONF_NO_FCS_RECV;
1747
1748 break;
1749
1750 default:
1751 if (hint)
1752 break;
1753
1754 result = L2CAP_CONF_UNKNOWN;
1755 *((u8 *) ptr++) = type;
1756 break;
1757 }
1758 }
1759
1760 if (chan->num_conf_rsp || chan->num_conf_req > 1)
1761 goto done;
1762
1763 switch (pi->mode) {
1764 case L2CAP_MODE_STREAMING:
1765 case L2CAP_MODE_ERTM:
1766 if (!(chan->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
1767 pi->mode = l2cap_select_mode(rfc.mode,
1768 pi->conn->feat_mask);
1769 break;
1770 }
1771
1772 if (pi->mode != rfc.mode)
1773 return -ECONNREFUSED;
1774
1775 break;
1776 }
1777
1778 done:
1779 if (pi->mode != rfc.mode) {
1780 result = L2CAP_CONF_UNACCEPT;
1781 rfc.mode = pi->mode;
1782
1783 if (chan->num_conf_rsp == 1)
1784 return -ECONNREFUSED;
1785
1786 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1787 sizeof(rfc), (unsigned long) &rfc);
1788 }
1789
1790
1791 if (result == L2CAP_CONF_SUCCESS) {
1792 /* Configure output options and let the other side know
1793 * which ones we don't like. */
1794
1795 if (mtu < L2CAP_DEFAULT_MIN_MTU)
1796 result = L2CAP_CONF_UNACCEPT;
1797 else {
1798 pi->omtu = mtu;
1799 chan->conf_state |= L2CAP_CONF_MTU_DONE;
1800 }
1801 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
1802
1803 switch (rfc.mode) {
1804 case L2CAP_MODE_BASIC:
1805 chan->fcs = L2CAP_FCS_NONE;
1806 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1807 break;
1808
1809 case L2CAP_MODE_ERTM:
1810 chan->remote_tx_win = rfc.txwin_size;
1811 chan->remote_max_tx = rfc.max_transmit;
1812
1813 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1814 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1815
1816 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1817
1818 rfc.retrans_timeout =
1819 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
1820 rfc.monitor_timeout =
1821 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
1822
1823 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1824
1825 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1826 sizeof(rfc), (unsigned long) &rfc);
1827
1828 break;
1829
1830 case L2CAP_MODE_STREAMING:
1831 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
1832 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
1833
1834 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
1835
1836 chan->conf_state |= L2CAP_CONF_MODE_DONE;
1837
1838 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1839 sizeof(rfc), (unsigned long) &rfc);
1840
1841 break;
1842
1843 default:
1844 result = L2CAP_CONF_UNACCEPT;
1845
1846 memset(&rfc, 0, sizeof(rfc));
1847 rfc.mode = pi->mode;
1848 }
1849
1850 if (result == L2CAP_CONF_SUCCESS)
1851 chan->conf_state |= L2CAP_CONF_OUTPUT_DONE;
1852 }
1853 rsp->scid = cpu_to_le16(pi->dcid);
1854 rsp->result = cpu_to_le16(result);
1855 rsp->flags = cpu_to_le16(0x0000);
1856
1857 return ptr - data;
1858 }
1859
1860 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
1861 {
1862 struct sock *sk = chan->sk;
1863 struct l2cap_pinfo *pi = l2cap_pi(sk);
1864 struct l2cap_conf_req *req = data;
1865 void *ptr = req->data;
1866 int type, olen;
1867 unsigned long val;
1868 struct l2cap_conf_rfc rfc;
1869
1870 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
1871
1872 while (len >= L2CAP_CONF_OPT_SIZE) {
1873 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1874
1875 switch (type) {
1876 case L2CAP_CONF_MTU:
1877 if (val < L2CAP_DEFAULT_MIN_MTU) {
1878 *result = L2CAP_CONF_UNACCEPT;
1879 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
1880 } else
1881 pi->imtu = val;
1882 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
1883 break;
1884
1885 case L2CAP_CONF_FLUSH_TO:
1886 pi->flush_to = val;
1887 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
1888 2, pi->flush_to);
1889 break;
1890
1891 case L2CAP_CONF_RFC:
1892 if (olen == sizeof(rfc))
1893 memcpy(&rfc, (void *)val, olen);
1894
1895 if ((chan->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
1896 rfc.mode != pi->mode)
1897 return -ECONNREFUSED;
1898
1899 chan->fcs = 0;
1900
1901 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
1902 sizeof(rfc), (unsigned long) &rfc);
1903 break;
1904 }
1905 }
1906
1907 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
1908 return -ECONNREFUSED;
1909
1910 pi->mode = rfc.mode;
1911
1912 if (*result == L2CAP_CONF_SUCCESS) {
1913 switch (rfc.mode) {
1914 case L2CAP_MODE_ERTM:
1915 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1916 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1917 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1918 break;
1919 case L2CAP_MODE_STREAMING:
1920 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1921 }
1922 }
1923
1924 req->dcid = cpu_to_le16(pi->dcid);
1925 req->flags = cpu_to_le16(0x0000);
1926
1927 return ptr - data;
1928 }
1929
1930 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
1931 {
1932 struct l2cap_conf_rsp *rsp = data;
1933 void *ptr = rsp->data;
1934
1935 BT_DBG("sk %p", sk);
1936
1937 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1938 rsp->result = cpu_to_le16(result);
1939 rsp->flags = cpu_to_le16(flags);
1940
1941 return ptr - data;
1942 }
1943
1944 void __l2cap_connect_rsp_defer(struct sock *sk)
1945 {
1946 struct l2cap_conn_rsp rsp;
1947 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1948 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1949 u8 buf[128];
1950
1951 sk->sk_state = BT_CONFIG;
1952
1953 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1954 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1955 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1956 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1957 l2cap_send_cmd(conn, chan->ident,
1958 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1959
1960 if (chan->conf_state & L2CAP_CONF_REQ_SENT)
1961 return;
1962
1963 chan->conf_state |= L2CAP_CONF_REQ_SENT;
1964 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1965 l2cap_build_conf_req(chan, buf), buf);
1966 chan->num_conf_req++;
1967 }
1968
1969 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
1970 {
1971 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
1972 int type, olen;
1973 unsigned long val;
1974 struct l2cap_conf_rfc rfc;
1975
1976 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
1977
1978 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
1979 return;
1980
1981 while (len >= L2CAP_CONF_OPT_SIZE) {
1982 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
1983
1984 switch (type) {
1985 case L2CAP_CONF_RFC:
1986 if (olen == sizeof(rfc))
1987 memcpy(&rfc, (void *)val, olen);
1988 goto done;
1989 }
1990 }
1991
1992 done:
1993 switch (rfc.mode) {
1994 case L2CAP_MODE_ERTM:
1995 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
1996 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
1997 chan->mps = le16_to_cpu(rfc.max_pdu_size);
1998 break;
1999 case L2CAP_MODE_STREAMING:
2000 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2001 }
2002 }
2003
2004 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2005 {
2006 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2007
2008 if (rej->reason != 0x0000)
2009 return 0;
2010
2011 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2012 cmd->ident == conn->info_ident) {
2013 del_timer(&conn->info_timer);
2014
2015 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2016 conn->info_ident = 0;
2017
2018 l2cap_conn_start(conn);
2019 }
2020
2021 return 0;
2022 }
2023
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	/* Handle an incoming connection request: find a listening socket
	 * for the PSM, allocate a child socket/channel, and reply with
	 * success, pending (security/defer_setup) or an error code.
	 * Runs with the parent socket bh-locked; channel-list updates are
	 * protected by conn->chan_lock.
	 */
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;	/* authentication failure */
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	chan = l2cap_chan_alloc(sk);
	if (!chan) {
		l2cap_sock_kill(sk);
		goto response;
	}

	l2cap_pi(sk)->chan = chan;

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm  = psm;
	l2cap_pi(sk)->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	/* Our local CID is the peer's destination CID */
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* Remember the request ident for a deferred response */
	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
				/* Wait for userspace to accept() */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedures still in progress */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask not known yet: answer pending */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Kick off a feature-mask query if we haven't done one yet */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* On immediate success, start configuration right away */
	if (chan && !(chan->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		chan->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
2162
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	/* Handle the peer's response to our connection request.  On
	 * success start configuration; on pending just mark the channel;
	 * on any error tear the channel down.  The lookup helpers return
	 * with the channel's socket bh-locked, released at the end.
	 */
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* A pending response may not carry our source CID yet; fall back
	 * to matching on the command ident in that case.
	 */
	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		chan->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		chan->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Only send the initial configure request once */
		if (chan->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		chan->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		chan->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2227
2228 static inline void set_default_fcs(struct l2cap_chan *chan)
2229 {
2230 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
2231
2232 /* FCS is enabled only in ERTM or streaming mode, if one or both
2233 * sides request it.
2234 */
2235 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
2236 chan->fcs = L2CAP_FCS_NONE;
2237 else if (!(pi->chan->conf_state & L2CAP_CONF_NO_FCS_RECV))
2238 chan->fcs = L2CAP_FCS_CRC16;
2239 }
2240
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	/* Handle an incoming configure request.  Option data may span
	 * several requests (continuation flag); it is accumulated in
	 * chan->conf_req and only parsed when complete.  The lookup
	 * helper returns with the channel's socket bh-locked.
	 */
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	/* Configure is only valid once the connection is established */
	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);	/* invalid CID */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		/* Unacceptable mode: tear the channel down */
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!(chan->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: the channel is fully up */
	if (chan->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(chan);

		sk->sk_state = BT_CONNECTED;

		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Our own configure request is still outstanding: send it now */
	if (!(chan->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		chan->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2334
2335 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2336 {
2337 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2338 u16 scid, flags, result;
2339 struct l2cap_chan *chan;
2340 struct sock *sk;
2341 int len = cmd->len - sizeof(*rsp);
2342
2343 scid = __le16_to_cpu(rsp->scid);
2344 flags = __le16_to_cpu(rsp->flags);
2345 result = __le16_to_cpu(rsp->result);
2346
2347 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2348 scid, flags, result);
2349
2350 chan = l2cap_get_chan_by_scid(conn, scid);
2351 if (!chan)
2352 return 0;
2353
2354 sk = chan->sk;
2355
2356 switch (result) {
2357 case L2CAP_CONF_SUCCESS:
2358 l2cap_conf_rfc_get(chan, rsp->data, len);
2359 break;
2360
2361 case L2CAP_CONF_UNACCEPT:
2362 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2363 char req[64];
2364
2365 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2366 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2367 goto done;
2368 }
2369
2370 /* throw out any old stored conf requests */
2371 result = L2CAP_CONF_SUCCESS;
2372 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2373 req, &result);
2374 if (len < 0) {
2375 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2376 goto done;
2377 }
2378
2379 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2380 L2CAP_CONF_REQ, len, req);
2381 chan->num_conf_req++;
2382 if (result != L2CAP_CONF_SUCCESS)
2383 goto done;
2384 break;
2385 }
2386
2387 default:
2388 sk->sk_err = ECONNRESET;
2389 l2cap_sock_set_timer(sk, HZ * 5);
2390 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2391 goto done;
2392 }
2393
2394 if (flags & 0x01)
2395 goto done;
2396
2397 chan->conf_state |= L2CAP_CONF_INPUT_DONE;
2398
2399 if (chan->conf_state & L2CAP_CONF_OUTPUT_DONE) {
2400 set_default_fcs(chan);
2401
2402 sk->sk_state = BT_CONNECTED;
2403 chan->next_tx_seq = 0;
2404 chan->expected_tx_seq = 0;
2405 skb_queue_head_init(&chan->tx_q);
2406 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2407 l2cap_ertm_init(chan);
2408
2409 l2cap_chan_ready(sk);
2410 }
2411
2412 done:
2413 bh_unlock_sock(sk);
2414 return 0;
2415 }
2416
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	/* Peer requests channel disconnection: acknowledge it, shut the
	 * socket down and delete the channel.  If the socket is currently
	 * locked by userspace, defer the teardown via a short timer
	 * instead of deleting the channel under the user's feet.  The
	 * lookup helper returns with the socket bh-locked.
	 */
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2457
/* Handle an incoming L2CAP Disconnection Response: the peer has confirmed
 * our earlier Disconnection Request, so finish deleting the channel.
 *
 * An unknown SCID is silently ignored. Always returns 0.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	/* NOTE(review): as in l2cap_disconnect_req(), the lookup appears to
	 * return with the socket bh-locked — confirm against the helper. */
	chan = l2cap_get_chan_by_scid(conn, scid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		/* Defer teardown to the socket timer (fires in HZ/5). */
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* err == 0: disconnect completed normally, not a reset. */
	l2cap_chan_del(chan, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
2491
2492 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2493 {
2494 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2495 u16 type;
2496
2497 type = __le16_to_cpu(req->type);
2498
2499 BT_DBG("type 0x%4.4x", type);
2500
2501 if (type == L2CAP_IT_FEAT_MASK) {
2502 u8 buf[8];
2503 u32 feat_mask = l2cap_feat_mask;
2504 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2505 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2506 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2507 if (!disable_ertm)
2508 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2509 | L2CAP_FEAT_FCS;
2510 put_unaligned_le32(feat_mask, rsp->data);
2511 l2cap_send_cmd(conn, cmd->ident,
2512 L2CAP_INFO_RSP, sizeof(buf), buf);
2513 } else if (type == L2CAP_IT_FIXED_CHAN) {
2514 u8 buf[12];
2515 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2516 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2517 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2518 memcpy(buf + 4, l2cap_fixed_chan, 8);
2519 l2cap_send_cmd(conn, cmd->ident,
2520 L2CAP_INFO_RSP, sizeof(buf), buf);
2521 } else {
2522 struct l2cap_info_rsp rsp;
2523 rsp.type = cpu_to_le16(type);
2524 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2525 l2cap_send_cmd(conn, cmd->ident,
2526 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2527 }
2528
2529 return 0;
2530 }
2531
2532 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2533 {
2534 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2535 u16 type, result;
2536
2537 type = __le16_to_cpu(rsp->type);
2538 result = __le16_to_cpu(rsp->result);
2539
2540 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2541
2542 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2543 if (cmd->ident != conn->info_ident ||
2544 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2545 return 0;
2546
2547 del_timer(&conn->info_timer);
2548
2549 if (result != L2CAP_IR_SUCCESS) {
2550 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2551 conn->info_ident = 0;
2552
2553 l2cap_conn_start(conn);
2554
2555 return 0;
2556 }
2557
2558 if (type == L2CAP_IT_FEAT_MASK) {
2559 conn->feat_mask = get_unaligned_le32(rsp->data);
2560
2561 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2562 struct l2cap_info_req req;
2563 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2564
2565 conn->info_ident = l2cap_get_ident(conn);
2566
2567 l2cap_send_cmd(conn, conn->info_ident,
2568 L2CAP_INFO_REQ, sizeof(req), &req);
2569 } else {
2570 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2571 conn->info_ident = 0;
2572
2573 l2cap_conn_start(conn);
2574 }
2575 } else if (type == L2CAP_IT_FIXED_CHAN) {
2576 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2577 conn->info_ident = 0;
2578
2579 l2cap_conn_start(conn);
2580 }
2581
2582 return 0;
2583 }
2584
2585 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2586 u16 to_multiplier)
2587 {
2588 u16 max_latency;
2589
2590 if (min > max || min < 6 || max > 3200)
2591 return -EINVAL;
2592
2593 if (to_multiplier < 10 || to_multiplier > 3200)
2594 return -EINVAL;
2595
2596 if (max >= to_multiplier * 8)
2597 return -EINVAL;
2598
2599 max_latency = (to_multiplier * 8 / max) - 1;
2600 if (latency > 499 || latency > max_latency)
2601 return -EINVAL;
2602
2603 return 0;
2604 }
2605
/* Handle an LE Connection Parameter Update Request (sent by the slave).
 *
 * Only valid when we are master of the link. Validates the requested
 * parameters, always answers with accept/reject, and on accept asks the
 * controller to apply the new parameters.
 *
 * Returns 0 on success, -EINVAL when not master, -EPROTO on a malformed
 * (wrong-length) request.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min		= __le16_to_cpu(req->min);
	max		= __le16_to_cpu(req->max);
	latency		= __le16_to_cpu(req->latency);
	to_multiplier	= __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	/* A response goes out even when the parameters are rejected. */
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
2647
2648 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2649 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2650 {
2651 int err = 0;
2652
2653 switch (cmd->code) {
2654 case L2CAP_COMMAND_REJ:
2655 l2cap_command_rej(conn, cmd, data);
2656 break;
2657
2658 case L2CAP_CONN_REQ:
2659 err = l2cap_connect_req(conn, cmd, data);
2660 break;
2661
2662 case L2CAP_CONN_RSP:
2663 err = l2cap_connect_rsp(conn, cmd, data);
2664 break;
2665
2666 case L2CAP_CONF_REQ:
2667 err = l2cap_config_req(conn, cmd, cmd_len, data);
2668 break;
2669
2670 case L2CAP_CONF_RSP:
2671 err = l2cap_config_rsp(conn, cmd, data);
2672 break;
2673
2674 case L2CAP_DISCONN_REQ:
2675 err = l2cap_disconnect_req(conn, cmd, data);
2676 break;
2677
2678 case L2CAP_DISCONN_RSP:
2679 err = l2cap_disconnect_rsp(conn, cmd, data);
2680 break;
2681
2682 case L2CAP_ECHO_REQ:
2683 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
2684 break;
2685
2686 case L2CAP_ECHO_RSP:
2687 break;
2688
2689 case L2CAP_INFO_REQ:
2690 err = l2cap_information_req(conn, cmd, data);
2691 break;
2692
2693 case L2CAP_INFO_RSP:
2694 err = l2cap_information_rsp(conn, cmd, data);
2695 break;
2696
2697 default:
2698 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
2699 err = -EINVAL;
2700 break;
2701 }
2702
2703 return err;
2704 }
2705
2706 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2707 struct l2cap_cmd_hdr *cmd, u8 *data)
2708 {
2709 switch (cmd->code) {
2710 case L2CAP_COMMAND_REJ:
2711 return 0;
2712
2713 case L2CAP_CONN_PARAM_UPDATE_REQ:
2714 return l2cap_conn_param_update_req(conn, cmd, data);
2715
2716 case L2CAP_CONN_PARAM_UPDATE_RSP:
2717 return 0;
2718
2719 default:
2720 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2721 return -EINVAL;
2722 }
2723 }
2724
/* Process every signalling command packed into one signalling-channel skb.
 *
 * Walks the buffer command by command, dispatching each to the LE or
 * BR/EDR handler depending on the link type, and answers failed commands
 * with an L2CAP Command Reject. Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a copy before parsing. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A claimed length past the buffer or a zero identifier
		 * means the rest of the packet cannot be trusted. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload. */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
2771
2772 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
2773 {
2774 u16 our_fcs, rcv_fcs;
2775 int hdr_size = L2CAP_HDR_SIZE + 2;
2776
2777 if (chan->fcs == L2CAP_FCS_CRC16) {
2778 skb_trim(skb, skb->len - 2);
2779 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
2780 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
2781
2782 if (our_fcs != rcv_fcs)
2783 return -EBADMSG;
2784 }
2785 return 0;
2786 }
2787
/* Answer a poll (P-bit) by sending whatever the ERTM state calls for:
 * an RNR when we are locally busy, pending I-frames after a remote-busy
 * recovery, or a plain RR when nothing else went out.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	/* Reset the counter so we can tell below whether l2cap_ertm_send()
	 * actually transmitted anything. */
	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		chan->conn_state |= L2CAP_CONN_RNR_SENT;
	}

	if (chan->conn_state & L2CAP_CONN_REMOTE_BUSY)
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* Nothing was sent and we are not busy: acknowledge with an RR. */
	if (!(chan->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}
2813
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq distance from buffer_seq (mod 64).
 *
 * Returns 0 on success, -EINVAL when a frame with the same tx_seq is
 * already queued (duplicate).
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	/* Stash sequencing info in the skb control block for reassembly. */
	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	/* Distance of the new frame from the receive window base, mod 64. */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		/* First queued frame farther from the base than ours marks
		 * the insertion point. */
		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	/* Farther than everything queued: append at the tail. */
	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
2855
/* Reassemble one ERTM I-frame into the channel's pending SDU according
 * to its SAR bits, delivering the complete SDU to the socket.
 *
 * Returns 0 when the frame was consumed (delivered, buffered, or dropped
 * with a disconnect request sent), -ENOMEM / a negative queueing error
 * when delivery must be retried later (caller keeps local-busy state).
 */
static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* An unsegmented SDU in the middle of a segmented one is a
		 * protocol violation. */
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		err = sock_queue_rcv_skb(chan->sk, skb);
		if (!err)
			return err;

		/* Queueing failed: fall out and report the busy condition. */
		break;

	case L2CAP_SDU_START:
		if (chan->conn_state & L2CAP_CONN_SAR_SDU)
			goto drop;

		/* First two payload bytes carry the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);

		if (chan->sdu_len > pi->imtu)
			goto disconnect;

		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
		if (!chan->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		chan->conn_state |= L2CAP_CONN_SAR_SDU;
		chan->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		chan->partial_sdu_len += skb->len;
		if (chan->partial_sdu_len > chan->sdu_len)
			goto drop;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		/* On a retry (previous delivery failed) the final segment has
		 * already been appended — skip straight to re-delivery. */
		if (!(chan->conn_state & L2CAP_CONN_SAR_RETRY)) {
			chan->partial_sdu_len += skb->len;

			if (chan->partial_sdu_len > pi->imtu)
				goto drop;

			if (chan->partial_sdu_len != chan->sdu_len)
				goto drop;

			memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
		}

		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
		if (!_skb) {
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return -ENOMEM;
		}

		err = sock_queue_rcv_skb(chan->sk, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			chan->conn_state |= L2CAP_CONN_SAR_RETRY;
			return err;
		}

		chan->conn_state &= ~L2CAP_CONN_SAR_RETRY;
		chan->conn_state &= ~L2CAP_CONN_SAR_SDU;

		kfree_skb(chan->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(chan->sdu);
	chan->sdu = NULL;

	/* NOTE: drop deliberately falls through into disconnect — a dropped
	 * SAR violation also tears down the channel. */
disconnect:
	l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
2963
/* Try to drain the local-busy queue into the socket and, on success,
 * leave the local-busy state (poll the peer if an RNR was sent).
 *
 * Returns 0 when the queue was fully drained, -EBUSY when delivery
 * failed again and the remaining frames stay queued.
 */
static int l2cap_try_push_rx_skb(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	while ((skb = skb_dequeue(&chan->busy_q))) {
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(chan, skb, control);
		if (err < 0) {
			/* Still busy: put the frame back for the next try. */
			skb_queue_head(&chan->busy_q, skb);
			return -EBUSY;
		}

		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	}

	if (!(chan->conn_state & L2CAP_CONN_RNR_SENT))
		goto done;

	/* We told the peer we were busy; poll it (RR with P-bit) so it
	 * resumes transmission, and wait for the F-bit reply. */
	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	del_timer(&chan->retrans_timer);
	__mod_monitor_timer();

	chan->conn_state |= L2CAP_CONN_WAIT_F;

done:
	chan->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
	chan->conn_state &= ~L2CAP_CONN_RNR_SENT;

	BT_DBG("chan %p, Exit local busy", chan);

	return 0;
}
3002
/* Workqueue handler for the ERTM local-busy condition: repeatedly retry
 * pushing the queued receive frames to the socket, sleeping between
 * attempts, until the queue drains, the retry budget is exhausted (then
 * the channel is disconnected), a signal arrives, or the socket errors.
 */
static void l2cap_busy_work(struct work_struct *work)
{
	DECLARE_WAITQUEUE(wait, current);
	struct l2cap_chan *chan =
		container_of(work, struct l2cap_chan, busy_work);
	struct sock *sk = chan->sk;
	int n_tries = 0, timeo = HZ/5, err;
	struct sk_buff *skb;

	lock_sock(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	while ((skb = skb_peek(&chan->busy_q))) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
			/* NOTE(review): err is assigned on every exit path
			 * but never consumed after the loop. */
			err = -EBUSY;
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, chan, EBUSY);
			break;
		}

		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so the receive path
		 * can make progress. */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;

		if (l2cap_try_push_rx_skb(chan) == 0)
			break;
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);

	release_sock(sk);
}
3049
/* Deliver an in-sequence I-frame to the socket, entering the local-busy
 * state (RNR sent, frame queued, busy work scheduled) when the socket
 * cannot take it.
 *
 * Returns the reassembly result; a negative value means the frame was
 * queued for the busy worker rather than delivered.
 */
static int l2cap_push_rx_skb(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	int sctrl, err;

	if (chan->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Already busy: just append behind the queued frames. */
		bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
		__skb_queue_tail(&chan->busy_q, skb);
		return l2cap_try_push_rx_skb(chan);


	}

	err = l2cap_ertm_reassembly_sdu(chan, skb, control);
	if (err >= 0) {
		chan->buffer_seq = (chan->buffer_seq + 1) % 64;
		return err;
	}

	/* Busy Condition */
	BT_DBG("chan %p, Enter local busy", chan);

	chan->conn_state |= L2CAP_CONN_LOCAL_BUSY;
	bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
	__skb_queue_tail(&chan->busy_q, skb);

	/* Tell the peer to stop sending (RNR). */
	sctrl = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	sctrl |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(chan, sctrl);

	chan->conn_state |= L2CAP_CONN_RNR_SENT;

	/* No point acknowledging while busy. */
	del_timer(&chan->ack_timer);

	queue_work(_busy_wq, &chan->busy_work);

	return err;
}
3087
3088 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3089 {
3090 struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
3091 struct sk_buff *_skb;
3092 int err = -EINVAL;
3093
3094 /*
3095 * TODO: We have to notify the userland if some data is lost with the
3096 * Streaming Mode.
3097 */
3098
3099 switch (control & L2CAP_CTRL_SAR) {
3100 case L2CAP_SDU_UNSEGMENTED:
3101 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3102 kfree_skb(chan->sdu);
3103 break;
3104 }
3105
3106 err = sock_queue_rcv_skb(chan->sk, skb);
3107 if (!err)
3108 return 0;
3109
3110 break;
3111
3112 case L2CAP_SDU_START:
3113 if (chan->conn_state & L2CAP_CONN_SAR_SDU) {
3114 kfree_skb(chan->sdu);
3115 break;
3116 }
3117
3118 chan->sdu_len = get_unaligned_le16(skb->data);
3119 skb_pull(skb, 2);
3120
3121 if (chan->sdu_len > pi->imtu) {
3122 err = -EMSGSIZE;
3123 break;
3124 }
3125
3126 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3127 if (!chan->sdu) {
3128 err = -ENOMEM;
3129 break;
3130 }
3131
3132 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3133
3134 chan->conn_state |= L2CAP_CONN_SAR_SDU;
3135 chan->partial_sdu_len = skb->len;
3136 err = 0;
3137 break;
3138
3139 case L2CAP_SDU_CONTINUE:
3140 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3141 break;
3142
3143 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3144
3145 chan->partial_sdu_len += skb->len;
3146 if (chan->partial_sdu_len > chan->sdu_len)
3147 kfree_skb(chan->sdu);
3148 else
3149 err = 0;
3150
3151 break;
3152
3153 case L2CAP_SDU_END:
3154 if (!(chan->conn_state & L2CAP_CONN_SAR_SDU))
3155 break;
3156
3157 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3158
3159 chan->conn_state &= ~L2CAP_CONN_SAR_SDU;
3160 chan->partial_sdu_len += skb->len;
3161
3162 if (chan->partial_sdu_len > pi->imtu)
3163 goto drop;
3164
3165 if (chan->partial_sdu_len == chan->sdu_len) {
3166 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3167 err = sock_queue_rcv_skb(chan->sk, _skb);
3168 if (err < 0)
3169 kfree_skb(_skb);
3170 }
3171 err = 0;
3172
3173 drop:
3174 kfree_skb(chan->sdu);
3175 break;
3176 }
3177
3178 kfree_skb(skb);
3179 return err;
3180 }
3181
3182 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
3183 {
3184 struct sk_buff *skb;
3185 u16 control;
3186
3187 while ((skb = skb_peek(&chan->srej_q))) {
3188 if (bt_cb(skb)->tx_seq != tx_seq)
3189 break;
3190
3191 skb = skb_dequeue(&chan->srej_q);
3192 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3193 l2cap_ertm_reassembly_sdu(chan, skb, control);
3194 chan->buffer_seq_srej =
3195 (chan->buffer_seq_srej + 1) % 64;
3196 tx_seq = (tx_seq + 1) % 64;
3197 }
3198 }
3199
3200 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3201 {
3202 struct srej_list *l, *tmp;
3203 u16 control;
3204
3205 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3206 if (l->tx_seq == tx_seq) {
3207 list_del(&l->list);
3208 kfree(l);
3209 return;
3210 }
3211 control = L2CAP_SUPER_SELECT_REJECT;
3212 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3213 l2cap_send_sframe(chan, control);
3214 list_del(&l->list);
3215 list_add_tail(&l->list, &chan->srej_l);
3216 }
3217 }
3218
3219 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3220 {
3221 struct srej_list *new;
3222 u16 control;
3223
3224 while (tx_seq != chan->expected_tx_seq) {
3225 control = L2CAP_SUPER_SELECT_REJECT;
3226 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3227 l2cap_send_sframe(chan, control);
3228
3229 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3230 new->tx_seq = chan->expected_tx_seq;
3231 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3232 list_add_tail(&new->list, &chan->srej_l);
3233 }
3234 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3235 }
3236
/* Process one received ERTM I-frame: acknowledge the piggy-backed
 * ReqSeq, then either deliver it (expected sequence), buffer it and run
 * SREJ-based recovery (out of sequence), or drop it (invalid/duplicate).
 *
 * Always returns 0; protocol violations trigger a disconnect request.
 */
static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(chan->sk);
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	/* Acknowledge after roughly a sixth of the transmit window. */
	int num_to_ack = (chan->tx_win/6) + 1;
	int tx_seq_offset, expected_tx_seq_offset;
	int err = 0;

	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
								tx_seq, rx_control);

	/* F-bit answering our poll: stop the monitor timer and leave the
	 * WAIT_F state; restart retransmission if frames are outstanding. */
	if (L2CAP_CTRL_FINAL & rx_control &&
			chan->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&chan->monitor_timer);
		if (chan->unacked_frames > 0)
			__mod_retrans_timer();
		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	/* The ReqSeq acknowledges everything before it. */
	chan->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(chan);

	if (tx_seq == chan->expected_tx_seq)
		goto expected;

	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= chan->tx_win) {
		l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
		goto drop;
	}

	/* NOTE(review): `==` compares the whole bitmask for equality with a
	 * single flag; this only matches when LOCAL_BUSY is the sole bit
	 * set. It looks like it should be `&` — confirm before changing. */
	if (chan->conn_state == L2CAP_CONN_LOCAL_BUSY)
		goto drop;

	if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
		struct srej_list *first;

		first = list_first_entry(&chan->srej_l,
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			/* Oldest missing frame arrived: buffer it, flush the
			 * now-contiguous run, and retire its SREJ entry. */
			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
			l2cap_check_srej_gap(chan, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(&chan->srej_l)) {
				/* All gaps filled: leave SREJ recovery. */
				chan->buffer_seq = chan->buffer_seq_srej;
				chan->conn_state &= ~L2CAP_CONN_SREJ_SENT;
				l2cap_send_ack(chan);
				BT_DBG("chan %p, Exit SREJ_SENT", chan);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
				goto drop;

			/* If this frame was itself SREJ'd, re-request the
			 * others still pending; otherwise a new gap opened. */
			list_for_each_entry(l, &chan->srej_l, list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(chan, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(chan, tx_seq);
		}
	} else {
		expected_tx_seq_offset =
			(chan->expected_tx_seq - chan->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		/* First out-of-sequence frame: enter SREJ recovery. */
		chan->conn_state |= L2CAP_CONN_SREJ_SENT;

		BT_DBG("chan %p, Enter SREJ", chan);

		INIT_LIST_HEAD(&chan->srej_l);
		chan->buffer_seq_srej = chan->buffer_seq;

		__skb_queue_head_init(&chan->srej_q);
		__skb_queue_head_init(&chan->busy_q);
		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);

		chan->conn_state |= L2CAP_CONN_SEND_PBIT;

		l2cap_send_srejframe(chan, tx_seq);

		del_timer(&chan->ack_timer);
	}
	return 0;

expected:
	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;

	if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
		/* During recovery even in-order frames are buffered so the
		 * SDU stream is rebuilt once all gaps close. */
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	err = l2cap_push_rx_skb(chan, skb, rx_control);
	if (err < 0)
		return 0;

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);
	}

	__mod_ack_timer();

	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
	if (chan->num_acked == num_to_ack - 1)
		l2cap_send_ack(chan);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3372
/* Handle a Receiver Ready (RR) supervisory frame: acknowledge frames up
 * to ReqSeq, then react to the P/F bits — answer a poll, complete an
 * F-bit exchange, or simply resume/continue transmission.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
						rx_control);

	chan->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL) {
		/* Peer polled us: our answer must carry the F-bit. */
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT) {
			if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
					(chan->unacked_frames > 0))
				__mod_retrans_timer();

			chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			/* Re-request the outstanding gaps. */
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* F-bit reply to our poll. */
		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

		if (chan->conn_state & L2CAP_CONN_REJ_ACT)
			chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_retransmit_frames(chan);

	} else {
		/* Plain acknowledgement. */
		if ((chan->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				(chan->unacked_frames > 0))
			__mod_retrans_timer();

		chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
		if (chan->conn_state & L2CAP_CONN_SREJ_SENT)
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
3414
3415 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3416 {
3417 u8 tx_seq = __get_reqseq(rx_control);
3418
3419 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3420
3421 chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3422
3423 chan->expected_ack_seq = tx_seq;
3424 l2cap_drop_acked_frames(chan);
3425
3426 if (rx_control & L2CAP_CTRL_FINAL) {
3427 if (chan->conn_state & L2CAP_CONN_REJ_ACT)
3428 chan->conn_state &= ~L2CAP_CONN_REJ_ACT;
3429 else
3430 l2cap_retransmit_frames(chan);
3431 } else {
3432 l2cap_retransmit_frames(chan);
3433
3434 if (chan->conn_state & L2CAP_CONN_WAIT_F)
3435 chan->conn_state |= L2CAP_CONN_REJ_ACT;
3436 }
3437 }
/* Handle a Selective Reject (SREJ) supervisory frame: retransmit the
 * single frame the peer requests, with extra bookkeeping when the P or F
 * bit ties the SREJ into a poll exchange.
 */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (rx_control & L2CAP_CTRL_POLL) {
		/* SREJ with P-bit also acknowledges up to tx_seq. */
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		chan->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		/* Remember which SREJ we answered so the F-bit echo can be
		 * recognized below. */
		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Skip the retransmit if this is the echo of an SREJ we
		 * already served during the poll exchange. */
		if ((chan->conn_state & L2CAP_CONN_SREJ_ACT) &&
				chan->srej_save_reqseq == tx_seq)
			chan->conn_state &= ~L2CAP_CONN_SREJ_ACT;
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (chan->conn_state & L2CAP_CONN_WAIT_F) {
			chan->srej_save_reqseq = tx_seq;
			chan->conn_state |= L2CAP_CONN_SREJ_ACT;
		}
	}
}
3473
/* Handle a Receiver Not Ready (RNR) supervisory frame: the peer is busy.
 * Mark remote-busy, take the acknowledgement, and answer any poll.
 */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	chan->conn_state |= L2CAP_CONN_REMOTE_BUSY;
	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL)
		chan->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (!(chan->conn_state & L2CAP_CONN_SREJ_SENT)) {
		/* Peer cannot receive: stop retransmitting; an RR with the
		 * F-bit answers the poll if one was set. */
		del_timer(&chan->retrans_timer);
		if (rx_control & L2CAP_CTRL_POLL)
			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
		return;
	}

	/* In SREJ recovery: repeat the outstanding SREJs on a poll,
	 * otherwise just acknowledge with an RR. */
	if (rx_control & L2CAP_CTRL_POLL)
		l2cap_send_srejtail(chan);
	else
		l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
}
3499
/* Process one received ERTM supervisory frame (RR/REJ/SREJ/RNR).
 *
 * Handles the F-bit/WAIT_F bookkeeping common to all S-frame types, then
 * dispatches on the supervise field. Consumes the skb; always returns 0.
 */
static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);

	/* F-bit answering our poll: stop the monitor timer, restart
	 * retransmission if needed, and leave the WAIT_F state. */
	if (L2CAP_CTRL_FINAL & rx_control &&
			chan->conn_state & L2CAP_CONN_WAIT_F) {
		del_timer(&chan->monitor_timer);
		if (chan->unacked_frames > 0)
			__mod_retrans_timer();
		chan->conn_state &= ~L2CAP_CONN_WAIT_F;
	}

	switch (rx_control & L2CAP_CTRL_SUPERVISE) {
	case L2CAP_SUPER_RCV_READY:
		l2cap_data_channel_rrframe(chan, rx_control);
		break;

	case L2CAP_SUPER_REJECT:
		l2cap_data_channel_rejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_SELECT_REJECT:
		l2cap_data_channel_srejframe(chan, rx_control);
		break;

	case L2CAP_SUPER_RCV_NOT_READY:
		l2cap_data_channel_rnrframe(chan, rx_control);
		break;
	}

	/* S-frames carry no payload to deliver. */
	kfree_skb(skb);
	return 0;
}
3533
/* Validate and dispatch one received ERTM frame for a socket.
 *
 * Checks the FCS, the payload length against the negotiated MPS, and the
 * plausibility of the piggy-backed ReqSeq before routing the frame to the
 * I-frame or S-frame handler. Invalid frames are dropped; out-of-window
 * values additionally trigger a disconnect request. Always returns 0.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* A starting I-frame segment carries a 2-byte SDU length field. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	if (len > chan->mps) {
		l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
		goto drop;
	}

	/* ReqSeq may only acknowledge frames we actually sent: its distance
	 * from expected_ack_seq must not exceed next_tx_seq's (mod 64). */
	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* len went negative: frame too short for its own fields. */
		if (len < 0) {
			l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		/* S-frames must carry no payload at all. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(pi->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3604
3605 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3606 {
3607 struct l2cap_chan *chan;
3608 struct sock *sk;
3609 struct l2cap_pinfo *pi;
3610 u16 control;
3611 u8 tx_seq;
3612 int len;
3613
3614 chan = l2cap_get_chan_by_scid(conn, cid);
3615 if (!chan) {
3616 BT_DBG("unknown cid 0x%4.4x", cid);
3617 goto drop;
3618 }
3619
3620 sk = chan->sk;
3621 pi = l2cap_pi(sk);
3622
3623 BT_DBG("chan %p, len %d", chan, skb->len);
3624
3625 if (sk->sk_state != BT_CONNECTED)
3626 goto drop;
3627
3628 switch (pi->mode) {
3629 case L2CAP_MODE_BASIC:
3630 /* If socket recv buffers overflows we drop data here
3631 * which is *bad* because L2CAP has to be reliable.
3632 * But we don't have any other choice. L2CAP doesn't
3633 * provide flow control mechanism. */
3634
3635 if (pi->imtu < skb->len)
3636 goto drop;
3637
3638 if (!sock_queue_rcv_skb(sk, skb))
3639 goto done;
3640 break;
3641
3642 case L2CAP_MODE_ERTM:
3643 if (!sock_owned_by_user(sk)) {
3644 l2cap_ertm_data_rcv(sk, skb);
3645 } else {
3646 if (sk_add_backlog(sk, skb))
3647 goto drop;
3648 }
3649
3650 goto done;
3651
3652 case L2CAP_MODE_STREAMING:
3653 control = get_unaligned_le16(skb->data);
3654 skb_pull(skb, 2);
3655 len = skb->len;
3656
3657 if (l2cap_check_fcs(chan, skb))
3658 goto drop;
3659
3660 if (__is_sar_start(control))
3661 len -= 2;
3662
3663 if (chan->fcs == L2CAP_FCS_CRC16)
3664 len -= 2;
3665
3666 if (len > chan->mps || len < 0 || __is_sframe(control))
3667 goto drop;
3668
3669 tx_seq = __get_txseq(control);
3670
3671 if (chan->expected_tx_seq == tx_seq)
3672 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3673 else
3674 chan->expected_tx_seq = (tx_seq + 1) % 64;
3675
3676 l2cap_streaming_reassembly_sdu(chan, skb, control);
3677
3678 goto done;
3679
3680 default:
3681 BT_DBG("chan %p: bad mode 0x%2.2x", chan, pi->mode);
3682 break;
3683 }
3684
3685 drop:
3686 kfree_skb(skb);
3687
3688 done:
3689 if (sk)
3690 bh_unlock_sock(sk);
3691
3692 return 0;
3693 }
3694
3695 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3696 {
3697 struct sock *sk;
3698
3699 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3700 if (!sk)
3701 goto drop;
3702
3703 bh_lock_sock(sk);
3704
3705 BT_DBG("sk %p, len %d", sk, skb->len);
3706
3707 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3708 goto drop;
3709
3710 if (l2cap_pi(sk)->imtu < skb->len)
3711 goto drop;
3712
3713 if (!sock_queue_rcv_skb(sk, skb))
3714 goto done;
3715
3716 drop:
3717 kfree_skb(skb);
3718
3719 done:
3720 if (sk)
3721 bh_unlock_sock(sk);
3722 return 0;
3723 }
3724
3725 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3726 {
3727 struct sock *sk;
3728
3729 sk = l2cap_get_sock_by_scid(0, cid, conn->src);
3730 if (!sk)
3731 goto drop;
3732
3733 bh_lock_sock(sk);
3734
3735 BT_DBG("sk %p, len %d", sk, skb->len);
3736
3737 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3738 goto drop;
3739
3740 if (l2cap_pi(sk)->imtu < skb->len)
3741 goto drop;
3742
3743 if (!sock_queue_rcv_skb(sk, skb))
3744 goto done;
3745
3746 drop:
3747 kfree_skb(skb);
3748
3749 done:
3750 if (sk)
3751 bh_unlock_sock(sk);
3752 return 0;
3753 }
3754
3755 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3756 {
3757 struct l2cap_hdr *lh = (void *) skb->data;
3758 u16 cid, len;
3759 __le16 psm;
3760
3761 skb_pull(skb, L2CAP_HDR_SIZE);
3762 cid = __le16_to_cpu(lh->cid);
3763 len = __le16_to_cpu(lh->len);
3764
3765 if (len != skb->len) {
3766 kfree_skb(skb);
3767 return;
3768 }
3769
3770 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3771
3772 switch (cid) {
3773 case L2CAP_CID_LE_SIGNALING:
3774 case L2CAP_CID_SIGNALING:
3775 l2cap_sig_channel(conn, skb);
3776 break;
3777
3778 case L2CAP_CID_CONN_LESS:
3779 psm = get_unaligned_le16(skb->data);
3780 skb_pull(skb, 2);
3781 l2cap_conless_channel(conn, psm, skb);
3782 break;
3783
3784 case L2CAP_CID_LE_DATA:
3785 l2cap_att_channel(conn, cid, skb);
3786 break;
3787
3788 default:
3789 l2cap_data_channel(conn, cid, skb);
3790 break;
3791 }
3792 }
3793
3794 /* ---- L2CAP interface with lower layer (HCI) ---- */
3795
3796 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3797 {
3798 int exact = 0, lm1 = 0, lm2 = 0;
3799 register struct sock *sk;
3800 struct hlist_node *node;
3801
3802 if (type != ACL_LINK)
3803 return -EINVAL;
3804
3805 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3806
3807 /* Find listening sockets and check their link_mode */
3808 read_lock(&l2cap_sk_list.lock);
3809 sk_for_each(sk, node, &l2cap_sk_list.head) {
3810 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3811
3812 if (sk->sk_state != BT_LISTEN)
3813 continue;
3814
3815 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3816 lm1 |= HCI_LM_ACCEPT;
3817 if (chan->role_switch)
3818 lm1 |= HCI_LM_MASTER;
3819 exact++;
3820 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3821 lm2 |= HCI_LM_ACCEPT;
3822 if (chan->role_switch)
3823 lm2 |= HCI_LM_MASTER;
3824 }
3825 }
3826 read_unlock(&l2cap_sk_list.lock);
3827
3828 return exact ? lm1 : lm2;
3829 }
3830
3831 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3832 {
3833 struct l2cap_conn *conn;
3834
3835 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3836
3837 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3838 return -EINVAL;
3839
3840 if (!status) {
3841 conn = l2cap_conn_add(hcon, status);
3842 if (conn)
3843 l2cap_conn_ready(conn);
3844 } else
3845 l2cap_conn_del(hcon, bt_err(status));
3846
3847 return 0;
3848 }
3849
3850 static int l2cap_disconn_ind(struct hci_conn *hcon)
3851 {
3852 struct l2cap_conn *conn = hcon->l2cap_data;
3853
3854 BT_DBG("hcon %p", hcon);
3855
3856 if (hcon->type != ACL_LINK || !conn)
3857 return 0x13;
3858
3859 return conn->disc_reason;
3860 }
3861
3862 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3863 {
3864 BT_DBG("hcon %p reason %d", hcon, reason);
3865
3866 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
3867 return -EINVAL;
3868
3869 l2cap_conn_del(hcon, bt_err(reason));
3870
3871 return 0;
3872 }
3873
3874 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
3875 {
3876 struct sock *sk = chan->sk;
3877
3878 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
3879 return;
3880
3881 if (encrypt == 0x00) {
3882 if (chan->sec_level == BT_SECURITY_MEDIUM) {
3883 l2cap_sock_clear_timer(sk);
3884 l2cap_sock_set_timer(sk, HZ * 5);
3885 } else if (chan->sec_level == BT_SECURITY_HIGH)
3886 __l2cap_sock_close(sk, ECONNREFUSED);
3887 } else {
3888 if (chan->sec_level == BT_SECURITY_MEDIUM)
3889 l2cap_sock_clear_timer(sk);
3890 }
3891 }
3892
/*
 * HCI authentication/encryption result for a link.
 *
 * Walks every channel on the connection and advances its state
 * machine: established channels re-check encryption, outgoing
 * channels in BT_CONNECT send their deferred Connection Request on
 * success, and incoming channels in BT_CONNECT2 answer with success
 * or a security-block Connection Response.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		/* A connect request is already in flight for this channel */
		if (chan->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established/configuring channel: only re-validate the
		 * encryption state, no signalling needed. */
		if (!status && (sk->sk_state == BT_CONNECTED ||
					sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(chan, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				/* Security complete: send the Connection
				 * Request that was waiting on it. */
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
				req.psm = l2cap_pi(sk)->psm;

				chan->ident = l2cap_get_ident(conn);
				chan->conf_state |= L2CAP_CONF_CONNECT_PEND;

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				/* Security failed: arm a short timer to tear
				 * the socket down. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming connection that was held for security:
			 * answer the peer's Connection Request now. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
3965
/*
 * Receive one ACL fragment from HCI and reassemble complete L2CAP
 * frames.  A start fragment (ACL_CONT clear) carries the Basic L2CAP
 * header used to size conn->rx_skb; continuation fragments are
 * appended until conn->rx_len reaches zero, then the whole frame is
 * handed to l2cap_recv_frame().  The input skb is always consumed.
 * Always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* A new start frame while reassembly is pending means the
		 * previous frame was truncated: discard the partial frame. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Early MTU check so an oversized frame is rejected before
		 * the reassembly buffer is allocated.
		 * NOTE(review): assumes l2cap_get_chan_by_scid() returns
		 * with the socket bh-locked — hence the unlocks below. */
		chan = l2cap_get_chan_by_scid(conn, cid);

		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			if (l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
						"MTU %d)", len,
						l2cap_pi(sk)->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4076
4077 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4078 {
4079 struct sock *sk;
4080 struct hlist_node *node;
4081
4082 read_lock_bh(&l2cap_sk_list.lock);
4083
4084 sk_for_each(sk, node, &l2cap_sk_list.head) {
4085 struct l2cap_pinfo *pi = l2cap_pi(sk);
4086
4087 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4088 batostr(&bt_sk(sk)->src),
4089 batostr(&bt_sk(sk)->dst),
4090 sk->sk_state, __le16_to_cpu(pi->psm),
4091 pi->scid, pi->dcid,
4092 pi->imtu, pi->omtu, pi->chan->sec_level,
4093 pi->mode);
4094 }
4095
4096 read_unlock_bh(&l2cap_sk_list.lock);
4097
4098 return 0;
4099 }
4100
/* debugfs: bind the seq_file show routine to the "l2cap" entry. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4105
/* Seq-file plumbing for the L2CAP debugfs entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
4112
/* Handle of the debugfs file created in l2cap_init() */
static struct dentry *l2cap_debugfs;

/* Callbacks registered with the HCI core for the L2CAP protocol */
static struct hci_proto l2cap_hci_proto = {
	.name = "L2CAP",
	.id = HCI_PROTO_L2CAP,
	.connect_ind = l2cap_connect_ind,
	.connect_cfm = l2cap_connect_cfm,
	.disconn_ind = l2cap_disconn_ind,
	.disconn_cfm = l2cap_disconn_cfm,
	.security_cfm = l2cap_security_cfm,
	.recv_acldata = l2cap_recv_acldata
};
4125
4126 int __init l2cap_init(void)
4127 {
4128 int err;
4129
4130 err = l2cap_init_sockets();
4131 if (err < 0)
4132 return err;
4133
4134 _busy_wq = create_singlethread_workqueue("l2cap");
4135 if (!_busy_wq) {
4136 err = -ENOMEM;
4137 goto error;
4138 }
4139
4140 err = hci_register_proto(&l2cap_hci_proto);
4141 if (err < 0) {
4142 BT_ERR("L2CAP protocol registration failed");
4143 bt_sock_unregister(BTPROTO_L2CAP);
4144 goto error;
4145 }
4146
4147 if (bt_debugfs) {
4148 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4149 bt_debugfs, NULL, &l2cap_debugfs_fops);
4150 if (!l2cap_debugfs)
4151 BT_ERR("Failed to create L2CAP debug file");
4152 }
4153
4154 return 0;
4155
4156 error:
4157 destroy_workqueue(_busy_wq);
4158 l2cap_cleanup_sockets();
4159 return err;
4160 }
4161
/*
 * Tear down the L2CAP layer: remove the debugfs entry, drain and
 * destroy the busy workqueue, unregister the HCI protocol hooks and
 * release socket support.
 */
void l2cap_exit(void)
{
	debugfs_remove(l2cap_debugfs);

	/* Make sure no deferred busy-queue work is still running */
	flush_workqueue(_busy_wq);
	destroy_workqueue(_busy_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
4174
/* Module parameter: when set, channels never negotiate ERTM */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
This page took 0.222356 seconds and 6 git commands to generate.