Bluetooth: mark l2cap_create_iframe_pdu as static
[deliverable/linux.git] / net / bluetooth / l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
58
59 int disable_ertm;
60
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { 0x02, };
63
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
66
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
70 void *data);
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
74
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
76
77 /* ---- L2CAP channels ---- */
78
79 static inline void chan_hold(struct l2cap_chan *c)
80 {
81 atomic_inc(&c->refcnt);
82 }
83
84 static inline void chan_put(struct l2cap_chan *c)
85 {
86 if (atomic_dec_and_test(&c->refcnt))
87 kfree(c);
88 }
89
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
91 {
92 struct l2cap_chan *c;
93
94 list_for_each_entry(c, &conn->chan_l, list) {
95 if (c->dcid == cid)
96 return c;
97 }
98 return NULL;
99
100 }
101
102 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 {
104 struct l2cap_chan *c;
105
106 list_for_each_entry(c, &conn->chan_l, list) {
107 if (c->scid == cid)
108 return c;
109 }
110 return NULL;
111 }
112
113 /* Find channel with given SCID.
114 * Returns locked socket */
115 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
116 {
117 struct l2cap_chan *c;
118
119 read_lock(&conn->chan_lock);
120 c = __l2cap_get_chan_by_scid(conn, cid);
121 if (c)
122 bh_lock_sock(c->sk);
123 read_unlock(&conn->chan_lock);
124 return c;
125 }
126
127 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
128 {
129 struct l2cap_chan *c;
130
131 list_for_each_entry(c, &conn->chan_l, list) {
132 if (c->ident == ident)
133 return c;
134 }
135 return NULL;
136 }
137
138 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
139 {
140 struct l2cap_chan *c;
141
142 read_lock(&conn->chan_lock);
143 c = __l2cap_get_chan_by_ident(conn, ident);
144 if (c)
145 bh_lock_sock(c->sk);
146 read_unlock(&conn->chan_lock);
147 return c;
148 }
149
150 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
151 {
152 struct l2cap_chan *c;
153
154 list_for_each_entry(c, &chan_list, global_l) {
155 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
156 goto found;
157 }
158
159 c = NULL;
160 found:
161 return c;
162 }
163
/* Bind @chan to a PSM on the given source address.
 *
 * A caller-supplied non-zero @psm is used as-is once no other channel
 * claims the same PSM/@src pair.  With @psm == 0 the first free odd PSM
 * in the dynamic range 0x1001..0x10ff is allocated instead.
 *
 * Returns 0 on success, -EADDRINUSE if the requested PSM is taken, or
 * -EINVAL when the dynamic range is exhausted.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock_bh(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		/* Only odd PSM values are valid, hence the += 2 stride. */
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock_bh(&chan_list_lock);
	return err;
}
196
/* Assign a fixed source CID to @chan.  Always succeeds (returns 0). */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock_bh(&chan_list_lock);

	chan->scid = scid;

	write_unlock_bh(&chan_list_lock);

	return 0;
}
207
208 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
209 {
210 u16 cid = L2CAP_CID_DYN_START;
211
212 for (; cid < L2CAP_CID_DYN_END; cid++) {
213 if (!__l2cap_get_chan_by_scid(conn, cid))
214 return cid;
215 }
216
217 return 0;
218 }
219
220 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
221 {
222 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
223
224 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
225 chan_hold(chan);
226 }
227
/* Stop @timer and release its channel reference.
 *
 * del_timer() returns nonzero only when it deactivated a pending timer,
 * i.e. only when the timer actually still held a reference. */
static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
{
	BT_DBG("chan %p state %d", chan, chan->state);

	if (timer_pending(timer) && del_timer(timer))
		chan_put(chan);
}
235
/* Move @chan to @state and notify the channel owner through its
 * state_change callback. */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	chan->state = state;
	chan->ops->state_change(chan->data, state);
}
241
/* Channel timer expiry handler (softirq context).
 *
 * Closes the channel with an error derived from the state it was stuck
 * in.  If a process currently owns the socket, the timer is re-armed
 * for a short interval instead and we try again later.  In all paths
 * the reference taken when the timer was armed is dropped.
 */
static void l2cap_chan_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
	struct sock *sk = chan->sk;
	int reason;

	BT_DBG("chan %p state %d", chan, chan->state);

	bh_lock_sock(sk);

	if (sock_owned_by_user(sk)) {
		/* sk is owned by user. Try again later */
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		chan_put(chan);
		return;
	}

	/* Established/configuring channels and post-SDP connect attempts
	 * report "connection refused"; everything else a plain timeout. */
	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
			chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	bh_unlock_sock(sk);

	chan->ops->close(chan->data);
	chan_put(chan);
}
275
276 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
277 {
278 struct l2cap_chan *chan;
279
280 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
281 if (!chan)
282 return NULL;
283
284 chan->sk = sk;
285
286 write_lock_bh(&chan_list_lock);
287 list_add(&chan->global_l, &chan_list);
288 write_unlock_bh(&chan_list_lock);
289
290 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
291
292 chan->state = BT_OPEN;
293
294 atomic_set(&chan->refcnt, 1);
295
296 return chan;
297 }
298
/* Remove @chan from the global list and drop the creation reference
 * taken in l2cap_chan_create(); the channel is freed once all other
 * references are gone. */
void l2cap_chan_destroy(struct l2cap_chan *chan)
{
	write_lock_bh(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock_bh(&chan_list_lock);

	chan_put(chan);
}
307
/* Link @chan into @conn, assigning CIDs and the outgoing MTU according
 * to the channel type.  Caller must hold conn->chan_lock for writing
 * (see l2cap_chan_add()). */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			chan->psm, chan->dcid);

	/* Reset to the 0x13 default also used in l2cap_conn_add(). */
	conn->disc_reason = 0x13;

	chan->conn = conn;

	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection */
			chan->omtu = L2CAP_LE_DEFAULT_MTU;
			chan->scid = L2CAP_CID_LE_DATA;
			chan->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			chan->scid = l2cap_alloc_cid(conn);
			chan->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
	} else {
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	/* The connection's channel list holds its own reference. */
	chan_hold(chan);

	list_add(&chan->list, &conn->chan_l);
}
344
/* Delete channel.
 * Must be called on the locked socket.
 *
 * Unlinks the channel from its connection (dropping the list's
 * reference and the hci_conn reference), marks the socket closed and
 * zapped, records @err, and wakes whoever is waiting: the accept()ing
 * parent for an unaccepted child, the socket itself otherwise.  Tx/SREJ
 * queues and ERTM timers are only torn down once configuration had
 * completed in both directions. */
static void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sock *parent = bt_sk(sk)->parent;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	if (conn) {
		/* Delete from channel list */
		write_lock_bh(&conn->chan_lock);
		list_del(&chan->list);
		write_unlock_bh(&conn->chan_lock);
		chan_put(chan);

		chan->conn = NULL;
		hci_conn_put(conn->hcon);
	}

	l2cap_state_change(chan, BT_CLOSED);
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	/* Skip queue/timer teardown unless config finished both ways. */
	if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
			test_bit(CONF_INPUT_DONE, &chan->conf_state)))
		return;

	skb_queue_purge(&chan->tx_q);

	if (chan->mode == L2CAP_MODE_ERTM) {
		struct srej_list *l, *tmp;

		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		/* Free any outstanding selective-reject bookkeeping. */
		list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
			list_del(&l->list);
			kfree(l);
		}
	}
}
401
/* Tear down every pending (not yet accepted) child of a listening
 * socket, closing each with ECONNRESET. */
static void l2cap_chan_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	BT_DBG("parent %p", parent);

	/* Close not yet accepted channels */
	while ((sk = bt_accept_dequeue(parent, NULL))) {
		struct l2cap_chan *chan = l2cap_pi(sk)->chan;
		__clear_chan_timer(chan);
		lock_sock(sk);
		l2cap_chan_close(chan, ECONNRESET);
		release_sock(sk);
		chan->ops->close(chan->data);
	}
}
418
/* Close @chan for @reason.  Must be called with the socket locked.
 *
 * Behaviour depends on the channel state: listeners first kill their
 * unaccepted children; established/configuring ACL channels send a
 * Disconnect Request and re-arm the channel timer to bound the wait;
 * half-open incoming connections (BT_CONNECT2) are refused with an
 * appropriate result code before deletion; anything else is torn down
 * immediately. */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);

	switch (chan->state) {
	case BT_LISTEN:
		l2cap_chan_cleanup_listen(sk);

		l2cap_state_change(chan, BT_CLOSED);
		sock_set_flag(sk, SOCK_ZAPPED);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
				conn->hcon->type == ACL_LINK) {
			__clear_chan_timer(chan);
			__set_chan_timer(chan, sk->sk_sndtimeo);
			l2cap_send_disconn_req(conn, chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
				conn->hcon->type == ACL_LINK) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			/* Pick the refusal code the remote will see. */
			if (bt_sk(sk)->defer_setup)
				result = L2CAP_CR_SEC_BLOCK;
			else
				result = L2CAP_CR_BAD_PSM;
			l2cap_state_change(chan, BT_DISCONN);

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
478
479 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
480 {
481 if (chan->chan_type == L2CAP_CHAN_RAW) {
482 switch (chan->sec_level) {
483 case BT_SECURITY_HIGH:
484 return HCI_AT_DEDICATED_BONDING_MITM;
485 case BT_SECURITY_MEDIUM:
486 return HCI_AT_DEDICATED_BONDING;
487 default:
488 return HCI_AT_NO_BONDING;
489 }
490 } else if (chan->psm == cpu_to_le16(0x0001)) {
491 if (chan->sec_level == BT_SECURITY_LOW)
492 chan->sec_level = BT_SECURITY_SDP;
493
494 if (chan->sec_level == BT_SECURITY_HIGH)
495 return HCI_AT_NO_BONDING_MITM;
496 else
497 return HCI_AT_NO_BONDING;
498 } else {
499 switch (chan->sec_level) {
500 case BT_SECURITY_HIGH:
501 return HCI_AT_GENERAL_BONDING_MITM;
502 case BT_SECURITY_MEDIUM:
503 return HCI_AT_GENERAL_BONDING;
504 default:
505 return HCI_AT_NO_BONDING;
506 }
507 }
508 }
509
/* Service level security */
static inline int l2cap_check_security(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	/* Note: l2cap_get_auth_type() may upgrade chan->sec_level for
	 * SDP channels, so it must run before sec_level is read below. */
	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
}
520
521 static u8 l2cap_get_ident(struct l2cap_conn *conn)
522 {
523 u8 id;
524
525 /* Get next available identificator.
526 * 1 - 128 are used by kernel.
527 * 129 - 199 are reserved.
528 * 200 - 254 are used by utilities like l2ping, etc.
529 */
530
531 spin_lock_bh(&conn->lock);
532
533 if (++conn->tx_ident > 128)
534 conn->tx_ident = 1;
535
536 id = conn->tx_ident;
537
538 spin_unlock_bh(&conn->lock);
539
540 return id;
541 }
542
543 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
544 {
545 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
546 u8 flags;
547
548 BT_DBG("code 0x%2.2x", code);
549
550 if (!skb)
551 return;
552
553 if (lmp_no_flush_capable(conn->hcon->hdev))
554 flags = ACL_START_NO_FLUSH;
555 else
556 flags = ACL_START;
557
558 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
559
560 hci_send_acl(conn->hcon, skb, flags);
561 }
562
/* Build and transmit an ERTM S-frame carrying @control on @chan.
 *
 * The frame is control-only: basic L2CAP header, 16-bit control field,
 * plus an optional CRC16 FCS.  Pending F-bit and P-bit requests latched
 * in conn_state are folded into the control field before sending.
 * No-op unless the channel is in BT_CONNECTED. */
static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	struct l2cap_conn *conn = chan->conn;
	int count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control field */
	u8 flags;

	if (chan->state != BT_CONNECTED)
		return;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the trailing FCS */

	BT_DBG("chan %p, control 0x%2.2x", chan, control);

	count = min_t(unsigned int, conn->mtu, hlen);
	control |= L2CAP_CTRL_FRAME_TYPE;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
		control |= L2CAP_CTRL_FINAL;

	if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
		control |= L2CAP_CTRL_POLL;

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	put_unaligned_le16(control, skb_put(skb, 2));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything in the frame before itself. */
		u16 fcs = crc16(0, (u8 *)lh, count - 2);
		put_unaligned_le16(fcs, skb_put(skb, 2));
	}

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = chan->force_active;

	hci_send_acl(chan->conn->hcon, skb, flags);
}
611
612 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
613 {
614 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
615 control |= L2CAP_SUPER_RCV_NOT_READY;
616 set_bit(CONN_RNR_SENT, &chan->conn_state);
617 } else
618 control |= L2CAP_SUPER_RCV_READY;
619
620 control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
621
622 l2cap_send_sframe(chan, control);
623 }
624
625 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
626 {
627 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
628 }
629
/* Kick off connection establishment for @chan.
 *
 * If the remote feature mask exchange has already been started, a
 * Connect Request goes out once the exchange is done and security
 * clears.  Otherwise an Information Request for the feature mask is
 * sent first (with a timeout) and the connect attempt resumes later. */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Exchange in flight; wait for it to finish. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(chan) &&
				__l2cap_no_conn_pending(chan)) {
			struct l2cap_conn_req req;
			req.scid = cpu_to_le16(chan->scid);
			req.psm = chan->psm;

			chan->ident = l2cap_get_ident(conn);
			set_bit(CONF_CONNECT_PEND, &chan->conf_state);

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
664
665 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
666 {
667 u32 local_feat_mask = l2cap_feat_mask;
668 if (!disable_ertm)
669 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
670
671 switch (mode) {
672 case L2CAP_MODE_ERTM:
673 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
674 case L2CAP_MODE_STREAMING:
675 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
676 default:
677 return 0x00;
678 }
679 }
680
/* Send an L2CAP Disconnect Request for @chan, move it to BT_DISCONN and
 * record @err on its socket.  ERTM timers are stopped first so no
 * retransmission fires mid-teardown. */
static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
{
	struct sock *sk;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	sk = chan->sk;

	if (chan->mode == L2CAP_MODE_ERTM) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	l2cap_state_change(chan, BT_DISCONN);
	sk->sk_err = err;
}
705
706 /* ---- L2CAP connections ---- */
/* Drive connection setup for every connection-oriented channel on
 * @conn (typically once the feature-mask exchange has finished).
 *
 * Channels in BT_CONNECT get a Connect Request — or are closed when
 * their mode is unsupported by the remote and no fallback is allowed.
 * Channels in BT_CONNECT2 (incoming) are answered with success,
 * pending-authorization or pending-authentication, and configuration
 * is started on success. */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	read_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			bh_unlock_sock(sk);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			struct l2cap_conn_req req;

			if (!l2cap_check_security(chan) ||
					!__l2cap_no_conn_pending(chan)) {
				bh_unlock_sock(sk);
				continue;
			}

			/* Mode unsupported remotely and no fallback
			 * allowed: drop the channel. */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
					&& test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				/* l2cap_chan_close() calls list_del(chan)
				 * so release the lock */
				read_unlock(&conn->chan_lock);
				l2cap_chan_close(chan, ECONNRESET);
				read_lock(&conn->chan_lock);
				bh_unlock_sock(sk);
				continue;
			}

			req.scid = cpu_to_le16(chan->scid);
			req.psm = chan->psm;

			chan->ident = l2cap_get_ident(conn);
			set_bit(CONF_CONNECT_PEND, &chan->conf_state);

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
							sizeof(req), &req);

		} else if (chan->state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_check_security(chan)) {
				if (bt_sk(sk)->defer_setup) {
					/* Userspace gets the final say;
					 * just wake the parent. */
					struct sock *parent = bt_sk(sk)->parent;
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					if (parent)
						parent->sk_data_ready(parent, 0);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);

			/* Start configuration unless it is already under
			 * way or the connect was not accepted. */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
					rsp.result != L2CAP_CR_SUCCESS) {
				bh_unlock_sock(sk);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
						l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
799
800 /* Find socket with cid and source bdaddr.
801 * Returns closest match, locked.
802 */
803 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
804 {
805 struct l2cap_chan *c, *c1 = NULL;
806
807 read_lock(&chan_list_lock);
808
809 list_for_each_entry(c, &chan_list, global_l) {
810 struct sock *sk = c->sk;
811
812 if (state && c->state != state)
813 continue;
814
815 if (c->scid == cid) {
816 /* Exact match. */
817 if (!bacmp(&bt_sk(sk)->src, src)) {
818 read_unlock(&chan_list_lock);
819 return c;
820 }
821
822 /* Closest match */
823 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
824 c1 = c;
825 }
826 }
827
828 read_unlock(&chan_list_lock);
829
830 return c1;
831 }
832
/* Handle a new incoming LE connection: if a socket is listening on the
 * LE data CID for our address, spawn a child channel, attach it to
 * @conn and wake the accept()ing parent. */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct sock *parent, *sk;
	struct l2cap_chan *chan, *pchan;

	BT_DBG("");

	/* Check if we have socket listening on cid */
	pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!pchan)
		return;

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto clean;

	sk = chan->sk;

	write_lock_bh(&conn->chan_lock);

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	__set_chan_timer(chan, sk->sk_sndtimeo);

	l2cap_state_change(chan, BT_CONNECTED);
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&conn->chan_lock);

clean:
	/* The success path falls through here too; only the parent's
	 * socket lock is held at this point. */
	bh_unlock_sock(parent);
}
883
/* Mark the channel backing @sk fully connected: clear configuration
 * state and the setup timer, then wake the socket (and the accept()ing
 * parent, if this is an unaccepted child). */
static void l2cap_chan_ready(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	chan->conf_state = 0;
	__clear_chan_timer(chan);

	l2cap_state_change(chan, BT_CONNECTED);
	sk->sk_state_change(sk);

	if (parent)
		parent->sk_data_ready(parent, 0);
}
900
/* Called when the underlying HCI link comes up.
 *
 * For LE links, incoming connections are dispatched to listeners and
 * outgoing ones kick off SMP.  Then every channel on the connection is
 * advanced: LE channels complete once security is in place, channels
 * that are not connection-oriented go straight to BT_CONNECTED, and
 * channels waiting in BT_CONNECT (re)start the connect sequence. */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	if (conn->hcon->out && conn->hcon->type == LE_LINK)
		smp_conn_security(conn, conn->hcon->pending_sec_level);

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		if (conn->hcon->type == LE_LINK) {
			if (smp_conn_security(conn, chan->sec_level))
				l2cap_chan_ready(sk);

		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			l2cap_state_change(chan, BT_CONNECTED);
			sk->sk_state_change(sk);

		} else if (chan->state == BT_CONNECT)
			l2cap_do_start(chan);

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);
}
937
938 /* Notify sockets that we cannot guaranty reliability anymore */
939 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
940 {
941 struct l2cap_chan *chan;
942
943 BT_DBG("conn %p", conn);
944
945 read_lock(&conn->chan_lock);
946
947 list_for_each_entry(chan, &conn->chan_l, list) {
948 struct sock *sk = chan->sk;
949
950 if (chan->force_reliable)
951 sk->sk_err = err;
952 }
953
954 read_unlock(&conn->chan_lock);
955 }
956
957 static void l2cap_info_timeout(unsigned long arg)
958 {
959 struct l2cap_conn *conn = (void *) arg;
960
961 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
962 conn->info_ident = 0;
963
964 l2cap_conn_start(conn);
965 }
966
/* Tear down all L2CAP state attached to @hcon.
 *
 * Every channel is deleted (its owner notified with @err), pending
 * info/security timers are stopped, any in-progress SMP exchange is
 * destroyed, and the l2cap_conn itself is freed. */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;
	struct sock *sk;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		sk = chan->sk;
		bh_lock_sock(sk);
		l2cap_chan_del(chan, err);
		bh_unlock_sock(sk);
		chan->ops->close(chan->data);
	}

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		del_timer_sync(&conn->info_timer);

	if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
		del_timer(&conn->security_timer);
		smp_chan_destroy(conn);
	}

	hcon->l2cap_data = NULL;
	kfree(conn);
}
1000
/* LE security (SMP) negotiation took too long: tear the whole
 * connection down. */
static void security_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	l2cap_conn_del(conn->hcon, ETIMEDOUT);
}
1007
/* Get-or-create the L2CAP layer state for @hcon.
 *
 * Returns the conn already attached to @hcon if any; otherwise
 * allocates and initialises a new one.  Returns NULL on allocation
 * failure, and the existing (possibly NULL) conn when @status reports
 * an HCI error. */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* LE links may use a dedicated MTU when the controller set one. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);

	/* LE links time out on SMP; BR/EDR on the info exchange. */
	if (hcon->type == LE_LINK)
		setup_timer(&conn->security_timer, security_timeout,
						(unsigned long) conn);
	else
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason (also reset in __l2cap_chan_add()). */
	conn->disc_reason = 0x13;

	return conn;
}
1050
/* Add @chan to @conn's channel list, taking the channel-list lock that
 * __l2cap_chan_add() expects to be held. */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	write_lock_bh(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	write_unlock_bh(&conn->chan_lock);
}
1057
1058 /* ---- Socket interface ---- */
1059
1060 /* Find socket with psm and source bdaddr.
1061 * Returns closest match.
1062 */
1063 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1064 {
1065 struct l2cap_chan *c, *c1 = NULL;
1066
1067 read_lock(&chan_list_lock);
1068
1069 list_for_each_entry(c, &chan_list, global_l) {
1070 struct sock *sk = c->sk;
1071
1072 if (state && c->state != state)
1073 continue;
1074
1075 if (c->psm == psm) {
1076 /* Exact match. */
1077 if (!bacmp(&bt_sk(sk)->src, src)) {
1078 read_unlock(&chan_list_lock);
1079 return c;
1080 }
1081
1082 /* Closest match */
1083 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1084 c1 = c;
1085 }
1086 }
1087
1088 read_unlock(&chan_list_lock);
1089
1090 return c1;
1091 }
1092
/* Initiate an outgoing connection for @chan to the address stored on
 * its socket.
 *
 * Resolves the route, creates (or reuses) the underlying ACL or LE
 * link, attaches the channel and — if the link is already up — runs
 * the security check / connect sequence.  Returns 0 on success or a
 * negative errno. */
int l2cap_chan_connect(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							chan->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(chan);

	/* The LE data CID selects an LE link; everything else uses ACL. */
	if (chan->dcid == L2CAP_CID_LE_DATA)
		hcon = hci_connect(hdev, LE_LINK, dst,
					chan->sec_level, auth_type);
	else
		hcon = hci_connect(hdev, ACL_LINK, dst,
					chan->sec_level, auth_type);

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon, 0);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, chan);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, sk->sk_sndtimeo);

	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_check_security(chan))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1158
/* Sleep (interruptibly) until every transmitted ERTM frame has been
 * acknowledged or the connection goes away.
 *
 * Called with the socket locked; the lock is released around each
 * wait.  Returns 0 on success, or a negative errno if a signal arrived
 * or a socket error was raised while waiting. */
int __l2cap_wait_ack(struct sock *sk)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	while (chan->unacked_frames > 0 && chan->conn) {
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		set_current_state(TASK_INTERRUPTIBLE);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1190
/* Monitor timer expiry: poll the peer again with an RR/RNR P-bit
 * frame, or give up and disconnect once remote_max_tx retries have
 * been spent. */
static void l2cap_monitor_timeout(unsigned long arg)
{
	struct l2cap_chan *chan = (void *) arg;
	struct sock *sk = chan->sk;

	BT_DBG("chan %p", chan);

	bh_lock_sock(sk);
	if (chan->retry_count >= chan->remote_max_tx) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
		bh_unlock_sock(sk);
		return;
	}

	chan->retry_count++;
	__set_monitor_timer(chan);

	l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
	bh_unlock_sock(sk);
}
1211
1212 static void l2cap_retrans_timeout(unsigned long arg)
1213 {
1214 struct l2cap_chan *chan = (void *) arg;
1215 struct sock *sk = chan->sk;
1216
1217 BT_DBG("chan %p", chan);
1218
1219 bh_lock_sock(sk);
1220 chan->retry_count = 1;
1221 __set_monitor_timer(chan);
1222
1223 set_bit(CONN_WAIT_F, &chan->conn_state);
1224
1225 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1226 bh_unlock_sock(sk);
1227 }
1228
1229 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1230 {
1231 struct sk_buff *skb;
1232
1233 while ((skb = skb_peek(&chan->tx_q)) &&
1234 chan->unacked_frames) {
1235 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1236 break;
1237
1238 skb = skb_dequeue(&chan->tx_q);
1239 kfree_skb(skb);
1240
1241 chan->unacked_frames--;
1242 }
1243
1244 if (!chan->unacked_frames)
1245 __clear_retrans_timer(chan);
1246 }
1247
1248 void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1249 {
1250 struct hci_conn *hcon = chan->conn->hcon;
1251 u16 flags;
1252
1253 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1254
1255 if (!chan->flushable && lmp_no_flush_capable(hcon->hdev))
1256 flags = ACL_START_NO_FLUSH;
1257 else
1258 flags = ACL_START;
1259
1260 bt_cb(skb)->force_active = chan->force_active;
1261 hci_send_acl(hcon, skb, flags);
1262 }
1263
/* Flush the tx queue in streaming mode: stamp the next tx_seq into
 * each frame's control field, refresh the FCS if in use, and send.
 * Sequence numbers wrap modulo 64. */
void l2cap_streaming_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb;
	u16 control, fcs;

	while ((skb = skb_dequeue(&chan->tx_q))) {
		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
		control |= chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers the frame up to (not including) itself. */
			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
			put_unaligned_le16(fcs, skb->data + skb->len - 2);
		}

		l2cap_do_send(chan, skb);

		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
	}
}
1284
1285 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u8 tx_seq)
1286 {
1287 struct sk_buff *skb, *tx_skb;
1288 u16 control, fcs;
1289
1290 skb = skb_peek(&chan->tx_q);
1291 if (!skb)
1292 return;
1293
1294 do {
1295 if (bt_cb(skb)->tx_seq == tx_seq)
1296 break;
1297
1298 if (skb_queue_is_last(&chan->tx_q, skb))
1299 return;
1300
1301 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1302
1303 if (chan->remote_max_tx &&
1304 bt_cb(skb)->retries == chan->remote_max_tx) {
1305 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1306 return;
1307 }
1308
1309 tx_skb = skb_clone(skb, GFP_ATOMIC);
1310 bt_cb(skb)->retries++;
1311 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1312 control &= L2CAP_CTRL_SAR;
1313
1314 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1315 control |= L2CAP_CTRL_FINAL;
1316
1317 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1318 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1319
1320 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1321
1322 if (chan->fcs == L2CAP_FCS_CRC16) {
1323 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1324 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1325 }
1326
1327 l2cap_do_send(chan, tx_skb);
1328 }
1329
1330 int l2cap_ertm_send(struct l2cap_chan *chan)
1331 {
1332 struct sk_buff *skb, *tx_skb;
1333 u16 control, fcs;
1334 int nsent = 0;
1335
1336 if (chan->state != BT_CONNECTED)
1337 return -ENOTCONN;
1338
1339 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1340
1341 if (chan->remote_max_tx &&
1342 bt_cb(skb)->retries == chan->remote_max_tx) {
1343 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1344 break;
1345 }
1346
1347 tx_skb = skb_clone(skb, GFP_ATOMIC);
1348
1349 bt_cb(skb)->retries++;
1350
1351 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1352 control &= L2CAP_CTRL_SAR;
1353
1354 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1355 control |= L2CAP_CTRL_FINAL;
1356
1357 control |= (chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1358 | (chan->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1359 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1360
1361
1362 if (chan->fcs == L2CAP_FCS_CRC16) {
1363 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1364 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1365 }
1366
1367 l2cap_do_send(chan, tx_skb);
1368
1369 __set_retrans_timer(chan);
1370
1371 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1372 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1373
1374 if (bt_cb(skb)->retries == 1)
1375 chan->unacked_frames++;
1376
1377 chan->frames_sent++;
1378
1379 if (skb_queue_is_last(&chan->tx_q, skb))
1380 chan->tx_send_head = NULL;
1381 else
1382 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1383
1384 nsent++;
1385 }
1386
1387 return nsent;
1388 }
1389
1390 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1391 {
1392 int ret;
1393
1394 if (!skb_queue_empty(&chan->tx_q))
1395 chan->tx_send_head = chan->tx_q.next;
1396
1397 chan->next_tx_seq = chan->expected_ack_seq;
1398 ret = l2cap_ertm_send(chan);
1399 return ret;
1400 }
1401
/* Acknowledge received I-frames.  If we are locally busy, advertise it
 * with a Receiver-Not-Ready S-frame.  Otherwise try to piggy-back the
 * acknowledgment on pending I-frames (every I-frame carries ReqSeq);
 * only when nothing was sent do we emit an explicit Receiver-Ready
 * S-frame.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	u16 control = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		set_bit(CONN_RNR_SENT, &chan->conn_state);
		l2cap_send_sframe(chan, control);
		return;
	}

	/* Sending any I-frame already acknowledges up to buffer_seq */
	if (l2cap_ertm_send(chan) > 0)
		return;

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(chan, control);
}
1421
1422 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1423 {
1424 struct srej_list *tail;
1425 u16 control;
1426
1427 control = L2CAP_SUPER_SELECT_REJECT;
1428 control |= L2CAP_CTRL_FINAL;
1429
1430 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1431 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1432
1433 l2cap_send_sframe(chan, control);
1434 }
1435
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into @skb itself, anything beyond spills into continuation
 * fragments chained on skb's frag_list (each capped at the connection
 * MTU, with no L2CAP header of their own).  Returns bytes consumed or
 * a negative errno.  On failure the caller frees @skb, which also
 * releases any fragments already attached here.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
	struct sk_buff **frag;
	int err, sent = 0;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
		if (!*frag)
			return err;
		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
			return -EFAULT;

		sent += count;
		len -= count;

		frag = &(*frag)->next;
	}

	return sent;
}
1467
/* Build a connectionless PDU: basic L2CAP header plus a 2-byte PSM,
 * followed by the user payload gathered from @msg.  Returns the skb or
 * an ERR_PTR on allocation/copy failure.
 */
struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + PSM */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* First skb carries the header plus whatever payload fits the
	 * MTU; the rest goes into frag_list continuations. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(chan->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1497
/* Build a basic-mode PDU: plain L2CAP header followed by the user
 * payload gathered from @msg.  Returns the skb or an ERR_PTR on
 * allocation/copy failure.
 */
struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	/* First skb carries the header plus whatever payload fits the
	 * MTU; the rest goes into frag_list continuations. */
	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
1526
/* Build one ERTM/streaming I-frame: basic header, 16-bit control
 * field, an optional 16-bit SDU length (only on the start segment of a
 * segmented SDU, when @sdulen != 0), the payload, and room for a
 * trailing FCS, which is zero-filled here and computed at transmit
 * time.  Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					struct msghdr *msg, size_t len,
					u16 control, u16 sdulen)
{
	struct sock *sk = chan->sk;
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* header + control */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	if (sdulen)
		hlen += 2;	/* SAR start segment carries the SDU length */

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += 2;	/* room for the trailing FCS */

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		return ERR_PTR(err);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Placeholder FCS; the real value is stamped when sending */
	if (chan->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
	return skb;
}
1574
/* Segment an SDU larger than the remote MPS into a START / CONTINUE /
 * END sequence of I-frames.  Frames are staged on a private queue so a
 * mid-stream allocation failure leaves tx_q untouched, then spliced
 * onto tx_q in one go.  Returns total bytes queued or a negative
 * errno.
 */
int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;
	u16 control;
	size_t size = 0;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* The start segment also carries the total SDU length (@len) */
	skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
	if (IS_ERR(skb))
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= chan->remote_mps;
	size += chan->remote_mps;

	while (len > 0) {
		size_t buflen;

		if (len > chan->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = chan->remote_mps;
		} else {
			control = L2CAP_SDU_END;
			buflen = len;
		}

		skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
		if (IS_ERR(skb)) {
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(&sar_queue, skb);
		len -= buflen;
		size += buflen;
	}
	skb_queue_splice_tail(&sar_queue, &chan->tx_q);
	/* NOTE(review): reads sar_queue.next after the splice; this
	 * appears to rely on splice_tail not reinitializing the source
	 * list so .next still points at the first moved skb -- confirm.
	 */
	if (chan->tx_send_head == NULL)
		chan->tx_send_head = sar_queue.next;

	return size;
}
1619
/* Entry point for outbound data on a channel.  Connectionless channels
 * send immediately; basic mode enforces the outgoing MTU; ERTM and
 * streaming modes queue the SDU (segmenting it when it exceeds the
 * remote MPS) and then transmit according to the mode's rules.
 * Returns the number of bytes accepted or a negative errno.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 control;
	int err;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Entire SDU fits into one PDU */
		if (len <= chan->remote_mps) {
			control = L2CAP_SDU_UNSEGMENTED;
			skb = l2cap_create_iframe_pdu(chan, msg, len, control,
									0);
			if (IS_ERR(skb))
				return PTR_ERR(skb);

			__skb_queue_tail(&chan->tx_q, skb);

			if (chan->tx_send_head == NULL)
				chan->tx_send_head = skb;

		} else {
			/* Segment SDU into multiples PDUs */
			err = l2cap_sar_segment_sdu(chan, msg, len);
			if (err < 0)
				return err;
		}

		if (chan->mode == L2CAP_MODE_STREAMING) {
			l2cap_streaming_send(chan);
			err = len;
			break;
		}

		/* While waiting for the F-bit from a busy remote, keep the
		 * frames queued; they go out once the state clears. */
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				test_bit(CONN_WAIT_F, &chan->conn_state)) {
			err = len;
			break;
		}

		err = l2cap_ertm_send(chan);
		if (err >= 0)
			err = len;

		break;

	default:
		/* NOTE(review): the message says "state" but the value
		 * logged is chan->mode */
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
1698
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* chan_lock protects the per-connection channel list */
	read_lock(&conn->chan_lock);
	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;
		/* Clone for each recipient; skip on allocation failure */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		/* recv() takes ownership on success; free when refused */
		if (chan->ops->recv(chan->data, nskb))
			kfree_skb(nskb);
	}
	read_unlock(&conn->chan_lock);
}
1725
1726 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU: L2CAP header (LE or BR/EDR signalling CID),
 * command header, and @dlen bytes of @data.  Payload beyond the
 * connection MTU is carried in frag_list continuation skbs.  Returns
 * NULL if any allocation fails.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling travels on a fixed CID that differs for LE links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	/* As much payload as fits after the two headers */
	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and any fragments already chained to it */
	kfree_skb(skb);
	return NULL;
}
1789
1790 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1791 {
1792 struct l2cap_conf_opt *opt = *ptr;
1793 int len;
1794
1795 len = L2CAP_CONF_OPT_SIZE + opt->len;
1796 *ptr += len;
1797
1798 *type = opt->type;
1799 *olen = opt->len;
1800
1801 switch (opt->len) {
1802 case 1:
1803 *val = *((u8 *) opt->val);
1804 break;
1805
1806 case 2:
1807 *val = get_unaligned_le16(opt->val);
1808 break;
1809
1810 case 4:
1811 *val = get_unaligned_le32(opt->val);
1812 break;
1813
1814 default:
1815 *val = (unsigned long) opt->val;
1816 break;
1817 }
1818
1819 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1820 return len;
1821 }
1822
1823 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1824 {
1825 struct l2cap_conf_opt *opt = *ptr;
1826
1827 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1828
1829 opt->type = type;
1830 opt->len = len;
1831
1832 switch (len) {
1833 case 1:
1834 *((u8 *) opt->val) = val;
1835 break;
1836
1837 case 2:
1838 put_unaligned_le16(val, opt->val);
1839 break;
1840
1841 case 4:
1842 put_unaligned_le32(val, opt->val);
1843 break;
1844
1845 default:
1846 memcpy(opt->val, (void *) val, len);
1847 break;
1848 }
1849
1850 *ptr += L2CAP_CONF_OPT_SIZE + len;
1851 }
1852
1853 static void l2cap_ack_timeout(unsigned long arg)
1854 {
1855 struct l2cap_chan *chan = (void *) arg;
1856
1857 bh_lock_sock(chan->sk);
1858 l2cap_send_ack(chan);
1859 bh_unlock_sock(chan->sk);
1860 }
1861
/* Reset the ERTM state machine and arm its infrastructure: sequence
 * counters, the retransmission/monitor/ack timers, the SREJ reassembly
 * queue and list, and the ERTM backlog receive hook on the socket.
 */
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
{
	struct sock *sk = chan->sk;

	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->num_acked = 0;
	chan->frames_sent = 0;

	setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
							(unsigned long) chan);
	setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
							(unsigned long) chan);
	setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);

	skb_queue_head_init(&chan->srej_q);

	INIT_LIST_HEAD(&chan->srej_l);

	/* Frames landing on the backlog while the socket is owned by
	 * user context are processed by the ERTM receive path */
	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
}
1885
1886 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1887 {
1888 switch (mode) {
1889 case L2CAP_MODE_STREAMING:
1890 case L2CAP_MODE_ERTM:
1891 if (l2cap_mode_supported(mode, remote_feat_mask))
1892 return mode;
1893 /* fall through */
1894 default:
1895 return L2CAP_MODE_BASIC;
1896 }
1897 }
1898
/* Fill @data with our Configure Request for @chan: the MTU option
 * (when not the default) and an RFC option describing the desired
 * mode.  On the first exchange the mode may be downgraded to whatever
 * the remote feature mask supports.  Returns the request length.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;

	BT_DBG("chan %p", chan);

	/* Only (re)select the mode on the very first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* A STATE2_DEVICE channel insists on its configured mode */
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* Only advertise basic mode explicitly when the remote
		 * understands the RFC option at all */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode            = L2CAP_MODE_BASIC;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode            = L2CAP_MODE_ERTM;
		rfc.txwin_size      = chan->tx_win;
		rfc.max_transmit    = chan->max_tx;
		/* Timeouts are filled in by the responding side */
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Keep the PDU size within our ACL MTU */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Ask to drop the FCS when both sides allow it */
		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.mode            = L2CAP_MODE_STREAMING;
		rfc.txwin_size      = 0;
		rfc.max_transmit    = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size    = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
							(unsigned long) &rfc);

		if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (chan->fcs == L2CAP_FCS_NONE ||
				test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
			chan->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
		}
		break;
	}

	req->dcid  = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
1995
1996 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
1997 {
1998 struct l2cap_conf_rsp *rsp = data;
1999 void *ptr = rsp->data;
2000 void *req = chan->conf_req;
2001 int len = chan->conf_len;
2002 int type, hint, olen;
2003 unsigned long val;
2004 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2005 u16 mtu = L2CAP_DEFAULT_MTU;
2006 u16 result = L2CAP_CONF_SUCCESS;
2007
2008 BT_DBG("chan %p", chan);
2009
2010 while (len >= L2CAP_CONF_OPT_SIZE) {
2011 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2012
2013 hint = type & L2CAP_CONF_HINT;
2014 type &= L2CAP_CONF_MASK;
2015
2016 switch (type) {
2017 case L2CAP_CONF_MTU:
2018 mtu = val;
2019 break;
2020
2021 case L2CAP_CONF_FLUSH_TO:
2022 chan->flush_to = val;
2023 break;
2024
2025 case L2CAP_CONF_QOS:
2026 break;
2027
2028 case L2CAP_CONF_RFC:
2029 if (olen == sizeof(rfc))
2030 memcpy(&rfc, (void *) val, olen);
2031 break;
2032
2033 case L2CAP_CONF_FCS:
2034 if (val == L2CAP_FCS_NONE)
2035 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2036
2037 break;
2038
2039 default:
2040 if (hint)
2041 break;
2042
2043 result = L2CAP_CONF_UNKNOWN;
2044 *((u8 *) ptr++) = type;
2045 break;
2046 }
2047 }
2048
2049 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2050 goto done;
2051
2052 switch (chan->mode) {
2053 case L2CAP_MODE_STREAMING:
2054 case L2CAP_MODE_ERTM:
2055 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2056 chan->mode = l2cap_select_mode(rfc.mode,
2057 chan->conn->feat_mask);
2058 break;
2059 }
2060
2061 if (chan->mode != rfc.mode)
2062 return -ECONNREFUSED;
2063
2064 break;
2065 }
2066
2067 done:
2068 if (chan->mode != rfc.mode) {
2069 result = L2CAP_CONF_UNACCEPT;
2070 rfc.mode = chan->mode;
2071
2072 if (chan->num_conf_rsp == 1)
2073 return -ECONNREFUSED;
2074
2075 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2076 sizeof(rfc), (unsigned long) &rfc);
2077 }
2078
2079
2080 if (result == L2CAP_CONF_SUCCESS) {
2081 /* Configure output options and let the other side know
2082 * which ones we don't like. */
2083
2084 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2085 result = L2CAP_CONF_UNACCEPT;
2086 else {
2087 chan->omtu = mtu;
2088 set_bit(CONF_MTU_DONE, &chan->conf_state);
2089 }
2090 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2091
2092 switch (rfc.mode) {
2093 case L2CAP_MODE_BASIC:
2094 chan->fcs = L2CAP_FCS_NONE;
2095 set_bit(CONF_MODE_DONE, &chan->conf_state);
2096 break;
2097
2098 case L2CAP_MODE_ERTM:
2099 chan->remote_tx_win = rfc.txwin_size;
2100 chan->remote_max_tx = rfc.max_transmit;
2101
2102 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2103 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2104
2105 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2106
2107 rfc.retrans_timeout =
2108 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2109 rfc.monitor_timeout =
2110 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2111
2112 set_bit(CONF_MODE_DONE, &chan->conf_state);
2113
2114 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2115 sizeof(rfc), (unsigned long) &rfc);
2116
2117 break;
2118
2119 case L2CAP_MODE_STREAMING:
2120 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2121 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2122
2123 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2124
2125 set_bit(CONF_MODE_DONE, &chan->conf_state);
2126
2127 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2128 sizeof(rfc), (unsigned long) &rfc);
2129
2130 break;
2131
2132 default:
2133 result = L2CAP_CONF_UNACCEPT;
2134
2135 memset(&rfc, 0, sizeof(rfc));
2136 rfc.mode = chan->mode;
2137 }
2138
2139 if (result == L2CAP_CONF_SUCCESS)
2140 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2141 }
2142 rsp->scid = cpu_to_le16(chan->dcid);
2143 rsp->result = cpu_to_le16(result);
2144 rsp->flags = cpu_to_le16(0x0000);
2145
2146 return ptr - data;
2147 }
2148
2149 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2150 {
2151 struct l2cap_conf_req *req = data;
2152 void *ptr = req->data;
2153 int type, olen;
2154 unsigned long val;
2155 struct l2cap_conf_rfc rfc;
2156
2157 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2158
2159 while (len >= L2CAP_CONF_OPT_SIZE) {
2160 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2161
2162 switch (type) {
2163 case L2CAP_CONF_MTU:
2164 if (val < L2CAP_DEFAULT_MIN_MTU) {
2165 *result = L2CAP_CONF_UNACCEPT;
2166 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2167 } else
2168 chan->imtu = val;
2169 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2170 break;
2171
2172 case L2CAP_CONF_FLUSH_TO:
2173 chan->flush_to = val;
2174 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2175 2, chan->flush_to);
2176 break;
2177
2178 case L2CAP_CONF_RFC:
2179 if (olen == sizeof(rfc))
2180 memcpy(&rfc, (void *)val, olen);
2181
2182 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2183 rfc.mode != chan->mode)
2184 return -ECONNREFUSED;
2185
2186 chan->fcs = 0;
2187
2188 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2189 sizeof(rfc), (unsigned long) &rfc);
2190 break;
2191 }
2192 }
2193
2194 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2195 return -ECONNREFUSED;
2196
2197 chan->mode = rfc.mode;
2198
2199 if (*result == L2CAP_CONF_SUCCESS) {
2200 switch (rfc.mode) {
2201 case L2CAP_MODE_ERTM:
2202 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2203 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2204 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2205 break;
2206 case L2CAP_MODE_STREAMING:
2207 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2208 }
2209 }
2210
2211 req->dcid = cpu_to_le16(chan->dcid);
2212 req->flags = cpu_to_le16(0x0000);
2213
2214 return ptr - data;
2215 }
2216
/* Fill @data with a bare Configure Response header carrying @result
 * and @flags.  No options are appended, so the returned length is just
 * the fixed header size (ptr still points at rsp->data).
 */
static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("chan %p", chan);

	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);

	return ptr - data;
}
2230
/* Send the deferred (now successful) Connect Response for a channel
 * whose acceptance was held back by deferred setup, then kick off the
 * first Configure Request unless one was already sent.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
{
	struct l2cap_conn_rsp rsp;
	struct l2cap_conn *conn = chan->conn;
	u8 buf[128];

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
	l2cap_send_cmd(conn, chan->ident,
				L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* test_and_set so only the first caller sends the request */
	if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
		return;

	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			l2cap_build_conf_req(chan, buf), buf);
	chan->num_conf_req++;
}
2251
2252 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2253 {
2254 int type, olen;
2255 unsigned long val;
2256 struct l2cap_conf_rfc rfc;
2257
2258 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2259
2260 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2261 return;
2262
2263 while (len >= L2CAP_CONF_OPT_SIZE) {
2264 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2265
2266 switch (type) {
2267 case L2CAP_CONF_RFC:
2268 if (olen == sizeof(rfc))
2269 memcpy(&rfc, (void *)val, olen);
2270 goto done;
2271 }
2272 }
2273
2274 done:
2275 switch (rfc.mode) {
2276 case L2CAP_MODE_ERTM:
2277 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2278 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2279 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2280 break;
2281 case L2CAP_MODE_STREAMING:
2282 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2283 }
2284 }
2285
2286 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2287 {
2288 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2289
2290 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2291 return 0;
2292
2293 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2294 cmd->ident == conn->info_ident) {
2295 del_timer(&conn->info_timer);
2296
2297 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2298 conn->info_ident = 0;
2299
2300 l2cap_conn_start(conn);
2301 }
2302
2303 return 0;
2304 }
2305
/* Handle an incoming Connect Request: find a listening channel for the
 * PSM, apply security and backlog checks, create the child channel,
 * and answer with a Connect Response (success, pending, or an error).
 * May also trigger the initial information-request and
 * configure-request exchanges.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	parent = pchan->sk;

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;	/* authentication failure */
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	chan = pchan->ops->new_connection(pchan->data);
	if (!chan)
		goto response;

	sk = chan->sk;

	write_lock_bh(&conn->chan_lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		write_unlock_bh(&conn->chan_lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		chan->ops->close(chan->data);
		goto response;
	}

	hci_conn_hold(conn->hcon);

	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	chan->psm  = psm;
	chan->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, sk->sk_sndtimeo);

	chan->ident = cmd->ident;

	/* Pick the response outcome based on security / deferred setup */
	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(chan)) {
			if (bt_sk(sk)->defer_setup) {
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				l2cap_state_change(chan, BT_CONFIG);
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&conn->chan_lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* First connection on this link: query the remote feature mask */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* Immediate success: begin configuration right away */
	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return 0;
}
2439
/* Handle a Connect Response from the remote side.  The channel is
 * looked up by our source CID or, while the scid is still unknown, by
 * the command ident.
 * NOTE(review): there is no bh_lock_sock() here, only the
 * bh_unlock_sock() at the end -- this relies on
 * l2cap_get_chan_by_scid()/_by_ident() returning with the socket
 * locked; confirm against their definitions.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		chan = l2cap_get_chan_by_scid(conn, scid);
		if (!chan)
			return -EFAULT;
	} else {
		chan = l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan)
			return -EFAULT;
	}

	sk = chan->sk;

	switch (result) {
	case L2CAP_CR_SUCCESS:
		/* Connection accepted: move to configuration */
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only send the first Configure Request once */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			l2cap_state_change(chan, BT_DISCONN);
			__clear_chan_timer(chan);
			__set_chan_timer(chan, HZ / 5);
			break;
		}

		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
2502
2503 static inline void set_default_fcs(struct l2cap_chan *chan)
2504 {
2505 /* FCS is enabled only in ERTM or streaming mode, if one or both
2506 * sides request it.
2507 */
2508 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2509 chan->fcs = L2CAP_FCS_NONE;
2510 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2511 chan->fcs = L2CAP_FCS_CRC16;
2512 }
2513
/* Handle an incoming L2CAP Configure Request.
 *
 * Configure Requests may arrive fragmented (continuation bit set in
 * 'flags'); option data is accumulated in chan->conf_req until the
 * final fragment arrives, then parsed and answered in one go. When
 * both directions finish configuring, the channel goes BT_CONNECTED.
 * Returns 0, or -ENOENT when the destination CID is unknown.
 *
 * NOTE(review): the lookup helper appears to return with the channel's
 * socket bh-locked; the unlock label pairs with it -- confirm against
 * l2cap_get_chan_by_scid().
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rsp[64];
	struct l2cap_chan *chan;
	struct sock *sk;
	int len;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return -ENOENT;

	sk = chan->sk;

	if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
		/* Configuration is only legal while connecting or
		 * configuring; otherwise reject with Invalid CID. */
		struct l2cap_cmd_rej_cid rej;

		rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
		rej.scid = cpu_to_le16(chan->scid);
		rej.dcid = cpu_to_le16(chan->dcid);

		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_REJECT, flags), rsp);
		goto unlock;
	}

	/* Store config. */
	memcpy(chan->conf_req + chan->conf_len, req->data, len);
	chan->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config (continuation bit set).
		 * Send empty response and wait for more fragments. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(chan, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);
		goto unlock;
	}

	/* Complete config. */
	len = l2cap_parse_conf_req(chan, rsp);
	if (len < 0) {
		l2cap_send_disconn_req(conn, chan, ECONNRESET);
		goto unlock;
	}

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	chan->num_conf_rsp++;

	/* Reset config buffer. */
	chan->conf_len = 0;

	if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
		goto unlock;

	if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
		/* Both directions configured: channel is up. */
		set_default_fcs(chan);

		l2cap_state_change(chan, BT_CONNECTED);

		chan->next_tx_seq = 0;
		chan->expected_tx_seq = 0;
		skb_queue_head_init(&chan->tx_q);
		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(chan);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
		/* We answered the peer but have not yet sent our own
		 * Configure Request -- do it now, at most once. */
		u8 buf[64];
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
2609
2610 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2611 {
2612 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2613 u16 scid, flags, result;
2614 struct l2cap_chan *chan;
2615 struct sock *sk;
2616 int len = cmd->len - sizeof(*rsp);
2617
2618 scid = __le16_to_cpu(rsp->scid);
2619 flags = __le16_to_cpu(rsp->flags);
2620 result = __le16_to_cpu(rsp->result);
2621
2622 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2623 scid, flags, result);
2624
2625 chan = l2cap_get_chan_by_scid(conn, scid);
2626 if (!chan)
2627 return 0;
2628
2629 sk = chan->sk;
2630
2631 switch (result) {
2632 case L2CAP_CONF_SUCCESS:
2633 l2cap_conf_rfc_get(chan, rsp->data, len);
2634 break;
2635
2636 case L2CAP_CONF_UNACCEPT:
2637 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2638 char req[64];
2639
2640 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2641 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2642 goto done;
2643 }
2644
2645 /* throw out any old stored conf requests */
2646 result = L2CAP_CONF_SUCCESS;
2647 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2648 req, &result);
2649 if (len < 0) {
2650 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2651 goto done;
2652 }
2653
2654 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2655 L2CAP_CONF_REQ, len, req);
2656 chan->num_conf_req++;
2657 if (result != L2CAP_CONF_SUCCESS)
2658 goto done;
2659 break;
2660 }
2661
2662 default:
2663 sk->sk_err = ECONNRESET;
2664 __set_chan_timer(chan, HZ * 5);
2665 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2666 goto done;
2667 }
2668
2669 if (flags & 0x01)
2670 goto done;
2671
2672 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2673
2674 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2675 set_default_fcs(chan);
2676
2677 l2cap_state_change(chan, BT_CONNECTED);
2678 chan->next_tx_seq = 0;
2679 chan->expected_tx_seq = 0;
2680 skb_queue_head_init(&chan->tx_q);
2681 if (chan->mode == L2CAP_MODE_ERTM)
2682 l2cap_ertm_init(chan);
2683
2684 l2cap_chan_ready(sk);
2685 }
2686
2687 done:
2688 bh_unlock_sock(sk);
2689 return 0;
2690 }
2691
/* Handle an incoming L2CAP Disconnect Request.
 *
 * Acknowledges with a Disconnect Response (CIDs swapped to our view of
 * the channel) and tears the channel down. If user space currently
 * holds the socket lock, teardown is deferred to a short channel timer
 * rather than being done here in BH context. Always returns 0.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	chan = l2cap_get_chan_by_scid(conn, dcid);
	if (!chan)
		return 0;

	sk = chan->sk;

	/* Echo the CIDs back from our perspective. */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	sk->sk_shutdown = SHUTDOWN_MASK;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		l2cap_state_change(chan, BT_DISCONN);
		__clear_chan_timer(chan);
		__set_chan_timer(chan, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(chan, ECONNRESET);
	bh_unlock_sock(sk);

	/* NOTE(review): chan is dereferenced after l2cap_chan_del();
	 * this relies on the channel staying valid until ops->close()
	 * runs -- confirm the chan lifetime rules. */
	chan->ops->close(chan->data);
	return 0;
}
2732
2733 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2734 {
2735 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2736 u16 dcid, scid;
2737 struct l2cap_chan *chan;
2738 struct sock *sk;
2739
2740 scid = __le16_to_cpu(rsp->scid);
2741 dcid = __le16_to_cpu(rsp->dcid);
2742
2743 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2744
2745 chan = l2cap_get_chan_by_scid(conn, scid);
2746 if (!chan)
2747 return 0;
2748
2749 sk = chan->sk;
2750
2751 /* don't delete l2cap channel if sk is owned by user */
2752 if (sock_owned_by_user(sk)) {
2753 l2cap_state_change(chan,BT_DISCONN);
2754 __clear_chan_timer(chan);
2755 __set_chan_timer(chan, HZ / 5);
2756 bh_unlock_sock(sk);
2757 return 0;
2758 }
2759
2760 l2cap_chan_del(chan, 0);
2761 bh_unlock_sock(sk);
2762
2763 chan->ops->close(chan->data);
2764 return 0;
2765 }
2766
/* Handle an incoming L2CAP Information Request.
 *
 * Answers the two supported queries -- the extended feature mask and
 * the fixed channel map -- and reports "not supported" for anything
 * else. Always returns 0.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_req *req = (struct l2cap_info_req *) data;
	u16 type;

	type = __le16_to_cpu(req->type);

	BT_DBG("type 0x%4.4x", type);

	if (type == L2CAP_IT_FEAT_MASK) {
		/* Response header plus a 32-bit feature mask. */
		u8 buf[8];
		u32 feat_mask = l2cap_feat_mask;
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		if (!disable_ertm)
			feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
							 | L2CAP_FEAT_FCS;
		put_unaligned_le32(feat_mask, rsp->data);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* Response header plus the 8-byte fixed channel bitmap.
		 * NOTE(review): the "buf + 4" assumes the info_rsp header
		 * (type + result) is exactly 4 bytes -- confirm against
		 * struct l2cap_info_rsp. */
		u8 buf[12];
		struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
		rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
		rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
		memcpy(buf + 4, l2cap_fixed_chan, 8);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(buf), buf);
	} else {
		struct l2cap_info_rsp rsp;
		rsp.type = cpu_to_le16(type);
		rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
		l2cap_send_cmd(conn, cmd->ident,
					L2CAP_INFO_RSP, sizeof(rsp), &rsp);
	}

	return 0;
}
2806
2807 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2808 {
2809 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2810 u16 type, result;
2811
2812 type = __le16_to_cpu(rsp->type);
2813 result = __le16_to_cpu(rsp->result);
2814
2815 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2816
2817 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2818 if (cmd->ident != conn->info_ident ||
2819 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2820 return 0;
2821
2822 del_timer(&conn->info_timer);
2823
2824 if (result != L2CAP_IR_SUCCESS) {
2825 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2826 conn->info_ident = 0;
2827
2828 l2cap_conn_start(conn);
2829
2830 return 0;
2831 }
2832
2833 if (type == L2CAP_IT_FEAT_MASK) {
2834 conn->feat_mask = get_unaligned_le32(rsp->data);
2835
2836 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2837 struct l2cap_info_req req;
2838 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2839
2840 conn->info_ident = l2cap_get_ident(conn);
2841
2842 l2cap_send_cmd(conn, conn->info_ident,
2843 L2CAP_INFO_REQ, sizeof(req), &req);
2844 } else {
2845 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2846 conn->info_ident = 0;
2847
2848 l2cap_conn_start(conn);
2849 }
2850 } else if (type == L2CAP_IT_FIXED_CHAN) {
2851 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2852 conn->info_ident = 0;
2853
2854 l2cap_conn_start(conn);
2855 }
2856
2857 return 0;
2858 }
2859
/* Validate LE connection parameters per the Core Specification.
 * Returns 0 when acceptable, -EINVAL otherwise. */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
							u16 to_multiplier)
{
	u16 max_latency;

	/* Connection interval: 6..3200 units of 1.25 ms, min <= max. */
	if (min < 6 || max > 3200 || min > max)
		return -EINVAL;

	/* Supervision timeout: 10..3200 units of 10 ms. */
	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* Timeout must exceed eight connection intervals. */
	if (max >= to_multiplier * 8)
		return -EINVAL;

	/* Slave latency: capped at 499 and must leave at least one
	 * connection event inside the supervision timeout. */
	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > 499 || latency > max_latency)
		return -EINVAL;

	return 0;
}
2880
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when we are the master. Validates the requested
 * interval/latency/timeout, always answers with accepted or rejected,
 * and applies the new parameters to the controller only when they
 * passed validation. Returns 0, -EINVAL (wrong role) or -EPROTO
 * (malformed length).
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	/* Only the master may be asked to update parameters. */
	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Push the accepted parameters down to the controller. */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
2922
/* Dispatch a single BR/EDR signalling command to its handler.
 *
 * Echo requests are answered inline; unknown opcodes yield -EINVAL so
 * the caller sends a Command Reject. Returns the handler's result.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, data);
		break;

	case L2CAP_CONN_RSP:
		err = l2cap_connect_rsp(conn, cmd, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		err = l2cap_config_rsp(conn, cmd, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, data);
		break;

	case L2CAP_DISCONN_RSP:
		err = l2cap_disconnect_rsp(conn, cmd, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo back the request payload verbatim. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, data);
		break;

	case L2CAP_INFO_RSP:
		err = l2cap_information_rsp(conn, cmd, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
2980
2981 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
2982 struct l2cap_cmd_hdr *cmd, u8 *data)
2983 {
2984 switch (cmd->code) {
2985 case L2CAP_COMMAND_REJ:
2986 return 0;
2987
2988 case L2CAP_CONN_PARAM_UPDATE_REQ:
2989 return l2cap_conn_param_update_req(conn, cmd, data);
2990
2991 case L2CAP_CONN_PARAM_UPDATE_RSP:
2992 return 0;
2993
2994 default:
2995 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
2996 return -EINVAL;
2997 }
2998 }
2999
/* Demultiplex a packet received on the L2CAP signalling channel.
 *
 * The skb may carry several concatenated commands; each is copied into
 * a local header, bounds-checked, and dispatched to the BR/EDR or LE
 * handler depending on the link type. A failing handler triggers a
 * Command Reject. Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw sockets a look at the signalling traffic first. */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* An ident of 0 is illegal per the spec, and a claimed
		 * length beyond what remains means a truncated packet --
		 * stop parsing either way. */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);

		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
3046
3047 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3048 {
3049 u16 our_fcs, rcv_fcs;
3050 int hdr_size = L2CAP_HDR_SIZE + 2;
3051
3052 if (chan->fcs == L2CAP_FCS_CRC16) {
3053 skb_trim(skb, skb->len - 2);
3054 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3055 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3056
3057 if (our_fcs != rcv_fcs)
3058 return -EBADMSG;
3059 }
3060 return 0;
3061 }
3062
/* Answer a peer poll: send an RNR if we are locally busy, retransmit if
 * the peer was busy, then push pending I-frames; if nothing at all went
 * out (and we are not busy) send an RR so buffer_seq still gets
 * acknowledged. */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	u16 control = 0;

	chan->frames_sent = 0;

	control |= chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		l2cap_send_sframe(chan, control);
		set_bit(CONN_RNR_SENT, &chan->conn_state);
	}

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		l2cap_retransmit_frames(chan);

	l2cap_ertm_send(chan);

	/* Nothing transmitted and not busy: explicit RR keeps the peer
	 * informed of our receive state. */
	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
			chan->frames_sent == 0) {
		control |= L2CAP_SUPER_RCV_READY;
		l2cap_send_sframe(chan, control);
	}
}
3088
/* Insert an out-of-sequence I-frame into the SREJ reassembly queue,
 * keeping the queue sorted by each frame's distance from buffer_seq in
 * the modulo-64 sequence space. Returns -EINVAL when a frame with the
 * same tx_seq is already queued (duplicate), 0 otherwise. */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u8 tx_seq, u8 sar)
{
	struct sk_buff *next_skb;
	int tx_seq_offset, next_tx_seq_offset;

	bt_cb(skb)->tx_seq = tx_seq;
	bt_cb(skb)->sar = sar;

	next_skb = skb_peek(&chan->srej_q);
	if (!next_skb) {
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	/* Offsets are distances from buffer_seq so comparisons stay
	 * correct across sequence-number wraparound. */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	do {
		if (bt_cb(next_skb)->tx_seq == tx_seq)
			return -EINVAL;

		next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
						chan->buffer_seq) % 64;
		if (next_tx_seq_offset < 0)
			next_tx_seq_offset += 64;

		if (next_tx_seq_offset > tx_seq_offset) {
			__skb_queue_before(&chan->srej_q, next_skb, skb);
			return 0;
		}

		if (skb_queue_is_last(&chan->srej_q, next_skb))
			break;

	} while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));

	__skb_queue_tail(&chan->srej_q, skb);

	return 0;
}
3130
/* Reassemble an ERTM SDU from a stream of I-frames.
 *
 * The SAR bits of 'control' say whether the skb is a complete SDU or
 * the start/continuation/end of a segmented one. Segments are copied
 * into chan->sdu (allocated at SDU start using the advertised total
 * length) and the completed SDU is handed to the channel's recv
 * callback. Consumes skb on every path except when recv() takes
 * ownership. Returns 0, -ENOMEM, or recv()'s error.
 *
 * Note: the 'drop' label intentionally falls through into
 * 'disconnect' -- any reassembly rule violation tears the connection
 * down, per ERTM error handling. */
static int l2cap_ertm_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
{
	struct sk_buff *_skb;
	int err;

	switch (control & L2CAP_CTRL_SAR) {
	case L2CAP_SDU_UNSEGMENTED:
		/* A complete SDU in the middle of reassembly is a
		 * protocol violation. */
		if (test_bit(CONN_SAR_SDU, &chan->conn_state))
			goto drop;

		return chan->ops->recv(chan->data, skb);

	case L2CAP_SDU_START:
		if (test_bit(CONN_SAR_SDU, &chan->conn_state))
			goto drop;

		/* First two payload bytes carry the total SDU length. */
		chan->sdu_len = get_unaligned_le16(skb->data);

		if (chan->sdu_len > chan->imtu)
			goto disconnect;

		chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
		if (!chan->sdu)
			return -ENOMEM;

		/* pull sdu_len bytes only after alloc, because of Local Busy
		 * condition we have to be sure that this will be executed
		 * only once, i.e., when alloc does not fail */
		skb_pull(skb, 2);

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		set_bit(CONN_SAR_SDU, &chan->conn_state);
		chan->partial_sdu_len = skb->len;
		break;

	case L2CAP_SDU_CONTINUE:
		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		chan->partial_sdu_len += skb->len;
		if (chan->partial_sdu_len > chan->sdu_len)
			goto drop;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		break;

	case L2CAP_SDU_END:
		if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
			goto disconnect;

		if (!chan->sdu)
			goto disconnect;

		chan->partial_sdu_len += skb->len;

		if (chan->partial_sdu_len > chan->imtu)
			goto drop;

		/* The end segment must make the SDU exactly the
		 * advertised length. */
		if (chan->partial_sdu_len != chan->sdu_len)
			goto drop;

		memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);

		_skb = skb_clone(chan->sdu, GFP_ATOMIC);
		if (!_skb) {
			return -ENOMEM;
		}

		err = chan->ops->recv(chan->data, _skb);
		if (err < 0) {
			kfree_skb(_skb);
			return err;
		}

		clear_bit(CONN_SAR_SDU, &chan->conn_state);

		/* NOTE(review): chan->sdu is freed but not NULLed here;
		 * reuse appears to be guarded by CONN_SAR_SDU -- confirm
		 * no path touches the stale pointer. */
		kfree_skb(chan->sdu);
		break;
	}

	kfree_skb(skb);
	return 0;

drop:
	kfree_skb(chan->sdu);
	chan->sdu = NULL;

disconnect:
	l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
	kfree_skb(skb);
	return 0;
}
3228
/* Enter the ERTM local-busy state: tell the peer to stop sending
 * (RNR S-frame acknowledging buffer_seq) and cancel the pending
 * acknowledgement timer. */
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
{
	u16 control;

	BT_DBG("chan %p, Enter local busy", chan);

	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_NOT_READY;
	l2cap_send_sframe(chan, control);

	/* Remember we sent an RNR so exit can poll with an RR. */
	set_bit(CONN_RNR_SENT, &chan->conn_state);

	__clear_ack_timer(chan);
}
3245
/* Leave the ERTM local-busy state. If an RNR was actually sent, poll
 * the peer with an RR (P-bit set) and start the monitor timer to wait
 * for the F-bit answer; either way clear the busy bookkeeping bits. */
static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
{
	u16 control;

	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
		goto done;

	control = chan->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
	control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
	l2cap_send_sframe(chan, control);
	chan->retry_count = 1;

	/* Monitor timer supervises the poll; retransmission timing
	 * resumes once the F-bit response arrives. */
	__clear_retrans_timer(chan);
	__set_monitor_timer(chan);

	set_bit(CONN_WAIT_F, &chan->conn_state);

done:
	clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
	clear_bit(CONN_RNR_SENT, &chan->conn_state);

	BT_DBG("chan %p, Exit local busy", chan);
}
3269
3270 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3271 {
3272 if (chan->mode == L2CAP_MODE_ERTM) {
3273 if (busy)
3274 l2cap_ertm_enter_local_busy(chan);
3275 else
3276 l2cap_ertm_exit_local_busy(chan);
3277 }
3278 }
3279
3280 static int l2cap_streaming_reassembly_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3281 {
3282 struct sk_buff *_skb;
3283 int err = -EINVAL;
3284
3285 /*
3286 * TODO: We have to notify the userland if some data is lost with the
3287 * Streaming Mode.
3288 */
3289
3290 switch (control & L2CAP_CTRL_SAR) {
3291 case L2CAP_SDU_UNSEGMENTED:
3292 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3293 kfree_skb(chan->sdu);
3294 break;
3295 }
3296
3297 err = chan->ops->recv(chan->data, skb);
3298 if (!err)
3299 return 0;
3300
3301 break;
3302
3303 case L2CAP_SDU_START:
3304 if (test_bit(CONN_SAR_SDU, &chan->conn_state)) {
3305 kfree_skb(chan->sdu);
3306 break;
3307 }
3308
3309 chan->sdu_len = get_unaligned_le16(skb->data);
3310 skb_pull(skb, 2);
3311
3312 if (chan->sdu_len > chan->imtu) {
3313 err = -EMSGSIZE;
3314 break;
3315 }
3316
3317 chan->sdu = bt_skb_alloc(chan->sdu_len, GFP_ATOMIC);
3318 if (!chan->sdu) {
3319 err = -ENOMEM;
3320 break;
3321 }
3322
3323 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3324
3325 set_bit(CONN_SAR_SDU, &chan->conn_state);
3326 chan->partial_sdu_len = skb->len;
3327 err = 0;
3328 break;
3329
3330 case L2CAP_SDU_CONTINUE:
3331 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3332 break;
3333
3334 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3335
3336 chan->partial_sdu_len += skb->len;
3337 if (chan->partial_sdu_len > chan->sdu_len)
3338 kfree_skb(chan->sdu);
3339 else
3340 err = 0;
3341
3342 break;
3343
3344 case L2CAP_SDU_END:
3345 if (!test_bit(CONN_SAR_SDU, &chan->conn_state))
3346 break;
3347
3348 memcpy(skb_put(chan->sdu, skb->len), skb->data, skb->len);
3349
3350 clear_bit(CONN_SAR_SDU, &chan->conn_state);
3351 chan->partial_sdu_len += skb->len;
3352
3353 if (chan->partial_sdu_len > chan->imtu)
3354 goto drop;
3355
3356 if (chan->partial_sdu_len == chan->sdu_len) {
3357 _skb = skb_clone(chan->sdu, GFP_ATOMIC);
3358 err = chan->ops->recv(chan->data, _skb);
3359 if (err < 0)
3360 kfree_skb(_skb);
3361 }
3362 err = 0;
3363
3364 drop:
3365 kfree_skb(chan->sdu);
3366 break;
3367 }
3368
3369 kfree_skb(skb);
3370 return err;
3371 }
3372
/* Deliver frames from the SREJ queue that have become in-sequence,
 * starting at tx_seq, stopping at the first remaining gap or if we go
 * locally busy. A reassembly failure tears the connection down. */
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u8 tx_seq)
{
	struct sk_buff *skb;
	u16 control;

	while ((skb = skb_peek(&chan->srej_q)) &&
			!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		int err;

		/* Queue is sorted: a mismatch at the head means the next
		 * expected frame is still missing. */
		if (bt_cb(skb)->tx_seq != tx_seq)
			break;

		skb = skb_dequeue(&chan->srej_q);
		control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
		err = l2cap_ertm_reassembly_sdu(chan, skb, control);

		if (err < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			break;
		}

		chan->buffer_seq_srej =
			(chan->buffer_seq_srej + 1) % 64;
		tx_seq = (tx_seq + 1) % 64;
	}
}
3399
/* The peer retransmitted the frame we SREJ'ed as tx_seq: remove its
 * entry from srej_l. Every entry encountered before it is re-SREJ'ed
 * and rotated to the tail, keeping the list ordered by the requests
 * still outstanding. */
static void l2cap_resend_srejframe(struct l2cap_chan *chan, u8 tx_seq)
{
	struct srej_list *l, *tmp;
	u16 control;

	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
		if (l->tx_seq == tx_seq) {
			list_del(&l->list);
			kfree(l);
			return;
		}
		control = L2CAP_SUPER_SELECT_REJECT;
		control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
		l2cap_send_sframe(chan, control);
		/* Rotate the still-missing entry to the tail. */
		list_del(&l->list);
		list_add_tail(&l->list, &chan->srej_l);
	}
}
3418
3419 static void l2cap_send_srejframe(struct l2cap_chan *chan, u8 tx_seq)
3420 {
3421 struct srej_list *new;
3422 u16 control;
3423
3424 while (tx_seq != chan->expected_tx_seq) {
3425 control = L2CAP_SUPER_SELECT_REJECT;
3426 control |= chan->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3427 l2cap_send_sframe(chan, control);
3428
3429 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3430 new->tx_seq = chan->expected_tx_seq;
3431 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3432 list_add_tail(&new->list, &chan->srej_l);
3433 }
3434 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3435 }
3436
/* Process a received ERTM I-frame.
 *
 * Acknowledges frames the peer reports received (req_seq), validates
 * tx_seq against the transmit window, and then either delivers the
 * frame (in sequence), feeds it into selective-reject recovery (out of
 * sequence), or drops it (duplicate, invalid, or locally busy).
 * Returns 0 or a negative errno from reassembly.
 */
static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
{
	u8 tx_seq = __get_txseq(rx_control);
	u8 req_seq = __get_reqseq(rx_control);
	u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
	int tx_seq_offset, expected_tx_seq_offset;
	int num_to_ack = (chan->tx_win/6) + 1;
	int err = 0;

	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
							tx_seq, rx_control);

	/* F-bit answers our poll: stop the monitor timer and resume
	 * retransmission timing if frames remain unacknowledged. */
	if (L2CAP_CTRL_FINAL & rx_control &&
			test_bit(CONN_WAIT_F, &chan->conn_state)) {
		__clear_monitor_timer(chan);
		if (chan->unacked_frames > 0)
			__set_retrans_timer(chan);
		clear_bit(CONN_WAIT_F, &chan->conn_state);
	}

	chan->expected_ack_seq = req_seq;
	l2cap_drop_acked_frames(chan);

	/* Distance from buffer_seq in the modulo-64 sequence space. */
	tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
	if (tx_seq_offset < 0)
		tx_seq_offset += 64;

	/* invalid tx_seq */
	if (tx_seq_offset >= chan->tx_win) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		goto drop;

	if (tx_seq == chan->expected_tx_seq)
		goto expected;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		struct srej_list *first;

		/* Already in SREJ recovery: if this frame fills the
		 * oldest gap, deliver whatever became contiguous;
		 * otherwise queue it and handle any new gaps. */
		first = list_first_entry(&chan->srej_l,
				struct srej_list, list);
		if (tx_seq == first->tx_seq) {
			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
			l2cap_check_srej_gap(chan, tx_seq);

			list_del(&first->list);
			kfree(first);

			if (list_empty(&chan->srej_l)) {
				/* All gaps filled: leave recovery. */
				chan->buffer_seq = chan->buffer_seq_srej;
				clear_bit(CONN_SREJ_SENT, &chan->conn_state);
				l2cap_send_ack(chan);
				BT_DBG("chan %p, Exit SREJ_SENT", chan);
			}
		} else {
			struct srej_list *l;

			/* duplicated tx_seq */
			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
				goto drop;

			list_for_each_entry(l, &chan->srej_l, list) {
				if (l->tx_seq == tx_seq) {
					l2cap_resend_srejframe(chan, tx_seq);
					return 0;
				}
			}
			l2cap_send_srejframe(chan, tx_seq);
		}
	} else {
		expected_tx_seq_offset =
			(chan->expected_tx_seq - chan->buffer_seq) % 64;
		if (expected_tx_seq_offset < 0)
			expected_tx_seq_offset += 64;

		/* duplicated tx_seq */
		if (tx_seq_offset < expected_tx_seq_offset)
			goto drop;

		/* First out-of-sequence frame: enter SREJ recovery. */
		set_bit(CONN_SREJ_SENT, &chan->conn_state);

		BT_DBG("chan %p, Enter SREJ", chan);

		INIT_LIST_HEAD(&chan->srej_l);
		chan->buffer_seq_srej = chan->buffer_seq;

		__skb_queue_head_init(&chan->srej_q);
		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);

		set_bit(CONN_SEND_PBIT, &chan->conn_state);

		l2cap_send_srejframe(chan, tx_seq);

		__clear_ack_timer(chan);
	}
	return 0;

expected:
	chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;

	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
		/* In recovery: hold in-sequence frames until the gaps
		 * in front of them are filled. */
		bt_cb(skb)->tx_seq = tx_seq;
		bt_cb(skb)->sar = sar;
		__skb_queue_tail(&chan->srej_q, skb);
		return 0;
	}

	err = l2cap_ertm_reassembly_sdu(chan, skb, rx_control);
	chan->buffer_seq = (chan->buffer_seq + 1) % 64;
	if (err < 0) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		return err;
	}

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	}

	__set_ack_timer(chan);

	/* Batch acknowledgements: ack every num_to_ack frames instead
	 * of one S-frame per I-frame. */
	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
	if (chan->num_acked == num_to_ack - 1)
		l2cap_send_ack(chan);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3571
/* Handle a Receiver Ready (RR) S-frame: acknowledge the frames it
 * covers, then react to the P/F bits -- a poll demands an immediate
 * status answer, a final bit may trigger retransmission, and a plain
 * RR clears remote-busy and resumes sending. */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
{
	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, __get_reqseq(rx_control),
						rx_control);

	chan->expected_ack_seq = __get_reqseq(rx_control);
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_POLL) {
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
					(chan->unacked_frames > 0))
				__set_retrans_timer(chan);

			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
			l2cap_send_srejtail(chan);
		} else {
			l2cap_send_i_or_rr_or_rnr(chan);
		}

	} else if (rx_control & L2CAP_CTRL_FINAL) {
		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

		/* Only retransmit if a REJ wasn't already acted upon. */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);

	} else {
		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
				(chan->unacked_frames > 0))
			__set_retrans_timer(chan);

		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
			l2cap_send_ack(chan);
		else
			l2cap_ertm_send(chan);
	}
}
3611
/* Handle a Reject (REJ) S-frame: the peer asks for retransmission from
 * req_seq onward. Acknowledge up to that point and retransmit; with
 * the F-bit set, only retransmit if a REJ was not already acted on. */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->expected_ack_seq = tx_seq;
	l2cap_drop_acked_frames(chan);

	if (rx_control & L2CAP_CTRL_FINAL) {
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_frames(chan);
	} else {
		l2cap_retransmit_frames(chan);

		/* If we are waiting on an F-bit answer, remember the REJ
		 * was already handled so the answer doesn't retransmit
		 * a second time. */
		if (test_bit(CONN_WAIT_F, &chan->conn_state))
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the
 * requested frame. The P-bit additionally acknowledges up to req_seq
 * and resumes sending; the F-bit suppresses a duplicate retransmission
 * of a SREJ already acted upon (srej_save_reqseq bookkeeping). */
static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
{
	u8 tx_seq = __get_reqseq(rx_control);

	BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (rx_control & L2CAP_CTRL_POLL) {
		chan->expected_ack_seq = tx_seq;
		l2cap_drop_acked_frames(chan);

		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit_one_frame(chan, tx_seq);

		l2cap_ertm_send(chan);

		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	} else if (rx_control & L2CAP_CTRL_FINAL) {
		/* Skip the retransmission if this SREJ was already
		 * handled while waiting for the F-bit. */
		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
				chan->srej_save_reqseq == tx_seq)
			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
		else
			l2cap_retransmit_one_frame(chan, tx_seq);
	} else {
		l2cap_retransmit_one_frame(chan, tx_seq);
		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
			chan->srej_save_reqseq = tx_seq;
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
		}
	}
}
3668
3669 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3670 {
3671 u8 tx_seq = __get_reqseq(rx_control);
3672
3673 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3674
3675 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3676 chan->expected_ack_seq = tx_seq;
3677 l2cap_drop_acked_frames(chan);
3678
3679 if (rx_control & L2CAP_CTRL_POLL)
3680 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3681
3682 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3683 __clear_retrans_timer(chan);
3684 if (rx_control & L2CAP_CTRL_POLL)
3685 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3686 return;
3687 }
3688
3689 if (rx_control & L2CAP_CTRL_POLL)
3690 l2cap_send_srejtail(chan);
3691 else
3692 l2cap_send_sframe(chan, L2CAP_SUPER_RCV_READY);
3693 }
3694
3695 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3696 {
3697 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3698
3699 if (L2CAP_CTRL_FINAL & rx_control &&
3700 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3701 __clear_monitor_timer(chan);
3702 if (chan->unacked_frames > 0)
3703 __set_retrans_timer(chan);
3704 clear_bit(CONN_WAIT_F, &chan->conn_state);
3705 }
3706
3707 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3708 case L2CAP_SUPER_RCV_READY:
3709 l2cap_data_channel_rrframe(chan, rx_control);
3710 break;
3711
3712 case L2CAP_SUPER_REJECT:
3713 l2cap_data_channel_rejframe(chan, rx_control);
3714 break;
3715
3716 case L2CAP_SUPER_SELECT_REJECT:
3717 l2cap_data_channel_srejframe(chan, rx_control);
3718 break;
3719
3720 case L2CAP_SUPER_RCV_NOT_READY:
3721 l2cap_data_channel_rnrframe(chan, rx_control);
3722 break;
3723 }
3724
3725 kfree_skb(skb);
3726 return 0;
3727 }
3728
/* Parse one ERTM PDU: strip the 16-bit control field, validate the FCS
 * and sequence numbers, then dispatch to the I-frame or S-frame handler.
 * Consumes @skb on every path; always returns 0.
 */
static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	u16 control;
	u8 req_seq;
	int len, next_tx_seq_offset, req_seq_offset;

	control = get_unaligned_le16(skb->data);
	skb_pull(skb, 2);
	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* A start-of-SDU I-frame carries an extra 2-byte SDU length. */
	if (__is_sar_start(control) && __is_iframe(control))
		len -= 2;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= 2;

	/* Payload larger than the negotiated MPS: protocol violation. */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	/* Offsets are taken modulo 64, the ERTM sequence-number space. */
	req_seq = __get_reqseq(control);
	req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
	if (req_seq_offset < 0)
		req_seq_offset += 64;

	next_tx_seq_offset =
		(chan->next_tx_seq - chan->expected_ack_seq) % 64;
	if (next_tx_seq_offset < 0)
		next_tx_seq_offset += 64;

	/* check for invalid req-seq */
	if (req_seq_offset > next_tx_seq_offset) {
		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
		goto drop;
	}

	if (__is_iframe(control)) {
		/* Negative len means the frame was shorter than its
		 * mandatory headers.
		 */
		if (len < 0) {
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_iframe(chan, control, skb);
	} else {
		/* S-frames must carry no payload. */
		if (len != 0) {
			BT_ERR("%d", len);
			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
			goto drop;
		}

		l2cap_data_channel_sframe(chan, control, skb);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
3798
/* Route an skb arriving on a connection-oriented channel (@cid) to the
 * owning chan according to its mode.  Consumes @skb; always returns 0.
 *
 * NOTE(review): l2cap_get_chan_by_scid() appears to return with the
 * channel's socket bh-locked -- the unlock happens at done: below.
 * Confirm against its definition before restructuring the control flow.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
{
	struct l2cap_chan *chan;
	struct sock *sk = NULL;
	u16 control;
	u8 tx_seq;
	int len;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		BT_DBG("unknown cid 0x%4.4x", cid);
		goto drop;
	}

	sk = chan->sk;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len)
			goto drop;

		/* ops->recv() returning 0 means the skb was consumed. */
		if (!chan->ops->recv(chan->data, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
		/* Process directly when the socket is not owned by a
		 * user context, otherwise queue on the backlog.
		 */
		if (!sock_owned_by_user(sk)) {
			l2cap_ertm_data_rcv(sk, skb);
		} else {
			if (sk_add_backlog(sk, skb))
				goto drop;
		}

		goto done;

	case L2CAP_MODE_STREAMING:
		/* Streaming mode: same framing as ERTM but without
		 * retransmission; out-of-sequence frames just resync
		 * the expected sequence number.
		 */
		control = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		len = skb->len;

		if (l2cap_check_fcs(chan, skb))
			goto drop;

		if (__is_sar_start(control))
			len -= 2;

		if (chan->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		if (len > chan->mps || len < 0 || __is_sframe(control))
			goto drop;

		tx_seq = __get_txseq(control);

		if (chan->expected_tx_seq == tx_seq)
			chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
		else
			chan->expected_tx_seq = (tx_seq + 1) % 64;

		l2cap_streaming_reassembly_sdu(chan, skb, control);

		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);

	return 0;
}
3886
3887 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3888 {
3889 struct sock *sk = NULL;
3890 struct l2cap_chan *chan;
3891
3892 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3893 if (!chan)
3894 goto drop;
3895
3896 sk = chan->sk;
3897
3898 bh_lock_sock(sk);
3899
3900 BT_DBG("sk %p, len %d", sk, skb->len);
3901
3902 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3903 goto drop;
3904
3905 if (chan->imtu < skb->len)
3906 goto drop;
3907
3908 if (!chan->ops->recv(chan->data, skb))
3909 goto done;
3910
3911 drop:
3912 kfree_skb(skb);
3913
3914 done:
3915 if (sk)
3916 bh_unlock_sock(sk);
3917 return 0;
3918 }
3919
/* Deliver an skb arriving on the LE ATT fixed channel to a matching
 * bound/connected channel.  Consumes @skb; always returns 0.
 *
 * NOTE(review): the parameter is declared __le16 but the caller in
 * l2cap_recv_frame() passes a host-order cid (already converted with
 * __le16_to_cpu).  On big-endian hosts this looks inconsistent --
 * verify against l2cap_global_chan_by_scid()'s expected byte order.
 */
static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
{
	struct sock *sk = NULL;
	struct l2cap_chan *chan;

	chan = l2cap_global_chan_by_scid(0, cid, conn->src);
	if (!chan)
		goto drop;

	sk = chan->sk;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	/* Only bound/connected channels within MTU may receive. */
	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* ops->recv() returning 0 means the skb was consumed. */
	if (!chan->ops->recv(chan->data, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
3952
/* Demultiplex one complete L2CAP frame to the proper fixed channel or
 * to the connection-oriented data path.  Consumes @skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* The header length must match the actual payload length. */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* NOTE(review): psm is declared __le16 but is assigned the
		 * host-order result of get_unaligned_le16() -- suspicious
		 * on big-endian; confirm what l2cap_conless_channel() and
		 * l2cap_global_chan_by_psm() actually expect.
		 */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		/* Security Manager frames; a failure tears the link down. */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
3996
3997 /* ---- L2CAP interface with lower layer (HCI) ---- */
3998
3999 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4000 {
4001 int exact = 0, lm1 = 0, lm2 = 0;
4002 struct l2cap_chan *c;
4003
4004 if (type != ACL_LINK)
4005 return -EINVAL;
4006
4007 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4008
4009 /* Find listening sockets and check their link_mode */
4010 read_lock(&chan_list_lock);
4011 list_for_each_entry(c, &chan_list, global_l) {
4012 struct sock *sk = c->sk;
4013
4014 if (c->state != BT_LISTEN)
4015 continue;
4016
4017 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4018 lm1 |= HCI_LM_ACCEPT;
4019 if (c->role_switch)
4020 lm1 |= HCI_LM_MASTER;
4021 exact++;
4022 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4023 lm2 |= HCI_LM_ACCEPT;
4024 if (c->role_switch)
4025 lm2 |= HCI_LM_MASTER;
4026 }
4027 }
4028 read_unlock(&chan_list_lock);
4029
4030 return exact ? lm1 : lm2;
4031 }
4032
4033 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4034 {
4035 struct l2cap_conn *conn;
4036
4037 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4038
4039 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4040 return -EINVAL;
4041
4042 if (!status) {
4043 conn = l2cap_conn_add(hcon, status);
4044 if (conn)
4045 l2cap_conn_ready(conn);
4046 } else
4047 l2cap_conn_del(hcon, bt_to_errno(status));
4048
4049 return 0;
4050 }
4051
4052 static int l2cap_disconn_ind(struct hci_conn *hcon)
4053 {
4054 struct l2cap_conn *conn = hcon->l2cap_data;
4055
4056 BT_DBG("hcon %p", hcon);
4057
4058 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4059 return 0x13;
4060
4061 return conn->disc_reason;
4062 }
4063
4064 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4065 {
4066 BT_DBG("hcon %p reason %d", hcon, reason);
4067
4068 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4069 return -EINVAL;
4070
4071 l2cap_conn_del(hcon, bt_to_errno(reason));
4072
4073 return 0;
4074 }
4075
4076 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4077 {
4078 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4079 return;
4080
4081 if (encrypt == 0x00) {
4082 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4083 __clear_chan_timer(chan);
4084 __set_chan_timer(chan, HZ * 5);
4085 } else if (chan->sec_level == BT_SECURITY_HIGH)
4086 l2cap_chan_close(chan, ECONNREFUSED);
4087 } else {
4088 if (chan->sec_level == BT_SECURITY_MEDIUM)
4089 __clear_chan_timer(chan);
4090 }
4091 }
4092
/* HCI security (authentication/encryption) change callback.  Walks all
 * channels on the connection and advances or tears down their state
 * machines according to @status and @encrypt.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan;

	if (!conn)
		return 0;

	BT_DBG("conn %p", conn);

	if (hcon->type == LE_LINK) {
		/* For LE links hand off to SMP key distribution and stop
		 * the security timer.
		 */
		smp_distribute_keys(conn, 0);
		del_timer(&conn->security_timer);
	}

	read_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		struct sock *sk = chan->sk;

		bh_lock_sock(sk);

		BT_DBG("chan->scid %d", chan->scid);

		if (chan->scid == L2CAP_CID_LE_DATA) {
			/* ATT channel becomes ready once the LE link is
			 * encrypted successfully.
			 */
			if (!status && encrypt) {
				chan->sec_level = hcon->sec_level;
				l2cap_chan_ready(sk);
			}

			bh_unlock_sock(sk);
			continue;
		}

		/* A connect request is already pending for this channel. */
		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only need the encryption check. */
		if (!status && (chan->state == BT_CONNECTED ||
						chan->state == BT_CONFIG)) {
			l2cap_check_encryption(chan, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Security finished while we were waiting to
			 * connect: send the deferred Connect Request, or
			 * time the channel out quickly on failure.
			 */
			if (!status) {
				struct l2cap_conn_req req;
				req.scid = cpu_to_le16(chan->scid);
				req.psm = chan->psm;

				chan->ident = l2cap_get_ident(conn);
				set_bit(CONF_CONNECT_PEND, &chan->conf_state);

				l2cap_send_cmd(conn, chan->ident,
					L2CAP_CONN_REQ, sizeof(req), &req);
			} else {
				__clear_chan_timer(chan);
				__set_chan_timer(chan, HZ / 10);
			}
		} else if (chan->state == BT_CONNECT2) {
			/* We owe the peer a Connect Response: pending for
			 * deferred setup, success otherwise, or a security
			 * block on failure.
			 */
			struct l2cap_conn_rsp rsp;
			__u16 res, stat;

			if (!status) {
				if (bt_sk(sk)->defer_setup) {
					struct sock *parent = bt_sk(sk)->parent;
					res = L2CAP_CR_PEND;
					stat = L2CAP_CS_AUTHOR_PEND;
					if (parent)
						parent->sk_data_ready(parent, 0);
				} else {
					l2cap_state_change(chan, BT_CONFIG);
					res = L2CAP_CR_SUCCESS;
					stat = L2CAP_CS_NO_INFO;
				}
			} else {
				l2cap_state_change(chan, BT_DISCONN);
				__set_chan_timer(chan, HZ / 10);
				res = L2CAP_CR_SEC_BLOCK;
				stat = L2CAP_CS_NO_INFO;
			}

			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);
			rsp.result = cpu_to_le16(res);
			rsp.status = cpu_to_le16(stat);
			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
							sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&conn->chan_lock);

	return 0;
}
4192
/* HCI ACL data callback: reassemble possibly-fragmented ACL packets
 * into complete L2CAP frames and feed them to l2cap_recv_frame().
 * Consumes @skb; always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;
		u16 cid;
		int len;

		/* A new start fragment while reassembly is in progress
		 * means the previous frame was truncated: discard it.
		 */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Early MTU check against the destination channel.
		 * NOTE(review): l2cap_get_chan_by_scid() appears to return
		 * with the socket bh-locked (both branches unlock it) --
		 * confirm against its definition.
		 */
		chan = l2cap_get_chan_by_scid(conn, cid);

		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			if (chan->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
							"MTU %d)", len,
							chan->imtu);
				bh_unlock_sock(sk);
				l2cap_conn_unreliable(conn, ECOMM);
				goto drop;
			}
			bh_unlock_sock(sk);
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation fragment without a pending start frame. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
4303
/* seq_file show callback for the "l2cap" debugfs entry: print one line
 * per channel with addresses, state, PSM, CIDs, MTUs, security level
 * and mode.
 */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
{
	struct l2cap_chan *c;

	read_lock_bh(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					c->state, __le16_to_cpu(c->psm),
					c->scid, c->dcid, c->imtu, c->omtu,
					c->sec_level, c->mode);
	}

	read_unlock_bh(&chan_list_lock);

	return 0;
}
4325
/* open callback: wire the seq_file machinery to l2cap_debugfs_show(). */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
4330
/* seq_file plumbing for the "l2cap" debugfs channel listing. */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Dentry of the debugfs file, kept so l2cap_exit() can remove it. */
static struct dentry *l2cap_debugfs;

/* Callbacks registered with the HCI core for the L2CAP protocol. */
static struct hci_proto l2cap_hci_proto = {
	.name = "L2CAP",
	.id = HCI_PROTO_L2CAP,
	.connect_ind = l2cap_connect_ind,
	.connect_cfm = l2cap_connect_cfm,
	.disconn_ind = l2cap_disconn_ind,
	.disconn_cfm = l2cap_disconn_cfm,
	.security_cfm = l2cap_security_cfm,
	.recv_acldata = l2cap_recv_acldata
};
4350
/* Register the L2CAP socket family, HCI protocol hooks and debugfs
 * entry.  Returns 0 on success or a negative errno.
 */
int __init l2cap_init(void)
{
	int err;

	err = l2cap_init_sockets();
	if (err < 0)
		return err;

	err = hci_register_proto(&l2cap_hci_proto);
	if (err < 0) {
		BT_ERR("L2CAP protocol registration failed");
		/* NOTE(review): if l2cap_cleanup_sockets() (reached via
		 * the error label) also calls bt_sock_unregister(), this
		 * explicit call makes the unregistration happen twice --
		 * verify against its definition.
		 */
		bt_sock_unregister(BTPROTO_L2CAP);
		goto error;
	}

	if (bt_debugfs) {
		/* A debugfs failure is non-fatal: log and carry on. */
		l2cap_debugfs = debugfs_create_file("l2cap", 0444,
					bt_debugfs, NULL, &l2cap_debugfs_fops);
		if (!l2cap_debugfs)
			BT_ERR("Failed to create L2CAP debug file");
	}

	return 0;

error:
	l2cap_cleanup_sockets();
	return err;
}
4379
4380 void l2cap_exit(void)
4381 {
4382 debugfs_remove(l2cap_debugfs);
4383
4384 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4385 BT_ERR("L2CAP protocol unregistration failed");
4386
4387 l2cap_cleanup_sockets();
4388 }
4389
/* "disable_ertm" module parameter: lets ERTM support be switched off.
 * Mode 0644: world-readable, root-writable via sysfs.
 */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");