Bluetooth: EFS: assign default values in chan add
net/bluetooth/l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
25 */
26
27 /* Bluetooth L2CAP core. */
28
29 #include <linux/module.h>
30
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
49 #include <net/sock.h>
50
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
58
59 int disable_ertm;
60 int enable_hs;
61
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
64
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
67
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 void *data);
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
75
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77
78 /* ---- L2CAP channels ---- */
79
80 static inline void chan_hold(struct l2cap_chan *c)
81 {
82 atomic_inc(&c->refcnt);
83 }
84
85 static inline void chan_put(struct l2cap_chan *c)
86 {
87 if (atomic_dec_and_test(&c->refcnt))
88 kfree(c);
89 }
90
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
92 {
93 struct l2cap_chan *c;
94
95 list_for_each_entry(c, &conn->chan_l, list) {
96 if (c->dcid == cid)
97 return c;
98 }
99 return NULL;
100
101 }
102
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 {
105 struct l2cap_chan *c;
106
107 list_for_each_entry(c, &conn->chan_l, list) {
108 if (c->scid == cid)
109 return c;
110 }
111 return NULL;
112 }
113
114 /* Find channel with given SCID.
115 * Returns the channel with its socket locked. */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
117 {
118 struct l2cap_chan *c;
119
120 read_lock(&conn->chan_lock);
121 c = __l2cap_get_chan_by_scid(conn, cid);
122 if (c)
123 bh_lock_sock(c->sk);
124 read_unlock(&conn->chan_lock);
125 return c;
126 }
127
128 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
129 {
130 struct l2cap_chan *c;
131
132 list_for_each_entry(c, &conn->chan_l, list) {
133 if (c->ident == ident)
134 return c;
135 }
136 return NULL;
137 }
138
139 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
140 {
141 struct l2cap_chan *c;
142
143 read_lock(&conn->chan_lock);
144 c = __l2cap_get_chan_by_ident(conn, ident);
145 if (c)
146 bh_lock_sock(c->sk);
147 read_unlock(&conn->chan_lock);
148 return c;
149 }
150
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
152 {
153 struct l2cap_chan *c;
154
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
157 goto found;
158 }
159
160 c = NULL;
161 found:
162 return c;
163 }
164
165 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
166 {
167 int err;
168
169 write_lock_bh(&chan_list_lock);
170
171 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
172 err = -EADDRINUSE;
173 goto done;
174 }
175
176 if (psm) {
177 chan->psm = psm;
178 chan->sport = psm;
179 err = 0;
180 } else {
181 u16 p;
182
183 err = -EINVAL;
184 for (p = 0x1001; p < 0x1100; p += 2)
185 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
186 chan->psm = cpu_to_le16(p);
187 chan->sport = cpu_to_le16(p);
188 err = 0;
189 break;
190 }
191 }
192
193 done:
194 write_unlock_bh(&chan_list_lock);
195 return err;
196 }
197
198 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
199 {
200 write_lock_bh(&chan_list_lock);
201
202 chan->scid = scid;
203
204 write_unlock_bh(&chan_list_lock);
205
206 return 0;
207 }
208
209 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
210 {
211 u16 cid = L2CAP_CID_DYN_START;
212
213 for (; cid < L2CAP_CID_DYN_END; cid++) {
214 if (!__l2cap_get_chan_by_scid(conn, cid))
215 return cid;
216 }
217
218 return 0;
219 }
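
/* l2cap_alloc_cid() hands out the first dynamic source CID that is not
 * yet in use on this connection by scanning [L2CAP_CID_DYN_START,
 * L2CAP_CID_DYN_END).  Dynamic CIDs start at 0x0040 in the L2CAP spec;
 * a return value of 0 means the dynamic range is exhausted and the
 * caller has to fail the channel setup.
 */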
220
221 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
222 {
223 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
224
225 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
226 chan_hold(chan);
227 }
228
229 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
230 {
231 BT_DBG("chan %p state %d", chan, chan->state);
232
233 if (timer_pending(timer) && del_timer(timer))
234 chan_put(chan);
235 }
236
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
238 {
239 chan->state = state;
240 chan->ops->state_change(chan->data, state);
241 }
242
243 static void l2cap_chan_timeout(unsigned long arg)
244 {
245 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
246 struct sock *sk = chan->sk;
247 int reason;
248
249 BT_DBG("chan %p state %d", chan, chan->state);
250
251 bh_lock_sock(sk);
252
253 if (sock_owned_by_user(sk)) {
254 /* sk is owned by user. Try again later */
255 __set_chan_timer(chan, HZ / 5);
256 bh_unlock_sock(sk);
257 chan_put(chan);
258 return;
259 }
260
261 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
262 reason = ECONNREFUSED;
263 else if (chan->state == BT_CONNECT &&
264 chan->sec_level != BT_SECURITY_SDP)
265 reason = ECONNREFUSED;
266 else
267 reason = ETIMEDOUT;
268
269 l2cap_chan_close(chan, reason);
270
271 bh_unlock_sock(sk);
272
273 chan->ops->close(chan->data);
274 chan_put(chan);
275 }
276
277 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
278 {
279 struct l2cap_chan *chan;
280
281 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
282 if (!chan)
283 return NULL;
284
285 chan->sk = sk;
286
287 write_lock_bh(&chan_list_lock);
288 list_add(&chan->global_l, &chan_list);
289 write_unlock_bh(&chan_list_lock);
290
291 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
292
293 chan->state = BT_OPEN;
294
295 atomic_set(&chan->refcnt, 1);
296
297 return chan;
298 }
299
300 void l2cap_chan_destroy(struct l2cap_chan *chan)
301 {
302 write_lock_bh(&chan_list_lock);
303 list_del(&chan->global_l);
304 write_unlock_bh(&chan_list_lock);
305
306 chan_put(chan);
307 }
308
309 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
310 {
311 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
312 chan->psm, chan->dcid);
313
314 conn->disc_reason = 0x13;
315
316 chan->conn = conn;
317
318 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
319 if (conn->hcon->type == LE_LINK) {
320 /* LE connection */
321 chan->omtu = L2CAP_LE_DEFAULT_MTU;
322 chan->scid = L2CAP_CID_LE_DATA;
323 chan->dcid = L2CAP_CID_LE_DATA;
324 } else {
325 /* Alloc CID for connection-oriented socket */
326 chan->scid = l2cap_alloc_cid(conn);
327 chan->omtu = L2CAP_DEFAULT_MTU;
328 }
329 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
330 /* Connectionless socket */
331 chan->scid = L2CAP_CID_CONN_LESS;
332 chan->dcid = L2CAP_CID_CONN_LESS;
333 chan->omtu = L2CAP_DEFAULT_MTU;
334 } else {
335 /* Raw socket can send/recv signalling messages only */
336 chan->scid = L2CAP_CID_SIGNALING;
337 chan->dcid = L2CAP_CID_SIGNALING;
338 chan->omtu = L2CAP_DEFAULT_MTU;
339 }
340
341 chan->local_id = L2CAP_BESTEFFORT_ID;
342 chan->local_stype = L2CAP_SERV_BESTEFFORT;
343 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
344 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
345 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
346 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
347
348 chan_hold(chan);
349
350 list_add(&chan->list, &conn->chan_l);
351 }
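
/* The local_* assignments above initialise every new channel with
 * default Extended Flow Specification (EFS) values: the best-effort
 * identifier and service type plus the default MSDU size, SDU
 * inter-arrival time, access latency and flush timeout.  A channel
 * that never negotiates EFS therefore still carries sane values.
 */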
352
353 /* Delete channel.
354 * Must be called on the locked socket. */
355 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
356 {
357 struct sock *sk = chan->sk;
358 struct l2cap_conn *conn = chan->conn;
359 struct sock *parent = bt_sk(sk)->parent;
360
361 __clear_chan_timer(chan);
362
363 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
364
365 if (conn) {
366 /* Delete from channel list */
367 write_lock_bh(&conn->chan_lock);
368 list_del(&chan->list);
369 write_unlock_bh(&conn->chan_lock);
370 chan_put(chan);
371
372 chan->conn = NULL;
373 hci_conn_put(conn->hcon);
374 }
375
376 l2cap_state_change(chan, BT_CLOSED);
377 sock_set_flag(sk, SOCK_ZAPPED);
378
379 if (err)
380 sk->sk_err = err;
381
382 if (parent) {
383 bt_accept_unlink(sk);
384 parent->sk_data_ready(parent, 0);
385 } else
386 sk->sk_state_change(sk);
387
388 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
389 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
390 return;
391
392 skb_queue_purge(&chan->tx_q);
393
394 if (chan->mode == L2CAP_MODE_ERTM) {
395 struct srej_list *l, *tmp;
396
397 __clear_retrans_timer(chan);
398 __clear_monitor_timer(chan);
399 __clear_ack_timer(chan);
400
401 skb_queue_purge(&chan->srej_q);
402
403 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
404 list_del(&l->list);
405 kfree(l);
406 }
407 }
408 }
409
410 static void l2cap_chan_cleanup_listen(struct sock *parent)
411 {
412 struct sock *sk;
413
414 BT_DBG("parent %p", parent);
415
416 /* Close not yet accepted channels */
417 while ((sk = bt_accept_dequeue(parent, NULL))) {
418 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
419 __clear_chan_timer(chan);
420 lock_sock(sk);
421 l2cap_chan_close(chan, ECONNRESET);
422 release_sock(sk);
423 chan->ops->close(chan->data);
424 }
425 }
426
427 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
428 {
429 struct l2cap_conn *conn = chan->conn;
430 struct sock *sk = chan->sk;
431
432 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
433
434 switch (chan->state) {
435 case BT_LISTEN:
436 l2cap_chan_cleanup_listen(sk);
437
438 l2cap_state_change(chan, BT_CLOSED);
439 sock_set_flag(sk, SOCK_ZAPPED);
440 break;
441
442 case BT_CONNECTED:
443 case BT_CONFIG:
444 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
445 conn->hcon->type == ACL_LINK) {
446 __clear_chan_timer(chan);
447 __set_chan_timer(chan, sk->sk_sndtimeo);
448 l2cap_send_disconn_req(conn, chan, reason);
449 } else
450 l2cap_chan_del(chan, reason);
451 break;
452
453 case BT_CONNECT2:
454 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
455 conn->hcon->type == ACL_LINK) {
456 struct l2cap_conn_rsp rsp;
457 __u16 result;
458
459 if (bt_sk(sk)->defer_setup)
460 result = L2CAP_CR_SEC_BLOCK;
461 else
462 result = L2CAP_CR_BAD_PSM;
463 l2cap_state_change(chan, BT_DISCONN);
464
465 rsp.scid = cpu_to_le16(chan->dcid);
466 rsp.dcid = cpu_to_le16(chan->scid);
467 rsp.result = cpu_to_le16(result);
468 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
469 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
470 sizeof(rsp), &rsp);
471 }
472
473 l2cap_chan_del(chan, reason);
474 break;
475
476 case BT_CONNECT:
477 case BT_DISCONN:
478 l2cap_chan_del(chan, reason);
479 break;
480
481 default:
482 sock_set_flag(sk, SOCK_ZAPPED);
483 break;
484 }
485 }
486
487 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
488 {
489 if (chan->chan_type == L2CAP_CHAN_RAW) {
490 switch (chan->sec_level) {
491 case BT_SECURITY_HIGH:
492 return HCI_AT_DEDICATED_BONDING_MITM;
493 case BT_SECURITY_MEDIUM:
494 return HCI_AT_DEDICATED_BONDING;
495 default:
496 return HCI_AT_NO_BONDING;
497 }
498 } else if (chan->psm == cpu_to_le16(0x0001)) {
499 if (chan->sec_level == BT_SECURITY_LOW)
500 chan->sec_level = BT_SECURITY_SDP;
501
502 if (chan->sec_level == BT_SECURITY_HIGH)
503 return HCI_AT_NO_BONDING_MITM;
504 else
505 return HCI_AT_NO_BONDING;
506 } else {
507 switch (chan->sec_level) {
508 case BT_SECURITY_HIGH:
509 return HCI_AT_GENERAL_BONDING_MITM;
510 case BT_SECURITY_MEDIUM:
511 return HCI_AT_GENERAL_BONDING;
512 default:
513 return HCI_AT_NO_BONDING;
514 }
515 }
516 }
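
/* Summary of the mapping above: raw (signalling-only) channels request
 * dedicated bonding at medium/high security, PSM 0x0001 (SDP) never
 * requires bonding, and all other channels request general bonding at
 * medium/high security; the MITM-protected variant is used whenever
 * the channel's security level is BT_SECURITY_HIGH.
 */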
517
518 /* Service level security */
519 static inline int l2cap_check_security(struct l2cap_chan *chan)
520 {
521 struct l2cap_conn *conn = chan->conn;
522 __u8 auth_type;
523
524 auth_type = l2cap_get_auth_type(chan);
525
526 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
527 }
528
529 static u8 l2cap_get_ident(struct l2cap_conn *conn)
530 {
531 u8 id;
532
533 /* Get next available identifier.
534 * 1 - 128 are used by the kernel.
535 * 129 - 199 are reserved.
536 * 200 - 254 are used by utilities like l2ping, etc.
537 */
538
539 spin_lock_bh(&conn->lock);
540
541 if (++conn->tx_ident > 128)
542 conn->tx_ident = 1;
543
544 id = conn->tx_ident;
545
546 spin_unlock_bh(&conn->lock);
547
548 return id;
549 }
550
551 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
552 {
553 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
554 u8 flags;
555
556 BT_DBG("code 0x%2.2x", code);
557
558 if (!skb)
559 return;
560
561 if (lmp_no_flush_capable(conn->hcon->hdev))
562 flags = ACL_START_NO_FLUSH;
563 else
564 flags = ACL_START;
565
566 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
567
568 hci_send_acl(conn->hcon, skb, flags);
569 }
570
571 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
572 {
573 struct sk_buff *skb;
574 struct l2cap_hdr *lh;
575 struct l2cap_conn *conn = chan->conn;
576 int count, hlen;
577 u8 flags;
578
579 if (chan->state != BT_CONNECTED)
580 return;
581
582 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
583 hlen = L2CAP_EXT_HDR_SIZE;
584 else
585 hlen = L2CAP_ENH_HDR_SIZE;
586
587 if (chan->fcs == L2CAP_FCS_CRC16)
588 hlen += 2;
589
590 BT_DBG("chan %p, control 0x%2.2x", chan, control);
591
592 count = min_t(unsigned int, conn->mtu, hlen);
593
594 control |= __set_sframe(chan);
595
596 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
597 control |= __set_ctrl_final(chan);
598
599 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
600 control |= __set_ctrl_poll(chan);
601
602 skb = bt_skb_alloc(count, GFP_ATOMIC);
603 if (!skb)
604 return;
605
606 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
607 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
608 lh->cid = cpu_to_le16(chan->dcid);
609 put_unaligned_le16(control, skb_put(skb, 2));
610
611 if (chan->fcs == L2CAP_FCS_CRC16) {
612 u16 fcs = crc16(0, (u8 *)lh, count - 2);
613 put_unaligned_le16(fcs, skb_put(skb, 2));
614 }
615
616 if (lmp_no_flush_capable(conn->hcon->hdev))
617 flags = ACL_START_NO_FLUSH;
618 else
619 flags = ACL_START;
620
621 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
622
623 hci_send_acl(chan->conn->hcon, skb, flags);
624 }
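
/* An S-frame built here carries only the (enhanced or extended)
 * control field behind the basic L2CAP header, plus an optional CRC16
 * FCS.  The F bit is set when a "send final" request is pending on the
 * channel (CONN_SEND_FBIT) and the P bit when a poll has been requested
 * (CONN_SEND_PBIT); both flags are consumed here.
 */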
625
626 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
627 {
628 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
629 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
630 set_bit(CONN_RNR_SENT, &chan->conn_state);
631 } else
632 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
633
634 control |= __set_reqseq(chan, chan->buffer_seq);
635
636 l2cap_send_sframe(chan, control);
637 }
638
639 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
640 {
641 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
642 }
643
644 static void l2cap_do_start(struct l2cap_chan *chan)
645 {
646 struct l2cap_conn *conn = chan->conn;
647
648 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
649 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
650 return;
651
652 if (l2cap_check_security(chan) &&
653 __l2cap_no_conn_pending(chan)) {
654 struct l2cap_conn_req req;
655 req.scid = cpu_to_le16(chan->scid);
656 req.psm = chan->psm;
657
658 chan->ident = l2cap_get_ident(conn);
659 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
660
661 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
662 sizeof(req), &req);
663 }
664 } else {
665 struct l2cap_info_req req;
666 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
667
668 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
669 conn->info_ident = l2cap_get_ident(conn);
670
671 mod_timer(&conn->info_timer, jiffies +
672 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
673
674 l2cap_send_cmd(conn, conn->info_ident,
675 L2CAP_INFO_REQ, sizeof(req), &req);
676 }
677 }
678
679 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
680 {
681 u32 local_feat_mask = l2cap_feat_mask;
682 if (!disable_ertm)
683 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
684
685 switch (mode) {
686 case L2CAP_MODE_ERTM:
687 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
688 case L2CAP_MODE_STREAMING:
689 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
690 default:
691 return 0x00;
692 }
693 }
694
695 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
696 {
697 struct sock *sk;
698 struct l2cap_disconn_req req;
699
700 if (!conn)
701 return;
702
703 sk = chan->sk;
704
705 if (chan->mode == L2CAP_MODE_ERTM) {
706 __clear_retrans_timer(chan);
707 __clear_monitor_timer(chan);
708 __clear_ack_timer(chan);
709 }
710
711 req.dcid = cpu_to_le16(chan->dcid);
712 req.scid = cpu_to_le16(chan->scid);
713 l2cap_send_cmd(conn, l2cap_get_ident(conn),
714 L2CAP_DISCONN_REQ, sizeof(req), &req);
715
716 l2cap_state_change(chan, BT_DISCONN);
717 sk->sk_err = err;
718 }
719
720 /* ---- L2CAP connections ---- */
721 static void l2cap_conn_start(struct l2cap_conn *conn)
722 {
723 struct l2cap_chan *chan, *tmp;
724
725 BT_DBG("conn %p", conn);
726
727 read_lock(&conn->chan_lock);
728
729 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
730 struct sock *sk = chan->sk;
731
732 bh_lock_sock(sk);
733
734 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
735 bh_unlock_sock(sk);
736 continue;
737 }
738
739 if (chan->state == BT_CONNECT) {
740 struct l2cap_conn_req req;
741
742 if (!l2cap_check_security(chan) ||
743 !__l2cap_no_conn_pending(chan)) {
744 bh_unlock_sock(sk);
745 continue;
746 }
747
748 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
749 && test_bit(CONF_STATE2_DEVICE,
750 &chan->conf_state)) {
751 /* l2cap_chan_close() ends up calling list_del(&chan->list),
752 * so release the lock */
753 read_unlock(&conn->chan_lock);
754 l2cap_chan_close(chan, ECONNRESET);
755 read_lock(&conn->chan_lock);
756 bh_unlock_sock(sk);
757 continue;
758 }
759
760 req.scid = cpu_to_le16(chan->scid);
761 req.psm = chan->psm;
762
763 chan->ident = l2cap_get_ident(conn);
764 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
765
766 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
767 sizeof(req), &req);
768
769 } else if (chan->state == BT_CONNECT2) {
770 struct l2cap_conn_rsp rsp;
771 char buf[128];
772 rsp.scid = cpu_to_le16(chan->dcid);
773 rsp.dcid = cpu_to_le16(chan->scid);
774
775 if (l2cap_check_security(chan)) {
776 if (bt_sk(sk)->defer_setup) {
777 struct sock *parent = bt_sk(sk)->parent;
778 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
779 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
780 if (parent)
781 parent->sk_data_ready(parent, 0);
782
783 } else {
784 l2cap_state_change(chan, BT_CONFIG);
785 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
786 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
787 }
788 } else {
789 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
790 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
791 }
792
793 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
794 sizeof(rsp), &rsp);
795
796 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
797 rsp.result != L2CAP_CR_SUCCESS) {
798 bh_unlock_sock(sk);
799 continue;
800 }
801
802 set_bit(CONF_REQ_SENT, &chan->conf_state);
803 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
804 l2cap_build_conf_req(chan, buf), buf);
805 chan->num_conf_req++;
806 }
807
808 bh_unlock_sock(sk);
809 }
810
811 read_unlock(&conn->chan_lock);
812 }
813
814 /* Find channel with given SCID and source bdaddr.
815 * Returns closest match.
816 */
817 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
818 {
819 struct l2cap_chan *c, *c1 = NULL;
820
821 read_lock(&chan_list_lock);
822
823 list_for_each_entry(c, &chan_list, global_l) {
824 struct sock *sk = c->sk;
825
826 if (state && c->state != state)
827 continue;
828
829 if (c->scid == cid) {
830 /* Exact match. */
831 if (!bacmp(&bt_sk(sk)->src, src)) {
832 read_unlock(&chan_list_lock);
833 return c;
834 }
835
836 /* Closest match */
837 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
838 c1 = c;
839 }
840 }
841
842 read_unlock(&chan_list_lock);
843
844 return c1;
845 }
846
847 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
848 {
849 struct sock *parent, *sk;
850 struct l2cap_chan *chan, *pchan;
851
852 BT_DBG("");
853
854 /* Check if we have a socket listening on this CID */
855 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
856 conn->src);
857 if (!pchan)
858 return;
859
860 parent = pchan->sk;
861
862 bh_lock_sock(parent);
863
864 /* Check for backlog size */
865 if (sk_acceptq_is_full(parent)) {
866 BT_DBG("backlog full %d", parent->sk_ack_backlog);
867 goto clean;
868 }
869
870 chan = pchan->ops->new_connection(pchan->data);
871 if (!chan)
872 goto clean;
873
874 sk = chan->sk;
875
876 write_lock_bh(&conn->chan_lock);
877
878 hci_conn_hold(conn->hcon);
879
880 bacpy(&bt_sk(sk)->src, conn->src);
881 bacpy(&bt_sk(sk)->dst, conn->dst);
882
883 bt_accept_enqueue(parent, sk);
884
885 __l2cap_chan_add(conn, chan);
886
887 __set_chan_timer(chan, sk->sk_sndtimeo);
888
889 l2cap_state_change(chan, BT_CONNECTED);
890 parent->sk_data_ready(parent, 0);
891
892 write_unlock_bh(&conn->chan_lock);
893
894 clean:
895 bh_unlock_sock(parent);
896 }
897
898 static void l2cap_chan_ready(struct sock *sk)
899 {
900 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
901 struct sock *parent = bt_sk(sk)->parent;
902
903 BT_DBG("sk %p, parent %p", sk, parent);
904
905 chan->conf_state = 0;
906 __clear_chan_timer(chan);
907
908 l2cap_state_change(chan, BT_CONNECTED);
909 sk->sk_state_change(sk);
910
911 if (parent)
912 parent->sk_data_ready(parent, 0);
913 }
914
915 static void l2cap_conn_ready(struct l2cap_conn *conn)
916 {
917 struct l2cap_chan *chan;
918
919 BT_DBG("conn %p", conn);
920
921 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
922 l2cap_le_conn_ready(conn);
923
924 if (conn->hcon->out && conn->hcon->type == LE_LINK)
925 smp_conn_security(conn, conn->hcon->pending_sec_level);
926
927 read_lock(&conn->chan_lock);
928
929 list_for_each_entry(chan, &conn->chan_l, list) {
930 struct sock *sk = chan->sk;
931
932 bh_lock_sock(sk);
933
934 if (conn->hcon->type == LE_LINK) {
935 if (smp_conn_security(conn, chan->sec_level))
936 l2cap_chan_ready(sk);
937
938 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
939 __clear_chan_timer(chan);
940 l2cap_state_change(chan, BT_CONNECTED);
941 sk->sk_state_change(sk);
942
943 } else if (chan->state == BT_CONNECT)
944 l2cap_do_start(chan);
945
946 bh_unlock_sock(sk);
947 }
948
949 read_unlock(&conn->chan_lock);
950 }
951
952 /* Notify sockets that we cannot guarantee reliability anymore */
953 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
954 {
955 struct l2cap_chan *chan;
956
957 BT_DBG("conn %p", conn);
958
959 read_lock(&conn->chan_lock);
960
961 list_for_each_entry(chan, &conn->chan_l, list) {
962 struct sock *sk = chan->sk;
963
964 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
965 sk->sk_err = err;
966 }
967
968 read_unlock(&conn->chan_lock);
969 }
970
971 static void l2cap_info_timeout(unsigned long arg)
972 {
973 struct l2cap_conn *conn = (void *) arg;
974
975 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
976 conn->info_ident = 0;
977
978 l2cap_conn_start(conn);
979 }
980
981 static void l2cap_conn_del(struct hci_conn *hcon, int err)
982 {
983 struct l2cap_conn *conn = hcon->l2cap_data;
984 struct l2cap_chan *chan, *l;
985 struct sock *sk;
986
987 if (!conn)
988 return;
989
990 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
991
992 kfree_skb(conn->rx_skb);
993
994 /* Kill channels */
995 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
996 sk = chan->sk;
997 bh_lock_sock(sk);
998 l2cap_chan_del(chan, err);
999 bh_unlock_sock(sk);
1000 chan->ops->close(chan->data);
1001 }
1002
1003 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1004 del_timer_sync(&conn->info_timer);
1005
1006 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1007 del_timer(&conn->security_timer);
1008 smp_chan_destroy(conn);
1009 }
1010
1011 hcon->l2cap_data = NULL;
1012 kfree(conn);
1013 }
1014
1015 static void security_timeout(unsigned long arg)
1016 {
1017 struct l2cap_conn *conn = (void *) arg;
1018
1019 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1020 }
1021
1022 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1023 {
1024 struct l2cap_conn *conn = hcon->l2cap_data;
1025
1026 if (conn || status)
1027 return conn;
1028
1029 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1030 if (!conn)
1031 return NULL;
1032
1033 hcon->l2cap_data = conn;
1034 conn->hcon = hcon;
1035
1036 BT_DBG("hcon %p conn %p", hcon, conn);
1037
1038 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1039 conn->mtu = hcon->hdev->le_mtu;
1040 else
1041 conn->mtu = hcon->hdev->acl_mtu;
1042
1043 conn->src = &hcon->hdev->bdaddr;
1044 conn->dst = &hcon->dst;
1045
1046 conn->feat_mask = 0;
1047
1048 spin_lock_init(&conn->lock);
1049 rwlock_init(&conn->chan_lock);
1050
1051 INIT_LIST_HEAD(&conn->chan_l);
1052
1053 if (hcon->type == LE_LINK)
1054 setup_timer(&conn->security_timer, security_timeout,
1055 (unsigned long) conn);
1056 else
1057 setup_timer(&conn->info_timer, l2cap_info_timeout,
1058 (unsigned long) conn);
1059
1060 conn->disc_reason = 0x13;
1061
1062 return conn;
1063 }
1064
1065 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1066 {
1067 write_lock_bh(&conn->chan_lock);
1068 __l2cap_chan_add(conn, chan);
1069 write_unlock_bh(&conn->chan_lock);
1070 }
1071
1072 /* ---- Socket interface ---- */
1073
1074 /* Find channel with given PSM and source bdaddr.
1075 * Returns closest match.
1076 */
1077 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1078 {
1079 struct l2cap_chan *c, *c1 = NULL;
1080
1081 read_lock(&chan_list_lock);
1082
1083 list_for_each_entry(c, &chan_list, global_l) {
1084 struct sock *sk = c->sk;
1085
1086 if (state && c->state != state)
1087 continue;
1088
1089 if (c->psm == psm) {
1090 /* Exact match. */
1091 if (!bacmp(&bt_sk(sk)->src, src)) {
1092 read_unlock(&chan_list_lock);
1093 return c;
1094 }
1095
1096 /* Closest match */
1097 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1098 c1 = c;
1099 }
1100 }
1101
1102 read_unlock(&chan_list_lock);
1103
1104 return c1;
1105 }
1106
1107 int l2cap_chan_connect(struct l2cap_chan *chan)
1108 {
1109 struct sock *sk = chan->sk;
1110 bdaddr_t *src = &bt_sk(sk)->src;
1111 bdaddr_t *dst = &bt_sk(sk)->dst;
1112 struct l2cap_conn *conn;
1113 struct hci_conn *hcon;
1114 struct hci_dev *hdev;
1115 __u8 auth_type;
1116 int err;
1117
1118 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1119 chan->psm);
1120
1121 hdev = hci_get_route(dst, src);
1122 if (!hdev)
1123 return -EHOSTUNREACH;
1124
1125 hci_dev_lock_bh(hdev);
1126
1127 auth_type = l2cap_get_auth_type(chan);
1128
1129 if (chan->dcid == L2CAP_CID_LE_DATA)
1130 hcon = hci_connect(hdev, LE_LINK, dst,
1131 chan->sec_level, auth_type);
1132 else
1133 hcon = hci_connect(hdev, ACL_LINK, dst,
1134 chan->sec_level, auth_type);
1135
1136 if (IS_ERR(hcon)) {
1137 err = PTR_ERR(hcon);
1138 goto done;
1139 }
1140
1141 conn = l2cap_conn_add(hcon, 0);
1142 if (!conn) {
1143 hci_conn_put(hcon);
1144 err = -ENOMEM;
1145 goto done;
1146 }
1147
1148 /* Update source addr of the socket */
1149 bacpy(src, conn->src);
1150
1151 l2cap_chan_add(conn, chan);
1152
1153 l2cap_state_change(chan, BT_CONNECT);
1154 __set_chan_timer(chan, sk->sk_sndtimeo);
1155
1156 if (hcon->state == BT_CONNECTED) {
1157 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1158 __clear_chan_timer(chan);
1159 if (l2cap_check_security(chan))
1160 l2cap_state_change(chan, BT_CONNECTED);
1161 } else
1162 l2cap_do_start(chan);
1163 }
1164
1165 err = 0;
1166
1167 done:
1168 hci_dev_unlock_bh(hdev);
1169 hci_dev_put(hdev);
1170 return err;
1171 }
1172
1173 int __l2cap_wait_ack(struct sock *sk)
1174 {
1175 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1176 DECLARE_WAITQUEUE(wait, current);
1177 int err = 0;
1178 int timeo = HZ/5;
1179
1180 add_wait_queue(sk_sleep(sk), &wait);
1181 set_current_state(TASK_INTERRUPTIBLE);
1182 while (chan->unacked_frames > 0 && chan->conn) {
1183 if (!timeo)
1184 timeo = HZ/5;
1185
1186 if (signal_pending(current)) {
1187 err = sock_intr_errno(timeo);
1188 break;
1189 }
1190
1191 release_sock(sk);
1192 timeo = schedule_timeout(timeo);
1193 lock_sock(sk);
1194 set_current_state(TASK_INTERRUPTIBLE);
1195
1196 err = sock_error(sk);
1197 if (err)
1198 break;
1199 }
1200 set_current_state(TASK_RUNNING);
1201 remove_wait_queue(sk_sleep(sk), &wait);
1202 return err;
1203 }
1204
1205 static void l2cap_monitor_timeout(unsigned long arg)
1206 {
1207 struct l2cap_chan *chan = (void *) arg;
1208 struct sock *sk = chan->sk;
1209
1210 BT_DBG("chan %p", chan);
1211
1212 bh_lock_sock(sk);
1213 if (chan->retry_count >= chan->remote_max_tx) {
1214 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1215 bh_unlock_sock(sk);
1216 return;
1217 }
1218
1219 chan->retry_count++;
1220 __set_monitor_timer(chan);
1221
1222 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1223 bh_unlock_sock(sk);
1224 }
1225
1226 static void l2cap_retrans_timeout(unsigned long arg)
1227 {
1228 struct l2cap_chan *chan = (void *) arg;
1229 struct sock *sk = chan->sk;
1230
1231 BT_DBG("chan %p", chan);
1232
1233 bh_lock_sock(sk);
1234 chan->retry_count = 1;
1235 __set_monitor_timer(chan);
1236
1237 set_bit(CONN_WAIT_F, &chan->conn_state);
1238
1239 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1240 bh_unlock_sock(sk);
1241 }
1242
1243 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1244 {
1245 struct sk_buff *skb;
1246
1247 while ((skb = skb_peek(&chan->tx_q)) &&
1248 chan->unacked_frames) {
1249 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1250 break;
1251
1252 skb = skb_dequeue(&chan->tx_q);
1253 kfree_skb(skb);
1254
1255 chan->unacked_frames--;
1256 }
1257
1258 if (!chan->unacked_frames)
1259 __clear_retrans_timer(chan);
1260 }
1261
1262 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1263 {
1264 struct hci_conn *hcon = chan->conn->hcon;
1265 u16 flags;
1266
1267 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1268
1269 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1270 lmp_no_flush_capable(hcon->hdev))
1271 flags = ACL_START_NO_FLUSH;
1272 else
1273 flags = ACL_START;
1274
1275 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1276 hci_send_acl(hcon, skb, flags);
1277 }
1278
1279 static void l2cap_streaming_send(struct l2cap_chan *chan)
1280 {
1281 struct sk_buff *skb;
1282 u16 control, fcs;
1283
1284 while ((skb = skb_dequeue(&chan->tx_q))) {
1285 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1286 control |= __set_txseq(chan, chan->next_tx_seq);
1287 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1288
1289 if (chan->fcs == L2CAP_FCS_CRC16) {
1290 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1291 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1292 }
1293
1294 l2cap_do_send(chan, skb);
1295
1296 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1297 }
1298 }
1299
1300 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1301 {
1302 struct sk_buff *skb, *tx_skb;
1303 u16 control, fcs;
1304
1305 skb = skb_peek(&chan->tx_q);
1306 if (!skb)
1307 return;
1308
1309 do {
1310 if (bt_cb(skb)->tx_seq == tx_seq)
1311 break;
1312
1313 if (skb_queue_is_last(&chan->tx_q, skb))
1314 return;
1315
1316 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1317
1318 if (chan->remote_max_tx &&
1319 bt_cb(skb)->retries == chan->remote_max_tx) {
1320 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1321 return;
1322 }
1323
1324 tx_skb = skb_clone(skb, GFP_ATOMIC);
1325 bt_cb(skb)->retries++;
1326 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1327 control &= __get_sar_mask(chan);
1328
1329 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1330 control |= __set_ctrl_final(chan);
1331
1332 control |= __set_reqseq(chan, chan->buffer_seq);
1333 control |= __set_txseq(chan, tx_seq);
1334
1335 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1336
1337 if (chan->fcs == L2CAP_FCS_CRC16) {
1338 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1339 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1340 }
1341
1342 l2cap_do_send(chan, tx_skb);
1343 }
1344
1345 static int l2cap_ertm_send(struct l2cap_chan *chan)
1346 {
1347 struct sk_buff *skb, *tx_skb;
1348 u16 control, fcs;
1349 int nsent = 0;
1350
1351 if (chan->state != BT_CONNECTED)
1352 return -ENOTCONN;
1353
1354 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1355
1356 if (chan->remote_max_tx &&
1357 bt_cb(skb)->retries == chan->remote_max_tx) {
1358 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1359 break;
1360 }
1361
1362 tx_skb = skb_clone(skb, GFP_ATOMIC);
1363
1364 bt_cb(skb)->retries++;
1365
1366 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1367 control &= __get_sar_mask(chan);
1368
1369 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1370 control |= __set_ctrl_final(chan);
1371
1372 control |= __set_reqseq(chan, chan->buffer_seq);
1373 control |= __set_txseq(chan, chan->next_tx_seq);
1374 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1375
1376
1377 if (chan->fcs == L2CAP_FCS_CRC16) {
1378 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1379 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1380 }
1381
1382 l2cap_do_send(chan, tx_skb);
1383
1384 __set_retrans_timer(chan);
1385
1386 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1387 chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
1388
1389 if (bt_cb(skb)->retries == 1)
1390 chan->unacked_frames++;
1391
1392 chan->frames_sent++;
1393
1394 if (skb_queue_is_last(&chan->tx_q, skb))
1395 chan->tx_send_head = NULL;
1396 else
1397 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1398
1399 nsent++;
1400 }
1401
1402 return nsent;
1403 }
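
/* l2cap_ertm_send() transmits from tx_send_head while the transmit
 * window has room: each queued frame is cloned, stamped with the
 * current ReqSeq/TxSeq (and an FCS when CRC16 is in use) and handed to
 * l2cap_do_send(); the retransmission timer is (re)armed and the frame
 * stays on tx_q until it is acknowledged.  The return value is the
 * number of frames sent, or -ENOTCONN if the channel is not connected.
 */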
1404
1405 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1406 {
1407 int ret;
1408
1409 if (!skb_queue_empty(&chan->tx_q))
1410 chan->tx_send_head = chan->tx_q.next;
1411
1412 chan->next_tx_seq = chan->expected_ack_seq;
1413 ret = l2cap_ertm_send(chan);
1414 return ret;
1415 }
1416
1417 static void l2cap_send_ack(struct l2cap_chan *chan)
1418 {
1419 u16 control = 0;
1420
1421 control |= __set_reqseq(chan, chan->buffer_seq);
1422
1423 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1424 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1425 set_bit(CONN_RNR_SENT, &chan->conn_state);
1426 l2cap_send_sframe(chan, control);
1427 return;
1428 }
1429
1430 if (l2cap_ertm_send(chan) > 0)
1431 return;
1432
1433 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1434 l2cap_send_sframe(chan, control);
1435 }
1436
1437 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1438 {
1439 struct srej_list *tail;
1440 u16 control;
1441
1442 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1443 control |= __set_ctrl_final(chan);
1444
1445 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1446 control |= __set_reqseq(chan, tail->tx_seq);
1447
1448 l2cap_send_sframe(chan, control);
1449 }
1450
1451 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1452 {
1453 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1454 struct sk_buff **frag;
1455 int err, sent = 0;
1456
1457 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1458 return -EFAULT;
1459
1460 sent += count;
1461 len -= count;
1462
1463 /* Continuation fragments (no L2CAP header) */
1464 frag = &skb_shinfo(skb)->frag_list;
1465 while (len) {
1466 count = min_t(unsigned int, conn->mtu, len);
1467
1468 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1469 if (!*frag)
1470 return err;
1471 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1472 return -EFAULT;
1473
1474 sent += count;
1475 len -= count;
1476
1477 frag = &(*frag)->next;
1478 }
1479
1480 return sent;
1481 }
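
/* Payload that does not fit in the first skb is pulled from the iovec
 * into a chain of continuation fragments on skb_shinfo(skb)->frag_list,
 * each at most conn->mtu bytes and carrying no additional L2CAP header,
 * so the lower HCI layer can send the chain as several ACL fragments.
 */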
1482
1483 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1484 {
1485 struct sock *sk = chan->sk;
1486 struct l2cap_conn *conn = chan->conn;
1487 struct sk_buff *skb;
1488 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1489 struct l2cap_hdr *lh;
1490
1491 BT_DBG("sk %p len %d", sk, (int)len);
1492
1493 count = min_t(unsigned int, (conn->mtu - hlen), len);
1494 skb = bt_skb_send_alloc(sk, count + hlen,
1495 msg->msg_flags & MSG_DONTWAIT, &err);
1496 if (!skb)
1497 return ERR_PTR(err);
1498
1499 /* Create L2CAP header */
1500 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1501 lh->cid = cpu_to_le16(chan->dcid);
1502 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1503 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1504
1505 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1506 if (unlikely(err < 0)) {
1507 kfree_skb(skb);
1508 return ERR_PTR(err);
1509 }
1510 return skb;
1511 }
1512
1513 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1514 {
1515 struct sock *sk = chan->sk;
1516 struct l2cap_conn *conn = chan->conn;
1517 struct sk_buff *skb;
1518 int err, count, hlen = L2CAP_HDR_SIZE;
1519 struct l2cap_hdr *lh;
1520
1521 BT_DBG("sk %p len %d", sk, (int)len);
1522
1523 count = min_t(unsigned int, (conn->mtu - hlen), len);
1524 skb = bt_skb_send_alloc(sk, count + hlen,
1525 msg->msg_flags & MSG_DONTWAIT, &err);
1526 if (!skb)
1527 return ERR_PTR(err);
1528
1529 /* Create L2CAP header */
1530 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1531 lh->cid = cpu_to_le16(chan->dcid);
1532 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1533
1534 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1535 if (unlikely(err < 0)) {
1536 kfree_skb(skb);
1537 return ERR_PTR(err);
1538 }
1539 return skb;
1540 }
1541
1542 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1543 struct msghdr *msg, size_t len,
1544 u16 control, u16 sdulen)
1545 {
1546 struct sock *sk = chan->sk;
1547 struct l2cap_conn *conn = chan->conn;
1548 struct sk_buff *skb;
1549 int err, count, hlen;
1550 struct l2cap_hdr *lh;
1551
1552 BT_DBG("sk %p len %d", sk, (int)len);
1553
1554 if (!conn)
1555 return ERR_PTR(-ENOTCONN);
1556
1557 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1558 hlen = L2CAP_EXT_HDR_SIZE;
1559 else
1560 hlen = L2CAP_ENH_HDR_SIZE;
1561
1562 if (sdulen)
1563 hlen += 2;
1564
1565 if (chan->fcs == L2CAP_FCS_CRC16)
1566 hlen += 2;
1567
1568 count = min_t(unsigned int, (conn->mtu - hlen), len);
1569 skb = bt_skb_send_alloc(sk, count + hlen,
1570 msg->msg_flags & MSG_DONTWAIT, &err);
1571 if (!skb)
1572 return ERR_PTR(err);
1573
1574 /* Create L2CAP header */
1575 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1576 lh->cid = cpu_to_le16(chan->dcid);
1577 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1578 put_unaligned_le16(control, skb_put(skb, 2));
1579 if (sdulen)
1580 put_unaligned_le16(sdulen, skb_put(skb, 2));
1581
1582 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1583 if (unlikely(err < 0)) {
1584 kfree_skb(skb);
1585 return ERR_PTR(err);
1586 }
1587
1588 if (chan->fcs == L2CAP_FCS_CRC16)
1589 put_unaligned_le16(0, skb_put(skb, 2));
1590
1591 bt_cb(skb)->retries = 0;
1592 return skb;
1593 }
1594
1595 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1596 {
1597 struct sk_buff *skb;
1598 struct sk_buff_head sar_queue;
1599 u16 control;
1600 size_t size = 0;
1601
1602 skb_queue_head_init(&sar_queue);
1603 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1604 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1605 if (IS_ERR(skb))
1606 return PTR_ERR(skb);
1607
1608 __skb_queue_tail(&sar_queue, skb);
1609 len -= chan->remote_mps;
1610 size += chan->remote_mps;
1611
1612 while (len > 0) {
1613 size_t buflen;
1614
1615 if (len > chan->remote_mps) {
1616 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1617 buflen = chan->remote_mps;
1618 } else {
1619 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1620 buflen = len;
1621 }
1622
1623 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1624 if (IS_ERR(skb)) {
1625 skb_queue_purge(&sar_queue);
1626 return PTR_ERR(skb);
1627 }
1628
1629 __skb_queue_tail(&sar_queue, skb);
1630 len -= buflen;
1631 size += buflen;
1632 }
1633 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1634 if (chan->tx_send_head == NULL)
1635 chan->tx_send_head = sar_queue.next;
1636
1637 return size;
1638 }
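
/* SAR segmentation above: the first PDU is marked L2CAP_SAR_START and
 * additionally carries the total SDU length, the middle PDUs are
 * CONTINUE and the last one END, each limited to remote_mps payload
 * bytes.  For example, a 2000 byte SDU with remote_mps = 672 is sent
 * as three I-frames carrying 672 (START, plus the 2-byte SDU length),
 * 672 (CONTINUE) and 656 (END) bytes.
 */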
1639
1640 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1641 {
1642 struct sk_buff *skb;
1643 u16 control;
1644 int err;
1645
1646 /* Connectionless channel */
1647 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1648 skb = l2cap_create_connless_pdu(chan, msg, len);
1649 if (IS_ERR(skb))
1650 return PTR_ERR(skb);
1651
1652 l2cap_do_send(chan, skb);
1653 return len;
1654 }
1655
1656 switch (chan->mode) {
1657 case L2CAP_MODE_BASIC:
1658 /* Check outgoing MTU */
1659 if (len > chan->omtu)
1660 return -EMSGSIZE;
1661
1662 /* Create a basic PDU */
1663 skb = l2cap_create_basic_pdu(chan, msg, len);
1664 if (IS_ERR(skb))
1665 return PTR_ERR(skb);
1666
1667 l2cap_do_send(chan, skb);
1668 err = len;
1669 break;
1670
1671 case L2CAP_MODE_ERTM:
1672 case L2CAP_MODE_STREAMING:
1673 /* Entire SDU fits into one PDU */
1674 if (len <= chan->remote_mps) {
1675 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1676 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1677 0);
1678 if (IS_ERR(skb))
1679 return PTR_ERR(skb);
1680
1681 __skb_queue_tail(&chan->tx_q, skb);
1682
1683 if (chan->tx_send_head == NULL)
1684 chan->tx_send_head = skb;
1685
1686 } else {
1687 /* Segment the SDU into multiple PDUs */
1688 err = l2cap_sar_segment_sdu(chan, msg, len);
1689 if (err < 0)
1690 return err;
1691 }
1692
1693 if (chan->mode == L2CAP_MODE_STREAMING) {
1694 l2cap_streaming_send(chan);
1695 err = len;
1696 break;
1697 }
1698
1699 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1700 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1701 err = len;
1702 break;
1703 }
1704
1705 err = l2cap_ertm_send(chan);
1706 if (err >= 0)
1707 err = len;
1708
1709 break;
1710
1711 default:
1712 BT_DBG("bad state %1.1x", chan->mode);
1713 err = -EBADFD;
1714 }
1715
1716 return err;
1717 }
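
/* Mode dispatch in l2cap_chan_send(): connectionless channels always
 * go out as a single PDU prefixed with the PSM; basic mode sends a
 * single PDU bounded by the outgoing MTU; ERTM and streaming mode
 * queue one I-frame per remote_mps worth of data and then either
 * stream the queue immediately (streaming) or hand it to the ERTM
 * engine, which holds off only while the peer is busy and an F-bit
 * response is still outstanding.
 */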
1718
1719 /* Copy frame to all raw sockets on that connection */
1720 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1721 {
1722 struct sk_buff *nskb;
1723 struct l2cap_chan *chan;
1724
1725 BT_DBG("conn %p", conn);
1726
1727 read_lock(&conn->chan_lock);
1728 list_for_each_entry(chan, &conn->chan_l, list) {
1729 struct sock *sk = chan->sk;
1730 if (chan->chan_type != L2CAP_CHAN_RAW)
1731 continue;
1732
1733 /* Don't send frame to the socket it came from */
1734 if (skb->sk == sk)
1735 continue;
1736 nskb = skb_clone(skb, GFP_ATOMIC);
1737 if (!nskb)
1738 continue;
1739
1740 if (chan->ops->recv(chan->data, nskb))
1741 kfree_skb(nskb);
1742 }
1743 read_unlock(&conn->chan_lock);
1744 }
1745
1746 /* ---- L2CAP signalling commands ---- */
1747 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1748 u8 code, u8 ident, u16 dlen, void *data)
1749 {
1750 struct sk_buff *skb, **frag;
1751 struct l2cap_cmd_hdr *cmd;
1752 struct l2cap_hdr *lh;
1753 int len, count;
1754
1755 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1756 conn, code, ident, dlen);
1757
1758 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1759 count = min_t(unsigned int, conn->mtu, len);
1760
1761 skb = bt_skb_alloc(count, GFP_ATOMIC);
1762 if (!skb)
1763 return NULL;
1764
1765 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1766 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1767
1768 if (conn->hcon->type == LE_LINK)
1769 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1770 else
1771 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1772
1773 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1774 cmd->code = code;
1775 cmd->ident = ident;
1776 cmd->len = cpu_to_le16(dlen);
1777
1778 if (dlen) {
1779 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1780 memcpy(skb_put(skb, count), data, count);
1781 data += count;
1782 }
1783
1784 len -= skb->len;
1785
1786 /* Continuation fragments (no L2CAP header) */
1787 frag = &skb_shinfo(skb)->frag_list;
1788 while (len) {
1789 count = min_t(unsigned int, conn->mtu, len);
1790
1791 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1792 if (!*frag)
1793 goto fail;
1794
1795 memcpy(skb_put(*frag, count), data, count);
1796
1797 len -= count;
1798 data += count;
1799
1800 frag = &(*frag)->next;
1801 }
1802
1803 return skb;
1804
1805 fail:
1806 kfree_skb(skb);
1807 return NULL;
1808 }
1809
1810 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1811 {
1812 struct l2cap_conf_opt *opt = *ptr;
1813 int len;
1814
1815 len = L2CAP_CONF_OPT_SIZE + opt->len;
1816 *ptr += len;
1817
1818 *type = opt->type;
1819 *olen = opt->len;
1820
1821 switch (opt->len) {
1822 case 1:
1823 *val = *((u8 *) opt->val);
1824 break;
1825
1826 case 2:
1827 *val = get_unaligned_le16(opt->val);
1828 break;
1829
1830 case 4:
1831 *val = get_unaligned_le32(opt->val);
1832 break;
1833
1834 default:
1835 *val = (unsigned long) opt->val;
1836 break;
1837 }
1838
1839 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1840 return len;
1841 }
1842
1843 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1844 {
1845 struct l2cap_conf_opt *opt = *ptr;
1846
1847 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1848
1849 opt->type = type;
1850 opt->len = len;
1851
1852 switch (len) {
1853 case 1:
1854 *((u8 *) opt->val) = val;
1855 break;
1856
1857 case 2:
1858 put_unaligned_le16(val, opt->val);
1859 break;
1860
1861 case 4:
1862 put_unaligned_le32(val, opt->val);
1863 break;
1864
1865 default:
1866 memcpy(opt->val, (void *) val, len);
1867 break;
1868 }
1869
1870 *ptr += L2CAP_CONF_OPT_SIZE + len;
1871 }
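
/* Configuration options are plain TLVs: one byte of type, one byte of
 * length, then the value in little-endian byte order for 2- and 4-byte
 * quantities.  As a worked example (assuming the usual option type
 * 0x01 for MTU), an MTU option carrying 672 (0x02a0) is encoded as the
 * four bytes 01 02 a0 02.
 */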
1872
1873 static void l2cap_ack_timeout(unsigned long arg)
1874 {
1875 struct l2cap_chan *chan = (void *) arg;
1876
1877 bh_lock_sock(chan->sk);
1878 l2cap_send_ack(chan);
1879 bh_unlock_sock(chan->sk);
1880 }
1881
1882 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1883 {
1884 struct sock *sk = chan->sk;
1885
1886 chan->expected_ack_seq = 0;
1887 chan->unacked_frames = 0;
1888 chan->buffer_seq = 0;
1889 chan->num_acked = 0;
1890 chan->frames_sent = 0;
1891
1892 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1893 (unsigned long) chan);
1894 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1895 (unsigned long) chan);
1896 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1897
1898 skb_queue_head_init(&chan->srej_q);
1899
1900 INIT_LIST_HEAD(&chan->srej_l);
1901
1902
1903 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1904 }
1905
1906 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1907 {
1908 switch (mode) {
1909 case L2CAP_MODE_STREAMING:
1910 case L2CAP_MODE_ERTM:
1911 if (l2cap_mode_supported(mode, remote_feat_mask))
1912 return mode;
1913 /* fall through */
1914 default:
1915 return L2CAP_MODE_BASIC;
1916 }
1917 }
1918
1919 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1920 {
1921 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
1922 }
1923
1924 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1925 {
1926 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1927 __l2cap_ews_supported(chan))
1928 /* use extended control field */
1929 set_bit(FLAG_EXT_CTRL, &chan->flags);
1930 else
1931 chan->tx_win = min_t(u16, chan->tx_win,
1932 L2CAP_DEFAULT_TX_WINDOW);
1933 }
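
/* A transmit window larger than L2CAP_DEFAULT_TX_WINDOW can only be
 * honoured when extended window size support is available (enable_hs
 * and the peer's L2CAP_FEAT_EXT_WINDOW bit), in which case the channel
 * switches to the extended control field; otherwise the window is
 * clamped back to the default.
 */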
1934
1935 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1936 {
1937 struct l2cap_conf_req *req = data;
1938 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1939 void *ptr = req->data;
1940
1941 BT_DBG("chan %p", chan);
1942
1943 if (chan->num_conf_req || chan->num_conf_rsp)
1944 goto done;
1945
1946 switch (chan->mode) {
1947 case L2CAP_MODE_STREAMING:
1948 case L2CAP_MODE_ERTM:
1949 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1950 break;
1951
1952 /* fall through */
1953 default:
1954 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
1955 break;
1956 }
1957
1958 done:
1959 if (chan->imtu != L2CAP_DEFAULT_MTU)
1960 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1961
1962 switch (chan->mode) {
1963 case L2CAP_MODE_BASIC:
1964 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1965 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1966 break;
1967
1968 rfc.mode = L2CAP_MODE_BASIC;
1969 rfc.txwin_size = 0;
1970 rfc.max_transmit = 0;
1971 rfc.retrans_timeout = 0;
1972 rfc.monitor_timeout = 0;
1973 rfc.max_pdu_size = 0;
1974
1975 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1976 (unsigned long) &rfc);
1977 break;
1978
1979 case L2CAP_MODE_ERTM:
1980 rfc.mode = L2CAP_MODE_ERTM;
1981 rfc.max_transmit = chan->max_tx;
1982 rfc.retrans_timeout = 0;
1983 rfc.monitor_timeout = 0;
1984 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1985 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1986 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
1987
1988 l2cap_txwin_setup(chan);
1989
1990 rfc.txwin_size = min_t(u16, chan->tx_win,
1991 L2CAP_DEFAULT_TX_WINDOW);
1992
1993 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1994 (unsigned long) &rfc);
1995
1996 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1997 break;
1998
1999 if (chan->fcs == L2CAP_FCS_NONE ||
2000 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2001 chan->fcs = L2CAP_FCS_NONE;
2002 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2003 }
2004
2005 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2006 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2007 chan->tx_win);
2008 break;
2009
2010 case L2CAP_MODE_STREAMING:
2011 rfc.mode = L2CAP_MODE_STREAMING;
2012 rfc.txwin_size = 0;
2013 rfc.max_transmit = 0;
2014 rfc.retrans_timeout = 0;
2015 rfc.monitor_timeout = 0;
2016 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2017 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
2018 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2019
2020 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2021 (unsigned long) &rfc);
2022
2023 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2024 break;
2025
2026 if (chan->fcs == L2CAP_FCS_NONE ||
2027 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2028 chan->fcs = L2CAP_FCS_NONE;
2029 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2030 }
2031 break;
2032 }
2033
2034 req->dcid = cpu_to_le16(chan->dcid);
2035 req->flags = cpu_to_le16(0);
2036
2037 return ptr - data;
2038 }
2039
2040 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2041 {
2042 struct l2cap_conf_rsp *rsp = data;
2043 void *ptr = rsp->data;
2044 void *req = chan->conf_req;
2045 int len = chan->conf_len;
2046 int type, hint, olen;
2047 unsigned long val;
2048 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2049 u16 mtu = L2CAP_DEFAULT_MTU;
2050 u16 result = L2CAP_CONF_SUCCESS;
2051
2052 BT_DBG("chan %p", chan);
2053
2054 while (len >= L2CAP_CONF_OPT_SIZE) {
2055 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2056
2057 hint = type & L2CAP_CONF_HINT;
2058 type &= L2CAP_CONF_MASK;
2059
2060 switch (type) {
2061 case L2CAP_CONF_MTU:
2062 mtu = val;
2063 break;
2064
2065 case L2CAP_CONF_FLUSH_TO:
2066 chan->flush_to = val;
2067 break;
2068
2069 case L2CAP_CONF_QOS:
2070 break;
2071
2072 case L2CAP_CONF_RFC:
2073 if (olen == sizeof(rfc))
2074 memcpy(&rfc, (void *) val, olen);
2075 break;
2076
2077 case L2CAP_CONF_FCS:
2078 if (val == L2CAP_FCS_NONE)
2079 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2080
2081 break;
2082
2083 case L2CAP_CONF_EWS:
2084 if (!enable_hs)
2085 return -ECONNREFUSED;
2086
2087 set_bit(FLAG_EXT_CTRL, &chan->flags);
2088 set_bit(CONF_EWS_RECV, &chan->conf_state);
2089 chan->remote_tx_win = val;
2090 break;
2091
2092 default:
2093 if (hint)
2094 break;
2095
2096 result = L2CAP_CONF_UNKNOWN;
2097 *((u8 *) ptr++) = type;
2098 break;
2099 }
2100 }
2101
2102 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2103 goto done;
2104
2105 switch (chan->mode) {
2106 case L2CAP_MODE_STREAMING:
2107 case L2CAP_MODE_ERTM:
2108 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2109 chan->mode = l2cap_select_mode(rfc.mode,
2110 chan->conn->feat_mask);
2111 break;
2112 }
2113
2114 if (chan->mode != rfc.mode)
2115 return -ECONNREFUSED;
2116
2117 break;
2118 }
2119
2120 done:
2121 if (chan->mode != rfc.mode) {
2122 result = L2CAP_CONF_UNACCEPT;
2123 rfc.mode = chan->mode;
2124
2125 if (chan->num_conf_rsp == 1)
2126 return -ECONNREFUSED;
2127
2128 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2129 sizeof(rfc), (unsigned long) &rfc);
2130 }
2131
2132
2133 if (result == L2CAP_CONF_SUCCESS) {
2134 /* Configure output options and let the other side know
2135 * which ones we don't like. */
2136
2137 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2138 result = L2CAP_CONF_UNACCEPT;
2139 else {
2140 chan->omtu = mtu;
2141 set_bit(CONF_MTU_DONE, &chan->conf_state);
2142 }
2143 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2144
2145 switch (rfc.mode) {
2146 case L2CAP_MODE_BASIC:
2147 chan->fcs = L2CAP_FCS_NONE;
2148 set_bit(CONF_MODE_DONE, &chan->conf_state);
2149 break;
2150
2151 case L2CAP_MODE_ERTM:
2152 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2153 chan->remote_tx_win = rfc.txwin_size;
2154 else
2155 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2156
2157 chan->remote_max_tx = rfc.max_transmit;
2158
2159 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2160 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2161
2162 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2163
2164 rfc.retrans_timeout =
2165 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2166 rfc.monitor_timeout =
2167 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2168
2169 set_bit(CONF_MODE_DONE, &chan->conf_state);
2170
2171 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2172 sizeof(rfc), (unsigned long) &rfc);
2173
2174 break;
2175
2176 case L2CAP_MODE_STREAMING:
2177 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2178 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2179
2180 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2181
2182 set_bit(CONF_MODE_DONE, &chan->conf_state);
2183
2184 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2185 sizeof(rfc), (unsigned long) &rfc);
2186
2187 break;
2188
2189 default:
2190 result = L2CAP_CONF_UNACCEPT;
2191
2192 memset(&rfc, 0, sizeof(rfc));
2193 rfc.mode = chan->mode;
2194 }
2195
2196 if (result == L2CAP_CONF_SUCCESS)
2197 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2198 }
2199 rsp->scid = cpu_to_le16(chan->dcid);
2200 rsp->result = cpu_to_le16(result);
2201 rsp->flags = cpu_to_le16(0x0000);
2202
2203 return ptr - data;
2204 }
2205
2206 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2207 {
2208 struct l2cap_conf_req *req = data;
2209 void *ptr = req->data;
2210 int type, olen;
2211 unsigned long val;
2212 struct l2cap_conf_rfc rfc = { .mode = chan->mode };	/* sane default if the response carries no RFC option */
2213
2214 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2215
2216 while (len >= L2CAP_CONF_OPT_SIZE) {
2217 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2218
2219 switch (type) {
2220 case L2CAP_CONF_MTU:
2221 if (val < L2CAP_DEFAULT_MIN_MTU) {
2222 *result = L2CAP_CONF_UNACCEPT;
2223 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2224 } else
2225 chan->imtu = val;
2226 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2227 break;
2228
2229 case L2CAP_CONF_FLUSH_TO:
2230 chan->flush_to = val;
2231 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2232 2, chan->flush_to);
2233 break;
2234
2235 case L2CAP_CONF_RFC:
2236 if (olen == sizeof(rfc))
2237 memcpy(&rfc, (void *)val, olen);
2238
2239 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2240 rfc.mode != chan->mode)
2241 return -ECONNREFUSED;
2242
2243 chan->fcs = 0;
2244
2245 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2246 sizeof(rfc), (unsigned long) &rfc);
2247 break;
2248
2249 case L2CAP_CONF_EWS:
2250 chan->tx_win = min_t(u16, val,
2251 L2CAP_DEFAULT_EXT_WINDOW);
2252 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS,
2253 2, chan->tx_win);
2254 break;
2255 }
2256 }
2257
2258 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2259 return -ECONNREFUSED;
2260
2261 chan->mode = rfc.mode;
2262
2263 if (*result == L2CAP_CONF_SUCCESS) {
2264 switch (rfc.mode) {
2265 case L2CAP_MODE_ERTM:
2266 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2267 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2268 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2269 break;
2270 case L2CAP_MODE_STREAMING:
2271 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2272 }
2273 }
2274
2275 req->dcid = cpu_to_le16(chan->dcid);
2276 req->flags = cpu_to_le16(0x0000);
2277
2278 return ptr - data;
2279 }
2280
2281 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2282 {
2283 struct l2cap_conf_rsp *rsp = data;
2284 void *ptr = rsp->data;
2285
2286 BT_DBG("chan %p", chan);
2287
2288 rsp->scid = cpu_to_le16(chan->dcid);
2289 rsp->result = cpu_to_le16(result);
2290 rsp->flags = cpu_to_le16(flags);
2291
2292 return ptr - data;
2293 }
2294
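/*
 * Complete a connection whose acceptance was deferred by the socket
 * layer: send the successful connect response and, unless a configure
 * request has already gone out, start configuration of the channel.
 */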
2295 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2296 {
2297 struct l2cap_conn_rsp rsp;
2298 struct l2cap_conn *conn = chan->conn;
2299 u8 buf[128];
2300
2301 rsp.scid = cpu_to_le16(chan->dcid);
2302 rsp.dcid = cpu_to_le16(chan->scid);
2303 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2304 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2305 l2cap_send_cmd(conn, chan->ident,
2306 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2307
2308 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2309 return;
2310
2311 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2312 l2cap_build_conf_req(chan, buf), buf);
2313 chan->num_conf_req++;
2314 }
2315
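/*
 * Pull the RFC option out of a successful configure response and latch
 * the negotiated ERTM/streaming timeouts and maximum PDU size into the
 * channel.
 */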
2316 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2317 {
2318 int type, olen;
2319 unsigned long val;
2320 struct l2cap_conf_rfc rfc = { .mode = chan->mode };	/* sane default if no RFC option is present */
2321
2322 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2323
2324 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2325 return;
2326
2327 while (len >= L2CAP_CONF_OPT_SIZE) {
2328 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2329
2330 switch (type) {
2331 case L2CAP_CONF_RFC:
2332 if (olen == sizeof(rfc))
2333 memcpy(&rfc, (void *)val, olen);
2334 goto done;
2335 }
2336 }
2337
2338 done:
2339 switch (rfc.mode) {
2340 case L2CAP_MODE_ERTM:
2341 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2342 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2343 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2344 break;
2345 case L2CAP_MODE_STREAMING:
2346 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2347 }
2348 }
2349
2350 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2351 {
2352 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2353
2354 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2355 return 0;
2356
2357 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2358 cmd->ident == conn->info_ident) {
2359 del_timer(&conn->info_timer);
2360
2361 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2362 conn->info_ident = 0;
2363
2364 l2cap_conn_start(conn);
2365 }
2366
2367 return 0;
2368 }
2369
2370 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2371 {
2372 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2373 struct l2cap_conn_rsp rsp;
2374 struct l2cap_chan *chan = NULL, *pchan;
2375 struct sock *parent, *sk = NULL;
2376 int result, status = L2CAP_CS_NO_INFO;
2377
2378 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2379 __le16 psm = req->psm;
2380
2381 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2382
2383 /* Check if we have socket listening on psm */
2384 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2385 if (!pchan) {
2386 result = L2CAP_CR_BAD_PSM;
2387 goto sendresp;
2388 }
2389
2390 parent = pchan->sk;
2391
2392 bh_lock_sock(parent);
2393
2394 /* Check if the ACL is secure enough (if not SDP) */
2395 if (psm != cpu_to_le16(0x0001) &&
2396 !hci_conn_check_link_mode(conn->hcon)) {
2397 conn->disc_reason = 0x05; /* HCI error: authentication failure */
2398 result = L2CAP_CR_SEC_BLOCK;
2399 goto response;
2400 }
2401
2402 result = L2CAP_CR_NO_MEM;
2403
2404 /* Check for backlog size */
2405 if (sk_acceptq_is_full(parent)) {
2406 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2407 goto response;
2408 }
2409
2410 chan = pchan->ops->new_connection(pchan->data);
2411 if (!chan)
2412 goto response;
2413
2414 sk = chan->sk;
2415
2416 write_lock_bh(&conn->chan_lock);
2417
2418 /* Check if we already have channel with that dcid */
2419 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2420 write_unlock_bh(&conn->chan_lock);
2421 sock_set_flag(sk, SOCK_ZAPPED);
2422 chan->ops->close(chan->data);
2423 goto response;
2424 }
2425
2426 hci_conn_hold(conn->hcon);
2427
2428 bacpy(&bt_sk(sk)->src, conn->src);
2429 bacpy(&bt_sk(sk)->dst, conn->dst);
2430 chan->psm = psm;
2431 chan->dcid = scid;
2432
2433 bt_accept_enqueue(parent, sk);
2434
2435 __l2cap_chan_add(conn, chan);
2436
2437 dcid = chan->scid;
2438
2439 __set_chan_timer(chan, sk->sk_sndtimeo);
2440
2441 chan->ident = cmd->ident;
2442
2443 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2444 if (l2cap_check_security(chan)) {
2445 if (bt_sk(sk)->defer_setup) {
2446 l2cap_state_change(chan, BT_CONNECT2);
2447 result = L2CAP_CR_PEND;
2448 status = L2CAP_CS_AUTHOR_PEND;
2449 parent->sk_data_ready(parent, 0);
2450 } else {
2451 l2cap_state_change(chan, BT_CONFIG);
2452 result = L2CAP_CR_SUCCESS;
2453 status = L2CAP_CS_NO_INFO;
2454 }
2455 } else {
2456 l2cap_state_change(chan, BT_CONNECT2);
2457 result = L2CAP_CR_PEND;
2458 status = L2CAP_CS_AUTHEN_PEND;
2459 }
2460 } else {
2461 l2cap_state_change(chan, BT_CONNECT2);
2462 result = L2CAP_CR_PEND;
2463 status = L2CAP_CS_NO_INFO;
2464 }
2465
2466 write_unlock_bh(&conn->chan_lock);
2467
2468 response:
2469 bh_unlock_sock(parent);
2470
2471 sendresp:
2472 rsp.scid = cpu_to_le16(scid);
2473 rsp.dcid = cpu_to_le16(dcid);
2474 rsp.result = cpu_to_le16(result);
2475 rsp.status = cpu_to_le16(status);
2476 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2477
2478 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2479 struct l2cap_info_req info;
2480 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2481
2482 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2483 conn->info_ident = l2cap_get_ident(conn);
2484
2485 mod_timer(&conn->info_timer, jiffies +
2486 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2487
2488 l2cap_send_cmd(conn, conn->info_ident,
2489 L2CAP_INFO_REQ, sizeof(info), &info);
2490 }
2491
2492 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2493 result == L2CAP_CR_SUCCESS) {
2494 u8 buf[128];
2495 set_bit(CONF_REQ_SENT, &chan->conf_state);
2496 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2497 l2cap_build_conf_req(chan, buf), buf);
2498 chan->num_conf_req++;
2499 }
2500
2501 return 0;
2502 }
2503
2504 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2505 {
2506 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2507 u16 scid, dcid, result, status;
2508 struct l2cap_chan *chan;
2509 struct sock *sk;
2510 u8 req[128];
2511
2512 scid = __le16_to_cpu(rsp->scid);
2513 dcid = __le16_to_cpu(rsp->dcid);
2514 result = __le16_to_cpu(rsp->result);
2515 status = __le16_to_cpu(rsp->status);
2516
2517 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2518
2519 if (scid) {
2520 chan = l2cap_get_chan_by_scid(conn, scid);
2521 if (!chan)
2522 return -EFAULT;
2523 } else {
2524 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2525 if (!chan)
2526 return -EFAULT;
2527 }
2528
2529 sk = chan->sk;
2530
2531 switch (result) {
2532 case L2CAP_CR_SUCCESS:
2533 l2cap_state_change(chan, BT_CONFIG);
2534 chan->ident = 0;
2535 chan->dcid = dcid;
2536 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2537
2538 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2539 break;
2540
2541 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2542 l2cap_build_conf_req(chan, req), req);
2543 chan->num_conf_req++;
2544 break;
2545
2546 case L2CAP_CR_PEND:
2547 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2548 break;
2549
2550 default:
2551 /* don't delete l2cap channel if sk is owned by user */
2552 if (sock_owned_by_user(sk)) {
2553 l2cap_state_change(chan, BT_DISCONN);
2554 __clear_chan_timer(chan);
2555 __set_chan_timer(chan, HZ / 5);
2556 break;
2557 }
2558
2559 l2cap_chan_del(chan, ECONNREFUSED);
2560 break;
2561 }
2562
2563 bh_unlock_sock(sk);
2564 return 0;
2565 }
2566
2567 static inline void set_default_fcs(struct l2cap_chan *chan)
2568 {
2569 /* FCS is enabled only in ERTM or streaming mode, if one or both
2570 * sides request it.
2571 */
2572 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2573 chan->fcs = L2CAP_FCS_NONE;
2574 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2575 chan->fcs = L2CAP_FCS_CRC16;
2576 }
2577
2578 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2579 {
2580 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2581 u16 dcid, flags;
2582 u8 rsp[64];
2583 struct l2cap_chan *chan;
2584 struct sock *sk;
2585 int len;
2586
2587 dcid = __le16_to_cpu(req->dcid);
2588 flags = __le16_to_cpu(req->flags);
2589
2590 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2591
2592 chan = l2cap_get_chan_by_scid(conn, dcid);
2593 if (!chan)
2594 return -ENOENT;
2595
2596 sk = chan->sk;
2597
2598 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2599 struct l2cap_cmd_rej_cid rej;
2600
2601 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2602 rej.scid = cpu_to_le16(chan->scid);
2603 rej.dcid = cpu_to_le16(chan->dcid);
2604
2605 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2606 sizeof(rej), &rej);
2607 goto unlock;
2608 }
2609
2610 /* Reject if config buffer is too small. */
2611 len = cmd_len - sizeof(*req);
2612 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2613 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2614 l2cap_build_conf_rsp(chan, rsp,
2615 L2CAP_CONF_REJECT, flags), rsp);
2616 goto unlock;
2617 }
2618
2619 /* Store config. */
2620 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2621 chan->conf_len += len;
2622
2623 if (flags & 0x0001) {
2624 /* Incomplete config. Send empty response. */
2625 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2626 l2cap_build_conf_rsp(chan, rsp,
2627 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2628 goto unlock;
2629 }
2630
2631 /* Complete config. */
2632 len = l2cap_parse_conf_req(chan, rsp);
2633 if (len < 0) {
2634 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2635 goto unlock;
2636 }
2637
2638 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2639 chan->num_conf_rsp++;
2640
2641 /* Reset config buffer. */
2642 chan->conf_len = 0;
2643
2644 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2645 goto unlock;
2646
2647 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2648 set_default_fcs(chan);
2649
2650 l2cap_state_change(chan, BT_CONNECTED);
2651
2652 chan->next_tx_seq = 0;
2653 chan->expected_tx_seq = 0;
2654 skb_queue_head_init(&chan->tx_q);
2655 if (chan->mode == L2CAP_MODE_ERTM)
2656 l2cap_ertm_init(chan);
2657
2658 l2cap_chan_ready(sk);
2659 goto unlock;
2660 }
2661
2662 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2663 u8 buf[64];
2664 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2665 l2cap_build_conf_req(chan, buf), buf);
2666 chan->num_conf_req++;
2667 }
2668
2669 unlock:
2670 bh_unlock_sock(sk);
2671 return 0;
2672 }
2673
2674 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2675 {
2676 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2677 u16 scid, flags, result;
2678 struct l2cap_chan *chan;
2679 struct sock *sk;
2680 int len = __le16_to_cpu(cmd->len) - sizeof(*rsp);
2681
2682 scid = __le16_to_cpu(rsp->scid);
2683 flags = __le16_to_cpu(rsp->flags);
2684 result = __le16_to_cpu(rsp->result);
2685
2686 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2687 scid, flags, result);
2688
2689 chan = l2cap_get_chan_by_scid(conn, scid);
2690 if (!chan)
2691 return 0;
2692
2693 sk = chan->sk;
2694
2695 switch (result) {
2696 case L2CAP_CONF_SUCCESS:
2697 l2cap_conf_rfc_get(chan, rsp->data, len);
2698 break;
2699
2700 case L2CAP_CONF_UNACCEPT:
2701 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2702 char req[64];
2703
2704 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2705 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2706 goto done;
2707 }
2708
2709 /* throw out any old stored conf requests */
2710 result = L2CAP_CONF_SUCCESS;
2711 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2712 req, &result);
2713 if (len < 0) {
2714 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2715 goto done;
2716 }
2717
2718 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2719 L2CAP_CONF_REQ, len, req);
2720 chan->num_conf_req++;
2721 if (result != L2CAP_CONF_SUCCESS)
2722 goto done;
2723 break;
2724 }
2725 /* fall through: too many failed configuration attempts */
2726 default:
2727 sk->sk_err = ECONNRESET;
2728 __set_chan_timer(chan, HZ * 5);
2729 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2730 goto done;
2731 }
2732
2733 if (flags & 0x01)
2734 goto done;
2735
2736 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2737
2738 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2739 set_default_fcs(chan);
2740
2741 l2cap_state_change(chan, BT_CONNECTED);
2742 chan->next_tx_seq = 0;
2743 chan->expected_tx_seq = 0;
2744 skb_queue_head_init(&chan->tx_q);
2745 if (chan->mode == L2CAP_MODE_ERTM)
2746 l2cap_ertm_init(chan);
2747
2748 l2cap_chan_ready(sk);
2749 }
2750
2751 done:
2752 bh_unlock_sock(sk);
2753 return 0;
2754 }
2755
2756 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2757 {
2758 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2759 struct l2cap_disconn_rsp rsp;
2760 u16 dcid, scid;
2761 struct l2cap_chan *chan;
2762 struct sock *sk;
2763
2764 scid = __le16_to_cpu(req->scid);
2765 dcid = __le16_to_cpu(req->dcid);
2766
2767 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2768
2769 chan = l2cap_get_chan_by_scid(conn, dcid);
2770 if (!chan)
2771 return 0;
2772
2773 sk = chan->sk;
2774
2775 rsp.dcid = cpu_to_le16(chan->scid);
2776 rsp.scid = cpu_to_le16(chan->dcid);
2777 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2778
2779 sk->sk_shutdown = SHUTDOWN_MASK;
2780
2781 /* don't delete l2cap channel if sk is owned by user */
2782 if (sock_owned_by_user(sk)) {
2783 l2cap_state_change(chan, BT_DISCONN);
2784 __clear_chan_timer(chan);
2785 __set_chan_timer(chan, HZ / 5);
2786 bh_unlock_sock(sk);
2787 return 0;
2788 }
2789
2790 l2cap_chan_del(chan, ECONNRESET);
2791 bh_unlock_sock(sk);
2792
2793 chan->ops->close(chan->data);
2794 return 0;
2795 }
2796
2797 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2798 {
2799 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2800 u16 dcid, scid;
2801 struct l2cap_chan *chan;
2802 struct sock *sk;
2803
2804 scid = __le16_to_cpu(rsp->scid);
2805 dcid = __le16_to_cpu(rsp->dcid);
2806
2807 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2808
2809 chan = l2cap_get_chan_by_scid(conn, scid);
2810 if (!chan)
2811 return 0;
2812
2813 sk = chan->sk;
2814
2815 /* don't delete l2cap channel if sk is owned by user */
2816 if (sock_owned_by_user(sk)) {
2817 l2cap_state_change(chan, BT_DISCONN);
2818 __clear_chan_timer(chan);
2819 __set_chan_timer(chan, HZ / 5);
2820 bh_unlock_sock(sk);
2821 return 0;
2822 }
2823
2824 l2cap_chan_del(chan, 0);
2825 bh_unlock_sock(sk);
2826
2827 chan->ops->close(chan->data);
2828 return 0;
2829 }
2830
2831 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2832 {
2833 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2834 u16 type;
2835
2836 type = __le16_to_cpu(req->type);
2837
2838 BT_DBG("type 0x%4.4x", type);
2839
2840 if (type == L2CAP_IT_FEAT_MASK) {
2841 u8 buf[8];
2842 u32 feat_mask = l2cap_feat_mask;
2843 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2844 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2845 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2846 if (!disable_ertm)
2847 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2848 | L2CAP_FEAT_FCS;
2849 if (enable_hs)
2850 feat_mask |= L2CAP_FEAT_EXT_FLOW
2851 | L2CAP_FEAT_EXT_WINDOW;
2852
2853 put_unaligned_le32(feat_mask, rsp->data);
2854 l2cap_send_cmd(conn, cmd->ident,
2855 L2CAP_INFO_RSP, sizeof(buf), buf);
2856 } else if (type == L2CAP_IT_FIXED_CHAN) {
2857 u8 buf[12];
2858 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2859 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2860 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2861 memcpy(buf + 4, l2cap_fixed_chan, 8);
2862 l2cap_send_cmd(conn, cmd->ident,
2863 L2CAP_INFO_RSP, sizeof(buf), buf);
2864 } else {
2865 struct l2cap_info_rsp rsp;
2866 rsp.type = cpu_to_le16(type);
2867 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2868 l2cap_send_cmd(conn, cmd->ident,
2869 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
2870 }
2871
2872 return 0;
2873 }
2874
2875 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2876 {
2877 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2878 u16 type, result;
2879
2880 type = __le16_to_cpu(rsp->type);
2881 result = __le16_to_cpu(rsp->result);
2882
2883 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2884
2885 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2886 if (cmd->ident != conn->info_ident ||
2887 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2888 return 0;
2889
2890 del_timer(&conn->info_timer);
2891
2892 if (result != L2CAP_IR_SUCCESS) {
2893 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2894 conn->info_ident = 0;
2895
2896 l2cap_conn_start(conn);
2897
2898 return 0;
2899 }
2900
2901 if (type == L2CAP_IT_FEAT_MASK) {
2902 conn->feat_mask = get_unaligned_le32(rsp->data);
2903
2904 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2905 struct l2cap_info_req req;
2906 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2907
2908 conn->info_ident = l2cap_get_ident(conn);
2909
2910 l2cap_send_cmd(conn, conn->info_ident,
2911 L2CAP_INFO_REQ, sizeof(req), &req);
2912 } else {
2913 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2914 conn->info_ident = 0;
2915
2916 l2cap_conn_start(conn);
2917 }
2918 } else if (type == L2CAP_IT_FIXED_CHAN) {
2919 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2920 conn->info_ident = 0;
2921
2922 l2cap_conn_start(conn);
2923 }
2924
2925 return 0;
2926 }
2927
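/*
 * Sanity check LE connection parameters before they are handed to
 * hci_le_conn_update().  min/max give the connection interval in units
 * of 1.25 ms (6..3200, i.e. 7.5 ms to 4 s) and to_multiplier gives the
 * supervision timeout in units of 10 ms (10..3200, i.e. 100 ms to 32 s).
 * The slave latency is capped at 499 and must leave at least one
 * connection event within the supervision timeout, which is what the
 * max_latency computation enforces.
 */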
2928 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2929 u16 to_multiplier)
2930 {
2931 u16 max_latency;
2932
2933 if (min > max || min < 6 || max > 3200)
2934 return -EINVAL;
2935
2936 if (to_multiplier < 10 || to_multiplier > 3200)
2937 return -EINVAL;
2938
2939 if (max >= to_multiplier * 8)
2940 return -EINVAL;
2941
2942 max_latency = (to_multiplier * 8 / max) - 1;
2943 if (latency > 499 || latency > max_latency)
2944 return -EINVAL;
2945
2946 return 0;
2947 }
2948
2949 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2950 struct l2cap_cmd_hdr *cmd, u8 *data)
2951 {
2952 struct hci_conn *hcon = conn->hcon;
2953 struct l2cap_conn_param_update_req *req;
2954 struct l2cap_conn_param_update_rsp rsp;
2955 u16 min, max, latency, to_multiplier, cmd_len;
2956 int err;
2957
2958 if (!(hcon->link_mode & HCI_LM_MASTER))
2959 return -EINVAL;
2960
2961 cmd_len = __le16_to_cpu(cmd->len);
2962 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2963 return -EPROTO;
2964
2965 req = (struct l2cap_conn_param_update_req *) data;
2966 min = __le16_to_cpu(req->min);
2967 max = __le16_to_cpu(req->max);
2968 latency = __le16_to_cpu(req->latency);
2969 to_multiplier = __le16_to_cpu(req->to_multiplier);
2970
2971 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2972 min, max, latency, to_multiplier);
2973
2974 memset(&rsp, 0, sizeof(rsp));
2975
2976 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2977 if (err)
2978 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2979 else
2980 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2981
2982 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2983 sizeof(rsp), &rsp);
2984
2985 if (!err)
2986 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
2987
2988 return 0;
2989 }
2990
2991 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2992 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2993 {
2994 int err = 0;
2995
2996 switch (cmd->code) {
2997 case L2CAP_COMMAND_REJ:
2998 l2cap_command_rej(conn, cmd, data);
2999 break;
3000
3001 case L2CAP_CONN_REQ:
3002 err = l2cap_connect_req(conn, cmd, data);
3003 break;
3004
3005 case L2CAP_CONN_RSP:
3006 err = l2cap_connect_rsp(conn, cmd, data);
3007 break;
3008
3009 case L2CAP_CONF_REQ:
3010 err = l2cap_config_req(conn, cmd, cmd_len, data);
3011 break;
3012
3013 case L2CAP_CONF_RSP:
3014 err = l2cap_config_rsp(conn, cmd, data);
3015 break;
3016
3017 case L2CAP_DISCONN_REQ:
3018 err = l2cap_disconnect_req(conn, cmd, data);
3019 break;
3020
3021 case L2CAP_DISCONN_RSP:
3022 err = l2cap_disconnect_rsp(conn, cmd, data);
3023 break;
3024
3025 case L2CAP_ECHO_REQ:
3026 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3027 break;
3028
3029 case L2CAP_ECHO_RSP:
3030 break;
3031
3032 case L2CAP_INFO_REQ:
3033 err = l2cap_information_req(conn, cmd, data);
3034 break;
3035
3036 case L2CAP_INFO_RSP:
3037 err = l2cap_information_rsp(conn, cmd, data);
3038 break;
3039
3040 default:
3041 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3042 err = -EINVAL;
3043 break;
3044 }
3045
3046 return err;
3047 }
3048
3049 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3050 struct l2cap_cmd_hdr *cmd, u8 *data)
3051 {
3052 switch (cmd->code) {
3053 case L2CAP_COMMAND_REJ:
3054 return 0;
3055
3056 case L2CAP_CONN_PARAM_UPDATE_REQ:
3057 return l2cap_conn_param_update_req(conn, cmd, data);
3058
3059 case L2CAP_CONN_PARAM_UPDATE_RSP:
3060 return 0;
3061
3062 default:
3063 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3064 return -EINVAL;
3065 }
3066 }
3067
3068 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3069 struct sk_buff *skb)
3070 {
3071 u8 *data = skb->data;
3072 int len = skb->len;
3073 struct l2cap_cmd_hdr cmd;
3074 int err;
3075
3076 l2cap_raw_recv(conn, skb);
3077
3078 while (len >= L2CAP_CMD_HDR_SIZE) {
3079 u16 cmd_len;
3080 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3081 data += L2CAP_CMD_HDR_SIZE;
3082 len -= L2CAP_CMD_HDR_SIZE;
3083
3084 cmd_len = le16_to_cpu(cmd.len);
3085
3086 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3087
3088 if (cmd_len > len || !cmd.ident) {
3089 BT_DBG("corrupted command");
3090 break;
3091 }
3092
3093 if (conn->hcon->type == LE_LINK)
3094 err = l2cap_le_sig_cmd(conn, &cmd, data);
3095 else
3096 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3097
3098 if (err) {
3099 struct l2cap_cmd_rej_unk rej;
3100
3101 BT_ERR("Wrong link type (%d)", err);
3102
3103 /* FIXME: Map err to a valid reason */
3104 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3105 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3106 }
3107
3108 data += cmd_len;
3109 len -= cmd_len;
3110 }
3111
3112 kfree_skb(skb);
3113 }
3114
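/*
 * Verify the CRC-16 FCS that trails ERTM/streaming PDUs when FCS is in
 * use.  The checksum covers the L2CAP header (enhanced or extended
 * control field) plus the payload, so the header size is added back in
 * for the calculation.  The two FCS bytes are trimmed from the skb
 * before the payload is processed further.
 */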
3115 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3116 {
3117 u16 our_fcs, rcv_fcs;
3118 int hdr_size;
3119
3120 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3121 hdr_size = L2CAP_EXT_HDR_SIZE;
3122 else
3123 hdr_size = L2CAP_ENH_HDR_SIZE;
3124
3125 if (chan->fcs == L2CAP_FCS_CRC16) {
3126 skb_trim(skb, skb->len - 2);	/* drop the FCS from the payload length... */
3127 rcv_fcs = get_unaligned_le16(skb->data + skb->len);	/* ...the bytes themselves are still readable here */
3128 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3129
3130 if (our_fcs != rcv_fcs)
3131 return -EBADMSG;
3132 }
3133 return 0;
3134 }
3135
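/*
 * Acknowledge everything up to buffer_seq: report RNR if the local
 * side is busy, retransmit if the remote side was busy, push out any
 * pending I-frames and fall back to a plain RR if nothing else carried
 * the acknowledgement.
 */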
3136 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3137 {
3138 u16 control = 0;
3139
3140 chan->frames_sent = 0;
3141
3142 control |= __set_reqseq(chan, chan->buffer_seq);
3143
3144 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3145 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3146 l2cap_send_sframe(chan, control);
3147 set_bit(CONN_RNR_SENT, &chan->conn_state);
3148 }
3149
3150 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3151 l2cap_retransmit_frames(chan);
3152
3153 l2cap_ertm_send(chan);
3154
3155 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3156 chan->frames_sent == 0) {
3157 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3158 l2cap_send_sframe(chan, control);
3159 }
3160 }
3161
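/*
 * Park an out-of-sequence I-frame while SREJ recovery is in progress.
 * Frames are kept ordered by their tx_seq distance from buffer_seq in
 * the modulo-64 sequence space so reassembly can resume in order once
 * the missing frames arrive.  Duplicates are rejected with -EINVAL.
 */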
3162 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3163 {
3164 struct sk_buff *next_skb;
3165 int tx_seq_offset, next_tx_seq_offset;
3166
3167 bt_cb(skb)->tx_seq = tx_seq;
3168 bt_cb(skb)->sar = sar;
3169
3170 next_skb = skb_peek(&chan->srej_q);
3171 if (!next_skb) {
3172 __skb_queue_tail(&chan->srej_q, skb);
3173 return 0;
3174 }
3175
3176 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3177 if (tx_seq_offset < 0)
3178 tx_seq_offset += 64;
3179
3180 do {
3181 if (bt_cb(next_skb)->tx_seq == tx_seq)
3182 return -EINVAL;
3183
3184 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3185 chan->buffer_seq) % 64;
3186 if (next_tx_seq_offset < 0)
3187 next_tx_seq_offset += 64;
3188
3189 if (next_tx_seq_offset > tx_seq_offset) {
3190 __skb_queue_before(&chan->srej_q, next_skb, skb);
3191 return 0;
3192 }
3193
3194 if (skb_queue_is_last(&chan->srej_q, next_skb))
3195 break;
3196
3197 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
3198
3199 __skb_queue_tail(&chan->srej_q, skb);
3200
3201 return 0;
3202 }
3203
3204 static void append_skb_frag(struct sk_buff *skb,
3205 struct sk_buff *new_frag, struct sk_buff **last_frag)
3206 {
3207 /* skb->len reflects data in skb as well as all fragments
3208 * skb->data_len reflects only data in fragments
3209 */
3210 if (!skb_has_frag_list(skb))
3211 skb_shinfo(skb)->frag_list = new_frag;
3212
3213 new_frag->next = NULL;
3214
3215 (*last_frag)->next = new_frag;
3216 *last_frag = new_frag;
3217
3218 skb->len += new_frag->len;
3219 skb->data_len += new_frag->len;
3220 skb->truesize += new_frag->truesize;
3221 }
3222
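/*
 * Reassemble a segmented SDU according to the SAR bits of the control
 * field: unsegmented frames are delivered directly, a start frame
 * carries the 16-bit SDU length and opens reassembly, and continue/end
 * frames are chained onto chan->sdu as fragments.  Any error discards
 * the partially reassembled SDU.
 */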
3223 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3224 {
3225 int err = -EINVAL;
3226
3227 switch (__get_ctrl_sar(chan, control)) {
3228 case L2CAP_SAR_UNSEGMENTED:
3229 if (chan->sdu)
3230 break;
3231
3232 err = chan->ops->recv(chan->data, skb);
3233 break;
3234
3235 case L2CAP_SAR_START:
3236 if (chan->sdu)
3237 break;
3238
3239 chan->sdu_len = get_unaligned_le16(skb->data);
3240 skb_pull(skb, 2);
3241
3242 if (chan->sdu_len > chan->imtu) {
3243 err = -EMSGSIZE;
3244 break;
3245 }
3246
3247 if (skb->len >= chan->sdu_len)
3248 break;
3249
3250 chan->sdu = skb;
3251 chan->sdu_last_frag = skb;
3252
3253 skb = NULL;
3254 err = 0;
3255 break;
3256
3257 case L2CAP_SAR_CONTINUE:
3258 if (!chan->sdu)
3259 break;
3260
3261 append_skb_frag(chan->sdu, skb,
3262 &chan->sdu_last_frag);
3263 skb = NULL;
3264
3265 if (chan->sdu->len >= chan->sdu_len)
3266 break;
3267
3268 err = 0;
3269 break;
3270
3271 case L2CAP_SAR_END:
3272 if (!chan->sdu)
3273 break;
3274
3275 append_skb_frag(chan->sdu, skb,
3276 &chan->sdu_last_frag);
3277 skb = NULL;
3278
3279 if (chan->sdu->len != chan->sdu_len)
3280 break;
3281
3282 err = chan->ops->recv(chan->data, chan->sdu);
3283
3284 if (!err) {
3285 /* Reassembly complete */
3286 chan->sdu = NULL;
3287 chan->sdu_last_frag = NULL;
3288 chan->sdu_len = 0;
3289 }
3290 break;
3291 }
3292
3293 if (err) {
3294 kfree_skb(skb);
3295 kfree_skb(chan->sdu);
3296 chan->sdu = NULL;
3297 chan->sdu_last_frag = NULL;
3298 chan->sdu_len = 0;
3299 }
3300
3301 return err;
3302 }
3303
3304 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3305 {
3306 u16 control;
3307
3308 BT_DBG("chan %p, Enter local busy", chan);
3309
3310 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3311
3312 control = __set_reqseq(chan, chan->buffer_seq);
3313 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3314 l2cap_send_sframe(chan, control);
3315
3316 set_bit(CONN_RNR_SENT, &chan->conn_state);
3317
3318 __clear_ack_timer(chan);
3319 }
3320
3321 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3322 {
3323 u16 control;
3324
3325 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3326 goto done;
3327
3328 control = __set_reqseq(chan, chan->buffer_seq);
3329 control |= __set_ctrl_poll(chan);
3330 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3331 l2cap_send_sframe(chan, control);
3332 chan->retry_count = 1;
3333
3334 __clear_retrans_timer(chan);
3335 __set_monitor_timer(chan);
3336
3337 set_bit(CONN_WAIT_F, &chan->conn_state);
3338
3339 done:
3340 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3341 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3342
3343 BT_DBG("chan %p, Exit local busy", chan);
3344 }
3345
3346 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3347 {
3348 if (chan->mode == L2CAP_MODE_ERTM) {
3349 if (busy)
3350 l2cap_ertm_enter_local_busy(chan);
3351 else
3352 l2cap_ertm_exit_local_busy(chan);
3353 }
3354 }
3355
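/*
 * Once a missing frame has arrived, release the consecutive frames
 * that were parked on srej_q, reassembling them in order until the
 * next gap or a local busy condition is hit.
 */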
3356 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3357 {
3358 struct sk_buff *skb;
3359 u16 control;
3360
3361 while ((skb = skb_peek(&chan->srej_q)) &&
3362 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3363 int err;
3364
3365 if (bt_cb(skb)->tx_seq != tx_seq)
3366 break;
3367
3368 skb = skb_dequeue(&chan->srej_q);
3369 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3370 err = l2cap_reassemble_sdu(chan, skb, control);
3371
3372 if (err < 0) {
3373 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3374 break;
3375 }
3376
3377 chan->buffer_seq_srej =
3378 (chan->buffer_seq_srej + 1) % 64;
3379 tx_seq = (tx_seq + 1) % 64;
3380 }
3381 }
3382
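/*
 * Walk the list of outstanding SREJ requests: the entry matching
 * tx_seq is satisfied and removed, while earlier entries are
 * re-requested and moved to the tail of the list.
 */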
3383 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3384 {
3385 struct srej_list *l, *tmp;
3386 u16 control;
3387
3388 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3389 if (l->tx_seq == tx_seq) {
3390 list_del(&l->list);
3391 kfree(l);
3392 return;
3393 }
3394 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3395 control |= __set_reqseq(chan, l->tx_seq);
3396 l2cap_send_sframe(chan, control);
3397 list_del(&l->list);
3398 list_add_tail(&l->list, &chan->srej_l);
3399 }
3400 }
3401
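/*
 * A gap was detected in the incoming sequence: send one SREJ S-frame
 * for every missing tx_seq between expected_tx_seq and the frame just
 * received, remembering each outstanding request on chan->srej_l.
 * Note that the srej_list allocation below is not checked for failure.
 */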
3402 static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3403 {
3404 struct srej_list *new;
3405 u16 control;
3406
3407 while (tx_seq != chan->expected_tx_seq) {
3408 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3409 control |= __set_reqseq(chan, chan->expected_tx_seq);
3410 l2cap_send_sframe(chan, control);
3411
3412 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3413 new->tx_seq = chan->expected_tx_seq;
3414 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3415 list_add_tail(&new->list, &chan->srej_l);
3416 }
3417 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3418 }
3419
3420 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3421 {
3422 u16 tx_seq = __get_txseq(chan, rx_control);
3423 u16 req_seq = __get_reqseq(chan, rx_control);
3424 u8 sar = __get_ctrl_sar(chan, rx_control);
3425 int tx_seq_offset, expected_tx_seq_offset;
3426 int num_to_ack = (chan->tx_win/6) + 1;
3427 int err = 0;
3428
3429 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3430 tx_seq, rx_control);
3431
3432 if (__is_ctrl_final(chan, rx_control) &&
3433 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3434 __clear_monitor_timer(chan);
3435 if (chan->unacked_frames > 0)
3436 __set_retrans_timer(chan);
3437 clear_bit(CONN_WAIT_F, &chan->conn_state);
3438 }
3439
3440 chan->expected_ack_seq = req_seq;
3441 l2cap_drop_acked_frames(chan);
3442
3443 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3444 if (tx_seq_offset < 0)
3445 tx_seq_offset += 64;
3446
3447 /* invalid tx_seq */
3448 if (tx_seq_offset >= chan->tx_win) {
3449 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3450 goto drop;
3451 }
3452
3453 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3454 goto drop;
3455
3456 if (tx_seq == chan->expected_tx_seq)
3457 goto expected;
3458
3459 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3460 struct srej_list *first;
3461
3462 first = list_first_entry(&chan->srej_l,
3463 struct srej_list, list);
3464 if (tx_seq == first->tx_seq) {
3465 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3466 l2cap_check_srej_gap(chan, tx_seq);
3467
3468 list_del(&first->list);
3469 kfree(first);
3470
3471 if (list_empty(&chan->srej_l)) {
3472 chan->buffer_seq = chan->buffer_seq_srej;
3473 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3474 l2cap_send_ack(chan);
3475 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3476 }
3477 } else {
3478 struct srej_list *l;
3479
3480 /* duplicated tx_seq */
3481 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3482 goto drop;
3483
3484 list_for_each_entry(l, &chan->srej_l, list) {
3485 if (l->tx_seq == tx_seq) {
3486 l2cap_resend_srejframe(chan, tx_seq);
3487 return 0;
3488 }
3489 }
3490 l2cap_send_srejframe(chan, tx_seq);
3491 }
3492 } else {
3493 expected_tx_seq_offset =
3494 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3495 if (expected_tx_seq_offset < 0)
3496 expected_tx_seq_offset += 64;
3497
3498 /* duplicated tx_seq */
3499 if (tx_seq_offset < expected_tx_seq_offset)
3500 goto drop;
3501
3502 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3503
3504 BT_DBG("chan %p, Enter SREJ", chan);
3505
3506 INIT_LIST_HEAD(&chan->srej_l);
3507 chan->buffer_seq_srej = chan->buffer_seq;
3508
3509 __skb_queue_head_init(&chan->srej_q);
3510 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3511
3512 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3513
3514 l2cap_send_srejframe(chan, tx_seq);
3515
3516 __clear_ack_timer(chan);
3517 }
3518 return 0;
3519
3520 expected:
3521 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3522
3523 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3524 bt_cb(skb)->tx_seq = tx_seq;
3525 bt_cb(skb)->sar = sar;
3526 __skb_queue_tail(&chan->srej_q, skb);
3527 return 0;
3528 }
3529
3530 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3531 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3532 if (err < 0) {
3533 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3534 return err;
3535 }
3536
3537 if (__is_ctrl_final(chan, rx_control)) {
3538 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3539 l2cap_retransmit_frames(chan);
3540 }
3541
3542 __set_ack_timer(chan);
3543
3544 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3545 if (chan->num_acked == num_to_ack - 1)
3546 l2cap_send_ack(chan);
3547
3548 return 0;
3549
3550 drop:
3551 kfree_skb(skb);
3552 return 0;
3553 }
3554
3555 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3556 {
3557 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan,
3558 __get_reqseq(chan, rx_control), rx_control);
3559
3560 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3561 l2cap_drop_acked_frames(chan);
3562
3563 if (__is_ctrl_poll(chan, rx_control)) {
3564 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3565 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3566 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3567 (chan->unacked_frames > 0))
3568 __set_retrans_timer(chan);
3569
3570 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3571 l2cap_send_srejtail(chan);
3572 } else {
3573 l2cap_send_i_or_rr_or_rnr(chan);
3574 }
3575
3576 } else if (__is_ctrl_final(chan, rx_control)) {
3577 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3578
3579 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3580 l2cap_retransmit_frames(chan);
3581
3582 } else {
3583 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3584 (chan->unacked_frames > 0))
3585 __set_retrans_timer(chan);
3586
3587 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3588 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3589 l2cap_send_ack(chan);
3590 else
3591 l2cap_ertm_send(chan);
3592 }
3593 }
3594
3595 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3596 {
3597 u16 tx_seq = __get_reqseq(chan, rx_control);
3598
3599 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3600
3601 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3602
3603 chan->expected_ack_seq = tx_seq;
3604 l2cap_drop_acked_frames(chan);
3605
3606 if (__is_ctrl_final(chan, rx_control)) {
3607 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3608 l2cap_retransmit_frames(chan);
3609 } else {
3610 l2cap_retransmit_frames(chan);
3611
3612 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3613 set_bit(CONN_REJ_ACT, &chan->conn_state);
3614 }
3615 }
3616 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3617 {
3618 u16 tx_seq = __get_reqseq(chan, rx_control);
3619
3620 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3621
3622 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3623
3624 if (__is_ctrl_poll(chan, rx_control)) {
3625 chan->expected_ack_seq = tx_seq;
3626 l2cap_drop_acked_frames(chan);
3627
3628 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3629 l2cap_retransmit_one_frame(chan, tx_seq);
3630
3631 l2cap_ertm_send(chan);
3632
3633 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3634 chan->srej_save_reqseq = tx_seq;
3635 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3636 }
3637 } else if (__is_ctrl_final(chan, rx_control)) {
3638 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3639 chan->srej_save_reqseq == tx_seq)
3640 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3641 else
3642 l2cap_retransmit_one_frame(chan, tx_seq);
3643 } else {
3644 l2cap_retransmit_one_frame(chan, tx_seq);
3645 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3646 chan->srej_save_reqseq = tx_seq;
3647 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3648 }
3649 }
3650 }
3651
3652 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3653 {
3654 u16 tx_seq = __get_reqseq(chan, rx_control);
3655
3656 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3657
3658 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3659 chan->expected_ack_seq = tx_seq;
3660 l2cap_drop_acked_frames(chan);
3661
3662 if (__is_ctrl_poll(chan, rx_control))
3663 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3664
3665 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3666 __clear_retrans_timer(chan);
3667 if (__is_ctrl_poll(chan, rx_control))
3668 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3669 return;
3670 }
3671
3672 if (__is_ctrl_poll(chan, rx_control)) {
3673 l2cap_send_srejtail(chan);
3674 } else {
3675 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
3676 l2cap_send_sframe(chan, rx_control);
3677 }
3678 }
3679
3680 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3681 {
3682 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3683
3684 if (__is_ctrl_final(chan, rx_control) &&
3685 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3686 __clear_monitor_timer(chan);
3687 if (chan->unacked_frames > 0)
3688 __set_retrans_timer(chan);
3689 clear_bit(CONN_WAIT_F, &chan->conn_state);
3690 }
3691
3692 switch (__get_ctrl_super(chan, rx_control)) {
3693 case L2CAP_SUPER_RR:
3694 l2cap_data_channel_rrframe(chan, rx_control);
3695 break;
3696
3697 case L2CAP_SUPER_REJ:
3698 l2cap_data_channel_rejframe(chan, rx_control);
3699 break;
3700
3701 case L2CAP_SUPER_SREJ:
3702 l2cap_data_channel_srejframe(chan, rx_control);
3703 break;
3704
3705 case L2CAP_SUPER_RNR:
3706 l2cap_data_channel_rnrframe(chan, rx_control);
3707 break;
3708 }
3709
3710 kfree_skb(skb);
3711 return 0;
3712 }
3713
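/*
 * Entry point for ERTM data: strip the 16-bit control field, verify
 * the FCS, make sure req_seq does not acknowledge frames beyond
 * next_tx_seq, and hand the frame to the I-frame or S-frame handler.
 */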
3714 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3715 {
3716 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3717 u16 control;
3718 u16 req_seq;
3719 int len, next_tx_seq_offset, req_seq_offset;
3720
3721 control = get_unaligned_le16(skb->data);
3722 skb_pull(skb, 2);
3723 len = skb->len;
3724
3725 /*
3726 * We can just drop the corrupted I-frame here.
3727 * Receiver will miss it and start proper recovery
3728 * procedures and ask retransmission.
3729 */
3730 if (l2cap_check_fcs(chan, skb))
3731 goto drop;
3732
3733 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
3734 len -= 2;
3735
3736 if (chan->fcs == L2CAP_FCS_CRC16)
3737 len -= 2;
3738
3739 if (len > chan->mps) {
3740 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3741 goto drop;
3742 }
3743
3744 req_seq = __get_reqseq(chan, control);
3745 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3746 if (req_seq_offset < 0)
3747 req_seq_offset += 64;
3748
3749 next_tx_seq_offset =
3750 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3751 if (next_tx_seq_offset < 0)
3752 next_tx_seq_offset += 64;
3753
3754 /* check for invalid req-seq */
3755 if (req_seq_offset > next_tx_seq_offset) {
3756 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3757 goto drop;
3758 }
3759
3760 if (!__is_sframe(chan, control)) {
3761 if (len < 0) {
3762 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3763 goto drop;
3764 }
3765
3766 l2cap_data_channel_iframe(chan, control, skb);
3767 } else {
3768 if (len != 0) {
3769 BT_ERR("Invalid S-frame payload length %d", len);
3770 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3771 goto drop;
3772 }
3773
3774 l2cap_data_channel_sframe(chan, control, skb);
3775 }
3776
3777 return 0;
3778
3779 drop:
3780 kfree_skb(skb);
3781 return 0;
3782 }
3783
3784 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3785 {
3786 struct l2cap_chan *chan;
3787 struct sock *sk = NULL;
3788 u16 control;
3789 u16 tx_seq;
3790 int len;
3791
3792 chan = l2cap_get_chan_by_scid(conn, cid);
3793 if (!chan) {
3794 BT_DBG("unknown cid 0x%4.4x", cid);
3795 goto drop;
3796 }
3797
3798 sk = chan->sk;
3799
3800 BT_DBG("chan %p, len %d", chan, skb->len);
3801
3802 if (chan->state != BT_CONNECTED)
3803 goto drop;
3804
3805 switch (chan->mode) {
3806 case L2CAP_MODE_BASIC:
3807 /* If socket recv buffers overflows we drop data here
3808 * which is *bad* because L2CAP has to be reliable.
3809 * But we don't have any other choice. L2CAP doesn't
3810 * provide flow control mechanism. */
3811
3812 if (chan->imtu < skb->len)
3813 goto drop;
3814
3815 if (!chan->ops->recv(chan->data, skb))
3816 goto done;
3817 break;
3818
3819 case L2CAP_MODE_ERTM:
3820 if (!sock_owned_by_user(sk)) {
3821 l2cap_ertm_data_rcv(sk, skb);
3822 } else {
3823 if (sk_add_backlog(sk, skb))
3824 goto drop;
3825 }
3826
3827 goto done;
3828
3829 case L2CAP_MODE_STREAMING:
3830 control = get_unaligned_le16(skb->data);
3831 skb_pull(skb, 2);
3832 len = skb->len;
3833
3834 if (l2cap_check_fcs(chan, skb))
3835 goto drop;
3836
3837 if (__is_sar_start(chan, control))
3838 len -= 2;
3839
3840 if (chan->fcs == L2CAP_FCS_CRC16)
3841 len -= 2;
3842
3843 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
3844 goto drop;
3845
3846 tx_seq = __get_txseq(chan, control);
3847
3848 if (chan->expected_tx_seq != tx_seq) {
3849 /* Frame(s) missing - must discard partial SDU */
3850 kfree_skb(chan->sdu);
3851 chan->sdu = NULL;
3852 chan->sdu_last_frag = NULL;
3853 chan->sdu_len = 0;
3854
3855 /* TODO: Notify userland of missing data */
3856 }
3857
3858 chan->expected_tx_seq = (tx_seq + 1) % 64;
3859
3860 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
3861 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3862
3863 goto done;
3864
3865 default:
3866 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
3867 break;
3868 }
3869
3870 drop:
3871 kfree_skb(skb);
3872
3873 done:
3874 if (sk)
3875 bh_unlock_sock(sk);
3876
3877 return 0;
3878 }
3879
3880 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3881 {
3882 struct sock *sk = NULL;
3883 struct l2cap_chan *chan;
3884
3885 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3886 if (!chan)
3887 goto drop;
3888
3889 sk = chan->sk;
3890
3891 bh_lock_sock(sk);
3892
3893 BT_DBG("sk %p, len %d", sk, skb->len);
3894
3895 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3896 goto drop;
3897
3898 if (chan->imtu < skb->len)
3899 goto drop;
3900
3901 if (!chan->ops->recv(chan->data, skb))
3902 goto done;
3903
3904 drop:
3905 kfree_skb(skb);
3906
3907 done:
3908 if (sk)
3909 bh_unlock_sock(sk);
3910 return 0;
3911 }
3912
3913 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3914 {
3915 struct sock *sk = NULL;
3916 struct l2cap_chan *chan;
3917
3918 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3919 if (!chan)
3920 goto drop;
3921
3922 sk = chan->sk;
3923
3924 bh_lock_sock(sk);
3925
3926 BT_DBG("sk %p, len %d", sk, skb->len);
3927
3928 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3929 goto drop;
3930
3931 if (chan->imtu < skb->len)
3932 goto drop;
3933
3934 if (!chan->ops->recv(chan->data, skb))
3935 goto done;
3936
3937 drop:
3938 kfree_skb(skb);
3939
3940 done:
3941 if (sk)
3942 bh_unlock_sock(sk);
3943 return 0;
3944 }
3945
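/*
 * Demultiplex a complete L2CAP frame by CID: signalling commands,
 * connectionless (PSM-prefixed) data, LE attribute and SMP traffic go
 * to their fixed channels, everything else is treated as
 * connection-oriented channel data.
 */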
3946 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3947 {
3948 struct l2cap_hdr *lh = (void *) skb->data;
3949 u16 cid, len;
3950 __le16 psm;
3951
3952 skb_pull(skb, L2CAP_HDR_SIZE);
3953 cid = __le16_to_cpu(lh->cid);
3954 len = __le16_to_cpu(lh->len);
3955
3956 if (len != skb->len) {
3957 kfree_skb(skb);
3958 return;
3959 }
3960
3961 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3962
3963 switch (cid) {
3964 case L2CAP_CID_LE_SIGNALING:
3965 case L2CAP_CID_SIGNALING:
3966 l2cap_sig_channel(conn, skb);
3967 break;
3968
3969 case L2CAP_CID_CONN_LESS:
3970 psm = get_unaligned_le16(skb->data);
3971 skb_pull(skb, 2);
3972 l2cap_conless_channel(conn, psm, skb);
3973 break;
3974
3975 case L2CAP_CID_LE_DATA:
3976 l2cap_att_channel(conn, cid, skb);
3977 break;
3978
3979 case L2CAP_CID_SMP:
3980 if (smp_sig_channel(conn, skb))
3981 l2cap_conn_del(conn->hcon, EACCES);
3982 break;
3983
3984 default:
3985 l2cap_data_channel(conn, cid, skb);
3986 break;
3987 }
3988 }
3989
3990 /* ---- L2CAP interface with lower layer (HCI) ---- */
3991
3992 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3993 {
3994 int exact = 0, lm1 = 0, lm2 = 0;
3995 struct l2cap_chan *c;
3996
3997 if (type != ACL_LINK)
3998 return -EINVAL;
3999
4000 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4001
4002 /* Find listening sockets and check their link_mode */
4003 read_lock(&chan_list_lock);
4004 list_for_each_entry(c, &chan_list, global_l) {
4005 struct sock *sk = c->sk;
4006
4007 if (c->state != BT_LISTEN)
4008 continue;
4009
4010 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4011 lm1 |= HCI_LM_ACCEPT;
4012 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4013 lm1 |= HCI_LM_MASTER;
4014 exact++;
4015 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4016 lm2 |= HCI_LM_ACCEPT;
4017 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4018 lm2 |= HCI_LM_MASTER;
4019 }
4020 }
4021 read_unlock(&chan_list_lock);
4022
4023 return exact ? lm1 : lm2;
4024 }
4025
4026 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4027 {
4028 struct l2cap_conn *conn;
4029
4030 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4031
4032 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4033 return -EINVAL;
4034
4035 if (!status) {
4036 conn = l2cap_conn_add(hcon, status);
4037 if (conn)
4038 l2cap_conn_ready(conn);
4039 } else
4040 l2cap_conn_del(hcon, bt_to_errno(status));
4041
4042 return 0;
4043 }
4044
4045 static int l2cap_disconn_ind(struct hci_conn *hcon)
4046 {
4047 struct l2cap_conn *conn = hcon->l2cap_data;
4048
4049 BT_DBG("hcon %p", hcon);
4050
4051 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4052 return 0x13;
4053
4054 return conn->disc_reason;
4055 }
4056
4057 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4058 {
4059 BT_DBG("hcon %p reason %d", hcon, reason);
4060
4061 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4062 return -EINVAL;
4063
4064 l2cap_conn_del(hcon, bt_to_errno(reason));
4065
4066 return 0;
4067 }
4068
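/*
 * React to an encryption change on a connection-oriented channel: when
 * encryption is switched off, a MEDIUM security channel gets a short
 * grace timer and a HIGH security channel is closed immediately; when
 * it is switched on, any pending MEDIUM security timer is cleared.
 */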
4069 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4070 {
4071 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4072 return;
4073
4074 if (encrypt == 0x00) {
4075 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4076 __clear_chan_timer(chan);
4077 __set_chan_timer(chan, HZ * 5);
4078 } else if (chan->sec_level == BT_SECURITY_HIGH)
4079 l2cap_chan_close(chan, ECONNREFUSED);
4080 } else {
4081 if (chan->sec_level == BT_SECURITY_MEDIUM)
4082 __clear_chan_timer(chan);
4083 }
4084 }
4085
4086 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4087 {
4088 struct l2cap_conn *conn = hcon->l2cap_data;
4089 struct l2cap_chan *chan;
4090
4091 if (!conn)
4092 return 0;
4093
4094 BT_DBG("conn %p", conn);
4095
4096 if (hcon->type == LE_LINK) {
4097 smp_distribute_keys(conn, 0);
4098 del_timer(&conn->security_timer);
4099 }
4100
4101 read_lock(&conn->chan_lock);
4102
4103 list_for_each_entry(chan, &conn->chan_l, list) {
4104 struct sock *sk = chan->sk;
4105
4106 bh_lock_sock(sk);
4107
4108 BT_DBG("chan->scid %d", chan->scid);
4109
4110 if (chan->scid == L2CAP_CID_LE_DATA) {
4111 if (!status && encrypt) {
4112 chan->sec_level = hcon->sec_level;
4113 l2cap_chan_ready(sk);
4114 }
4115
4116 bh_unlock_sock(sk);
4117 continue;
4118 }
4119
4120 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4121 bh_unlock_sock(sk);
4122 continue;
4123 }
4124
4125 if (!status && (chan->state == BT_CONNECTED ||
4126 chan->state == BT_CONFIG)) {
4127 l2cap_check_encryption(chan, encrypt);
4128 bh_unlock_sock(sk);
4129 continue;
4130 }
4131
4132 if (chan->state == BT_CONNECT) {
4133 if (!status) {
4134 struct l2cap_conn_req req;
4135 req.scid = cpu_to_le16(chan->scid);
4136 req.psm = chan->psm;
4137
4138 chan->ident = l2cap_get_ident(conn);
4139 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4140
4141 l2cap_send_cmd(conn, chan->ident,
4142 L2CAP_CONN_REQ, sizeof(req), &req);
4143 } else {
4144 __clear_chan_timer(chan);
4145 __set_chan_timer(chan, HZ / 10);
4146 }
4147 } else if (chan->state == BT_CONNECT2) {
4148 struct l2cap_conn_rsp rsp;
4149 __u16 res, stat;
4150
4151 if (!status) {
4152 if (bt_sk(sk)->defer_setup) {
4153 struct sock *parent = bt_sk(sk)->parent;
4154 res = L2CAP_CR_PEND;
4155 stat = L2CAP_CS_AUTHOR_PEND;
4156 if (parent)
4157 parent->sk_data_ready(parent, 0);
4158 } else {
4159 l2cap_state_change(chan, BT_CONFIG);
4160 res = L2CAP_CR_SUCCESS;
4161 stat = L2CAP_CS_NO_INFO;
4162 }
4163 } else {
4164 l2cap_state_change(chan, BT_DISCONN);
4165 __set_chan_timer(chan, HZ / 10);
4166 res = L2CAP_CR_SEC_BLOCK;
4167 stat = L2CAP_CS_NO_INFO;
4168 }
4169
4170 rsp.scid = cpu_to_le16(chan->dcid);
4171 rsp.dcid = cpu_to_le16(chan->scid);
4172 rsp.result = cpu_to_le16(res);
4173 rsp.status = cpu_to_le16(stat);
4174 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4175 sizeof(rsp), &rsp);
4176 }
4177
4178 bh_unlock_sock(sk);
4179 }
4180
4181 read_unlock(&conn->chan_lock);
4182
4183 return 0;
4184 }
4185
4186 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4187 {
4188 struct l2cap_conn *conn = hcon->l2cap_data;
4189
4190 if (!conn)
4191 conn = l2cap_conn_add(hcon, 0);
4192
4193 if (!conn)
4194 goto drop;
4195
4196 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4197
4198 if (!(flags & ACL_CONT)) {
4199 struct l2cap_hdr *hdr;
4200 struct l2cap_chan *chan;
4201 u16 cid;
4202 int len;
4203
4204 if (conn->rx_len) {
4205 BT_ERR("Unexpected start frame (len %d)", skb->len);
4206 kfree_skb(conn->rx_skb);
4207 conn->rx_skb = NULL;
4208 conn->rx_len = 0;
4209 l2cap_conn_unreliable(conn, ECOMM);
4210 }
4211
4212 /* Start fragment always begin with Basic L2CAP header */
4213 if (skb->len < L2CAP_HDR_SIZE) {
4214 BT_ERR("Frame is too short (len %d)", skb->len);
4215 l2cap_conn_unreliable(conn, ECOMM);
4216 goto drop;
4217 }
4218
4219 hdr = (struct l2cap_hdr *) skb->data;
4220 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4221 cid = __le16_to_cpu(hdr->cid);
4222
4223 if (len == skb->len) {
4224 /* Complete frame received */
4225 l2cap_recv_frame(conn, skb);
4226 return 0;
4227 }
4228
4229 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4230
4231 if (skb->len > len) {
4232 BT_ERR("Frame is too long (len %d, expected len %d)",
4233 skb->len, len);
4234 l2cap_conn_unreliable(conn, ECOMM);
4235 goto drop;
4236 }
4237
4238 chan = l2cap_get_chan_by_scid(conn, cid);
4239
4240 if (chan && chan->sk) {
4241 struct sock *sk = chan->sk;
4242
4243 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4244 BT_ERR("Frame exceeding recv MTU (len %d, "
4245 "MTU %d)", len,
4246 chan->imtu);
4247 bh_unlock_sock(sk);
4248 l2cap_conn_unreliable(conn, ECOMM);
4249 goto drop;
4250 }
4251 bh_unlock_sock(sk);
4252 }
4253
4254 /* Allocate skb for the complete frame (with header) */
4255 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4256 if (!conn->rx_skb)
4257 goto drop;
4258
4259 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4260 skb->len);
4261 conn->rx_len = len - skb->len;
4262 } else {
4263 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4264
4265 if (!conn->rx_len) {
4266 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4267 l2cap_conn_unreliable(conn, ECOMM);
4268 goto drop;
4269 }
4270
4271 if (skb->len > conn->rx_len) {
4272 BT_ERR("Fragment is too long (len %d, expected %d)",
4273 skb->len, conn->rx_len);
4274 kfree_skb(conn->rx_skb);
4275 conn->rx_skb = NULL;
4276 conn->rx_len = 0;
4277 l2cap_conn_unreliable(conn, ECOMM);
4278 goto drop;
4279 }
4280
4281 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4282 skb->len);
4283 conn->rx_len -= skb->len;
4284
4285 if (!conn->rx_len) {
4286 /* Complete frame received */
4287 l2cap_recv_frame(conn, conn->rx_skb);
4288 conn->rx_skb = NULL;
4289 }
4290 }
4291
4292 drop:
4293 kfree_skb(skb);
4294 return 0;
4295 }
4296
4297 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4298 {
4299 struct l2cap_chan *c;
4300
4301 read_lock_bh(&chan_list_lock);
4302
4303 list_for_each_entry(c, &chan_list, global_l) {
4304 struct sock *sk = c->sk;
4305
4306 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4307 batostr(&bt_sk(sk)->src),
4308 batostr(&bt_sk(sk)->dst),
4309 c->state, __le16_to_cpu(c->psm),
4310 c->scid, c->dcid, c->imtu, c->omtu,
4311 c->sec_level, c->mode);
4312 }
4313
4314 read_unlock_bh(&chan_list_lock);
4315
4316 return 0;
4317 }
4318
4319 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4320 {
4321 return single_open(file, l2cap_debugfs_show, inode->i_private);
4322 }
4323
4324 static const struct file_operations l2cap_debugfs_fops = {
4325 .open = l2cap_debugfs_open,
4326 .read = seq_read,
4327 .llseek = seq_lseek,
4328 .release = single_release,
4329 };
4330
4331 static struct dentry *l2cap_debugfs;
4332
4333 static struct hci_proto l2cap_hci_proto = {
4334 .name = "L2CAP",
4335 .id = HCI_PROTO_L2CAP,
4336 .connect_ind = l2cap_connect_ind,
4337 .connect_cfm = l2cap_connect_cfm,
4338 .disconn_ind = l2cap_disconn_ind,
4339 .disconn_cfm = l2cap_disconn_cfm,
4340 .security_cfm = l2cap_security_cfm,
4341 .recv_acldata = l2cap_recv_acldata
4342 };
4343
4344 int __init l2cap_init(void)
4345 {
4346 int err;
4347
4348 err = l2cap_init_sockets();
4349 if (err < 0)
4350 return err;
4351
4352 err = hci_register_proto(&l2cap_hci_proto);
4353 if (err < 0) {
4354 BT_ERR("L2CAP protocol registration failed");
4355 bt_sock_unregister(BTPROTO_L2CAP);
4356 goto error;
4357 }
4358
4359 if (bt_debugfs) {
4360 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4361 bt_debugfs, NULL, &l2cap_debugfs_fops);
4362 if (!l2cap_debugfs)
4363 BT_ERR("Failed to create L2CAP debug file");
4364 }
4365
4366 return 0;
4367
4368 error:
4369 l2cap_cleanup_sockets();
4370 return err;
4371 }
4372
4373 void l2cap_exit(void)
4374 {
4375 debugfs_remove(l2cap_debugfs);
4376
4377 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4378 BT_ERR("L2CAP protocol unregistration failed");
4379
4380 l2cap_cleanup_sockets();
4381 }
4382
4383 module_param(disable_ertm, bool, 0644);
4384 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4385
4386 module_param(enable_hs, bool, 0644);
4387 MODULE_PARM_DESC(enable_hs, "Enable High Speed");