net/bluetooth/l2cap_core.c
1 /*
2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
7
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
13
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
26 */
27
28 /* Bluetooth L2CAP core. */
29
30 #include <linux/module.h>
31
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
50 #include <net/sock.h>
51
52 #include <asm/unaligned.h>
53
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
58
59 bool disable_ertm;
60
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
63
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
66
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
70 void *data);
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
74
75 /* ---- L2CAP channels ---- */
76
77 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
78 {
79 struct l2cap_chan *c;
80
81 list_for_each_entry(c, &conn->chan_l, list) {
82 if (c->dcid == cid)
83 return c;
84 }
85 return NULL;
86 }
87
88 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
89 {
90 struct l2cap_chan *c;
91
92 list_for_each_entry(c, &conn->chan_l, list) {
93 if (c->scid == cid)
94 return c;
95 }
96 return NULL;
97 }
98
99 /* Find channel with given SCID.
 100 * Returns the matching channel, if any */
101 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
102 {
103 struct l2cap_chan *c;
104
105 mutex_lock(&conn->chan_lock);
106 c = __l2cap_get_chan_by_scid(conn, cid);
107 mutex_unlock(&conn->chan_lock);
108
109 return c;
110 }
111
112 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
113 {
114 struct l2cap_chan *c;
115
116 list_for_each_entry(c, &conn->chan_l, list) {
117 if (c->ident == ident)
118 return c;
119 }
120 return NULL;
121 }
122
123 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
124 {
125 struct l2cap_chan *c;
126
127 mutex_lock(&conn->chan_lock);
128 c = __l2cap_get_chan_by_ident(conn, ident);
129 mutex_unlock(&conn->chan_lock);
130
131 return c;
132 }
133
134 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
135 {
136 struct l2cap_chan *c;
137
138 list_for_each_entry(c, &chan_list, global_l) {
139 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
140 return c;
141 }
142 return NULL;
143 }
144
145 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
146 {
147 int err;
148
149 write_lock(&chan_list_lock);
150
151 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
152 err = -EADDRINUSE;
153 goto done;
154 }
155
156 if (psm) {
157 chan->psm = psm;
158 chan->sport = psm;
159 err = 0;
160 } else {
161 u16 p;
162
163 err = -EINVAL;
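		/* Dynamically assign a PSM from the 0x1001-0x10ff range;
		 * valid PSMs are odd with an even upper byte, so step by 2. */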
164 for (p = 0x1001; p < 0x1100; p += 2)
165 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
166 chan->psm = cpu_to_le16(p);
167 chan->sport = cpu_to_le16(p);
168 err = 0;
169 break;
170 }
171 }
172
173 done:
174 write_unlock(&chan_list_lock);
175 return err;
176 }
177
178 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
179 {
180 write_lock(&chan_list_lock);
181
182 chan->scid = scid;
183
184 write_unlock(&chan_list_lock);
185
186 return 0;
187 }
188
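/* Pick the first dynamic CID on this connection that is not already in
 * use as a source CID. Returns 0 if the dynamic range is exhausted. */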
189 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
190 {
191 u16 cid = L2CAP_CID_DYN_START;
192
193 for (; cid < L2CAP_CID_DYN_END; cid++) {
194 if (!__l2cap_get_chan_by_scid(conn, cid))
195 return cid;
196 }
197
198 return 0;
199 }
200
201 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
202 {
203 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
204 state_to_string(state));
205
206 chan->state = state;
207 chan->ops->state_change(chan->data, state);
208 }
209
210 static void l2cap_state_change(struct l2cap_chan *chan, int state)
211 {
212 struct sock *sk = chan->sk;
213
214 lock_sock(sk);
215 __l2cap_state_change(chan, state);
216 release_sock(sk);
217 }
218
219 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
220 {
221 struct sock *sk = chan->sk;
222
223 sk->sk_err = err;
224 }
225
226 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
227 {
228 struct sock *sk = chan->sk;
229
230 lock_sock(sk);
231 __l2cap_chan_set_err(chan, err);
232 release_sock(sk);
233 }
234
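/* Delayed work run when the channel timer expires: close the channel
 * with a reason derived from its current state, then drop the timer's
 * reference on the channel. */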
235 static void l2cap_chan_timeout(struct work_struct *work)
236 {
237 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
238 chan_timer.work);
239 struct l2cap_conn *conn = chan->conn;
240 int reason;
241
242 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
243
244 mutex_lock(&conn->chan_lock);
245 l2cap_chan_lock(chan);
246
247 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
248 reason = ECONNREFUSED;
249 else if (chan->state == BT_CONNECT &&
250 chan->sec_level != BT_SECURITY_SDP)
251 reason = ECONNREFUSED;
252 else
253 reason = ETIMEDOUT;
254
255 l2cap_chan_close(chan, reason);
256
257 l2cap_chan_unlock(chan);
258
259 chan->ops->close(chan->data);
260 mutex_unlock(&conn->chan_lock);
261
262 l2cap_chan_put(chan);
263 }
264
265 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
266 {
267 struct l2cap_chan *chan;
268
269 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
270 if (!chan)
271 return NULL;
272
273 mutex_init(&chan->lock);
274
275 chan->sk = sk;
276
277 write_lock(&chan_list_lock);
278 list_add(&chan->global_l, &chan_list);
279 write_unlock(&chan_list_lock);
280
281 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
282
283 chan->state = BT_OPEN;
284
285 atomic_set(&chan->refcnt, 1);
286
287 BT_DBG("sk %p chan %p", sk, chan);
288
289 return chan;
290 }
291
292 void l2cap_chan_destroy(struct l2cap_chan *chan)
293 {
294 write_lock(&chan_list_lock);
295 list_del(&chan->global_l);
296 write_unlock(&chan_list_lock);
297
298 l2cap_chan_put(chan);
299 }
300
301 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
302 {
303 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
304 chan->psm, chan->dcid);
305
306 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
307
308 chan->conn = conn;
309
310 switch (chan->chan_type) {
311 case L2CAP_CHAN_CONN_ORIENTED:
312 if (conn->hcon->type == LE_LINK) {
313 /* LE connection */
314 chan->omtu = L2CAP_LE_DEFAULT_MTU;
315 chan->scid = L2CAP_CID_LE_DATA;
316 chan->dcid = L2CAP_CID_LE_DATA;
317 } else {
318 /* Alloc CID for connection-oriented socket */
319 chan->scid = l2cap_alloc_cid(conn);
320 chan->omtu = L2CAP_DEFAULT_MTU;
321 }
322 break;
323
324 case L2CAP_CHAN_CONN_LESS:
325 /* Connectionless socket */
326 chan->scid = L2CAP_CID_CONN_LESS;
327 chan->dcid = L2CAP_CID_CONN_LESS;
328 chan->omtu = L2CAP_DEFAULT_MTU;
329 break;
330
331 default:
332 /* Raw socket can send/recv signalling messages only */
333 chan->scid = L2CAP_CID_SIGNALING;
334 chan->dcid = L2CAP_CID_SIGNALING;
335 chan->omtu = L2CAP_DEFAULT_MTU;
336 }
337
338 chan->local_id = L2CAP_BESTEFFORT_ID;
339 chan->local_stype = L2CAP_SERV_BESTEFFORT;
340 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
341 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
342 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
343 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
344
345 l2cap_chan_hold(chan);
346
347 list_add(&chan->list, &conn->chan_l);
348 }
349
350 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
351 {
352 mutex_lock(&conn->chan_lock);
353 __l2cap_chan_add(conn, chan);
354 mutex_unlock(&conn->chan_lock);
355 }
356
357 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
358 {
359 struct sock *sk = chan->sk;
360 struct l2cap_conn *conn = chan->conn;
361 struct sock *parent = bt_sk(sk)->parent;
362
363 __clear_chan_timer(chan);
364
365 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
366
367 if (conn) {
368 /* Delete from channel list */
369 list_del(&chan->list);
370
371 l2cap_chan_put(chan);
372
373 chan->conn = NULL;
374 hci_conn_put(conn->hcon);
375 }
376
377 lock_sock(sk);
378
379 __l2cap_state_change(chan, BT_CLOSED);
380 sock_set_flag(sk, SOCK_ZAPPED);
381
382 if (err)
383 __l2cap_chan_set_err(chan, err);
384
385 if (parent) {
386 bt_accept_unlink(sk);
387 parent->sk_data_ready(parent, 0);
388 } else
389 sk->sk_state_change(sk);
390
391 release_sock(sk);
392
393 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
394 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
395 return;
396
397 skb_queue_purge(&chan->tx_q);
398
399 if (chan->mode == L2CAP_MODE_ERTM) {
400 struct srej_list *l, *tmp;
401
402 __clear_retrans_timer(chan);
403 __clear_monitor_timer(chan);
404 __clear_ack_timer(chan);
405
406 skb_queue_purge(&chan->srej_q);
407
408 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
409 list_del(&l->list);
410 kfree(l);
411 }
412 }
413 }
414
415 static void l2cap_chan_cleanup_listen(struct sock *parent)
416 {
417 struct sock *sk;
418
419 BT_DBG("parent %p", parent);
420
421 /* Close not yet accepted channels */
422 while ((sk = bt_accept_dequeue(parent, NULL))) {
423 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
424
425 l2cap_chan_lock(chan);
426 __clear_chan_timer(chan);
427 l2cap_chan_close(chan, ECONNRESET);
428 l2cap_chan_unlock(chan);
429
430 chan->ops->close(chan->data);
431 }
432 }
433
434 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
435 {
436 struct l2cap_conn *conn = chan->conn;
437 struct sock *sk = chan->sk;
438
439 BT_DBG("chan %p state %s sk %p", chan,
440 state_to_string(chan->state), sk);
441
442 switch (chan->state) {
443 case BT_LISTEN:
444 lock_sock(sk);
445 l2cap_chan_cleanup_listen(sk);
446
447 __l2cap_state_change(chan, BT_CLOSED);
448 sock_set_flag(sk, SOCK_ZAPPED);
449 release_sock(sk);
450 break;
451
452 case BT_CONNECTED:
453 case BT_CONFIG:
454 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
455 conn->hcon->type == ACL_LINK) {
456 __clear_chan_timer(chan);
457 __set_chan_timer(chan, sk->sk_sndtimeo);
458 l2cap_send_disconn_req(conn, chan, reason);
459 } else
460 l2cap_chan_del(chan, reason);
461 break;
462
463 case BT_CONNECT2:
464 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
465 conn->hcon->type == ACL_LINK) {
466 struct l2cap_conn_rsp rsp;
467 __u16 result;
468
469 if (bt_sk(sk)->defer_setup)
470 result = L2CAP_CR_SEC_BLOCK;
471 else
472 result = L2CAP_CR_BAD_PSM;
473 l2cap_state_change(chan, BT_DISCONN);
474
475 rsp.scid = cpu_to_le16(chan->dcid);
476 rsp.dcid = cpu_to_le16(chan->scid);
477 rsp.result = cpu_to_le16(result);
478 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
479 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
480 sizeof(rsp), &rsp);
481 }
482
483 l2cap_chan_del(chan, reason);
484 break;
485
486 case BT_CONNECT:
487 case BT_DISCONN:
488 l2cap_chan_del(chan, reason);
489 break;
490
491 default:
492 lock_sock(sk);
493 sock_set_flag(sk, SOCK_ZAPPED);
494 release_sock(sk);
495 break;
496 }
497 }
498
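/* Map the channel type, PSM and requested security level to the HCI
 * authentication requirements used when securing the ACL link. */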
499 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
500 {
501 if (chan->chan_type == L2CAP_CHAN_RAW) {
502 switch (chan->sec_level) {
503 case BT_SECURITY_HIGH:
504 return HCI_AT_DEDICATED_BONDING_MITM;
505 case BT_SECURITY_MEDIUM:
506 return HCI_AT_DEDICATED_BONDING;
507 default:
508 return HCI_AT_NO_BONDING;
509 }
510 } else if (chan->psm == cpu_to_le16(0x0001)) {
511 if (chan->sec_level == BT_SECURITY_LOW)
512 chan->sec_level = BT_SECURITY_SDP;
513
514 if (chan->sec_level == BT_SECURITY_HIGH)
515 return HCI_AT_NO_BONDING_MITM;
516 else
517 return HCI_AT_NO_BONDING;
518 } else {
519 switch (chan->sec_level) {
520 case BT_SECURITY_HIGH:
521 return HCI_AT_GENERAL_BONDING_MITM;
522 case BT_SECURITY_MEDIUM:
523 return HCI_AT_GENERAL_BONDING;
524 default:
525 return HCI_AT_NO_BONDING;
526 }
527 }
528 }
529
530 /* Service level security */
531 int l2cap_chan_check_security(struct l2cap_chan *chan)
532 {
533 struct l2cap_conn *conn = chan->conn;
534 __u8 auth_type;
535
536 auth_type = l2cap_get_auth_type(chan);
537
538 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
539 }
540
541 static u8 l2cap_get_ident(struct l2cap_conn *conn)
542 {
543 u8 id;
544
 545 /* Get next available identifier.
546 * 1 - 128 are used by kernel.
547 * 129 - 199 are reserved.
548 * 200 - 254 are used by utilities like l2ping, etc.
549 */
550
551 spin_lock(&conn->lock);
552
553 if (++conn->tx_ident > 128)
554 conn->tx_ident = 1;
555
556 id = conn->tx_ident;
557
558 spin_unlock(&conn->lock);
559
560 return id;
561 }
562
563 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
564 {
565 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
566 u8 flags;
567
568 BT_DBG("code 0x%2.2x", code);
569
570 if (!skb)
571 return;
572
573 if (lmp_no_flush_capable(conn->hcon->hdev))
574 flags = ACL_START_NO_FLUSH;
575 else
576 flags = ACL_START;
577
578 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
579 skb->priority = HCI_PRIO_MAX;
580
581 hci_send_acl(conn->hchan, skb, flags);
582 }
583
584 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
585 {
586 struct hci_conn *hcon = chan->conn->hcon;
587 u16 flags;
588
589 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
590 skb->priority);
591
592 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
593 lmp_no_flush_capable(hcon->hdev))
594 flags = ACL_START_NO_FLUSH;
595 else
596 flags = ACL_START;
597
598 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
599 hci_send_acl(chan->conn->hchan, skb, flags);
600 }
601
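/* Build and transmit an S-frame (supervisory frame) carrying the given
 * control field, adding the F/P bits and the FCS as configured. */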
602 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
603 {
604 struct sk_buff *skb;
605 struct l2cap_hdr *lh;
606 struct l2cap_conn *conn = chan->conn;
607 int count, hlen;
608
609 if (chan->state != BT_CONNECTED)
610 return;
611
612 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
613 hlen = L2CAP_EXT_HDR_SIZE;
614 else
615 hlen = L2CAP_ENH_HDR_SIZE;
616
617 if (chan->fcs == L2CAP_FCS_CRC16)
618 hlen += L2CAP_FCS_SIZE;
619
620 BT_DBG("chan %p, control 0x%8.8x", chan, control);
621
622 count = min_t(unsigned int, conn->mtu, hlen);
623
624 control |= __set_sframe(chan);
625
626 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
627 control |= __set_ctrl_final(chan);
628
629 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
630 control |= __set_ctrl_poll(chan);
631
632 skb = bt_skb_alloc(count, GFP_ATOMIC);
633 if (!skb)
634 return;
635
636 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
637 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
638 lh->cid = cpu_to_le16(chan->dcid);
639
640 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
641
642 if (chan->fcs == L2CAP_FCS_CRC16) {
643 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
644 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
645 }
646
647 skb->priority = HCI_PRIO_MAX;
648 l2cap_do_send(chan, skb);
649 }
650
651 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
652 {
653 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
654 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
655 set_bit(CONN_RNR_SENT, &chan->conn_state);
656 } else
657 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
658
659 control |= __set_reqseq(chan, chan->buffer_seq);
660
661 l2cap_send_sframe(chan, control);
662 }
663
664 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
665 {
666 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
667 }
668
669 static void l2cap_send_conn_req(struct l2cap_chan *chan)
670 {
671 struct l2cap_conn *conn = chan->conn;
672 struct l2cap_conn_req req;
673
674 req.scid = cpu_to_le16(chan->scid);
675 req.psm = chan->psm;
676
677 chan->ident = l2cap_get_ident(conn);
678
679 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
680
681 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
682 }
683
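/* Start channel establishment: send a Connection Request once the
 * remote feature mask is known, otherwise query it first with an
 * Information Request. */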
684 static void l2cap_do_start(struct l2cap_chan *chan)
685 {
686 struct l2cap_conn *conn = chan->conn;
687
688 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
689 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
690 return;
691
692 if (l2cap_chan_check_security(chan) &&
693 __l2cap_no_conn_pending(chan))
694 l2cap_send_conn_req(chan);
695 } else {
696 struct l2cap_info_req req;
697 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
698
699 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
700 conn->info_ident = l2cap_get_ident(conn);
701
702 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
703
704 l2cap_send_cmd(conn, conn->info_ident,
705 L2CAP_INFO_REQ, sizeof(req), &req);
706 }
707 }
708
709 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
710 {
711 u32 local_feat_mask = l2cap_feat_mask;
712 if (!disable_ertm)
713 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
714
715 switch (mode) {
716 case L2CAP_MODE_ERTM:
717 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
718 case L2CAP_MODE_STREAMING:
719 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
720 default:
721 return 0x00;
722 }
723 }
724
725 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
726 {
727 struct sock *sk = chan->sk;
728 struct l2cap_disconn_req req;
729
730 if (!conn)
731 return;
732
733 if (chan->mode == L2CAP_MODE_ERTM) {
734 __clear_retrans_timer(chan);
735 __clear_monitor_timer(chan);
736 __clear_ack_timer(chan);
737 }
738
739 req.dcid = cpu_to_le16(chan->dcid);
740 req.scid = cpu_to_le16(chan->scid);
741 l2cap_send_cmd(conn, l2cap_get_ident(conn),
742 L2CAP_DISCONN_REQ, sizeof(req), &req);
743
744 lock_sock(sk);
745 __l2cap_state_change(chan, BT_DISCONN);
746 __l2cap_chan_set_err(chan, err);
747 release_sock(sk);
748 }
749
750 /* ---- L2CAP connections ---- */
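/* Walk every channel on the connection and move connection setup
 * forward: send Connection Requests for channels in BT_CONNECT and
 * Connection Responses for those in BT_CONNECT2. */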
751 static void l2cap_conn_start(struct l2cap_conn *conn)
752 {
753 struct l2cap_chan *chan, *tmp;
754
755 BT_DBG("conn %p", conn);
756
757 mutex_lock(&conn->chan_lock);
758
759 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
760 struct sock *sk = chan->sk;
761
762 l2cap_chan_lock(chan);
763
764 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
765 l2cap_chan_unlock(chan);
766 continue;
767 }
768
769 if (chan->state == BT_CONNECT) {
770 if (!l2cap_chan_check_security(chan) ||
771 !__l2cap_no_conn_pending(chan)) {
772 l2cap_chan_unlock(chan);
773 continue;
774 }
775
776 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
777 && test_bit(CONF_STATE2_DEVICE,
778 &chan->conf_state)) {
779 l2cap_chan_close(chan, ECONNRESET);
780 l2cap_chan_unlock(chan);
781 continue;
782 }
783
784 l2cap_send_conn_req(chan);
785
786 } else if (chan->state == BT_CONNECT2) {
787 struct l2cap_conn_rsp rsp;
788 char buf[128];
789 rsp.scid = cpu_to_le16(chan->dcid);
790 rsp.dcid = cpu_to_le16(chan->scid);
791
792 if (l2cap_chan_check_security(chan)) {
793 lock_sock(sk);
794 if (bt_sk(sk)->defer_setup) {
795 struct sock *parent = bt_sk(sk)->parent;
796 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
797 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
798 if (parent)
799 parent->sk_data_ready(parent, 0);
800
801 } else {
802 __l2cap_state_change(chan, BT_CONFIG);
803 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
804 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
805 }
806 release_sock(sk);
807 } else {
808 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
809 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
810 }
811
812 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
813 sizeof(rsp), &rsp);
814
815 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
816 rsp.result != L2CAP_CR_SUCCESS) {
817 l2cap_chan_unlock(chan);
818 continue;
819 }
820
821 set_bit(CONF_REQ_SENT, &chan->conf_state);
822 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
823 l2cap_build_conf_req(chan, buf), buf);
824 chan->num_conf_req++;
825 }
826
827 l2cap_chan_unlock(chan);
828 }
829
830 mutex_unlock(&conn->chan_lock);
831 }
832
 833 /* Find channel with given CID and source bdaddr.
 834 * Returns closest match.
835 */
836 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
837 {
838 struct l2cap_chan *c, *c1 = NULL;
839
840 read_lock(&chan_list_lock);
841
842 list_for_each_entry(c, &chan_list, global_l) {
843 struct sock *sk = c->sk;
844
845 if (state && c->state != state)
846 continue;
847
848 if (c->scid == cid) {
849 /* Exact match. */
850 if (!bacmp(&bt_sk(sk)->src, src)) {
851 read_unlock(&chan_list_lock);
852 return c;
853 }
854
855 /* Closest match */
856 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
857 c1 = c;
858 }
859 }
860
861 read_unlock(&chan_list_lock);
862
863 return c1;
864 }
865
866 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
867 {
868 struct sock *parent, *sk;
869 struct l2cap_chan *chan, *pchan;
870
871 BT_DBG("");
872
873 /* Check if we have socket listening on cid */
874 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
875 conn->src);
876 if (!pchan)
877 return;
878
879 parent = pchan->sk;
880
881 lock_sock(parent);
882
883 /* Check for backlog size */
884 if (sk_acceptq_is_full(parent)) {
885 BT_DBG("backlog full %d", parent->sk_ack_backlog);
886 goto clean;
887 }
888
889 chan = pchan->ops->new_connection(pchan->data);
890 if (!chan)
891 goto clean;
892
893 sk = chan->sk;
894
895 hci_conn_hold(conn->hcon);
896
897 bacpy(&bt_sk(sk)->src, conn->src);
898 bacpy(&bt_sk(sk)->dst, conn->dst);
899
900 bt_accept_enqueue(parent, sk);
901
902 l2cap_chan_add(conn, chan);
903
904 __set_chan_timer(chan, sk->sk_sndtimeo);
905
906 __l2cap_state_change(chan, BT_CONNECTED);
907 parent->sk_data_ready(parent, 0);
908
909 clean:
910 release_sock(parent);
911 }
912
913 static void l2cap_chan_ready(struct l2cap_chan *chan)
914 {
915 struct sock *sk = chan->sk;
916 struct sock *parent;
917
918 lock_sock(sk);
919
920 parent = bt_sk(sk)->parent;
921
922 BT_DBG("sk %p, parent %p", sk, parent);
923
924 chan->conf_state = 0;
925 __clear_chan_timer(chan);
926
927 __l2cap_state_change(chan, BT_CONNECTED);
928 sk->sk_state_change(sk);
929
930 if (parent)
931 parent->sk_data_ready(parent, 0);
932
933 release_sock(sk);
934 }
935
936 static void l2cap_conn_ready(struct l2cap_conn *conn)
937 {
938 struct l2cap_chan *chan;
939
940 BT_DBG("conn %p", conn);
941
942 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
943 l2cap_le_conn_ready(conn);
944
945 if (conn->hcon->out && conn->hcon->type == LE_LINK)
946 smp_conn_security(conn, conn->hcon->pending_sec_level);
947
948 mutex_lock(&conn->chan_lock);
949
950 list_for_each_entry(chan, &conn->chan_l, list) {
951
952 l2cap_chan_lock(chan);
953
954 if (conn->hcon->type == LE_LINK) {
955 if (smp_conn_security(conn, chan->sec_level))
956 l2cap_chan_ready(chan);
957
958 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
959 struct sock *sk = chan->sk;
960 __clear_chan_timer(chan);
961 lock_sock(sk);
962 __l2cap_state_change(chan, BT_CONNECTED);
963 sk->sk_state_change(sk);
964 release_sock(sk);
965
966 } else if (chan->state == BT_CONNECT)
967 l2cap_do_start(chan);
968
969 l2cap_chan_unlock(chan);
970 }
971
972 mutex_unlock(&conn->chan_lock);
973 }
974
 975 /* Notify sockets that we cannot guarantee reliability anymore */
976 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
977 {
978 struct l2cap_chan *chan;
979
980 BT_DBG("conn %p", conn);
981
982 mutex_lock(&conn->chan_lock);
983
984 list_for_each_entry(chan, &conn->chan_l, list) {
985 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
986 __l2cap_chan_set_err(chan, err);
987 }
988
989 mutex_unlock(&conn->chan_lock);
990 }
991
992 static void l2cap_info_timeout(struct work_struct *work)
993 {
994 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
995 info_timer.work);
996
997 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
998 conn->info_ident = 0;
999
1000 l2cap_conn_start(conn);
1001 }
1002
1003 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1004 {
1005 struct l2cap_conn *conn = hcon->l2cap_data;
1006 struct l2cap_chan *chan, *l;
1007
1008 if (!conn)
1009 return;
1010
1011 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1012
1013 kfree_skb(conn->rx_skb);
1014
1015 mutex_lock(&conn->chan_lock);
1016
1017 /* Kill channels */
1018 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1019 l2cap_chan_lock(chan);
1020
1021 l2cap_chan_del(chan, err);
1022
1023 l2cap_chan_unlock(chan);
1024
1025 chan->ops->close(chan->data);
1026 }
1027
1028 mutex_unlock(&conn->chan_lock);
1029
1030 hci_chan_del(conn->hchan);
1031
1032 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1033 cancel_delayed_work_sync(&conn->info_timer);
1034
1035 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1036 cancel_delayed_work_sync(&conn->security_timer);
1037 smp_chan_destroy(conn);
1038 }
1039
1040 hcon->l2cap_data = NULL;
1041 kfree(conn);
1042 }
1043
1044 static void security_timeout(struct work_struct *work)
1045 {
1046 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1047 security_timer.work);
1048
1049 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1050 }
1051
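/* Allocate and initialise the L2CAP connection state for an HCI
 * connection, creating the HCI channel used to send ACL data. */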
1052 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1053 {
1054 struct l2cap_conn *conn = hcon->l2cap_data;
1055 struct hci_chan *hchan;
1056
1057 if (conn || status)
1058 return conn;
1059
1060 hchan = hci_chan_create(hcon);
1061 if (!hchan)
1062 return NULL;
1063
1064 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1065 if (!conn) {
1066 hci_chan_del(hchan);
1067 return NULL;
1068 }
1069
1070 hcon->l2cap_data = conn;
1071 conn->hcon = hcon;
1072 conn->hchan = hchan;
1073
1074 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1075
1076 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1077 conn->mtu = hcon->hdev->le_mtu;
1078 else
1079 conn->mtu = hcon->hdev->acl_mtu;
1080
1081 conn->src = &hcon->hdev->bdaddr;
1082 conn->dst = &hcon->dst;
1083
1084 conn->feat_mask = 0;
1085
1086 spin_lock_init(&conn->lock);
1087 mutex_init(&conn->chan_lock);
1088
1089 INIT_LIST_HEAD(&conn->chan_l);
1090
1091 if (hcon->type == LE_LINK)
1092 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1093 else
1094 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1095
1096 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1097
1098 return conn;
1099 }
1100
1101 /* ---- Socket interface ---- */
1102
 1103 /* Find channel with given PSM and source bdaddr.
1104 * Returns closest match.
1105 */
1106 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1107 {
1108 struct l2cap_chan *c, *c1 = NULL;
1109
1110 read_lock(&chan_list_lock);
1111
1112 list_for_each_entry(c, &chan_list, global_l) {
1113 struct sock *sk = c->sk;
1114
1115 if (state && c->state != state)
1116 continue;
1117
1118 if (c->psm == psm) {
1119 /* Exact match. */
1120 if (!bacmp(&bt_sk(sk)->src, src)) {
1121 read_unlock(&chan_list_lock);
1122 return c;
1123 }
1124
1125 /* Closest match */
1126 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1127 c1 = c;
1128 }
1129 }
1130
1131 read_unlock(&chan_list_lock);
1132
1133 return c1;
1134 }
1135
1136 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1137 {
1138 struct sock *sk = chan->sk;
1139 bdaddr_t *src = &bt_sk(sk)->src;
1140 struct l2cap_conn *conn;
1141 struct hci_conn *hcon;
1142 struct hci_dev *hdev;
1143 __u8 auth_type;
1144 int err;
1145
1146 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1147 chan->psm);
1148
1149 hdev = hci_get_route(dst, src);
1150 if (!hdev)
1151 return -EHOSTUNREACH;
1152
1153 hci_dev_lock(hdev);
1154
1155 l2cap_chan_lock(chan);
1156
1157 /* PSM must be odd and lsb of upper byte must be 0 */
1158 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1159 chan->chan_type != L2CAP_CHAN_RAW) {
1160 err = -EINVAL;
1161 goto done;
1162 }
1163
1164 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1165 err = -EINVAL;
1166 goto done;
1167 }
1168
1169 switch (chan->mode) {
1170 case L2CAP_MODE_BASIC:
1171 break;
1172 case L2CAP_MODE_ERTM:
1173 case L2CAP_MODE_STREAMING:
1174 if (!disable_ertm)
1175 break;
1176 /* fall through */
1177 default:
1178 err = -ENOTSUPP;
1179 goto done;
1180 }
1181
1182 lock_sock(sk);
1183
1184 switch (sk->sk_state) {
1185 case BT_CONNECT:
1186 case BT_CONNECT2:
1187 case BT_CONFIG:
1188 /* Already connecting */
1189 err = 0;
1190 release_sock(sk);
1191 goto done;
1192
1193 case BT_CONNECTED:
1194 /* Already connected */
1195 err = -EISCONN;
1196 release_sock(sk);
1197 goto done;
1198
1199 case BT_OPEN:
1200 case BT_BOUND:
1201 /* Can connect */
1202 break;
1203
1204 default:
1205 err = -EBADFD;
1206 release_sock(sk);
1207 goto done;
1208 }
1209
1210 /* Set destination address and psm */
1211 bacpy(&bt_sk(sk)->dst, dst);
1212
1213 release_sock(sk);
1214
1215 chan->psm = psm;
1216 chan->dcid = cid;
1217
1218 auth_type = l2cap_get_auth_type(chan);
1219
1220 if (chan->dcid == L2CAP_CID_LE_DATA)
1221 hcon = hci_connect(hdev, LE_LINK, dst,
1222 chan->sec_level, auth_type);
1223 else
1224 hcon = hci_connect(hdev, ACL_LINK, dst,
1225 chan->sec_level, auth_type);
1226
1227 if (IS_ERR(hcon)) {
1228 err = PTR_ERR(hcon);
1229 goto done;
1230 }
1231
1232 conn = l2cap_conn_add(hcon, 0);
1233 if (!conn) {
1234 hci_conn_put(hcon);
1235 err = -ENOMEM;
1236 goto done;
1237 }
1238
1239 /* Update source addr of the socket */
1240 bacpy(src, conn->src);
1241
1242 l2cap_chan_unlock(chan);
1243 l2cap_chan_add(conn, chan);
1244 l2cap_chan_lock(chan);
1245
1246 l2cap_state_change(chan, BT_CONNECT);
1247 __set_chan_timer(chan, sk->sk_sndtimeo);
1248
1249 if (hcon->state == BT_CONNECTED) {
1250 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1251 __clear_chan_timer(chan);
1252 if (l2cap_chan_check_security(chan))
1253 l2cap_state_change(chan, BT_CONNECTED);
1254 } else
1255 l2cap_do_start(chan);
1256 }
1257
1258 err = 0;
1259
1260 done:
1261 l2cap_chan_unlock(chan);
1262 hci_dev_unlock(hdev);
1263 hci_dev_put(hdev);
1264 return err;
1265 }
1266
1267 int __l2cap_wait_ack(struct sock *sk)
1268 {
1269 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1270 DECLARE_WAITQUEUE(wait, current);
1271 int err = 0;
1272 int timeo = HZ/5;
1273
1274 add_wait_queue(sk_sleep(sk), &wait);
1275 set_current_state(TASK_INTERRUPTIBLE);
1276 while (chan->unacked_frames > 0 && chan->conn) {
1277 if (!timeo)
1278 timeo = HZ/5;
1279
1280 if (signal_pending(current)) {
1281 err = sock_intr_errno(timeo);
1282 break;
1283 }
1284
1285 release_sock(sk);
1286 timeo = schedule_timeout(timeo);
1287 lock_sock(sk);
1288 set_current_state(TASK_INTERRUPTIBLE);
1289
1290 err = sock_error(sk);
1291 if (err)
1292 break;
1293 }
1294 set_current_state(TASK_RUNNING);
1295 remove_wait_queue(sk_sleep(sk), &wait);
1296 return err;
1297 }
1298
1299 static void l2cap_monitor_timeout(struct work_struct *work)
1300 {
1301 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1302 monitor_timer.work);
1303
1304 BT_DBG("chan %p", chan);
1305
1306 l2cap_chan_lock(chan);
1307
1308 if (chan->retry_count >= chan->remote_max_tx) {
1309 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1310 l2cap_chan_unlock(chan);
1311 return;
1312 }
1313
1314 chan->retry_count++;
1315 __set_monitor_timer(chan);
1316
1317 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1318 l2cap_chan_unlock(chan);
1319 }
1320
1321 static void l2cap_retrans_timeout(struct work_struct *work)
1322 {
1323 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1324 retrans_timer.work);
1325
1326 BT_DBG("chan %p", chan);
1327
1328 l2cap_chan_lock(chan);
1329
1330 chan->retry_count = 1;
1331 __set_monitor_timer(chan);
1332
1333 set_bit(CONN_WAIT_F, &chan->conn_state);
1334
1335 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1336
1337 l2cap_chan_unlock(chan);
1338 }
1339
1340 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1341 {
1342 struct sk_buff *skb;
1343
1344 while ((skb = skb_peek(&chan->tx_q)) &&
1345 chan->unacked_frames) {
1346 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1347 break;
1348
1349 skb = skb_dequeue(&chan->tx_q);
1350 kfree_skb(skb);
1351
1352 chan->unacked_frames--;
1353 }
1354
1355 if (!chan->unacked_frames)
1356 __clear_retrans_timer(chan);
1357 }
1358
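/* Streaming mode transmit: number each queued I-frame and send it
 * immediately; no copies are kept because streaming mode never
 * retransmits. */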
1359 static void l2cap_streaming_send(struct l2cap_chan *chan)
1360 {
1361 struct sk_buff *skb;
1362 u32 control;
1363 u16 fcs;
1364
1365 while ((skb = skb_dequeue(&chan->tx_q))) {
1366 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1367 control |= __set_txseq(chan, chan->next_tx_seq);
1368 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1369
1370 if (chan->fcs == L2CAP_FCS_CRC16) {
1371 fcs = crc16(0, (u8 *)skb->data,
1372 skb->len - L2CAP_FCS_SIZE);
1373 put_unaligned_le16(fcs,
1374 skb->data + skb->len - L2CAP_FCS_SIZE);
1375 }
1376
1377 l2cap_do_send(chan, skb);
1378
1379 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1380 }
1381 }
1382
1383 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1384 {
1385 struct sk_buff *skb, *tx_skb;
1386 u16 fcs;
1387 u32 control;
1388
1389 skb = skb_peek(&chan->tx_q);
1390 if (!skb)
1391 return;
1392
1393 while (bt_cb(skb)->tx_seq != tx_seq) {
1394 if (skb_queue_is_last(&chan->tx_q, skb))
1395 return;
1396
1397 skb = skb_queue_next(&chan->tx_q, skb);
1398 }
1399
1400 if (chan->remote_max_tx &&
1401 bt_cb(skb)->retries == chan->remote_max_tx) {
1402 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1403 return;
1404 }
1405
1406 tx_skb = skb_clone(skb, GFP_ATOMIC);
1407 bt_cb(skb)->retries++;
1408
1409 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1410 control &= __get_sar_mask(chan);
1411
1412 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1413 control |= __set_ctrl_final(chan);
1414
1415 control |= __set_reqseq(chan, chan->buffer_seq);
1416 control |= __set_txseq(chan, tx_seq);
1417
1418 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1419
1420 if (chan->fcs == L2CAP_FCS_CRC16) {
1421 fcs = crc16(0, (u8 *)tx_skb->data,
1422 tx_skb->len - L2CAP_FCS_SIZE);
1423 put_unaligned_le16(fcs,
1424 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1425 }
1426
1427 l2cap_do_send(chan, tx_skb);
1428 }
1429
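/* ERTM transmit path: clone and send queued I-frames while the
 * transmit window allows it, starting the retransmission timer and
 * counting unacked frames. Returns the number of frames transmitted
 * for the first time. */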
1430 static int l2cap_ertm_send(struct l2cap_chan *chan)
1431 {
1432 struct sk_buff *skb, *tx_skb;
1433 u16 fcs;
1434 u32 control;
1435 int nsent = 0;
1436
1437 if (chan->state != BT_CONNECTED)
1438 return -ENOTCONN;
1439
1440 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1441
1442 if (chan->remote_max_tx &&
1443 bt_cb(skb)->retries == chan->remote_max_tx) {
1444 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1445 break;
1446 }
1447
1448 tx_skb = skb_clone(skb, GFP_ATOMIC);
1449
1450 bt_cb(skb)->retries++;
1451
1452 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1453 control &= __get_sar_mask(chan);
1454
1455 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1456 control |= __set_ctrl_final(chan);
1457
1458 control |= __set_reqseq(chan, chan->buffer_seq);
1459 control |= __set_txseq(chan, chan->next_tx_seq);
1460
1461 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1462
1463 if (chan->fcs == L2CAP_FCS_CRC16) {
1464 fcs = crc16(0, (u8 *)skb->data,
1465 tx_skb->len - L2CAP_FCS_SIZE);
1466 put_unaligned_le16(fcs, skb->data +
1467 tx_skb->len - L2CAP_FCS_SIZE);
1468 }
1469
1470 l2cap_do_send(chan, tx_skb);
1471
1472 __set_retrans_timer(chan);
1473
1474 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1475
1476 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1477
1478 if (bt_cb(skb)->retries == 1) {
1479 chan->unacked_frames++;
1480
1481 if (!nsent++)
1482 __clear_ack_timer(chan);
1483 }
1484
1485 chan->frames_sent++;
1486
1487 if (skb_queue_is_last(&chan->tx_q, skb))
1488 chan->tx_send_head = NULL;
1489 else
1490 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1491 }
1492
1493 return nsent;
1494 }
1495
1496 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1497 {
1498 int ret;
1499
1500 if (!skb_queue_empty(&chan->tx_q))
1501 chan->tx_send_head = chan->tx_q.next;
1502
1503 chan->next_tx_seq = chan->expected_ack_seq;
1504 ret = l2cap_ertm_send(chan);
1505 return ret;
1506 }
1507
1508 static void __l2cap_send_ack(struct l2cap_chan *chan)
1509 {
1510 u32 control = 0;
1511
1512 control |= __set_reqseq(chan, chan->buffer_seq);
1513
1514 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1515 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1516 set_bit(CONN_RNR_SENT, &chan->conn_state);
1517 l2cap_send_sframe(chan, control);
1518 return;
1519 }
1520
1521 if (l2cap_ertm_send(chan) > 0)
1522 return;
1523
1524 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1525 l2cap_send_sframe(chan, control);
1526 }
1527
1528 static void l2cap_send_ack(struct l2cap_chan *chan)
1529 {
1530 __clear_ack_timer(chan);
1531 __l2cap_send_ack(chan);
1532 }
1533
1534 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1535 {
1536 struct srej_list *tail;
1537 u32 control;
1538
1539 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1540 control |= __set_ctrl_final(chan);
1541
1542 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1543 control |= __set_reqseq(chan, tail->tx_seq);
1544
1545 l2cap_send_sframe(chan, control);
1546 }
1547
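/* Copy user data from the iovec into the skb, allocating continuation
 * fragments sized to the connection MTU for anything that does not fit
 * in the first buffer. Returns bytes copied or a negative error. */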
1548 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
1549 struct msghdr *msg, int len,
1550 int count, struct sk_buff *skb)
1551 {
1552 struct l2cap_conn *conn = chan->conn;
1553 struct sk_buff **frag;
1554 int err, sent = 0;
1555
1556 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1557 return -EFAULT;
1558
1559 sent += count;
1560 len -= count;
1561
1562 /* Continuation fragments (no L2CAP header) */
1563 frag = &skb_shinfo(skb)->frag_list;
1564 while (len) {
1565 count = min_t(unsigned int, conn->mtu, len);
1566
1567 *frag = chan->ops->alloc_skb(chan, count,
1568 msg->msg_flags & MSG_DONTWAIT,
1569 &err);
1570
1571 if (!*frag)
1572 return err;
1573 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1574 return -EFAULT;
1575
1576 (*frag)->priority = skb->priority;
1577
1578 sent += count;
1579 len -= count;
1580
1581 frag = &(*frag)->next;
1582 }
1583
1584 return sent;
1585 }
1586
1587 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1588 struct msghdr *msg, size_t len,
1589 u32 priority)
1590 {
1591 struct l2cap_conn *conn = chan->conn;
1592 struct sk_buff *skb;
1593 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1594 struct l2cap_hdr *lh;
1595
1596 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1597
1598 count = min_t(unsigned int, (conn->mtu - hlen), len);
1599
1600 skb = chan->ops->alloc_skb(chan, count + hlen,
1601 msg->msg_flags & MSG_DONTWAIT, &err);
1602
1603 if (!skb)
1604 return ERR_PTR(err);
1605
1606 skb->priority = priority;
1607
1608 /* Create L2CAP header */
1609 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1610 lh->cid = cpu_to_le16(chan->dcid);
1611 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1612 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1613
1614 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1615 if (unlikely(err < 0)) {
1616 kfree_skb(skb);
1617 return ERR_PTR(err);
1618 }
1619 return skb;
1620 }
1621
1622 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1623 struct msghdr *msg, size_t len,
1624 u32 priority)
1625 {
1626 struct l2cap_conn *conn = chan->conn;
1627 struct sk_buff *skb;
1628 int err, count, hlen = L2CAP_HDR_SIZE;
1629 struct l2cap_hdr *lh;
1630
1631 BT_DBG("chan %p len %d", chan, (int)len);
1632
1633 count = min_t(unsigned int, (conn->mtu - hlen), len);
1634
1635 skb = chan->ops->alloc_skb(chan, count + hlen,
1636 msg->msg_flags & MSG_DONTWAIT, &err);
1637
1638 if (!skb)
1639 return ERR_PTR(err);
1640
1641 skb->priority = priority;
1642
1643 /* Create L2CAP header */
1644 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1645 lh->cid = cpu_to_le16(chan->dcid);
1646 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1647
1648 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1649 if (unlikely(err < 0)) {
1650 kfree_skb(skb);
1651 return ERR_PTR(err);
1652 }
1653 return skb;
1654 }
1655
1656 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1657 struct msghdr *msg, size_t len,
1658 u32 control, u16 sdulen)
1659 {
1660 struct l2cap_conn *conn = chan->conn;
1661 struct sk_buff *skb;
1662 int err, count, hlen;
1663 struct l2cap_hdr *lh;
1664
1665 BT_DBG("chan %p len %d", chan, (int)len);
1666
1667 if (!conn)
1668 return ERR_PTR(-ENOTCONN);
1669
1670 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1671 hlen = L2CAP_EXT_HDR_SIZE;
1672 else
1673 hlen = L2CAP_ENH_HDR_SIZE;
1674
1675 if (sdulen)
1676 hlen += L2CAP_SDULEN_SIZE;
1677
1678 if (chan->fcs == L2CAP_FCS_CRC16)
1679 hlen += L2CAP_FCS_SIZE;
1680
1681 count = min_t(unsigned int, (conn->mtu - hlen), len);
1682
1683 skb = chan->ops->alloc_skb(chan, count + hlen,
1684 msg->msg_flags & MSG_DONTWAIT, &err);
1685
1686 if (!skb)
1687 return ERR_PTR(err);
1688
1689 /* Create L2CAP header */
1690 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1691 lh->cid = cpu_to_le16(chan->dcid);
1692 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1693
1694 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1695
1696 if (sdulen)
1697 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1698
1699 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1700 if (unlikely(err < 0)) {
1701 kfree_skb(skb);
1702 return ERR_PTR(err);
1703 }
1704
1705 if (chan->fcs == L2CAP_FCS_CRC16)
1706 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1707
1708 bt_cb(skb)->retries = 0;
1709 return skb;
1710 }
1711
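/* Segment an SDU larger than the remote MPS into a START / CONTINUE /
 * END sequence of I-frames and append them to the transmit queue.
 * Returns the total number of bytes queued. */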
1712 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1713 {
1714 struct sk_buff *skb;
1715 struct sk_buff_head sar_queue;
1716 u32 control;
1717 size_t size = 0;
1718
1719 skb_queue_head_init(&sar_queue);
1720 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1721 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1722 if (IS_ERR(skb))
1723 return PTR_ERR(skb);
1724
1725 __skb_queue_tail(&sar_queue, skb);
1726 len -= chan->remote_mps;
1727 size += chan->remote_mps;
1728
1729 while (len > 0) {
1730 size_t buflen;
1731
1732 if (len > chan->remote_mps) {
1733 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1734 buflen = chan->remote_mps;
1735 } else {
1736 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1737 buflen = len;
1738 }
1739
1740 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1741 if (IS_ERR(skb)) {
1742 skb_queue_purge(&sar_queue);
1743 return PTR_ERR(skb);
1744 }
1745
1746 __skb_queue_tail(&sar_queue, skb);
1747 len -= buflen;
1748 size += buflen;
1749 }
1750 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1751 if (chan->tx_send_head == NULL)
1752 chan->tx_send_head = sar_queue.next;
1753
1754 return size;
1755 }
1756
1757 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1758 u32 priority)
1759 {
1760 struct sk_buff *skb;
1761 u32 control;
1762 int err;
1763
1764 /* Connectionless channel */
1765 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1766 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1767 if (IS_ERR(skb))
1768 return PTR_ERR(skb);
1769
1770 l2cap_do_send(chan, skb);
1771 return len;
1772 }
1773
1774 switch (chan->mode) {
1775 case L2CAP_MODE_BASIC:
1776 /* Check outgoing MTU */
1777 if (len > chan->omtu)
1778 return -EMSGSIZE;
1779
1780 /* Create a basic PDU */
1781 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1782 if (IS_ERR(skb))
1783 return PTR_ERR(skb);
1784
1785 l2cap_do_send(chan, skb);
1786 err = len;
1787 break;
1788
1789 case L2CAP_MODE_ERTM:
1790 case L2CAP_MODE_STREAMING:
1791 /* Entire SDU fits into one PDU */
1792 if (len <= chan->remote_mps) {
1793 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1794 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1795 0);
1796 if (IS_ERR(skb))
1797 return PTR_ERR(skb);
1798
1799 __skb_queue_tail(&chan->tx_q, skb);
1800
1801 if (chan->tx_send_head == NULL)
1802 chan->tx_send_head = skb;
1803
1804 } else {
 1805 /* Segment SDU into multiple PDUs */
1806 err = l2cap_sar_segment_sdu(chan, msg, len);
1807 if (err < 0)
1808 return err;
1809 }
1810
1811 if (chan->mode == L2CAP_MODE_STREAMING) {
1812 l2cap_streaming_send(chan);
1813 err = len;
1814 break;
1815 }
1816
1817 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1818 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1819 err = len;
1820 break;
1821 }
1822
1823 err = l2cap_ertm_send(chan);
1824 if (err >= 0)
1825 err = len;
1826
1827 break;
1828
1829 default:
1830 BT_DBG("bad state %1.1x", chan->mode);
1831 err = -EBADFD;
1832 }
1833
1834 return err;
1835 }
1836
1837 /* Copy frame to all raw sockets on that connection */
1838 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1839 {
1840 struct sk_buff *nskb;
1841 struct l2cap_chan *chan;
1842
1843 BT_DBG("conn %p", conn);
1844
1845 mutex_lock(&conn->chan_lock);
1846
1847 list_for_each_entry(chan, &conn->chan_l, list) {
1848 struct sock *sk = chan->sk;
1849 if (chan->chan_type != L2CAP_CHAN_RAW)
1850 continue;
1851
1852 /* Don't send frame to the socket it came from */
1853 if (skb->sk == sk)
1854 continue;
1855 nskb = skb_clone(skb, GFP_ATOMIC);
1856 if (!nskb)
1857 continue;
1858
1859 if (chan->ops->recv(chan->data, nskb))
1860 kfree_skb(nskb);
1861 }
1862
1863 mutex_unlock(&conn->chan_lock);
1864 }
1865
1866 /* ---- L2CAP signalling commands ---- */
1867 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1868 u8 code, u8 ident, u16 dlen, void *data)
1869 {
1870 struct sk_buff *skb, **frag;
1871 struct l2cap_cmd_hdr *cmd;
1872 struct l2cap_hdr *lh;
1873 int len, count;
1874
1875 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1876 conn, code, ident, dlen);
1877
1878 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1879 count = min_t(unsigned int, conn->mtu, len);
1880
1881 skb = bt_skb_alloc(count, GFP_ATOMIC);
1882 if (!skb)
1883 return NULL;
1884
1885 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1886 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1887
1888 if (conn->hcon->type == LE_LINK)
1889 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1890 else
1891 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1892
1893 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1894 cmd->code = code;
1895 cmd->ident = ident;
1896 cmd->len = cpu_to_le16(dlen);
1897
1898 if (dlen) {
1899 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1900 memcpy(skb_put(skb, count), data, count);
1901 data += count;
1902 }
1903
1904 len -= skb->len;
1905
1906 /* Continuation fragments (no L2CAP header) */
1907 frag = &skb_shinfo(skb)->frag_list;
1908 while (len) {
1909 count = min_t(unsigned int, conn->mtu, len);
1910
1911 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1912 if (!*frag)
1913 goto fail;
1914
1915 memcpy(skb_put(*frag, count), data, count);
1916
1917 len -= count;
1918 data += count;
1919
1920 frag = &(*frag)->next;
1921 }
1922
1923 return skb;
1924
1925 fail:
1926 kfree_skb(skb);
1927 return NULL;
1928 }
1929
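/* Parse one type/length/value configuration option and advance the
 * cursor past it. */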
1930 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1931 {
1932 struct l2cap_conf_opt *opt = *ptr;
1933 int len;
1934
1935 len = L2CAP_CONF_OPT_SIZE + opt->len;
1936 *ptr += len;
1937
1938 *type = opt->type;
1939 *olen = opt->len;
1940
1941 switch (opt->len) {
1942 case 1:
1943 *val = *((u8 *) opt->val);
1944 break;
1945
1946 case 2:
1947 *val = get_unaligned_le16(opt->val);
1948 break;
1949
1950 case 4:
1951 *val = get_unaligned_le32(opt->val);
1952 break;
1953
1954 default:
1955 *val = (unsigned long) opt->val;
1956 break;
1957 }
1958
1959 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1960 return len;
1961 }
1962
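/* Append one type/length/value configuration option at the cursor and
 * advance it. */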
1963 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1964 {
1965 struct l2cap_conf_opt *opt = *ptr;
1966
1967 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1968
1969 opt->type = type;
1970 opt->len = len;
1971
1972 switch (len) {
1973 case 1:
1974 *((u8 *) opt->val) = val;
1975 break;
1976
1977 case 2:
1978 put_unaligned_le16(val, opt->val);
1979 break;
1980
1981 case 4:
1982 put_unaligned_le32(val, opt->val);
1983 break;
1984
1985 default:
1986 memcpy(opt->val, (void *) val, len);
1987 break;
1988 }
1989
1990 *ptr += L2CAP_CONF_OPT_SIZE + len;
1991 }
1992
1993 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1994 {
1995 struct l2cap_conf_efs efs;
1996
1997 switch (chan->mode) {
1998 case L2CAP_MODE_ERTM:
1999 efs.id = chan->local_id;
2000 efs.stype = chan->local_stype;
2001 efs.msdu = cpu_to_le16(chan->local_msdu);
2002 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2003 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2004 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2005 break;
2006
2007 case L2CAP_MODE_STREAMING:
2008 efs.id = 1;
2009 efs.stype = L2CAP_SERV_BESTEFFORT;
2010 efs.msdu = cpu_to_le16(chan->local_msdu);
2011 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2012 efs.acc_lat = 0;
2013 efs.flush_to = 0;
2014 break;
2015
2016 default:
2017 return;
2018 }
2019
2020 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2021 (unsigned long) &efs);
2022 }
2023
2024 static void l2cap_ack_timeout(struct work_struct *work)
2025 {
2026 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2027 ack_timer.work);
2028
2029 BT_DBG("chan %p", chan);
2030
2031 l2cap_chan_lock(chan);
2032
2033 __l2cap_send_ack(chan);
2034
2035 l2cap_chan_unlock(chan);
2036
2037 l2cap_chan_put(chan);
2038 }
2039
2040 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2041 {
2042 chan->expected_ack_seq = 0;
2043 chan->unacked_frames = 0;
2044 chan->buffer_seq = 0;
2045 chan->num_acked = 0;
2046 chan->frames_sent = 0;
2047
2048 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2049 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2050 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2051
2052 skb_queue_head_init(&chan->srej_q);
2053
2054 INIT_LIST_HEAD(&chan->srej_l);
2055 }
2056
2057 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2058 {
2059 switch (mode) {
2060 case L2CAP_MODE_STREAMING:
2061 case L2CAP_MODE_ERTM:
2062 if (l2cap_mode_supported(mode, remote_feat_mask))
2063 return mode;
2064 /* fall through */
2065 default:
2066 return L2CAP_MODE_BASIC;
2067 }
2068 }
2069
2070 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2071 {
2072 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2073 }
2074
2075 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2076 {
2077 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2078 }
2079
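/* Clamp the transmit window: use the extended control field when the
 * requested window exceeds the default and extended windows are
 * supported, otherwise fall back to the standard window size. */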
2080 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2081 {
2082 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2083 __l2cap_ews_supported(chan)) {
2084 /* use extended control field */
2085 set_bit(FLAG_EXT_CTRL, &chan->flags);
2086 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2087 } else {
2088 chan->tx_win = min_t(u16, chan->tx_win,
2089 L2CAP_DEFAULT_TX_WINDOW);
2090 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2091 }
2092 }
2093
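/* Build the payload of a Configuration Request for this channel: MTU,
 * the RFC (mode) option, and optional FCS, EFS and extended window
 * options depending on the selected mode. */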
2094 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2095 {
2096 struct l2cap_conf_req *req = data;
2097 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2098 void *ptr = req->data;
2099 u16 size;
2100
2101 BT_DBG("chan %p", chan);
2102
2103 if (chan->num_conf_req || chan->num_conf_rsp)
2104 goto done;
2105
2106 switch (chan->mode) {
2107 case L2CAP_MODE_STREAMING:
2108 case L2CAP_MODE_ERTM:
2109 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2110 break;
2111
2112 if (__l2cap_efs_supported(chan))
2113 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2114
2115 /* fall through */
2116 default:
2117 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2118 break;
2119 }
2120
2121 done:
2122 if (chan->imtu != L2CAP_DEFAULT_MTU)
2123 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2124
2125 switch (chan->mode) {
2126 case L2CAP_MODE_BASIC:
2127 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2128 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2129 break;
2130
2131 rfc.mode = L2CAP_MODE_BASIC;
2132 rfc.txwin_size = 0;
2133 rfc.max_transmit = 0;
2134 rfc.retrans_timeout = 0;
2135 rfc.monitor_timeout = 0;
2136 rfc.max_pdu_size = 0;
2137
2138 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2139 (unsigned long) &rfc);
2140 break;
2141
2142 case L2CAP_MODE_ERTM:
2143 rfc.mode = L2CAP_MODE_ERTM;
2144 rfc.max_transmit = chan->max_tx;
2145 rfc.retrans_timeout = 0;
2146 rfc.monitor_timeout = 0;
2147
2148 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2149 L2CAP_EXT_HDR_SIZE -
2150 L2CAP_SDULEN_SIZE -
2151 L2CAP_FCS_SIZE);
2152 rfc.max_pdu_size = cpu_to_le16(size);
2153
2154 l2cap_txwin_setup(chan);
2155
2156 rfc.txwin_size = min_t(u16, chan->tx_win,
2157 L2CAP_DEFAULT_TX_WINDOW);
2158
2159 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2160 (unsigned long) &rfc);
2161
2162 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2163 l2cap_add_opt_efs(&ptr, chan);
2164
2165 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2166 break;
2167
2168 if (chan->fcs == L2CAP_FCS_NONE ||
2169 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2170 chan->fcs = L2CAP_FCS_NONE;
2171 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2172 }
2173
2174 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2175 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2176 chan->tx_win);
2177 break;
2178
2179 case L2CAP_MODE_STREAMING:
2180 rfc.mode = L2CAP_MODE_STREAMING;
2181 rfc.txwin_size = 0;
2182 rfc.max_transmit = 0;
2183 rfc.retrans_timeout = 0;
2184 rfc.monitor_timeout = 0;
2185
2186 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2187 L2CAP_EXT_HDR_SIZE -
2188 L2CAP_SDULEN_SIZE -
2189 L2CAP_FCS_SIZE);
2190 rfc.max_pdu_size = cpu_to_le16(size);
2191
2192 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2193 (unsigned long) &rfc);
2194
2195 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2196 l2cap_add_opt_efs(&ptr, chan);
2197
2198 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2199 break;
2200
2201 if (chan->fcs == L2CAP_FCS_NONE ||
2202 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2203 chan->fcs = L2CAP_FCS_NONE;
2204 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2205 }
2206 break;
2207 }
2208
2209 req->dcid = cpu_to_le16(chan->dcid);
2210 req->flags = cpu_to_le16(0);
2211
2212 return ptr - data;
2213 }
2214
2215 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2216 {
2217 struct l2cap_conf_rsp *rsp = data;
2218 void *ptr = rsp->data;
2219 void *req = chan->conf_req;
2220 int len = chan->conf_len;
2221 int type, hint, olen;
2222 unsigned long val;
2223 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2224 struct l2cap_conf_efs efs;
2225 u8 remote_efs = 0;
2226 u16 mtu = L2CAP_DEFAULT_MTU;
2227 u16 result = L2CAP_CONF_SUCCESS;
2228 u16 size;
2229
2230 BT_DBG("chan %p", chan);
2231
2232 while (len >= L2CAP_CONF_OPT_SIZE) {
2233 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2234
2235 hint = type & L2CAP_CONF_HINT;
2236 type &= L2CAP_CONF_MASK;
2237
2238 switch (type) {
2239 case L2CAP_CONF_MTU:
2240 mtu = val;
2241 break;
2242
2243 case L2CAP_CONF_FLUSH_TO:
2244 chan->flush_to = val;
2245 break;
2246
2247 case L2CAP_CONF_QOS:
2248 break;
2249
2250 case L2CAP_CONF_RFC:
2251 if (olen == sizeof(rfc))
2252 memcpy(&rfc, (void *) val, olen);
2253 break;
2254
2255 case L2CAP_CONF_FCS:
2256 if (val == L2CAP_FCS_NONE)
2257 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2258 break;
2259
2260 case L2CAP_CONF_EFS:
2261 remote_efs = 1;
2262 if (olen == sizeof(efs))
2263 memcpy(&efs, (void *) val, olen);
2264 break;
2265
2266 case L2CAP_CONF_EWS:
2267 if (!enable_hs)
2268 return -ECONNREFUSED;
2269
2270 set_bit(FLAG_EXT_CTRL, &chan->flags);
2271 set_bit(CONF_EWS_RECV, &chan->conf_state);
2272 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2273 chan->remote_tx_win = val;
2274 break;
2275
2276 default:
2277 if (hint)
2278 break;
2279
2280 result = L2CAP_CONF_UNKNOWN;
2281 *((u8 *) ptr++) = type;
2282 break;
2283 }
2284 }
2285
2286 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2287 goto done;
2288
2289 switch (chan->mode) {
2290 case L2CAP_MODE_STREAMING:
2291 case L2CAP_MODE_ERTM:
2292 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2293 chan->mode = l2cap_select_mode(rfc.mode,
2294 chan->conn->feat_mask);
2295 break;
2296 }
2297
2298 if (remote_efs) {
2299 if (__l2cap_efs_supported(chan))
2300 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2301 else
2302 return -ECONNREFUSED;
2303 }
2304
2305 if (chan->mode != rfc.mode)
2306 return -ECONNREFUSED;
2307
2308 break;
2309 }
2310
2311 done:
2312 if (chan->mode != rfc.mode) {
2313 result = L2CAP_CONF_UNACCEPT;
2314 rfc.mode = chan->mode;
2315
2316 if (chan->num_conf_rsp == 1)
2317 return -ECONNREFUSED;
2318
2319 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2320 sizeof(rfc), (unsigned long) &rfc);
2321 }
2322
2323 if (result == L2CAP_CONF_SUCCESS) {
2324 /* Configure output options and let the other side know
2325 * which ones we don't like. */
2326
2327 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2328 result = L2CAP_CONF_UNACCEPT;
2329 else {
2330 chan->omtu = mtu;
2331 set_bit(CONF_MTU_DONE, &chan->conf_state);
2332 }
2333 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2334
2335 if (remote_efs) {
2336 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2337 efs.stype != L2CAP_SERV_NOTRAFIC &&
2338 efs.stype != chan->local_stype) {
2339
2340 result = L2CAP_CONF_UNACCEPT;
2341
2342 if (chan->num_conf_req >= 1)
2343 return -ECONNREFUSED;
2344
2345 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2346 sizeof(efs),
2347 (unsigned long) &efs);
2348 } else {
2349 /* Send PENDING Conf Rsp */
2350 result = L2CAP_CONF_PENDING;
2351 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2352 }
2353 }
2354
2355 switch (rfc.mode) {
2356 case L2CAP_MODE_BASIC:
2357 chan->fcs = L2CAP_FCS_NONE;
2358 set_bit(CONF_MODE_DONE, &chan->conf_state);
2359 break;
2360
2361 case L2CAP_MODE_ERTM:
2362 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2363 chan->remote_tx_win = rfc.txwin_size;
2364 else
2365 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2366
2367 chan->remote_max_tx = rfc.max_transmit;
2368
2369 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2370 chan->conn->mtu -
2371 L2CAP_EXT_HDR_SIZE -
2372 L2CAP_SDULEN_SIZE -
2373 L2CAP_FCS_SIZE);
2374 rfc.max_pdu_size = cpu_to_le16(size);
2375 chan->remote_mps = size;
2376
2377 rfc.retrans_timeout =
2378 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2379 rfc.monitor_timeout =
2380 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2381
2382 set_bit(CONF_MODE_DONE, &chan->conf_state);
2383
2384 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2385 sizeof(rfc), (unsigned long) &rfc);
2386
2387 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2388 chan->remote_id = efs.id;
2389 chan->remote_stype = efs.stype;
2390 chan->remote_msdu = le16_to_cpu(efs.msdu);
2391 chan->remote_flush_to =
2392 le32_to_cpu(efs.flush_to);
2393 chan->remote_acc_lat =
2394 le32_to_cpu(efs.acc_lat);
2395 chan->remote_sdu_itime =
2396 le32_to_cpu(efs.sdu_itime);
2397 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2398 sizeof(efs), (unsigned long) &efs);
2399 }
2400 break;
2401
2402 case L2CAP_MODE_STREAMING:
2403 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2404 chan->conn->mtu -
2405 L2CAP_EXT_HDR_SIZE -
2406 L2CAP_SDULEN_SIZE -
2407 L2CAP_FCS_SIZE);
2408 rfc.max_pdu_size = cpu_to_le16(size);
2409 chan->remote_mps = size;
2410
2411 set_bit(CONF_MODE_DONE, &chan->conf_state);
2412
2413 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2414 sizeof(rfc), (unsigned long) &rfc);
2415
2416 break;
2417
2418 default:
2419 result = L2CAP_CONF_UNACCEPT;
2420
2421 memset(&rfc, 0, sizeof(rfc));
2422 rfc.mode = chan->mode;
2423 }
2424
2425 if (result == L2CAP_CONF_SUCCESS)
2426 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2427 }
2428 rsp->scid = cpu_to_le16(chan->dcid);
2429 rsp->result = cpu_to_le16(result);
2430 rsp->flags = cpu_to_le16(0x0000);
2431
2432 return ptr - data;
2433 }
2434
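/* Walk the options of the peer's Configure Response, fold the agreed
 * values back into the channel and build a follow-up Configure Request
 * in 'data'. Returns its length or -ECONNREFUSED.
 */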
2435 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2436 {
2437 struct l2cap_conf_req *req = data;
2438 void *ptr = req->data;
2439 int type, olen;
2440 unsigned long val;
2441 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2442 struct l2cap_conf_efs efs;
2443
2444 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2445
2446 while (len >= L2CAP_CONF_OPT_SIZE) {
2447 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2448
2449 switch (type) {
2450 case L2CAP_CONF_MTU:
2451 if (val < L2CAP_DEFAULT_MIN_MTU) {
2452 *result = L2CAP_CONF_UNACCEPT;
2453 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2454 } else
2455 chan->imtu = val;
2456 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2457 break;
2458
2459 case L2CAP_CONF_FLUSH_TO:
2460 chan->flush_to = val;
2461 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2462 2, chan->flush_to);
2463 break;
2464
2465 case L2CAP_CONF_RFC:
2466 if (olen == sizeof(rfc))
2467 memcpy(&rfc, (void *)val, olen);
2468
2469 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2470 rfc.mode != chan->mode)
2471 return -ECONNREFUSED;
2472
2473 chan->fcs = 0;
2474
2475 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2476 sizeof(rfc), (unsigned long) &rfc);
2477 break;
2478
2479 case L2CAP_CONF_EWS:
2480 chan->tx_win = min_t(u16, val,
2481 L2CAP_DEFAULT_EXT_WINDOW);
2482 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2483 chan->tx_win);
2484 break;
2485
2486 case L2CAP_CONF_EFS:
2487 if (olen == sizeof(efs))
2488 memcpy(&efs, (void *)val, olen);
2489
2490 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2491 efs.stype != L2CAP_SERV_NOTRAFIC &&
2492 efs.stype != chan->local_stype)
2493 return -ECONNREFUSED;
2494
2495 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2496 sizeof(efs), (unsigned long) &efs);
2497 break;
2498 }
2499 }
2500
2501 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2502 return -ECONNREFUSED;
2503
2504 chan->mode = rfc.mode;
2505
2506 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2507 switch (rfc.mode) {
2508 case L2CAP_MODE_ERTM:
2509 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2510 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2511 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2512
2513 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2514 chan->local_msdu = le16_to_cpu(efs.msdu);
2515 chan->local_sdu_itime =
2516 le32_to_cpu(efs.sdu_itime);
2517 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2518 chan->local_flush_to =
2519 le32_to_cpu(efs.flush_to);
2520 }
2521 break;
2522
2523 case L2CAP_MODE_STREAMING:
2524 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2525 }
2526 }
2527
2528 req->dcid = cpu_to_le16(chan->dcid);
2529 req->flags = cpu_to_le16(0x0000);
2530
2531 return ptr - data;
2532 }
2533
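/* Build a bare Configure Response carrying only a result code and the
 * continuation flags, without any options.
 */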
2534 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2535 {
2536 struct l2cap_conf_rsp *rsp = data;
2537 void *ptr = rsp->data;
2538
2539 BT_DBG("chan %p", chan);
2540
2541 rsp->scid = cpu_to_le16(chan->dcid);
2542 rsp->result = cpu_to_le16(result);
2543 rsp->flags = cpu_to_le16(flags);
2544
2545 return ptr - data;
2546 }
2547
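/* Send the Connect Response that was deferred (e.g. while waiting for
 * user-space authorization) and start configuration if no Configure
 * Request has been sent yet.
 */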
2548 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2549 {
2550 struct l2cap_conn_rsp rsp;
2551 struct l2cap_conn *conn = chan->conn;
2552 u8 buf[128];
2553
2554 rsp.scid = cpu_to_le16(chan->dcid);
2555 rsp.dcid = cpu_to_le16(chan->scid);
2556 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2557 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2558 l2cap_send_cmd(conn, chan->ident,
2559 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2560
2561 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2562 return;
2563
2564 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2565 l2cap_build_conf_req(chan, buf), buf);
2566 chan->num_conf_req++;
2567 }
2568
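/* Extract the RFC option from a successful Configure Response so that the
 * ERTM/streaming timeouts and MPS reflect what the remote actually accepted.
 */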
2569 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2570 {
2571 int type, olen;
2572 unsigned long val;
2573 struct l2cap_conf_rfc rfc;
2574
2575 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2576
2577 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2578 return;
2579
2580 while (len >= L2CAP_CONF_OPT_SIZE) {
2581 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2582
2583 switch (type) {
2584 case L2CAP_CONF_RFC:
2585 if (olen == sizeof(rfc))
2586 memcpy(&rfc, (void *)val, olen);
2587 goto done;
2588 }
2589 }
2590
2591 /* Use sane default values in case a misbehaving remote device
2592 * did not send an RFC option.
2593 */
2594 rfc.mode = chan->mode;
2595 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2596 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2597 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2598
2599 BT_ERR("Expected RFC option was not found, using defaults");
2600
2601 done:
2602 switch (rfc.mode) {
2603 case L2CAP_MODE_ERTM:
2604 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2605 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2606 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2607 break;
2608 case L2CAP_MODE_STREAMING:
2609 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2610 }
2611 }
2612
2613 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2614 {
2615 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2616
2617 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2618 return 0;
2619
2620 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2621 cmd->ident == conn->info_ident) {
2622 cancel_delayed_work(&conn->info_timer);
2623
2624 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2625 conn->info_ident = 0;
2626
2627 l2cap_conn_start(conn);
2628 }
2629
2630 return 0;
2631 }
2632
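/* Handle an incoming Connection Request: find a listening channel for the
 * PSM, enforce security and backlog limits, create the child channel and
 * reply with a success, pending or error result.
 */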
2633 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2634 {
2635 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2636 struct l2cap_conn_rsp rsp;
2637 struct l2cap_chan *chan = NULL, *pchan;
2638 struct sock *parent, *sk = NULL;
2639 int result, status = L2CAP_CS_NO_INFO;
2640
2641 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2642 __le16 psm = req->psm;
2643
2644 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2645
2646 /* Check if we have a socket listening on this PSM */
2647 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2648 if (!pchan) {
2649 result = L2CAP_CR_BAD_PSM;
2650 goto sendresp;
2651 }
2652
2653 parent = pchan->sk;
2654
2655 mutex_lock(&conn->chan_lock);
2656 lock_sock(parent);
2657
2658 /* Check if the ACL is secure enough (if not SDP) */
2659 if (psm != cpu_to_le16(0x0001) &&
2660 !hci_conn_check_link_mode(conn->hcon)) {
2661 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2662 result = L2CAP_CR_SEC_BLOCK;
2663 goto response;
2664 }
2665
2666 result = L2CAP_CR_NO_MEM;
2667
2668 /* Check for backlog size */
2669 if (sk_acceptq_is_full(parent)) {
2670 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2671 goto response;
2672 }
2673
2674 chan = pchan->ops->new_connection(pchan->data);
2675 if (!chan)
2676 goto response;
2677
2678 sk = chan->sk;
2679
2680 /* Check if we already have a channel with that dcid */
2681 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2682 sock_set_flag(sk, SOCK_ZAPPED);
2683 chan->ops->close(chan->data);
2684 goto response;
2685 }
2686
2687 hci_conn_hold(conn->hcon);
2688
2689 bacpy(&bt_sk(sk)->src, conn->src);
2690 bacpy(&bt_sk(sk)->dst, conn->dst);
2691 chan->psm = psm;
2692 chan->dcid = scid;
2693
2694 bt_accept_enqueue(parent, sk);
2695
2696 __l2cap_chan_add(conn, chan);
2697
2698 dcid = chan->scid;
2699
2700 __set_chan_timer(chan, sk->sk_sndtimeo);
2701
2702 chan->ident = cmd->ident;
2703
2704 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2705 if (l2cap_chan_check_security(chan)) {
2706 if (bt_sk(sk)->defer_setup) {
2707 __l2cap_state_change(chan, BT_CONNECT2);
2708 result = L2CAP_CR_PEND;
2709 status = L2CAP_CS_AUTHOR_PEND;
2710 parent->sk_data_ready(parent, 0);
2711 } else {
2712 __l2cap_state_change(chan, BT_CONFIG);
2713 result = L2CAP_CR_SUCCESS;
2714 status = L2CAP_CS_NO_INFO;
2715 }
2716 } else {
2717 __l2cap_state_change(chan, BT_CONNECT2);
2718 result = L2CAP_CR_PEND;
2719 status = L2CAP_CS_AUTHEN_PEND;
2720 }
2721 } else {
2722 __l2cap_state_change(chan, BT_CONNECT2);
2723 result = L2CAP_CR_PEND;
2724 status = L2CAP_CS_NO_INFO;
2725 }
2726
2727 response:
2728 release_sock(parent);
2729 mutex_unlock(&conn->chan_lock);
2730
2731 sendresp:
2732 rsp.scid = cpu_to_le16(scid);
2733 rsp.dcid = cpu_to_le16(dcid);
2734 rsp.result = cpu_to_le16(result);
2735 rsp.status = cpu_to_le16(status);
2736 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2737
2738 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2739 struct l2cap_info_req info;
2740 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2741
2742 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2743 conn->info_ident = l2cap_get_ident(conn);
2744
2745 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
2746
2747 l2cap_send_cmd(conn, conn->info_ident,
2748 L2CAP_INFO_REQ, sizeof(info), &info);
2749 }
2750
2751 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2752 result == L2CAP_CR_SUCCESS) {
2753 u8 buf[128];
2754 set_bit(CONF_REQ_SENT, &chan->conf_state);
2755 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2756 l2cap_build_conf_req(chan, buf), buf);
2757 chan->num_conf_req++;
2758 }
2759
2760 return 0;
2761 }
2762
2763 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2764 {
2765 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2766 u16 scid, dcid, result, status;
2767 struct l2cap_chan *chan;
2768 u8 req[128];
2769 int err;
2770
2771 scid = __le16_to_cpu(rsp->scid);
2772 dcid = __le16_to_cpu(rsp->dcid);
2773 result = __le16_to_cpu(rsp->result);
2774 status = __le16_to_cpu(rsp->status);
2775
2776 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
2777 dcid, scid, result, status);
2778
2779 mutex_lock(&conn->chan_lock);
2780
2781 if (scid) {
2782 chan = __l2cap_get_chan_by_scid(conn, scid);
2783 if (!chan) {
2784 err = -EFAULT;
2785 goto unlock;
2786 }
2787 } else {
2788 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
2789 if (!chan) {
2790 err = -EFAULT;
2791 goto unlock;
2792 }
2793 }
2794
2795 err = 0;
2796
2797 l2cap_chan_lock(chan);
2798
2799 switch (result) {
2800 case L2CAP_CR_SUCCESS:
2801 l2cap_state_change(chan, BT_CONFIG);
2802 chan->ident = 0;
2803 chan->dcid = dcid;
2804 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2805
2806 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2807 break;
2808
2809 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2810 l2cap_build_conf_req(chan, req), req);
2811 chan->num_conf_req++;
2812 break;
2813
2814 case L2CAP_CR_PEND:
2815 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2816 break;
2817
2818 default:
2819 l2cap_chan_del(chan, ECONNREFUSED);
2820 break;
2821 }
2822
2823 l2cap_chan_unlock(chan);
2824
2825 unlock:
2826 mutex_unlock(&conn->chan_lock);
2827
2828 return err;
2829 }
2830
2831 static inline void set_default_fcs(struct l2cap_chan *chan)
2832 {
2833 /* FCS is enabled only in ERTM or streaming mode, if one or both
2834 * sides request it.
2835 */
2836 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2837 chan->fcs = L2CAP_FCS_NONE;
2838 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2839 chan->fcs = L2CAP_FCS_CRC16;
2840 }
2841
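/* Handle a Configure Request. Fragments (continuation flag set) are
 * accumulated in chan->conf_req; once the last fragment arrives the whole
 * request is parsed and answered, and the channel becomes connected when
 * both directions are configured.
 */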
2842 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2843 {
2844 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2845 u16 dcid, flags;
2846 u8 rsp[64];
2847 struct l2cap_chan *chan;
2848 int len;
2849
2850 dcid = __le16_to_cpu(req->dcid);
2851 flags = __le16_to_cpu(req->flags);
2852
2853 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2854
2855 chan = l2cap_get_chan_by_scid(conn, dcid);
2856 if (!chan)
2857 return -ENOENT;
2858
2859 l2cap_chan_lock(chan);
2860
2861 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2862 struct l2cap_cmd_rej_cid rej;
2863
2864 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2865 rej.scid = cpu_to_le16(chan->scid);
2866 rej.dcid = cpu_to_le16(chan->dcid);
2867
2868 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2869 sizeof(rej), &rej);
2870 goto unlock;
2871 }
2872
2873 /* Reject if config buffer is too small. */
2874 len = cmd_len - sizeof(*req);
2875 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2876 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2877 l2cap_build_conf_rsp(chan, rsp,
2878 L2CAP_CONF_REJECT, flags), rsp);
2879 goto unlock;
2880 }
2881
2882 /* Store config. */
2883 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2884 chan->conf_len += len;
2885
2886 if (flags & 0x0001) {
2887 /* Incomplete config. Send empty response. */
2888 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2889 l2cap_build_conf_rsp(chan, rsp,
2890 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2891 goto unlock;
2892 }
2893
2894 /* Complete config. */
2895 len = l2cap_parse_conf_req(chan, rsp);
2896 if (len < 0) {
2897 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2898 goto unlock;
2899 }
2900
2901 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2902 chan->num_conf_rsp++;
2903
2904 /* Reset config buffer. */
2905 chan->conf_len = 0;
2906
2907 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2908 goto unlock;
2909
2910 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2911 set_default_fcs(chan);
2912
2913 l2cap_state_change(chan, BT_CONNECTED);
2914
2915 chan->next_tx_seq = 0;
2916 chan->expected_tx_seq = 0;
2917 skb_queue_head_init(&chan->tx_q);
2918 if (chan->mode == L2CAP_MODE_ERTM)
2919 l2cap_ertm_init(chan);
2920
2921 l2cap_chan_ready(chan);
2922 goto unlock;
2923 }
2924
2925 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2926 u8 buf[64];
2927 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2928 l2cap_build_conf_req(chan, buf), buf);
2929 chan->num_conf_req++;
2930 }
2931
2932 /* Got Conf Rsp PENDING from remote side and assume we sent
2933 Conf Rsp PENDING in the code above */
2934 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2935 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2936
2937 /* check compatibility */
2938
2939 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2940 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2941
2942 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2943 l2cap_build_conf_rsp(chan, rsp,
2944 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2945 }
2946
2947 unlock:
2948 l2cap_chan_unlock(chan);
2949 return 0;
2950 }
2951
2952 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2953 {
2954 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2955 u16 scid, flags, result;
2956 struct l2cap_chan *chan;
2957 int len = le16_to_cpu(cmd->len) - sizeof(*rsp);
2958
2959 scid = __le16_to_cpu(rsp->scid);
2960 flags = __le16_to_cpu(rsp->flags);
2961 result = __le16_to_cpu(rsp->result);
2962
2963 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2964 scid, flags, result);
2965
2966 chan = l2cap_get_chan_by_scid(conn, scid);
2967 if (!chan)
2968 return 0;
2969
2970 l2cap_chan_lock(chan);
2971
2972 switch (result) {
2973 case L2CAP_CONF_SUCCESS:
2974 l2cap_conf_rfc_get(chan, rsp->data, len);
2975 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2976 break;
2977
2978 case L2CAP_CONF_PENDING:
2979 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2980
2981 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2982 char buf[64];
2983
2984 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2985 buf, &result);
2986 if (len < 0) {
2987 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2988 goto done;
2989 }
2990
2991 /* check compatibility */
2992
2993 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2994 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2995
2996 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2997 l2cap_build_conf_rsp(chan, buf,
2998 L2CAP_CONF_SUCCESS, 0x0000), buf);
2999 }
3000 goto done;
3001
3002 case L2CAP_CONF_UNACCEPT:
3003 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3004 char req[64];
3005
3006 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3007 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3008 goto done;
3009 }
3010
3011 /* throw out any old stored conf requests */
3012 result = L2CAP_CONF_SUCCESS;
3013 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3014 req, &result);
3015 if (len < 0) {
3016 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3017 goto done;
3018 }
3019
3020 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3021 L2CAP_CONF_REQ, len, req);
3022 chan->num_conf_req++;
3023 if (result != L2CAP_CONF_SUCCESS)
3024 goto done;
3025 break;
3026 }
3027
3028 default:
3029 l2cap_chan_set_err(chan, ECONNRESET);
3030
3031 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
3032 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3033 goto done;
3034 }
3035
3036 if (flags & 0x01)
3037 goto done;
3038
3039 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3040
3041 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3042 set_default_fcs(chan);
3043
3044 l2cap_state_change(chan, BT_CONNECTED);
3045 chan->next_tx_seq = 0;
3046 chan->expected_tx_seq = 0;
3047 skb_queue_head_init(&chan->tx_q);
3048 if (chan->mode == L2CAP_MODE_ERTM)
3049 l2cap_ertm_init(chan);
3050
3051 l2cap_chan_ready(chan);
3052 }
3053
3054 done:
3055 l2cap_chan_unlock(chan);
3056 return 0;
3057 }
3058
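/* Handle a Disconnection Request: acknowledge it, mark the socket as shut
 * down and tear the channel down.
 */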
3059 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3060 {
3061 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3062 struct l2cap_disconn_rsp rsp;
3063 u16 dcid, scid;
3064 struct l2cap_chan *chan;
3065 struct sock *sk;
3066
3067 scid = __le16_to_cpu(req->scid);
3068 dcid = __le16_to_cpu(req->dcid);
3069
3070 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3071
3072 mutex_lock(&conn->chan_lock);
3073
3074 chan = __l2cap_get_chan_by_scid(conn, dcid);
3075 if (!chan) {
3076 mutex_unlock(&conn->chan_lock);
3077 return 0;
3078 }
3079
3080 l2cap_chan_lock(chan);
3081
3082 sk = chan->sk;
3083
3084 rsp.dcid = cpu_to_le16(chan->scid);
3085 rsp.scid = cpu_to_le16(chan->dcid);
3086 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3087
3088 lock_sock(sk);
3089 sk->sk_shutdown = SHUTDOWN_MASK;
3090 release_sock(sk);
3091
3092 l2cap_chan_del(chan, ECONNRESET);
3093
3094 l2cap_chan_unlock(chan);
3095
3096 chan->ops->close(chan->data);
3097
3098 mutex_unlock(&conn->chan_lock);
3099
3100 return 0;
3101 }
3102
3103 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3104 {
3105 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3106 u16 dcid, scid;
3107 struct l2cap_chan *chan;
3108
3109 scid = __le16_to_cpu(rsp->scid);
3110 dcid = __le16_to_cpu(rsp->dcid);
3111
3112 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3113
3114 mutex_lock(&conn->chan_lock);
3115
3116 chan = __l2cap_get_chan_by_scid(conn, scid);
3117 if (!chan) {
3118 mutex_unlock(&conn->chan_lock);
3119 return 0;
3120 }
3121
3122 l2cap_chan_lock(chan);
3123
3124 l2cap_chan_del(chan, 0);
3125
3126 l2cap_chan_unlock(chan);
3127
3128 chan->ops->close(chan->data);
3129
3130 mutex_unlock(&conn->chan_lock);
3131
3132 return 0;
3133 }
3134
3135 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3136 {
3137 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3138 u16 type;
3139
3140 type = __le16_to_cpu(req->type);
3141
3142 BT_DBG("type 0x%4.4x", type);
3143
3144 if (type == L2CAP_IT_FEAT_MASK) {
3145 u8 buf[8];
3146 u32 feat_mask = l2cap_feat_mask;
3147 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3148 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3149 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3150 if (!disable_ertm)
3151 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3152 | L2CAP_FEAT_FCS;
3153 if (enable_hs)
3154 feat_mask |= L2CAP_FEAT_EXT_FLOW
3155 | L2CAP_FEAT_EXT_WINDOW;
3156
3157 put_unaligned_le32(feat_mask, rsp->data);
3158 l2cap_send_cmd(conn, cmd->ident,
3159 L2CAP_INFO_RSP, sizeof(buf), buf);
3160 } else if (type == L2CAP_IT_FIXED_CHAN) {
3161 u8 buf[12];
3162 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3163
3164 if (enable_hs)
3165 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3166 else
3167 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3168
3169 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3170 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3171 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3172 l2cap_send_cmd(conn, cmd->ident,
3173 L2CAP_INFO_RSP, sizeof(buf), buf);
3174 } else {
3175 struct l2cap_info_rsp rsp;
3176 rsp.type = cpu_to_le16(type);
3177 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3178 l2cap_send_cmd(conn, cmd->ident,
3179 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3180 }
3181
3182 return 0;
3183 }
3184
3185 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3186 {
3187 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3188 u16 type, result;
3189
3190 type = __le16_to_cpu(rsp->type);
3191 result = __le16_to_cpu(rsp->result);
3192
3193 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3194
3195 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3196 if (cmd->ident != conn->info_ident ||
3197 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3198 return 0;
3199
3200 cancel_delayed_work(&conn->info_timer);
3201
3202 if (result != L2CAP_IR_SUCCESS) {
3203 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3204 conn->info_ident = 0;
3205
3206 l2cap_conn_start(conn);
3207
3208 return 0;
3209 }
3210
3211 switch (type) {
3212 case L2CAP_IT_FEAT_MASK:
3213 conn->feat_mask = get_unaligned_le32(rsp->data);
3214
3215 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3216 struct l2cap_info_req req;
3217 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3218
3219 conn->info_ident = l2cap_get_ident(conn);
3220
3221 l2cap_send_cmd(conn, conn->info_ident,
3222 L2CAP_INFO_REQ, sizeof(req), &req);
3223 } else {
3224 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3225 conn->info_ident = 0;
3226
3227 l2cap_conn_start(conn);
3228 }
3229 break;
3230
3231 case L2CAP_IT_FIXED_CHAN:
3232 conn->fixed_chan_mask = rsp->data[0];
3233 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3234 conn->info_ident = 0;
3235
3236 l2cap_conn_start(conn);
3237 break;
3238 }
3239
3240 return 0;
3241 }
3242
3243 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3244 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3245 void *data)
3246 {
3247 struct l2cap_create_chan_req *req = data;
3248 struct l2cap_create_chan_rsp rsp;
3249 u16 psm, scid;
3250
3251 if (cmd_len != sizeof(*req))
3252 return -EPROTO;
3253
3254 if (!enable_hs)
3255 return -EINVAL;
3256
3257 psm = le16_to_cpu(req->psm);
3258 scid = le16_to_cpu(req->scid);
3259
3260 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3261
3262 /* Placeholder: Always reject */
3263 rsp.dcid = 0;
3264 rsp.scid = cpu_to_le16(scid);
3265 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
3266 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
3267
3268 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3269 sizeof(rsp), &rsp);
3270
3271 return 0;
3272 }
3273
3274 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3275 struct l2cap_cmd_hdr *cmd, void *data)
3276 {
3277 BT_DBG("conn %p", conn);
3278
3279 return l2cap_connect_rsp(conn, cmd, data);
3280 }
3281
3282 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3283 u16 icid, u16 result)
3284 {
3285 struct l2cap_move_chan_rsp rsp;
3286
3287 BT_DBG("icid %d, result %d", icid, result);
3288
3289 rsp.icid = cpu_to_le16(icid);
3290 rsp.result = cpu_to_le16(result);
3291
3292 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3293 }
3294
3295 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3296 struct l2cap_chan *chan, u16 icid, u16 result)
3297 {
3298 struct l2cap_move_chan_cfm cfm;
3299 u8 ident;
3300
3301 BT_DBG("icid %d, result %d", icid, result);
3302
3303 ident = l2cap_get_ident(conn);
3304 if (chan)
3305 chan->ident = ident;
3306
3307 cfm.icid = cpu_to_le16(icid);
3308 cfm.result = cpu_to_le16(result);
3309
3310 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3311 }
3312
3313 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3314 u16 icid)
3315 {
3316 struct l2cap_move_chan_cfm_rsp rsp;
3317
3318 BT_DBG("icid %d", icid);
3319
3320 rsp.icid = cpu_to_le16(icid);
3321 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3322 }
3323
3324 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3325 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3326 {
3327 struct l2cap_move_chan_req *req = data;
3328 u16 icid = 0;
3329 u16 result = L2CAP_MR_NOT_ALLOWED;
3330
3331 if (cmd_len != sizeof(*req))
3332 return -EPROTO;
3333
3334 icid = le16_to_cpu(req->icid);
3335
3336 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3337
3338 if (!enable_hs)
3339 return -EINVAL;
3340
3341 /* Placeholder: Always refuse */
3342 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3343
3344 return 0;
3345 }
3346
3347 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3348 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3349 {
3350 struct l2cap_move_chan_rsp *rsp = data;
3351 u16 icid, result;
3352
3353 if (cmd_len != sizeof(*rsp))
3354 return -EPROTO;
3355
3356 icid = le16_to_cpu(rsp->icid);
3357 result = le16_to_cpu(rsp->result);
3358
3359 BT_DBG("icid %d, result %d", icid, result);
3360
3361 /* Placeholder: Always unconfirmed */
3362 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3363
3364 return 0;
3365 }
3366
3367 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3368 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3369 {
3370 struct l2cap_move_chan_cfm *cfm = data;
3371 u16 icid, result;
3372
3373 if (cmd_len != sizeof(*cfm))
3374 return -EPROTO;
3375
3376 icid = le16_to_cpu(cfm->icid);
3377 result = le16_to_cpu(cfm->result);
3378
3379 BT_DBG("icid %d, result %d", icid, result);
3380
3381 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3382
3383 return 0;
3384 }
3385
3386 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3387 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3388 {
3389 struct l2cap_move_chan_cfm_rsp *rsp = data;
3390 u16 icid;
3391
3392 if (cmd_len != sizeof(*rsp))
3393 return -EPROTO;
3394
3395 icid = le16_to_cpu(rsp->icid);
3396
3397 BT_DBG("icid %d", icid);
3398
3399 return 0;
3400 }
3401
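/* Sanity-check an LE connection parameter update: the interval bounds are
 * in 1.25 ms units, the supervision timeout in 10 ms units, and the slave
 * latency is limited so the supervision timeout cannot be exceeded.
 */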
3402 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3403 u16 to_multiplier)
3404 {
3405 u16 max_latency;
3406
3407 if (min > max || min < 6 || max > 3200)
3408 return -EINVAL;
3409
3410 if (to_multiplier < 10 || to_multiplier > 3200)
3411 return -EINVAL;
3412
3413 if (max >= to_multiplier * 8)
3414 return -EINVAL;
3415
3416 max_latency = (to_multiplier * 8 / max) - 1;
3417 if (latency > 499 || latency > max_latency)
3418 return -EINVAL;
3419
3420 return 0;
3421 }
3422
3423 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3424 struct l2cap_cmd_hdr *cmd, u8 *data)
3425 {
3426 struct hci_conn *hcon = conn->hcon;
3427 struct l2cap_conn_param_update_req *req;
3428 struct l2cap_conn_param_update_rsp rsp;
3429 u16 min, max, latency, to_multiplier, cmd_len;
3430 int err;
3431
3432 if (!(hcon->link_mode & HCI_LM_MASTER))
3433 return -EINVAL;
3434
3435 cmd_len = __le16_to_cpu(cmd->len);
3436 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3437 return -EPROTO;
3438
3439 req = (struct l2cap_conn_param_update_req *) data;
3440 min = __le16_to_cpu(req->min);
3441 max = __le16_to_cpu(req->max);
3442 latency = __le16_to_cpu(req->latency);
3443 to_multiplier = __le16_to_cpu(req->to_multiplier);
3444
3445 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3446 min, max, latency, to_multiplier);
3447
3448 memset(&rsp, 0, sizeof(rsp));
3449
3450 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3451 if (err)
3452 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3453 else
3454 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3455
3456 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3457 sizeof(rsp), &rsp);
3458
3459 if (!err)
3460 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3461
3462 return 0;
3463 }
3464
3465 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3466 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3467 {
3468 int err = 0;
3469
3470 switch (cmd->code) {
3471 case L2CAP_COMMAND_REJ:
3472 l2cap_command_rej(conn, cmd, data);
3473 break;
3474
3475 case L2CAP_CONN_REQ:
3476 err = l2cap_connect_req(conn, cmd, data);
3477 break;
3478
3479 case L2CAP_CONN_RSP:
3480 err = l2cap_connect_rsp(conn, cmd, data);
3481 break;
3482
3483 case L2CAP_CONF_REQ:
3484 err = l2cap_config_req(conn, cmd, cmd_len, data);
3485 break;
3486
3487 case L2CAP_CONF_RSP:
3488 err = l2cap_config_rsp(conn, cmd, data);
3489 break;
3490
3491 case L2CAP_DISCONN_REQ:
3492 err = l2cap_disconnect_req(conn, cmd, data);
3493 break;
3494
3495 case L2CAP_DISCONN_RSP:
3496 err = l2cap_disconnect_rsp(conn, cmd, data);
3497 break;
3498
3499 case L2CAP_ECHO_REQ:
3500 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3501 break;
3502
3503 case L2CAP_ECHO_RSP:
3504 break;
3505
3506 case L2CAP_INFO_REQ:
3507 err = l2cap_information_req(conn, cmd, data);
3508 break;
3509
3510 case L2CAP_INFO_RSP:
3511 err = l2cap_information_rsp(conn, cmd, data);
3512 break;
3513
3514 case L2CAP_CREATE_CHAN_REQ:
3515 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3516 break;
3517
3518 case L2CAP_CREATE_CHAN_RSP:
3519 err = l2cap_create_channel_rsp(conn, cmd, data);
3520 break;
3521
3522 case L2CAP_MOVE_CHAN_REQ:
3523 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3524 break;
3525
3526 case L2CAP_MOVE_CHAN_RSP:
3527 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3528 break;
3529
3530 case L2CAP_MOVE_CHAN_CFM:
3531 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3532 break;
3533
3534 case L2CAP_MOVE_CHAN_CFM_RSP:
3535 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3536 break;
3537
3538 default:
3539 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3540 err = -EINVAL;
3541 break;
3542 }
3543
3544 return err;
3545 }
3546
3547 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3548 struct l2cap_cmd_hdr *cmd, u8 *data)
3549 {
3550 switch (cmd->code) {
3551 case L2CAP_COMMAND_REJ:
3552 return 0;
3553
3554 case L2CAP_CONN_PARAM_UPDATE_REQ:
3555 return l2cap_conn_param_update_req(conn, cmd, data);
3556
3557 case L2CAP_CONN_PARAM_UPDATE_RSP:
3558 return 0;
3559
3560 default:
3561 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3562 return -EINVAL;
3563 }
3564 }
3565
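/* A single C-frame may carry several signalling commands; process them one
 * by one and answer any that fail with a Command Reject.
 */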
3566 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3567 struct sk_buff *skb)
3568 {
3569 u8 *data = skb->data;
3570 int len = skb->len;
3571 struct l2cap_cmd_hdr cmd;
3572 int err;
3573
3574 l2cap_raw_recv(conn, skb);
3575
3576 while (len >= L2CAP_CMD_HDR_SIZE) {
3577 u16 cmd_len;
3578 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3579 data += L2CAP_CMD_HDR_SIZE;
3580 len -= L2CAP_CMD_HDR_SIZE;
3581
3582 cmd_len = le16_to_cpu(cmd.len);
3583
3584 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3585
3586 if (cmd_len > len || !cmd.ident) {
3587 BT_DBG("corrupted command");
3588 break;
3589 }
3590
3591 if (conn->hcon->type == LE_LINK)
3592 err = l2cap_le_sig_cmd(conn, &cmd, data);
3593 else
3594 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3595
3596 if (err) {
3597 struct l2cap_cmd_rej_unk rej;
3598
3599 BT_ERR("Signaling command 0x%2.2x failed: %d", cmd.code, err);
3600
3601 /* FIXME: Map err to a valid reason */
3602 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3603 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3604 }
3605
3606 data += cmd_len;
3607 len -= cmd_len;
3608 }
3609
3610 kfree_skb(skb);
3611 }
3612
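/* When CRC16 FCS is in use, trim the two FCS octets from the tail of the
 * frame and verify them against a CRC computed over the L2CAP header and
 * the remaining payload.
 */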
3613 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3614 {
3615 u16 our_fcs, rcv_fcs;
3616 int hdr_size;
3617
3618 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3619 hdr_size = L2CAP_EXT_HDR_SIZE;
3620 else
3621 hdr_size = L2CAP_ENH_HDR_SIZE;
3622
3623 if (chan->fcs == L2CAP_FCS_CRC16) {
3624 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3625 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3626 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3627
3628 if (our_fcs != rcv_fcs)
3629 return -EBADMSG;
3630 }
3631 return 0;
3632 }
3633
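/* Acknowledge the peer: send RNR while locally busy, retransmit if the
 * remote had been busy, push any pending I-frames and fall back to a plain
 * RR if nothing else was sent.
 */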
3634 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3635 {
3636 u32 control = 0;
3637
3638 chan->frames_sent = 0;
3639
3640 control |= __set_reqseq(chan, chan->buffer_seq);
3641
3642 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3643 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3644 l2cap_send_sframe(chan, control);
3645 set_bit(CONN_RNR_SENT, &chan->conn_state);
3646 }
3647
3648 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3649 l2cap_retransmit_frames(chan);
3650
3651 l2cap_ertm_send(chan);
3652
3653 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3654 chan->frames_sent == 0) {
3655 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3656 l2cap_send_sframe(chan, control);
3657 }
3658 }
3659
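/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the queue
 * ordered by tx_seq and rejecting duplicates.
 */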
3660 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3661 {
3662 struct sk_buff *next_skb;
3663 int tx_seq_offset, next_tx_seq_offset;
3664
3665 bt_cb(skb)->tx_seq = tx_seq;
3666 bt_cb(skb)->sar = sar;
3667
3668 next_skb = skb_peek(&chan->srej_q);
3669
3670 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3671
3672 while (next_skb) {
3673 if (bt_cb(next_skb)->tx_seq == tx_seq)
3674 return -EINVAL;
3675
3676 next_tx_seq_offset = __seq_offset(chan,
3677 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3678
3679 if (next_tx_seq_offset > tx_seq_offset) {
3680 __skb_queue_before(&chan->srej_q, next_skb, skb);
3681 return 0;
3682 }
3683
3684 if (skb_queue_is_last(&chan->srej_q, next_skb))
3685 next_skb = NULL;
3686 else
3687 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3688 }
3689
3690 __skb_queue_tail(&chan->srej_q, skb);
3691
3692 return 0;
3693 }
3694
3695 static void append_skb_frag(struct sk_buff *skb,
3696 struct sk_buff *new_frag, struct sk_buff **last_frag)
3697 {
3698 /* skb->len reflects data in skb as well as all fragments
3699 * skb->data_len reflects only data in fragments
3700 */
3701 if (!skb_has_frag_list(skb))
3702 skb_shinfo(skb)->frag_list = new_frag;
3703
3704 new_frag->next = NULL;
3705
3706 (*last_frag)->next = new_frag;
3707 *last_frag = new_frag;
3708
3709 skb->len += new_frag->len;
3710 skb->data_len += new_frag->len;
3711 skb->truesize += new_frag->truesize;
3712 }
3713
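/* Reassemble an SDU from I-frames according to their SAR bits: unsegmented
 * frames are delivered directly, a START frame opens a new SDU (its first
 * two bytes carry the total SDU length), CONTINUE/END frames are appended
 * until the announced length is reached.
 */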
3714 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3715 {
3716 int err = -EINVAL;
3717
3718 switch (__get_ctrl_sar(chan, control)) {
3719 case L2CAP_SAR_UNSEGMENTED:
3720 if (chan->sdu)
3721 break;
3722
3723 err = chan->ops->recv(chan->data, skb);
3724 break;
3725
3726 case L2CAP_SAR_START:
3727 if (chan->sdu)
3728 break;
3729
3730 chan->sdu_len = get_unaligned_le16(skb->data);
3731 skb_pull(skb, L2CAP_SDULEN_SIZE);
3732
3733 if (chan->sdu_len > chan->imtu) {
3734 err = -EMSGSIZE;
3735 break;
3736 }
3737
3738 if (skb->len >= chan->sdu_len)
3739 break;
3740
3741 chan->sdu = skb;
3742 chan->sdu_last_frag = skb;
3743
3744 skb = NULL;
3745 err = 0;
3746 break;
3747
3748 case L2CAP_SAR_CONTINUE:
3749 if (!chan->sdu)
3750 break;
3751
3752 append_skb_frag(chan->sdu, skb,
3753 &chan->sdu_last_frag);
3754 skb = NULL;
3755
3756 if (chan->sdu->len >= chan->sdu_len)
3757 break;
3758
3759 err = 0;
3760 break;
3761
3762 case L2CAP_SAR_END:
3763 if (!chan->sdu)
3764 break;
3765
3766 append_skb_frag(chan->sdu, skb,
3767 &chan->sdu_last_frag);
3768 skb = NULL;
3769
3770 if (chan->sdu->len != chan->sdu_len)
3771 break;
3772
3773 err = chan->ops->recv(chan->data, chan->sdu);
3774
3775 if (!err) {
3776 /* Reassembly complete */
3777 chan->sdu = NULL;
3778 chan->sdu_last_frag = NULL;
3779 chan->sdu_len = 0;
3780 }
3781 break;
3782 }
3783
3784 if (err) {
3785 kfree_skb(skb);
3786 kfree_skb(chan->sdu);
3787 chan->sdu = NULL;
3788 chan->sdu_last_frag = NULL;
3789 chan->sdu_len = 0;
3790 }
3791
3792 return err;
3793 }
3794
3795 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3796 {
3797 BT_DBG("chan %p, Enter local busy", chan);
3798
3799 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3800
3801 __set_ack_timer(chan);
3802 }
3803
3804 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3805 {
3806 u32 control;
3807
3808 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3809 goto done;
3810
3811 control = __set_reqseq(chan, chan->buffer_seq);
3812 control |= __set_ctrl_poll(chan);
3813 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3814 l2cap_send_sframe(chan, control);
3815 chan->retry_count = 1;
3816
3817 __clear_retrans_timer(chan);
3818 __set_monitor_timer(chan);
3819
3820 set_bit(CONN_WAIT_F, &chan->conn_state);
3821
3822 done:
3823 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3824 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3825
3826 BT_DBG("chan %p, Exit local busy", chan);
3827 }
3828
3829 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3830 {
3831 if (chan->mode == L2CAP_MODE_ERTM) {
3832 if (busy)
3833 l2cap_ertm_enter_local_busy(chan);
3834 else
3835 l2cap_ertm_exit_local_busy(chan);
3836 }
3837 }
3838
3839 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3840 {
3841 struct sk_buff *skb;
3842 u32 control;
3843
3844 while ((skb = skb_peek(&chan->srej_q)) &&
3845 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3846 int err;
3847
3848 if (bt_cb(skb)->tx_seq != tx_seq)
3849 break;
3850
3851 skb = skb_dequeue(&chan->srej_q);
3852 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3853 err = l2cap_reassemble_sdu(chan, skb, control);
3854
3855 if (err < 0) {
3856 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3857 break;
3858 }
3859
3860 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3861 tx_seq = __next_seq(chan, tx_seq);
3862 }
3863 }
3864
3865 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3866 {
3867 struct srej_list *l, *tmp;
3868 u32 control;
3869
3870 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3871 if (l->tx_seq == tx_seq) {
3872 list_del(&l->list);
3873 kfree(l);
3874 return;
3875 }
3876 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3877 control |= __set_reqseq(chan, l->tx_seq);
3878 l2cap_send_sframe(chan, control);
3879 list_del(&l->list);
3880 list_add_tail(&l->list, &chan->srej_l);
3881 }
3882 }
3883
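/* Send an SREJ for every sequence number missing before tx_seq and remember
 * each one on the srej list so the retransmissions can be matched later.
 */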
3884 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3885 {
3886 struct srej_list *new;
3887 u32 control;
3888
3889 while (tx_seq != chan->expected_tx_seq) {
3890 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3891 control |= __set_reqseq(chan, chan->expected_tx_seq);
3892 l2cap_send_sframe(chan, control);
3893
3894 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3895 if (!new)
3896 return -ENOMEM;
3897
3898 new->tx_seq = chan->expected_tx_seq;
3899
3900 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3901
3902 list_add_tail(&new->list, &chan->srej_l);
3903 }
3904
3905 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3906
3907 return 0;
3908 }
3909
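/* ERTM I-frame receive path: process the acknowledgement carried in req_seq,
 * drop frames outside the TX window, queue out-of-sequence frames for SREJ
 * recovery and hand in-sequence frames to SDU reassembly.
 */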
3910 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3911 {
3912 u16 tx_seq = __get_txseq(chan, rx_control);
3913 u16 req_seq = __get_reqseq(chan, rx_control);
3914 u8 sar = __get_ctrl_sar(chan, rx_control);
3915 int tx_seq_offset, expected_tx_seq_offset;
3916 int num_to_ack = (chan->tx_win/6) + 1;
3917 int err = 0;
3918
3919 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3920 tx_seq, rx_control);
3921
3922 if (__is_ctrl_final(chan, rx_control) &&
3923 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3924 __clear_monitor_timer(chan);
3925 if (chan->unacked_frames > 0)
3926 __set_retrans_timer(chan);
3927 clear_bit(CONN_WAIT_F, &chan->conn_state);
3928 }
3929
3930 chan->expected_ack_seq = req_seq;
3931 l2cap_drop_acked_frames(chan);
3932
3933 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3934
3935 /* invalid tx_seq */
3936 if (tx_seq_offset >= chan->tx_win) {
3937 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3938 goto drop;
3939 }
3940
3941 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3942 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3943 l2cap_send_ack(chan);
3944 goto drop;
3945 }
3946
3947 if (tx_seq == chan->expected_tx_seq)
3948 goto expected;
3949
3950 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3951 struct srej_list *first;
3952
3953 first = list_first_entry(&chan->srej_l,
3954 struct srej_list, list);
3955 if (tx_seq == first->tx_seq) {
3956 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3957 l2cap_check_srej_gap(chan, tx_seq);
3958
3959 list_del(&first->list);
3960 kfree(first);
3961
3962 if (list_empty(&chan->srej_l)) {
3963 chan->buffer_seq = chan->buffer_seq_srej;
3964 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3965 l2cap_send_ack(chan);
3966 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3967 }
3968 } else {
3969 struct srej_list *l;
3970
3971 /* duplicated tx_seq */
3972 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3973 goto drop;
3974
3975 list_for_each_entry(l, &chan->srej_l, list) {
3976 if (l->tx_seq == tx_seq) {
3977 l2cap_resend_srejframe(chan, tx_seq);
3978 return 0;
3979 }
3980 }
3981
3982 err = l2cap_send_srejframe(chan, tx_seq);
3983 if (err < 0) {
3984 l2cap_send_disconn_req(chan->conn, chan, -err);
3985 return err;
3986 }
3987 }
3988 } else {
3989 expected_tx_seq_offset = __seq_offset(chan,
3990 chan->expected_tx_seq, chan->buffer_seq);
3991
3992 /* duplicated tx_seq */
3993 if (tx_seq_offset < expected_tx_seq_offset)
3994 goto drop;
3995
3996 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3997
3998 BT_DBG("chan %p, Enter SREJ", chan);
3999
4000 INIT_LIST_HEAD(&chan->srej_l);
4001 chan->buffer_seq_srej = chan->buffer_seq;
4002
4003 __skb_queue_head_init(&chan->srej_q);
4004 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4005
4006 /* Set P-bit only if there are some I-frames to ack. */
4007 if (__clear_ack_timer(chan))
4008 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4009
4010 err = l2cap_send_srejframe(chan, tx_seq);
4011 if (err < 0) {
4012 l2cap_send_disconn_req(chan->conn, chan, -err);
4013 return err;
4014 }
4015 }
4016 return 0;
4017
4018 expected:
4019 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4020
4021 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4022 bt_cb(skb)->tx_seq = tx_seq;
4023 bt_cb(skb)->sar = sar;
4024 __skb_queue_tail(&chan->srej_q, skb);
4025 return 0;
4026 }
4027
4028 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4029 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4030
4031 if (err < 0) {
4032 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4033 return err;
4034 }
4035
4036 if (__is_ctrl_final(chan, rx_control)) {
4037 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4038 l2cap_retransmit_frames(chan);
4039 }
4040
4041
4042 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4043 if (chan->num_acked == num_to_ack - 1)
4044 l2cap_send_ack(chan);
4045 else
4046 __set_ack_timer(chan);
4047
4048 return 0;
4049
4050 drop:
4051 kfree_skb(skb);
4052 return 0;
4053 }
4054
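/* Handle a Receiver Ready S-frame: frames up to req_seq are acknowledged,
 * a poll is answered with our current state, a final bit may trigger
 * retransmission, otherwise normal transmission resumes.
 */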
4055 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4056 {
4057 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4058 __get_reqseq(chan, rx_control), rx_control);
4059
4060 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4061 l2cap_drop_acked_frames(chan);
4062
4063 if (__is_ctrl_poll(chan, rx_control)) {
4064 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4065 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4066 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4067 (chan->unacked_frames > 0))
4068 __set_retrans_timer(chan);
4069
4070 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4071 l2cap_send_srejtail(chan);
4072 } else {
4073 l2cap_send_i_or_rr_or_rnr(chan);
4074 }
4075
4076 } else if (__is_ctrl_final(chan, rx_control)) {
4077 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4078
4079 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4080 l2cap_retransmit_frames(chan);
4081
4082 } else {
4083 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4084 (chan->unacked_frames > 0))
4085 __set_retrans_timer(chan);
4086
4087 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4088 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4089 l2cap_send_ack(chan);
4090 else
4091 l2cap_ertm_send(chan);
4092 }
4093 }
4094
4095 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4096 {
4097 u16 tx_seq = __get_reqseq(chan, rx_control);
4098
4099 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4100
4101 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4102
4103 chan->expected_ack_seq = tx_seq;
4104 l2cap_drop_acked_frames(chan);
4105
4106 if (__is_ctrl_final(chan, rx_control)) {
4107 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4108 l2cap_retransmit_frames(chan);
4109 } else {
4110 l2cap_retransmit_frames(chan);
4111
4112 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4113 set_bit(CONN_REJ_ACT, &chan->conn_state);
4114 }
4115 }

4116 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4117 {
4118 u16 tx_seq = __get_reqseq(chan, rx_control);
4119
4120 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4121
4122 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4123
4124 if (__is_ctrl_poll(chan, rx_control)) {
4125 chan->expected_ack_seq = tx_seq;
4126 l2cap_drop_acked_frames(chan);
4127
4128 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4129 l2cap_retransmit_one_frame(chan, tx_seq);
4130
4131 l2cap_ertm_send(chan);
4132
4133 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4134 chan->srej_save_reqseq = tx_seq;
4135 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4136 }
4137 } else if (__is_ctrl_final(chan, rx_control)) {
4138 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4139 chan->srej_save_reqseq == tx_seq)
4140 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4141 else
4142 l2cap_retransmit_one_frame(chan, tx_seq);
4143 } else {
4144 l2cap_retransmit_one_frame(chan, tx_seq);
4145 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4146 chan->srej_save_reqseq = tx_seq;
4147 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4148 }
4149 }
4150 }
4151
4152 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4153 {
4154 u16 tx_seq = __get_reqseq(chan, rx_control);
4155
4156 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4157
4158 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4159 chan->expected_ack_seq = tx_seq;
4160 l2cap_drop_acked_frames(chan);
4161
4162 if (__is_ctrl_poll(chan, rx_control))
4163 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4164
4165 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4166 __clear_retrans_timer(chan);
4167 if (__is_ctrl_poll(chan, rx_control))
4168 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4169 return;
4170 }
4171
4172 if (__is_ctrl_poll(chan, rx_control)) {
4173 l2cap_send_srejtail(chan);
4174 } else {
4175 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4176 l2cap_send_sframe(chan, rx_control);
4177 }
4178 }
4179
4180 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4181 {
4182 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4183
4184 if (__is_ctrl_final(chan, rx_control) &&
4185 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4186 __clear_monitor_timer(chan);
4187 if (chan->unacked_frames > 0)
4188 __set_retrans_timer(chan);
4189 clear_bit(CONN_WAIT_F, &chan->conn_state);
4190 }
4191
4192 switch (__get_ctrl_super(chan, rx_control)) {
4193 case L2CAP_SUPER_RR:
4194 l2cap_data_channel_rrframe(chan, rx_control);
4195 break;
4196
4197 case L2CAP_SUPER_REJ:
4198 l2cap_data_channel_rejframe(chan, rx_control);
4199 break;
4200
4201 case L2CAP_SUPER_SREJ:
4202 l2cap_data_channel_srejframe(chan, rx_control);
4203 break;
4204
4205 case L2CAP_SUPER_RNR:
4206 l2cap_data_channel_rnrframe(chan, rx_control);
4207 break;
4208 }
4209
4210 kfree_skb(skb);
4211 return 0;
4212 }
4213
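/* Entry point for ERTM PDUs: strip the control field, verify the FCS and
 * length bounds, validate req_seq against the unacked window and dispatch
 * to the I-frame or S-frame handler.
 */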
4214 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4215 {
4216 u32 control;
4217 u16 req_seq;
4218 int len, next_tx_seq_offset, req_seq_offset;
4219
4220 control = __get_control(chan, skb->data);
4221 skb_pull(skb, __ctrl_size(chan));
4222 len = skb->len;
4223
4224 /*
4225 * We can just drop a corrupted I-frame here.
4226 * The receive side will notice the gap in the sequence and start
4227 * the normal recovery procedure, requesting retransmission.
4228 */
4229 if (l2cap_check_fcs(chan, skb))
4230 goto drop;
4231
4232 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4233 len -= L2CAP_SDULEN_SIZE;
4234
4235 if (chan->fcs == L2CAP_FCS_CRC16)
4236 len -= L2CAP_FCS_SIZE;
4237
4238 if (len > chan->mps) {
4239 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4240 goto drop;
4241 }
4242
4243 req_seq = __get_reqseq(chan, control);
4244
4245 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4246
4247 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4248 chan->expected_ack_seq);
4249
4250 /* check for invalid req-seq */
4251 if (req_seq_offset > next_tx_seq_offset) {
4252 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4253 goto drop;
4254 }
4255
4256 if (!__is_sframe(chan, control)) {
4257 if (len < 0) {
4258 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4259 goto drop;
4260 }
4261
4262 l2cap_data_channel_iframe(chan, control, skb);
4263 } else {
4264 if (len != 0) {
4265 BT_ERR("Invalid S-frame payload length %d", len);
4266 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4267 goto drop;
4268 }
4269
4270 l2cap_data_channel_sframe(chan, control, skb);
4271 }
4272
4273 return 0;
4274
4275 drop:
4276 kfree_skb(skb);
4277 return 0;
4278 }
4279
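/* Deliver a PDU arriving on a dynamically allocated CID according to the
 * channel mode: basic mode goes straight to the socket, ERTM through the
 * full state machine, streaming mode with only FCS and sequence checks.
 */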
4280 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4281 {
4282 struct l2cap_chan *chan;
4283 u32 control;
4284 u16 tx_seq;
4285 int len;
4286
4287 chan = l2cap_get_chan_by_scid(conn, cid);
4288 if (!chan) {
4289 BT_DBG("unknown cid 0x%4.4x", cid);
4290 /* Drop packet and return */
4291 kfree_skb(skb);
4292 return 0;
4293 }
4294
4295 l2cap_chan_lock(chan);
4296
4297 BT_DBG("chan %p, len %d", chan, skb->len);
4298
4299 if (chan->state != BT_CONNECTED)
4300 goto drop;
4301
4302 switch (chan->mode) {
4303 case L2CAP_MODE_BASIC:
4304 /* If the socket receive buffer overflows we drop data here,
4305 * which is *bad* because L2CAP has to be reliable.
4306 * But we don't have any other choice: basic mode L2CAP doesn't
4307 * provide a flow control mechanism. */
4308
4309 if (chan->imtu < skb->len)
4310 goto drop;
4311
4312 if (!chan->ops->recv(chan->data, skb))
4313 goto done;
4314 break;
4315
4316 case L2CAP_MODE_ERTM:
4317 l2cap_ertm_data_rcv(chan, skb);
4318
4319 goto done;
4320
4321 case L2CAP_MODE_STREAMING:
4322 control = __get_control(chan, skb->data);
4323 skb_pull(skb, __ctrl_size(chan));
4324 len = skb->len;
4325
4326 if (l2cap_check_fcs(chan, skb))
4327 goto drop;
4328
4329 if (__is_sar_start(chan, control))
4330 len -= L2CAP_SDULEN_SIZE;
4331
4332 if (chan->fcs == L2CAP_FCS_CRC16)
4333 len -= L2CAP_FCS_SIZE;
4334
4335 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4336 goto drop;
4337
4338 tx_seq = __get_txseq(chan, control);
4339
4340 if (chan->expected_tx_seq != tx_seq) {
4341 /* Frame(s) missing - must discard partial SDU */
4342 kfree_skb(chan->sdu);
4343 chan->sdu = NULL;
4344 chan->sdu_last_frag = NULL;
4345 chan->sdu_len = 0;
4346
4347 /* TODO: Notify userland of missing data */
4348 }
4349
4350 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4351
4352 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4353 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4354
4355 goto done;
4356
4357 default:
4358 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4359 break;
4360 }
4361
4362 drop:
4363 kfree_skb(skb);
4364
4365 done:
4366 l2cap_chan_unlock(chan);
4367
4368 return 0;
4369 }
4370
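/* Deliver a connectionless (group) frame to the channel bound to the given
 * PSM, dropping it if no suitable channel exists, the channel state does
 * not allow reception or the frame exceeds the incoming MTU.
 */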
4371 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4372 {
4373 struct l2cap_chan *chan;
4374
4375 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4376 if (!chan)
4377 goto drop;
4378
4379 BT_DBG("chan %p, len %d", chan, skb->len);
4380
4381 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4382 goto drop;
4383
4384 if (chan->imtu < skb->len)
4385 goto drop;
4386
4387 if (!chan->ops->recv(chan->data, skb))
4388 return 0;
4389
4390 drop:
4391 kfree_skb(skb);
4392
4393 return 0;
4394 }
4395
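/* Deliver an ATT (LE data) frame to the channel bound to the fixed CID,
 * applying the same state and MTU checks as the connectionless path.
 */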
4396 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4397 {
4398 struct l2cap_chan *chan;
4399
4400 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4401 if (!chan)
4402 goto drop;
4403
4404 BT_DBG("chan %p, len %d", chan, skb->len);
4405
4406 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4407 goto drop;
4408
4409 if (chan->imtu < skb->len)
4410 goto drop;
4411
4412 if (!chan->ops->recv(chan->data, skb))
4413 return 0;
4414
4415 drop:
4416 kfree_skb(skb);
4417
4418 return 0;
4419 }
4420
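/* Parse the Basic L2CAP header of a reassembled frame and route it by CID:
 * signalling, connectionless, ATT, SMP or a regular data channel.
 */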
4421 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4422 {
4423 struct l2cap_hdr *lh = (void *) skb->data;
4424 u16 cid, len;
4425 __le16 psm;
4426
4427 skb_pull(skb, L2CAP_HDR_SIZE);
4428 cid = __le16_to_cpu(lh->cid);
4429 len = __le16_to_cpu(lh->len);
4430
4431 if (len != skb->len) {
4432 kfree_skb(skb);
4433 return;
4434 }
4435
4436 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4437
4438 switch (cid) {
4439 case L2CAP_CID_LE_SIGNALING:
4440 case L2CAP_CID_SIGNALING:
4441 l2cap_sig_channel(conn, skb);
4442 break;
4443
4444 case L2CAP_CID_CONN_LESS:
4445 psm = get_unaligned_le16(skb->data);
4446 skb_pull(skb, 2);
4447 l2cap_conless_channel(conn, psm, skb);
4448 break;
4449
4450 case L2CAP_CID_LE_DATA:
4451 l2cap_att_channel(conn, cid, skb);
4452 break;
4453
4454 case L2CAP_CID_SMP:
4455 if (smp_sig_channel(conn, skb))
4456 l2cap_conn_del(conn->hcon, EACCES);
4457 break;
4458
4459 default:
4460 l2cap_data_channel(conn, cid, skb);
4461 break;
4462 }
4463 }
4464
4465 /* ---- L2CAP interface with lower layer (HCI) ---- */
4466
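/* Called by the HCI layer for an incoming ACL connection: scan the listening
 * channels for a matching source address and report the accepted link modes,
 * preferring an exact address match over wildcard listeners.
 */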
4467 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4468 {
4469 int exact = 0, lm1 = 0, lm2 = 0;
4470 struct l2cap_chan *c;
4471
4472 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4473
4474 /* Find listening sockets and check their link_mode */
4475 read_lock(&chan_list_lock);
4476 list_for_each_entry(c, &chan_list, global_l) {
4477 struct sock *sk = c->sk;
4478
4479 if (c->state != BT_LISTEN)
4480 continue;
4481
4482 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4483 lm1 |= HCI_LM_ACCEPT;
4484 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4485 lm1 |= HCI_LM_MASTER;
4486 exact++;
4487 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4488 lm2 |= HCI_LM_ACCEPT;
4489 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4490 lm2 |= HCI_LM_MASTER;
4491 }
4492 }
4493 read_unlock(&chan_list_lock);
4494
4495 return exact ? lm1 : lm2;
4496 }
4497
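/* Connection complete callback from HCI: set up the L2CAP connection on
 * success or tear it down with the translated error code on failure.
 */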
4498 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4499 {
4500 struct l2cap_conn *conn;
4501
4502 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4503
4504 if (!status) {
4505 conn = l2cap_conn_add(hcon, status);
4506 if (conn)
4507 l2cap_conn_ready(conn);
4508 } else
4509 l2cap_conn_del(hcon, bt_to_errno(status));
4510
4511 return 0;
4512 }
4513
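/* Disconnection indication from HCI: report the reason to use for the
 * upcoming disconnect, defaulting to remote user termination.
 */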
4514 int l2cap_disconn_ind(struct hci_conn *hcon)
4515 {
4516 struct l2cap_conn *conn = hcon->l2cap_data;
4517
4518 BT_DBG("hcon %p", hcon);
4519
4520 if (!conn)
4521 return HCI_ERROR_REMOTE_USER_TERM;
4522 return conn->disc_reason;
4523 }
4524
4525 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4526 {
4527 BT_DBG("hcon %p reason %d", hcon, reason);
4528
4529 l2cap_conn_del(hcon, bt_to_errno(reason));
4530 return 0;
4531 }
4532
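/* React to an encryption change on a connection-oriented channel: when
 * encryption is lost, arm the encryption timer or close the channel
 * depending on the required security level; when it comes back, clear
 * the timer again.
 */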
4533 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4534 {
4535 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4536 return;
4537
4538 if (encrypt == 0x00) {
4539 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4540 __clear_chan_timer(chan);
4541 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4542 } else if (chan->sec_level == BT_SECURITY_HIGH)
4543 l2cap_chan_close(chan, ECONNREFUSED);
4544 } else {
4545 if (chan->sec_level == BT_SECURITY_MEDIUM)
4546 __clear_chan_timer(chan);
4547 }
4548 }
4549
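/* Security (authentication/encryption) change callback from HCI: walk all
 * channels on the connection and advance, time out or reject each one
 * according to its state and the reported status.
 */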
4550 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4551 {
4552 struct l2cap_conn *conn = hcon->l2cap_data;
4553 struct l2cap_chan *chan;
4554
4555 if (!conn)
4556 return 0;
4557
4558 BT_DBG("conn %p", conn);
4559
4560 if (hcon->type == LE_LINK) {
4561 smp_distribute_keys(conn, 0);
4562 cancel_delayed_work(&conn->security_timer);
4563 }
4564
4565 mutex_lock(&conn->chan_lock);
4566
4567 list_for_each_entry(chan, &conn->chan_l, list) {
4568 l2cap_chan_lock(chan);
4569
4570 BT_DBG("chan->scid %d", chan->scid);
4571
4572 if (chan->scid == L2CAP_CID_LE_DATA) {
4573 if (!status && encrypt) {
4574 chan->sec_level = hcon->sec_level;
4575 l2cap_chan_ready(chan);
4576 }
4577
4578 l2cap_chan_unlock(chan);
4579 continue;
4580 }
4581
4582 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4583 l2cap_chan_unlock(chan);
4584 continue;
4585 }
4586
4587 if (!status && (chan->state == BT_CONNECTED ||
4588 chan->state == BT_CONFIG)) {
4589 l2cap_check_encryption(chan, encrypt);
4590 l2cap_chan_unlock(chan);
4591 continue;
4592 }
4593
4594 if (chan->state == BT_CONNECT) {
4595 if (!status) {
4596 l2cap_send_conn_req(chan);
4597 } else {
4598 __clear_chan_timer(chan);
4599 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4600 }
4601 } else if (chan->state == BT_CONNECT2) {
4602 struct sock *sk = chan->sk;
4603 struct l2cap_conn_rsp rsp;
4604 __u16 res, stat;
4605
4606 lock_sock(sk);
4607
4608 if (!status) {
4609 if (bt_sk(sk)->defer_setup) {
4610 struct sock *parent = bt_sk(sk)->parent;
4611 res = L2CAP_CR_PEND;
4612 stat = L2CAP_CS_AUTHOR_PEND;
4613 if (parent)
4614 parent->sk_data_ready(parent, 0);
4615 } else {
4616 __l2cap_state_change(chan, BT_CONFIG);
4617 res = L2CAP_CR_SUCCESS;
4618 stat = L2CAP_CS_NO_INFO;
4619 }
4620 } else {
4621 __l2cap_state_change(chan, BT_DISCONN);
4622 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4623 res = L2CAP_CR_SEC_BLOCK;
4624 stat = L2CAP_CS_NO_INFO;
4625 }
4626
4627 release_sock(sk);
4628
4629 rsp.scid = cpu_to_le16(chan->dcid);
4630 rsp.dcid = cpu_to_le16(chan->scid);
4631 rsp.result = cpu_to_le16(res);
4632 rsp.status = cpu_to_le16(stat);
4633 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4634 sizeof(rsp), &rsp);
4635 }
4636
4637 l2cap_chan_unlock(chan);
4638 }
4639
4640 mutex_unlock(&conn->chan_lock);
4641
4642 return 0;
4643 }
4644
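/* Reassemble ACL fragments into complete L2CAP frames. A start fragment
 * carries the Basic L2CAP header used to size the reassembly buffer;
 * continuation fragments are appended until the expected length is reached,
 * at which point the complete frame is passed to l2cap_recv_frame().
 */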
4645 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4646 {
4647 struct l2cap_conn *conn = hcon->l2cap_data;
4648
4649 if (!conn)
4650 conn = l2cap_conn_add(hcon, 0);
4651
4652 if (!conn)
4653 goto drop;
4654
4655 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4656
4657 if (!(flags & ACL_CONT)) {
4658 struct l2cap_hdr *hdr;
4659 struct l2cap_chan *chan;
4660 u16 cid;
4661 int len;
4662
4663 if (conn->rx_len) {
4664 BT_ERR("Unexpected start frame (len %d)", skb->len);
4665 kfree_skb(conn->rx_skb);
4666 conn->rx_skb = NULL;
4667 conn->rx_len = 0;
4668 l2cap_conn_unreliable(conn, ECOMM);
4669 }
4670
4671 /* A start fragment always begins with the Basic L2CAP header */
4672 if (skb->len < L2CAP_HDR_SIZE) {
4673 BT_ERR("Frame is too short (len %d)", skb->len);
4674 l2cap_conn_unreliable(conn, ECOMM);
4675 goto drop;
4676 }
4677
4678 hdr = (struct l2cap_hdr *) skb->data;
4679 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4680 cid = __le16_to_cpu(hdr->cid);
4681
4682 if (len == skb->len) {
4683 /* Complete frame received */
4684 l2cap_recv_frame(conn, skb);
4685 return 0;
4686 }
4687
4688 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4689
4690 if (skb->len > len) {
4691 BT_ERR("Frame is too long (len %d, expected len %d)",
4692 skb->len, len);
4693 l2cap_conn_unreliable(conn, ECOMM);
4694 goto drop;
4695 }
4696
4697 chan = l2cap_get_chan_by_scid(conn, cid);
4698
4699 if (chan && chan->sk) {
4700 struct sock *sk = chan->sk;
4701 lock_sock(sk);
4702
4703 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4704 BT_ERR("Frame exceeding recv MTU (len %d, "
4705 "MTU %d)", len,
4706 chan->imtu);
4707 release_sock(sk);
4708 l2cap_conn_unreliable(conn, ECOMM);
4709 goto drop;
4710 }
4711 release_sock(sk);
4712 }
4713
4714 /* Allocate skb for the complete frame (with header) */
4715 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4716 if (!conn->rx_skb)
4717 goto drop;
4718
4719 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4720 skb->len);
4721 conn->rx_len = len - skb->len;
4722 } else {
4723 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4724
4725 if (!conn->rx_len) {
4726 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4727 l2cap_conn_unreliable(conn, ECOMM);
4728 goto drop;
4729 }
4730
4731 if (skb->len > conn->rx_len) {
4732 BT_ERR("Fragment is too long (len %d, expected %d)",
4733 skb->len, conn->rx_len);
4734 kfree_skb(conn->rx_skb);
4735 conn->rx_skb = NULL;
4736 conn->rx_len = 0;
4737 l2cap_conn_unreliable(conn, ECOMM);
4738 goto drop;
4739 }
4740
4741 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4742 skb->len);
4743 conn->rx_len -= skb->len;
4744
4745 if (!conn->rx_len) {
4746 /* Complete frame received */
4747 l2cap_recv_frame(conn, conn->rx_skb);
4748 conn->rx_skb = NULL;
4749 }
4750 }
4751
4752 drop:
4753 kfree_skb(skb);
4754 return 0;
4755 }
4756
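/* Dump one line per known channel (addresses, state, PSM, CIDs, MTUs,
 * security level and mode) for the "l2cap" debugfs file.
 */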
4757 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4758 {
4759 struct l2cap_chan *c;
4760
4761 read_lock(&chan_list_lock);
4762
4763 list_for_each_entry(c, &chan_list, global_l) {
4764 struct sock *sk = c->sk;
4765
4766 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4767 batostr(&bt_sk(sk)->src),
4768 batostr(&bt_sk(sk)->dst),
4769 c->state, __le16_to_cpu(c->psm),
4770 c->scid, c->dcid, c->imtu, c->omtu,
4771 c->sec_level, c->mode);
4772 }
4773
4774 read_unlock(&chan_list_lock);
4775
4776 return 0;
4777 }
4778
4779 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4780 {
4781 return single_open(file, l2cap_debugfs_show, inode->i_private);
4782 }
4783
4784 static const struct file_operations l2cap_debugfs_fops = {
4785 .open = l2cap_debugfs_open,
4786 .read = seq_read,
4787 .llseek = seq_lseek,
4788 .release = single_release,
4789 };
4790
4791 static struct dentry *l2cap_debugfs;
4792
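/* Register the L2CAP socket interface and, when debugfs is available,
 * create the "l2cap" debug file.
 */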
4793 int __init l2cap_init(void)
4794 {
4795 int err;
4796
4797 err = l2cap_init_sockets();
4798 if (err < 0)
4799 return err;
4800
4801 if (bt_debugfs) {
4802 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4803 bt_debugfs, NULL, &l2cap_debugfs_fops);
4804 if (!l2cap_debugfs)
4805 BT_ERR("Failed to create L2CAP debug file");
4806 }
4807
4808 return 0;
4809 }
4810
4811 void l2cap_exit(void)
4812 {
4813 debugfs_remove(l2cap_debugfs);
4814 l2cap_cleanup_sockets();
4815 }
4816
4817 module_param(disable_ertm, bool, 0644);
4818 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");